Index: head/sys/amd64/linux/syscalls.master
===================================================================
--- head/sys/amd64/linux/syscalls.master	(revision 329872)
+++ head/sys/amd64/linux/syscalls.master	(revision 329873)
@@ -1,601 +1,601 @@
$FreeBSD$
; @(#)syscalls.master	8.1 (Berkeley) 7/19/93
; System call name/number master file (or rather, slave, from LINUX).
; Processed to create linux_sysent.c, linux_proto.h and linux_syscall.h.
; Columns: number audit type nargs name alt{name,tag,rtyp}/comments
;	number	system call number, must be in order
;	audit	the audit event associated with the system call
;		A value of AUE_NULL means no auditing, but it also means that
;		there is no audit event for the call at this time. For the
;		case where the event exists, but we don't want auditing, the
;		event should be #defined to AUE_NULL in audit_kevents.h.
;	type	one of STD, NOPROTO, UNIMPL
-;	name	psuedo-prototype of syscall routine
+;	name	pseudo-prototype of syscall routine
;		If one of the following alts is different, then all appear:
;	altname	name of system call if different
;	alttag	name of args struct tag if different from [o]`name'"_args"
;	altrtyp	return type if not int (bogus - syscalls always return int)
;		for UNIMPL, name continues with comments
; types:
;	STD	always included
;	UNIMPL	not implemented, placeholder only
;	NOPROTO	same as STD except do not create structure or
;		function prototype in sys/sysproto.h. Does add a
;		definition to syscall.h besides adding a sysent.

#include <sys/param.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <compat/linux/linux_sysproto.h>
#include <amd64/linux/linux.h>
#include <amd64/linux/linux_proto.h>

; Isn't pretty, but there seems to be no other way to trap nosys
#define	nosys	linux_nosys

; #ifdef's, etc. may be included, and are copied to the output files.

0	AUE_NULL	NOPROTO	{ int read(int fd, char *buf, \
		u_int nbyte); }
1	AUE_NULL	NOPROTO	{ int write(int fd, char *buf, \
		u_int nbyte); }
2	AUE_OPEN_RWTC	STD	{ int linux_open(char *path, l_int flags, \
		l_int mode); }
3	AUE_CLOSE	NOPROTO	{ int close(int fd); }
4	AUE_STAT	STD	{ int linux_newstat(char *path, \
		struct l_newstat *buf); }
5	AUE_FSTAT	STD	{ int linux_newfstat(l_uint fd, \
		struct l_newstat *buf); }
6	AUE_LSTAT	STD	{ int linux_newlstat(char *path, \
		struct l_newstat *buf); }
7	AUE_POLL	NOPROTO	{ int poll(struct pollfd *fds, u_int nfds, \
		int timeout); }
8	AUE_LSEEK	STD	{ int linux_lseek(l_uint fdes, l_off_t off, \
		l_int whence); }
9	AUE_MMAP	STD	{ int linux_mmap2(l_ulong addr, l_ulong len, \
		l_ulong prot, l_ulong flags, l_ulong fd, \
		l_ulong pgoff); }
10	AUE_MPROTECT	STD	{ int linux_mprotect(caddr_t addr, int len, \
		int prot); }
11	AUE_MUNMAP	NOPROTO	{ int munmap(caddr_t addr, int len); }
12	AUE_NULL	STD	{ int linux_brk(l_ulong dsend); }
13	AUE_NULL	STD	{ int linux_rt_sigaction(l_int sig, \
		l_sigaction_t *act, l_sigaction_t *oact, \
		l_size_t sigsetsize); }
14	AUE_NULL	STD	{ int linux_rt_sigprocmask(l_int how, \
		l_sigset_t *mask, l_sigset_t *omask, \
		l_size_t sigsetsize); }
15	AUE_NULL	STD	{ int linux_rt_sigreturn( \
		struct l_ucontext *ucp); }
16	AUE_IOCTL	STD	{ int linux_ioctl(l_uint fd, l_uint cmd, \
		uintptr_t arg); }
17	AUE_PREAD	STD	{ int linux_pread(l_uint fd, char *buf, \
		l_size_t nbyte, l_loff_t offset); }
18	AUE_PWRITE	STD	{ int linux_pwrite(l_uint fd, char *buf, \
		l_size_t nbyte, l_loff_t offset); }
19	AUE_READV	NOPROTO	{ int readv(int fd, struct iovec *iovp, \
		u_int iovcnt); }
20	AUE_WRITEV	NOPROTO	{ int writev(int fd, struct iovec *iovp, \
		u_int iovcnt); }
21	AUE_ACCESS	STD	{ int linux_access(char *path, l_int amode); }
22	AUE_PIPE	STD	{ int linux_pipe(l_ulong *pipefds); }
23	AUE_SELECT	STD	{ int linux_select(l_int nfds, \
		l_fd_set *readfds, l_fd_set *writefds, \
		l_fd_set *exceptfds, \
		struct l_timeval *timeout); }
24	AUE_NULL	NOPROTO	{ int sched_yield(void); }
25	AUE_NULL	STD	{ int linux_mremap(l_ulong addr, \
		l_ulong old_len, l_ulong new_len, \
		l_ulong flags, l_ulong new_addr); }
26	AUE_MSYNC	STD	{ int linux_msync(l_ulong addr, \
		l_size_t len, l_int fl); }
27	AUE_MINCORE	STD	{ int linux_mincore(l_ulong start, \
		l_size_t len, u_char *vec); }
28	AUE_MADVISE	NOPROTO	{ int madvise(void *addr, size_t len, \
		int behav); }
29	AUE_NULL	STD	{ int linux_shmget(l_key_t key, l_size_t size, \
		l_int shmflg); }
30	AUE_NULL	STD	{ int linux_shmat(l_int shmid, char *shmaddr, \
		l_int shmflg); }
31	AUE_NULL	STD	{ int linux_shmctl(l_int shmid, l_int cmd, \
		struct l_shmid_ds *buf); }
32	AUE_DUP	NOPROTO	{ int dup(u_int fd); }
33	AUE_DUP2	NOPROTO	{ int dup2(u_int from, u_int to); }
34	AUE_NULL	STD	{ int linux_pause(void); }
35	AUE_NULL	STD	{ int linux_nanosleep( \
		const struct l_timespec *rqtp, \
		struct l_timespec *rmtp); }
36	AUE_GETITIMER	STD	{ int linux_getitimer(l_int which, \
		struct l_itimerval *itv); }
37	AUE_NULL	STD	{ int linux_alarm(l_uint secs); }
38	AUE_SETITIMER	STD	{ int linux_setitimer(l_int which, \
		struct l_itimerval *itv, \
		struct l_itimerval *oitv); }
39	AUE_GETPID	STD	{ int linux_getpid(void); }
40	AUE_SENDFILE	STD	{ int linux_sendfile(int out, int in, \
		l_long *offset, l_size_t count); }
41	AUE_SOCKET	STD	{ int linux_socket(l_int domain, l_int type, \
		l_int protocol); }
42	AUE_CONNECT	STD	{ int linux_connect(l_int s, l_uintptr_t name, \
		l_int namelen); }
43	AUE_ACCEPT	STD	{ int linux_accept(l_int s, l_uintptr_t addr, \
		l_uintptr_t namelen); }
44	AUE_SENDTO	STD	{ int linux_sendto(l_int s, l_uintptr_t msg, \
		l_int len, l_int flags, l_uintptr_t to, \
		l_int tolen); }
45	AUE_RECVFROM	STD	{ int linux_recvfrom(l_int s, l_uintptr_t buf, \
		l_size_t len, l_int flags, l_uintptr_t from, \
		l_uintptr_t fromlen); }
46	AUE_SENDMSG	STD	{ int linux_sendmsg(l_int s, l_uintptr_t msg, \
		l_int flags); }
47	AUE_RECVMSG	STD	{ int linux_recvmsg(l_int s, l_uintptr_t msg, \
		l_int flags); }
48	AUE_NULL	STD	{ int linux_shutdown(l_int s, l_int how); }
49	AUE_BIND	STD	{ int linux_bind(l_int s, l_uintptr_t name, \
		l_int namelen); }
50	AUE_LISTEN	STD	{ int linux_listen(l_int s, l_int backlog); }
51	AUE_GETSOCKNAME	STD	{ int linux_getsockname(l_int s, \
		l_uintptr_t addr, l_uintptr_t namelen); }
52	AUE_GETPEERNAME	STD	{ int linux_getpeername(l_int s, \
		l_uintptr_t addr, l_uintptr_t namelen); }
53	AUE_SOCKETPAIR	STD	{ int linux_socketpair(l_int domain, \
		l_int type, l_int protocol, l_uintptr_t rsv); }
54	AUE_SETSOCKOPT	STD	{ int linux_setsockopt(l_int s, l_int level, \
		l_int optname, l_uintptr_t optval, \
		l_int optlen); }
55	AUE_GETSOCKOPT	STD	{ int linux_getsockopt(l_int s, l_int level, \
		l_int optname, l_uintptr_t optval, \
		l_uintptr_t optlen); }
56	AUE_RFORK	STD	{ int linux_clone(l_int flags, void *stack, \
		void *parent_tidptr, void * child_tidptr, void *tls ); }
57	AUE_FORK	STD	{ int linux_fork(void); }
58	AUE_VFORK	STD	{ int linux_vfork(void); }
59	AUE_EXECVE	STD	{ int linux_execve(char *path, char **argp, \
		char **envp); }
60	AUE_EXIT	STD	{ void linux_exit(int rval); }
61	AUE_WAIT4	STD	{ int linux_wait4(l_pid_t pid, \
		l_int *status, l_int options, \
		struct rusage *rusage); }
62	AUE_KILL	STD	{ int linux_kill(l_int pid, l_int signum); }
63	AUE_NULL	STD	{ int linux_newuname( \
		struct l_new_utsname *buf); }
64	AUE_NULL	STD	{ int linux_semget(l_key_t key, \
		l_int nsems, l_int semflg); }
65	AUE_NULL	STD	{ int linux_semop(l_int semid, \
		struct l_sembuf *tsops, l_uint nsops); }
66	AUE_NULL	STD	{ int linux_semctl(l_int semid, \
		l_int semnum, l_int cmd, union l_semun arg); }
67	AUE_NULL	STD	{ int linux_shmdt(char *shmaddr); }
68	AUE_NULL	STD	{ int linux_msgget(l_key_t key, l_int msgflg); }
69	AUE_NULL	STD	{ int linux_msgsnd(l_int msqid, \
		struct l_msgbuf *msgp, l_size_t msgsz, \
		l_int msgflg); }
70	AUE_NULL	STD	{ int linux_msgrcv(l_int msqid, \
		struct l_msgbuf *msgp, l_size_t msgsz, \
		l_long msgtyp, l_int msgflg); }
71	AUE_NULL	STD	{ int linux_msgctl(l_int msqid, l_int cmd, \
		struct l_msqid_ds *buf); }
72	AUE_FCNTL	STD	{ int linux_fcntl(l_uint fd, l_uint cmd, \
		l_ulong arg); }
73	AUE_FLOCK	NOPROTO	{ int flock(int fd, int how); }
74	AUE_FSYNC	NOPROTO	{ int fsync(int fd); }
75	AUE_NULL	STD	{ int linux_fdatasync(l_uint fd); }
76	AUE_TRUNCATE	STD	{ int linux_truncate(char *path, \
		l_ulong length); }
77	AUE_FTRUNCATE	STD	{ int linux_ftruncate(l_int fd, l_long length); }
78	AUE_GETDIRENTRIES	STD	{ int linux_getdents(l_uint fd, void *dent, \
		l_uint count); }
79	AUE_GETCWD	STD	{ int linux_getcwd(char *buf, \
		l_ulong bufsize); }
80	AUE_CHDIR	STD	{ int linux_chdir(char *path); }
81	AUE_FCHDIR	NOPROTO	{ int fchdir(int fd); }
82	AUE_RENAME	STD	{ int linux_rename(char *from, char *to); }
83	AUE_MKDIR	STD	{ int linux_mkdir(char *path, l_int mode); }
84	AUE_RMDIR	STD	{ int linux_rmdir(char *path); }
85	AUE_CREAT	STD	{ int linux_creat(char *path, \
		l_int mode); }
86	AUE_LINK	STD	{ int linux_link(char *path, char *to); }
87	AUE_UNLINK	STD	{ int linux_unlink(char *path); }
88	AUE_SYMLINK	STD	{ int linux_symlink(char *path, char *to); }
89	AUE_READLINK	STD	{ int linux_readlink(char *name, char *buf, \
		l_int count); }
90	AUE_CHMOD	STD	{ int linux_chmod(char *path, \
		l_mode_t mode); }
91	AUE_FCHMOD	NOPROTO	{ int fchmod(int fd, int mode); }
92	AUE_LCHOWN	STD	{ int linux_chown(char *path, \
		l_uid_t uid, l_gid_t gid); }
93	AUE_FCHOWN	NOPROTO	{ int fchown(int fd, int uid, int gid); }
94	AUE_LCHOWN	STD	{ int linux_lchown(char *path, l_uid_t uid, \
		l_gid_t gid); }
95	AUE_UMASK	NOPROTO	{ int umask(int newmask); }
96	AUE_NULL	NOPROTO	{ int gettimeofday(struct l_timeval *tp, \
		struct timezone *tzp); }
97	AUE_GETRLIMIT	STD	{ int linux_getrlimit(l_uint resource, \
		struct l_rlimit *rlim); }
98	AUE_GETRUSAGE	NOPROTO	{ int getrusage(int who, struct rusage *rusage); }
99	AUE_NULL	STD	{ int linux_sysinfo(struct l_sysinfo *info); }
100	AUE_NULL	STD	{ int linux_times(struct l_times_argv *buf); }
101	AUE_PTRACE	STD	{ int linux_ptrace(l_long req, l_long pid, \
		l_ulong addr, l_ulong data); }
102	AUE_GETUID	STD	{ int linux_getuid(void); }
103	AUE_NULL	STD	{ int linux_syslog(l_int type, char *buf, \
		l_int len); }
104	AUE_GETGID	STD	{ int linux_getgid(void); }
105	AUE_SETUID	NOPROTO	{ int setuid(uid_t uid); }
106	AUE_SETGID	NOPROTO	{ int setgid(gid_t gid); }
107	AUE_GETEUID	NOPROTO	{ int geteuid(void); }
108	AUE_GETEGID	NOPROTO	{ int getegid(void); }
109	AUE_SETPGRP	NOPROTO	{ int setpgid(int pid, int pgid); }
110	AUE_GETPPID	STD	{ int linux_getppid(void); }
111	AUE_GETPGRP	NOPROTO	{ int getpgrp(void); }
112	AUE_SETSID	NOPROTO	{ int setsid(void); }
113	AUE_SETREUID	NOPROTO	{ int setreuid(uid_t ruid, uid_t euid); }
114	AUE_SETREGID	NOPROTO	{ int setregid(gid_t rgid, gid_t egid); }
115	AUE_GETGROUPS	STD	{ int linux_getgroups(l_int gidsetsize, \
		l_gid_t *grouplist); }
116	AUE_SETGROUPS	STD	{ int linux_setgroups(l_int gidsetsize, \
		l_gid_t *grouplist); }
117	AUE_SETRESUID	NOPROTO	{ int setresuid(uid_t ruid, uid_t euid, \
		uid_t suid); }
118	AUE_GETRESUID	NOPROTO	{ int getresuid(uid_t *ruid, uid_t *euid, \
		uid_t *suid); }
119	AUE_SETRESGID	NOPROTO	{ int setresgid(gid_t rgid, gid_t egid, \
		gid_t sgid); }
120	AUE_GETRESGID	NOPROTO	{ int getresgid(gid_t *rgid, gid_t *egid, \
		gid_t *sgid); }
121	AUE_GETPGID	NOPROTO	{ int getpgid(int pid); }
122	AUE_SETFSUID	STD	{ int linux_setfsuid(l_uid_t uid); }
123	AUE_SETFSGID	STD	{ int linux_setfsgid(l_gid_t gid); }
124	AUE_GETSID	STD	{ int linux_getsid(l_pid_t pid); }
125	AUE_CAPGET	STD	{ int linux_capget(struct l_user_cap_header *hdrp, \
		struct l_user_cap_data *datap); }
126	AUE_CAPSET	STD	{ int linux_capset(struct l_user_cap_header *hdrp, \
		struct l_user_cap_data *datap); }
127	AUE_NULL	STD	{ int linux_rt_sigpending(l_sigset_t *set, \
		l_size_t sigsetsize); }
128	AUE_NULL	STD	{ int linux_rt_sigtimedwait(l_sigset_t *mask, \
		l_siginfo_t *ptr, \
		struct l_timeval *timeout, \
		l_size_t sigsetsize); }
129	AUE_NULL	STD	{ int linux_rt_sigqueueinfo(l_pid_t pid, l_int sig, \
		l_siginfo_t *info); }
130	AUE_NULL	STD	{ int linux_rt_sigsuspend( \
		l_sigset_t *newset, \
		l_size_t sigsetsize); }
131	AUE_NULL	STD	{ int linux_sigaltstack(l_stack_t *uss, \
		l_stack_t *uoss); }
132	AUE_UTIME	STD	{ int linux_utime(char *fname, \
		struct l_utimbuf *times); }
133	AUE_MKNOD	STD	{ int linux_mknod(char *path, l_int mode, \
		l_dev_t dev); }
134	AUE_USELIB	UNIMPL	uselib
135	AUE_PERSONALITY	STD	{ int linux_personality(l_uint per); }
136	AUE_NULL	STD	{ int linux_ustat(l_dev_t dev, \
		struct l_ustat *ubuf); }
137	AUE_STATFS	STD	{ int linux_statfs(char *path, \
		struct l_statfs_buf *buf); }
138	AUE_FSTATFS	STD	{ int linux_fstatfs(l_uint fd, \
		struct l_statfs_buf *buf); }
139	AUE_NULL	STD	{ int linux_sysfs(l_int option, \
		l_ulong arg1, l_ulong arg2); }
140	AUE_GETPRIORITY	STD	{ int linux_getpriority(int which, int who); }
141	AUE_SETPRIORITY	NOPROTO	{ int setpriority(int which, int who, \
		int prio); }
142	AUE_SCHED_SETPARAM	STD	{ int linux_sched_setparam(l_pid_t pid, \
		struct sched_param *param); }
143	AUE_SCHED_GETPARAM	STD	{ int linux_sched_getparam(l_pid_t pid, \
		struct sched_param *param); }
144	AUE_SCHED_SETSCHEDULER	STD	{ int linux_sched_setscheduler( \
		l_pid_t pid, l_int policy, \
		struct sched_param *param); }
145	AUE_SCHED_GETSCHEDULER	STD	{ int linux_sched_getscheduler( \
		l_pid_t pid); }
146	AUE_SCHED_GET_PRIORITY_MAX	STD	{ int linux_sched_get_priority_max( \
		l_int policy); }
147	AUE_SCHED_GET_PRIORITY_MIN	STD	{ int linux_sched_get_priority_min( \
		l_int policy); }
148	AUE_SCHED_RR_GET_INTERVAL	STD	{ int linux_sched_rr_get_interval(l_pid_t pid, \
		struct l_timespec *interval); }
149	AUE_MLOCK	NOPROTO	{ int mlock(const void *addr, size_t len); }
150	AUE_MUNLOCK	NOPROTO	{ int munlock(const void *addr, size_t len); }
151	AUE_MLOCKALL	NOPROTO	{ int mlockall(int how); }
152	AUE_MUNLOCKALL	NOPROTO	{ int munlockall(void); }
153	AUE_NULL	STD	{ int linux_vhangup(void); }
154	AUE_NULL	UNIMPL	modify_ldt
155	AUE_PIVOT_ROOT	STD	{ int linux_pivot_root(void); }
156	AUE_SYSCTL	STD	{ int linux_sysctl( \
		struct l___sysctl_args *args); }
157	AUE_PRCTL	STD	{ int linux_prctl(l_int option, l_uintptr_t arg2, \
		l_uintptr_t arg3, l_uintptr_t arg4, \
		l_uintptr_t arg5); }
158	AUE_PRCTL	STD	{ int linux_arch_prctl(l_int code, l_ulong addr); }
159	AUE_ADJTIME	STD	{ int linux_adjtimex(void); }
160	AUE_SETRLIMIT	STD	{ int linux_setrlimit(l_uint resource, \
		struct l_rlimit *rlim); }
161	AUE_CHROOT	NOPROTO	{ int chroot(char *path); }
162	AUE_SYNC	NOPROTO	{ int sync(void); }
163	AUE_ACCT	NOPROTO	{ int acct(char *path); }
164	AUE_SETTIMEOFDAY	NOPROTO	{ int settimeofday(struct l_timeval *tv, struct timezone *tzp); }
165	AUE_MOUNT	STD	{ int linux_mount(char *specialfile, \
		char *dir, char *filesystemtype, \
		l_ulong rwflag, void *data); }
166	AUE_UMOUNT	STD	{ int linux_umount(char *path, l_int flags); }
167	AUE_SWAPON	NOPROTO	{ int swapon(char *name); }
168	AUE_SWAPOFF	STD	{ int linux_swapoff(void); }
169	AUE_REBOOT	STD	{ int linux_reboot(l_int magic1, \
		l_int magic2, l_uint cmd, void *arg); }
170	AUE_SYSCTL	STD	{ int linux_sethostname(char *hostname, \
		l_uint len); }
171	AUE_SYSCTL	STD	{ int linux_setdomainname(char *name, \
		l_int len); }
172	AUE_NULL	STD	{ int linux_iopl(l_uint level); }
173	AUE_NULL	UNIMPL	ioperm
174	AUE_NULL	UNIMPL	create_module
175	AUE_NULL	STD	{ int linux_init_module(void); }
176	AUE_NULL	STD	{ int linux_delete_module(void); }
177	AUE_NULL	UNIMPL	get_kernel_syms
178	AUE_NULL	UNIMPL	query_module
179	AUE_QUOTACTL	STD	{ int linux_quotactl(void); }
180	AUE_NULL	UNIMPL	nfsservctl
181	AUE_GETPMSG	UNIMPL	getpmsg
182	AUE_PUTPMSG	UNIMPL	putpmsg
183	AUE_NULL	UNIMPL	afs_syscall
184	AUE_NULL	UNIMPL	tuxcall
185	AUE_NULL	UNIMPL	security
186	AUE_NULL	STD	{ int linux_gettid(void); }
187	AUE_NULL	UNIMPL	linux_readahead
188	AUE_NULL	STD	{ int linux_setxattr(void); }
189	AUE_NULL	STD	{ int linux_lsetxattr(void); }
190	AUE_NULL	STD	{ int linux_fsetxattr(void); }
191	AUE_NULL	STD	{ int linux_getxattr(void); }
192	AUE_NULL	STD	{ int linux_lgetxattr(void); }
193	AUE_NULL	STD	{ int linux_fgetxattr(void); }
194	AUE_NULL	STD	{ int linux_listxattr(void); }
195	AUE_NULL	STD	{ int linux_llistxattr(void); }
196	AUE_NULL	STD	{ int linux_flistxattr(void); }
197	AUE_NULL	STD	{ int linux_removexattr(void); }
198	AUE_NULL	STD	{ int linux_lremovexattr(void); }
199	AUE_NULL	STD	{ int linux_fremovexattr(void); }
200	AUE_NULL	STD	{ int linux_tkill(int tid, int sig); }
201	AUE_NULL	STD	{ int linux_time(l_time_t *tm); }
202	AUE_NULL	STD	{ int linux_sys_futex(void *uaddr, int op, int val, \
		struct l_timespec *timeout, void *uaddr2, int val3); }
203	AUE_NULL	STD	{ int linux_sched_setaffinity(l_pid_t pid, l_uint len, \
		l_ulong *user_mask_ptr); }
204	AUE_NULL	STD	{ int linux_sched_getaffinity(l_pid_t pid, l_uint len, \
		l_ulong *user_mask_ptr); }
205	AUE_NULL	UNIMPL	set_thread_area
206	AUE_NULL	UNIMPL	linux_io_setup
207	AUE_NULL	UNIMPL	linux_io_destroy
208	AUE_NULL	UNIMPL	linux_io_getevents
209	AUE_NULL	UNIMPL	linux_io_submit
210	AUE_NULL	UNIMPL	linux_io_cancel
211	AUE_NULL	UNIMPL	get_thread_area
212	AUE_NULL	STD	{ int linux_lookup_dcookie(void); }
213	AUE_NULL	STD	{ int linux_epoll_create(l_int size); }
214	AUE_NULL	UNIMPL	epoll_ctl_old
215	AUE_NULL	UNIMPL	epoll_wait_old
216	AUE_NULL	STD	{ int linux_remap_file_pages(void); }
217	AUE_GETDIRENTRIES	STD	{ int linux_getdents64(l_uint fd, \
		void *dirent, l_uint count); }
218	AUE_NULL	STD	{ int linux_set_tid_address(int *tidptr); }
219	AUE_NULL	UNIMPL	restart_syscall
220	AUE_NULL	STD	{ int linux_semtimedop(void); }
221	AUE_NULL	STD	{ int linux_fadvise64(int fd, l_loff_t offset, \
		l_size_t len, int advice); }
222	AUE_NULL	STD	{ int linux_timer_create(clockid_t clock_id, \
		struct sigevent *evp, l_timer_t *timerid); }
223	AUE_NULL	STD	{ int linux_timer_settime(l_timer_t timerid, l_int flags, \
		const struct itimerspec *new, struct itimerspec *old); }
224	AUE_NULL	STD	{ int linux_timer_gettime(l_timer_t timerid, struct itimerspec *setting); }
225	AUE_NULL	STD	{ int linux_timer_getoverrun(l_timer_t timerid); }
226	AUE_NULL	STD	{ int linux_timer_delete(l_timer_t timerid); }
227	AUE_CLOCK_SETTIME	STD	{ int linux_clock_settime(clockid_t which, struct l_timespec *tp); }
228	AUE_NULL	STD	{ int linux_clock_gettime(clockid_t which, struct l_timespec *tp); }
229	AUE_NULL	STD	{ int linux_clock_getres(clockid_t which, struct l_timespec *tp); }
230	AUE_NULL	STD	{ int linux_clock_nanosleep(clockid_t which, int flags, \
		struct l_timespec *rqtp, struct l_timespec *rmtp); }
231	AUE_EXIT	STD	{ int linux_exit_group(int error_code); }
232	AUE_NULL	STD	{ int linux_epoll_wait(l_int epfd, struct epoll_event *events, \
		l_int maxevents, l_int timeout); }
233	AUE_NULL	STD	{ int linux_epoll_ctl(l_int epfd, l_int op, l_int fd, \
		struct epoll_event *event); }
234	AUE_NULL	STD	{ int linux_tgkill(int tgid, int pid, int sig); }
235	AUE_UTIMES	STD	{ int linux_utimes(char *fname, \
		struct l_timeval *tptr); }
236	AUE_NULL	UNIMPL	vserver
237	AUE_NULL	STD	{ int linux_mbind(void); }
238	AUE_NULL	STD	{ int linux_set_mempolicy(void); }
239	AUE_NULL	STD	{ int linux_get_mempolicy(void); }
240	AUE_NULL	STD	{ int linux_mq_open(void); }
241	AUE_NULL	STD	{ int linux_mq_unlink(void); }
242	AUE_NULL	STD	{ int linux_mq_timedsend(void); }
243	AUE_NULL	STD	{ int linux_mq_timedreceive(void); }
244	AUE_NULL	STD	{ int linux_mq_notify(void); }
245	AUE_NULL	STD	{ int linux_mq_getsetattr(void); }
246	AUE_NULL	STD	{ int linux_kexec_load(void); }
247	AUE_WAIT6	STD	{ int linux_waitid(int idtype, l_pid_t id, \
		l_siginfo_t *info, int options, \
		struct rusage *rusage); }
248	AUE_NULL	STD	{ int linux_add_key(void); }
249	AUE_NULL	STD	{ int linux_request_key(void); }
250	AUE_NULL	STD	{ int linux_keyctl(void); }
251	AUE_NULL	STD	{ int linux_ioprio_set(void); }
252	AUE_NULL	STD	{ int linux_ioprio_get(void); }
253	AUE_NULL	STD	{ int linux_inotify_init(void); }
254	AUE_NULL	STD	{ int linux_inotify_add_watch(void); }
255	AUE_NULL	STD	{ int linux_inotify_rm_watch(void); }
256	AUE_NULL	STD	{ int linux_migrate_pages(void); }
257	AUE_OPEN_RWTC	STD	{ int linux_openat(l_int dfd, const char *filename, \
		l_int flags, l_int mode); }
258	AUE_MKDIRAT	STD	{ int linux_mkdirat(l_int dfd, const char *pathname, \
		l_int mode); }
259	AUE_MKNODAT	STD	{ int linux_mknodat(l_int dfd, const char *filename, \
		l_int mode, l_uint dev); }
260	AUE_FCHOWNAT	STD	{ int linux_fchownat(l_int dfd, const char *filename, \
		l_uid_t uid, l_gid_t gid, l_int flag); }
261	AUE_FUTIMESAT	STD	{ int linux_futimesat(l_int dfd, char *filename, \
		struct l_timeval *utimes); }
262	AUE_FSTATAT	STD	{ int linux_newfstatat(l_int dfd, char *pathname, \
		struct l_stat64 *statbuf, l_int flag); }
263	AUE_UNLINKAT	STD	{ int linux_unlinkat(l_int dfd, const char *pathname, \
		l_int flag); }
264	AUE_RENAMEAT	STD	{ int linux_renameat(l_int olddfd, const char *oldname, \
		l_int newdfd, const char *newname); }
265	AUE_LINKAT	STD	{ int linux_linkat(l_int olddfd, const char *oldname, \
		l_int newdfd, const char *newname, l_int flag); }
266	AUE_SYMLINKAT	STD	{ int linux_symlinkat(const char *oldname, l_int newdfd, \
		const char *newname); }
267	AUE_READLINKAT	STD	{ int linux_readlinkat(l_int dfd, const char *path, \
		char *buf, l_int bufsiz); }
268	AUE_FCHMODAT	STD	{ int linux_fchmodat(l_int dfd, const char *filename, \
		l_mode_t mode); }
269	AUE_FACCESSAT	STD	{ int linux_faccessat(l_int dfd, const char *filename, \
		l_int amode); }
270	AUE_SELECT	STD	{ int linux_pselect6(l_int nfds, \
		l_fd_set *readfds, l_fd_set *writefds, l_fd_set *exceptfds, \
		struct l_timespec *tsp, l_uintptr_t *sig); }
271	AUE_POLL	STD	{ int linux_ppoll(struct pollfd *fds, uint32_t nfds, \
		struct l_timespec *tsp, l_sigset_t *sset, l_size_t ssize); }
272	AUE_NULL	STD	{ int linux_unshare(void); }
273	AUE_NULL	STD	{ int linux_set_robust_list(struct linux_robust_list_head *head, \
		l_size_t len); }
274	AUE_NULL	STD	{ int linux_get_robust_list(l_int pid, \
		struct linux_robust_list_head **head, l_size_t *len); }
275	AUE_NULL	STD	{ int linux_splice(void); }
276	AUE_NULL	STD	{ int linux_tee(void); }
277	AUE_NULL	STD	{ int linux_sync_file_range(void); }
278	AUE_NULL	STD	{ int linux_vmsplice(void); }
279	AUE_NULL	STD	{ int linux_move_pages(void); }
280	AUE_FUTIMESAT	STD	{ int linux_utimensat(l_int dfd, const char *pathname, \
		const struct l_timespec *times, l_int flags); }
281	AUE_NULL	STD	{ int linux_epoll_pwait(l_int epfd, struct epoll_event *events, \
		l_int maxevents, l_int timeout, l_sigset_t *mask, \
		l_size_t sigsetsize); }
282	AUE_NULL	STD	{ int linux_signalfd(void); }
283	AUE_NULL	STD	{ int linux_timerfd_create(l_int clockid, l_int flags); }
284	AUE_NULL	STD	{ int linux_eventfd(l_uint initval); }
285	AUE_NULL	STD	{ int linux_fallocate(l_int fd, l_int mode, \
		l_loff_t offset, l_loff_t len); }
286	AUE_NULL	STD	{ int linux_timerfd_settime(l_int fd, l_int flags, \
		const struct l_itimerspec *new_value, \
		struct l_itimerspec *old_value); }
287	AUE_NULL	STD	{ int linux_timerfd_gettime(l_int fd, \
		struct l_itimerspec *old_value); }
288	AUE_ACCEPT	STD	{ int linux_accept4(l_int s, l_uintptr_t addr, \
		l_uintptr_t namelen, int flags); }
; Linux 2.6.27:
289	AUE_NULL	STD	{ int linux_signalfd4(void); }
290	AUE_NULL	STD	{ int linux_eventfd2(l_uint initval, l_int flags); }
291	AUE_NULL	STD	{ int linux_epoll_create1(l_int flags); }
292	AUE_NULL	STD	{ int linux_dup3(l_int oldfd, \
		l_int newfd, l_int flags); }
293	AUE_NULL	STD	{ int linux_pipe2(l_int *pipefds, l_int flags); }
294	AUE_NULL	STD	{ int linux_inotify_init1(l_int flags); }
; Linux 2.6.30:
295	AUE_NULL	STD	{ int linux_preadv(l_ulong fd, \
		struct iovec *vec, l_ulong vlen, \
		l_ulong pos_l, l_ulong pos_h); }
296	AUE_NULL	STD	{ int linux_pwritev(l_ulong fd, \
		struct iovec *vec, l_ulong vlen, \
		l_ulong pos_l, l_ulong pos_h); }
; Linux 2.6.31:
297	AUE_NULL	STD	{ int linux_rt_tgsigqueueinfo(l_pid_t tgid, \
		l_pid_t tid, l_int sig, l_siginfo_t *uinfo); }
298	AUE_NULL	STD	{ int linux_perf_event_open(void); }
; Linux 2.6.33:
299	AUE_NULL	STD	{ int linux_recvmmsg(l_int s, \
		struct l_mmsghdr *msg, l_uint vlen, \
		l_uint flags, struct l_timespec *timeout); }
; Linux 2.6.37:
300	AUE_NULL	STD	{ int linux_fanotify_init(void); }
301	AUE_NULL	STD	{ int linux_fanotify_mark(void); }
; Linux 2.6.36:
302	AUE_NULL	STD	{ int linux_prlimit64(l_pid_t pid, l_uint resource, \
		struct rlimit *new, struct rlimit *old); }
; Linux 2.6.39 (glibc 2.14):
303	AUE_NULL	STD	{ int linux_name_to_handle_at(void); }
304	AUE_NULL	STD	{ int linux_open_by_handle_at(void); }
305	AUE_NULL	STD	{ int linux_clock_adjtime(void); }
306	AUE_SYNC	STD	{ int linux_syncfs(l_int fd); }
; Linux 3.0 (glibc 2.14):
307	AUE_NULL	STD	{ int linux_sendmmsg(l_int s, \
		struct l_mmsghdr *msg, l_uint vlen, \
		l_uint flags); }
308	AUE_NULL	STD	{ int linux_setns(l_int fd, l_int nstype); }
; Linux 2.6.19 (no glibc wrapper):
309	AUE_NULL	STD	{ int linux_getcpu(l_uint *cpu, l_uint *node, \
		void *cache); }
; Linux 3.2 (glibc 2.15):
310	AUE_NULL	STD	{ int linux_process_vm_readv(l_pid_t pid, \
		const struct iovec *lvec, l_ulong liovcnt, \
		const struct iovec *rvec, l_ulong riovcnt, \
		l_ulong flags); }
311	AUE_NULL	STD	{ int linux_process_vm_writev(l_pid_t pid, \
		const struct iovec *lvec, l_ulong liovcnt, \
		const struct iovec *rvec, l_ulong riovcnt, \
		l_ulong flags); }
; Linux 3.5 (no glibc wrapper):
312	AUE_NULL	STD	{ int linux_kcmp(l_pid_t pid1, l_pid_t pid2, \
		l_int type, l_ulong idx1, l_ulong idx); }
; Linux 3.8 (no glibc wrapper):
313	AUE_NULL	STD	{ int linux_finit_module(l_int fd, \
		const char *uargs, l_int flags); }
; Linux 3.14:
314	AUE_NULL	STD	{ int linux_sched_setattr(l_pid_t pid, \
		void *attr, l_uint flags); }
315	AUE_NULL	STD	{ int linux_sched_getattr(l_pid_t pid, \
		void *attr, l_uint size, l_uint flags); }
; Linux 3.15:
316	AUE_NULL	STD	{ int linux_renameat2(l_int oldfd, \
		const char *oldname, l_int newfd, \
		const char *newname, unsigned int flags); }
; Linux 3.17:
317	AUE_NULL	STD	{ int linux_seccomp(l_uint op, l_uint flags, \
		const char *uargs); }
318	AUE_NULL	STD	{ int linux_getrandom(char *buf, \
		l_size_t count, l_uint flags); }
319	AUE_NULL	STD	{ int linux_memfd_create(const char *uname_ptr, \
		l_uint flags); }
320	AUE_NULL	STD	{ int linux_kexec_file_load(l_int kernel_fd, \
		l_int initrd_fd, l_ulong cmdline_len, \
		const char *cmdline_ptr, l_ulong flags); }
; Linux 3.18:
321	AUE_NULL	STD	{ int linux_bpf(l_int cmd, void *attr, \
		l_uint size); }
; Linux 3.19:
322	AUE_NULL	STD	{ int linux_execveat(l_int dfd, \
		const char *filename, const char **argv, \
		const char **envp, l_int flags); }
; Linux 4.2:
323	AUE_NULL	STD	{ int linux_userfaultfd(l_int flags); }
; Linux 4.3:
324	AUE_NULL	STD	{ int linux_membarrier(l_int cmd, l_int flags); }
; Linux 4.4:
325	AUE_NULL	STD	{ int linux_mlock2(l_ulong start, l_size_t len, \
		l_int flags); }
; Linux 4.5:
326	AUE_NULL	STD	{ int linux_copy_file_range(l_int fd_in, \
		l_loff_t *off_in, l_int fd_out, \
		l_loff_t *off_out, l_size_t len, \
		l_uint flags); }
; Linux 4.6:
327	AUE_NULL	STD	{ int linux_preadv2(l_ulong fd, \
		const struct iovec *vec, l_ulong vlen, \
		l_ulong pos_l, l_ulong pos_h, l_int flags); }
328	AUE_NULL	STD	{ int linux_pwritev2(l_ulong fd, \
		const struct iovec *vec, l_ulong vlen, \
		l_ulong pos_l, l_ulong pos_h, l_int flags); }
; Linux 4.8:
329	AUE_NULL	STD	{ int linux_pkey_mprotect(l_ulong start, \
		l_size_t len, l_ulong prot, l_int pkey); }
330	AUE_NULL	STD	{ int linux_pkey_alloc(l_ulong flags, \
		l_ulong init_val); }
331	AUE_NULL	STD	{ int linux_pkey_free(l_int pkey); }
; please, keep this line at the end.
332	AUE_NULL	UNIMPL	nosys
; vim: syntax=off
Index: head/sys/amd64/linux32/syscalls.master
===================================================================
--- head/sys/amd64/linux32/syscalls.master	(revision 329872)
+++ head/sys/amd64/linux32/syscalls.master	(revision 329873)
@@ -1,693 +1,693 @@
$FreeBSD$
; @(#)syscalls.master	8.1 (Berkeley) 7/19/93
; System call name/number master file (or rather, slave, from LINUX).
; Processed to create linux32_sysent.c, linux32_proto.h and linux32_syscall.h.
; Columns: number audit type nargs name alt{name,tag,rtyp}/comments
;	number	system call number, must be in order
;	audit	the audit event associated with the system call
;		A value of AUE_NULL means no auditing, but it also means that
;		there is no audit event for the call at this time. For the
;		case where the event exists, but we don't want auditing, the
;		event should be #defined to AUE_NULL in audit_kevents.h.
;	type	one of STD, NOPROTO, UNIMPL
-;	name	psuedo-prototype of syscall routine
+;	name	pseudo-prototype of syscall routine
;		If one of the following alts is different, then all appear:
;	altname	name of system call if different
;	alttag	name of args struct tag if different from [o]`name'"_args"
;	altrtyp	return type if not int (bogus - syscalls always return int)
;		for UNIMPL, name continues with comments
; types:
;	STD	always included
;	UNIMPL	not implemented, placeholder only
;	NOPROTO	same as STD except do not create structure or
;		function prototype in sys/sysproto.h. Does add a
;		definition to syscall.h besides adding a sysent.

#include "opt_compat.h"
#include <sys/param.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <compat/linux/linux_sysproto.h>
#include <amd64/linux32/linux.h>
#include <amd64/linux32/linux32_proto.h>

; Isn't pretty, but there seems to be no other way to trap nosys
#define	nosys	linux_nosys

; #ifdef's, etc. may be included, and are copied to the output files.

0	AUE_NULL	UNIMPL	setup
1	AUE_EXIT	STD	{ void linux_exit(int rval); }
2	AUE_FORK	STD	{ int linux_fork(void); }
3	AUE_NULL	NOPROTO	{ int read(int fd, char *buf, \
		u_int nbyte); }
4	AUE_NULL	NOPROTO	{ int write(int fd, char *buf, \
		u_int nbyte); }
5	AUE_OPEN_RWTC	STD	{ int linux_open(char *path, l_int flags, \
		l_int mode); }
6	AUE_CLOSE	NOPROTO	{ int close(int fd); }
7	AUE_WAIT4	STD	{ int linux_waitpid(l_pid_t pid, \
		l_int *status, l_int options); }
8	AUE_CREAT	STD	{ int linux_creat(char *path, \
		l_int mode); }
9	AUE_LINK	STD	{ int linux_link(char *path, char *to); }
10	AUE_UNLINK	STD	{ int linux_unlink(char *path); }
11	AUE_EXECVE	STD	{ int linux_execve(char *path, uint32_t *argp, \
		uint32_t *envp); }
12	AUE_CHDIR	STD	{ int linux_chdir(char *path); }
13	AUE_NULL	STD	{ int linux_time(l_time_t *tm); }
14	AUE_MKNOD	STD	{ int linux_mknod(char *path, l_int mode, \
		l_dev_t dev); }
15	AUE_CHMOD	STD	{ int linux_chmod(char *path, \
		l_mode_t mode); }
16	AUE_LCHOWN	STD	{ int linux_lchown16(char *path, \
		l_uid16_t uid, l_gid16_t gid); }
17	AUE_NULL	UNIMPL	break
18	AUE_STAT	STD	{ int linux_stat(char *path, \
		struct linux_stat *up); }
19	AUE_LSEEK	STD	{ int linux_lseek(l_uint fdes, l_off_t off, \
		l_int whence); }
20	AUE_GETPID	STD	{ int linux_getpid(void); }
21	AUE_MOUNT	STD	{ int linux_mount(char *specialfile, \
		char *dir, char *filesystemtype, \
		l_ulong rwflag, void *data); }
22	AUE_UMOUNT	STD	{ int linux_oldumount(char *path); }
23	AUE_SETUID	STD	{ int linux_setuid16(l_uid16_t uid); }
24	AUE_GETUID	STD	{ int linux_getuid16(void); }
25	AUE_SETTIMEOFDAY	STD	{ int linux_stime(void); }
26	AUE_PTRACE	STD	{ int linux_ptrace(l_long req, l_long pid, \
		l_long addr, l_long data); }
27	AUE_NULL	STD	{ int linux_alarm(l_uint secs); }
28	AUE_FSTAT	UNIMPL	fstat
29	AUE_NULL	STD	{ int linux_pause(void); }
30	AUE_UTIME	STD	{ int linux_utime(char *fname, \
		struct l_utimbuf *times); }
31	AUE_NULL	UNIMPL	stty
32	AUE_NULL	UNIMPL	gtty
33	AUE_ACCESS	STD	{ int linux_access(char *path, l_int amode); }
34	AUE_NICE	STD	{ int linux_nice(l_int inc); }
35	AUE_NULL	UNIMPL	ftime
36	AUE_SYNC	NOPROTO	{ int sync(void); }
37	AUE_KILL	STD	{ int linux_kill(l_int pid, l_int signum); }
38	AUE_RENAME	STD	{ int linux_rename(char *from, char *to); }
39	AUE_MKDIR	STD	{ int linux_mkdir(char *path, l_int mode); }
40	AUE_RMDIR	STD	{ int linux_rmdir(char *path); }
41	AUE_DUP	NOPROTO	{ int dup(u_int fd); }
42	AUE_PIPE	STD	{ int linux_pipe(l_int *pipefds); }
43	AUE_NULL	STD	{ int linux_times(struct l_times_argv *buf); }
44	AUE_NULL	UNIMPL	prof
45	AUE_NULL	STD	{ int linux_brk(l_ulong dsend); }
46	AUE_SETGID	STD	{ int linux_setgid16(l_gid16_t gid); }
47	AUE_GETGID	STD	{ int linux_getgid16(void); }
48	AUE_NULL	STD	{ int linux_signal(l_int sig, \
		l_handler_t handler); }
49	AUE_GETEUID	STD	{ int linux_geteuid16(void); }
50	AUE_GETEGID	STD	{ int linux_getegid16(void); }
51	AUE_ACCT	NOPROTO	{ int acct(char *path); }
52	AUE_UMOUNT	STD	{ int linux_umount(char *path, l_int flags); }
53	AUE_NULL	UNIMPL	lock
54	AUE_IOCTL	STD	{ int linux_ioctl(l_uint fd, l_uint cmd, \
		uintptr_t arg); }
55	AUE_FCNTL	STD	{ int linux_fcntl(l_uint fd, l_uint cmd, \
		uintptr_t arg); }
56	AUE_NULL	UNIMPL	mpx
57	AUE_SETPGRP	NOPROTO	{ int setpgid(int pid, int pgid); }
58	AUE_NULL	UNIMPL	ulimit
59	AUE_NULL	STD	{ int linux_olduname(void); }
60	AUE_UMASK	NOPROTO	{ int umask(int newmask); }
61	AUE_CHROOT	NOPROTO	{ int chroot(char *path); }
62	AUE_NULL	STD	{ int linux_ustat(l_dev_t dev, \
		struct l_ustat *ubuf); }
63	AUE_DUP2	NOPROTO	{ int dup2(u_int from, u_int to); }
64	AUE_GETPPID	STD	{ int linux_getppid(void); }
65	AUE_GETPGRP	NOPROTO	{ int getpgrp(void); }
66	AUE_SETSID	NOPROTO	{ int setsid(void); }
67	AUE_NULL	STD	{ int linux_sigaction(l_int sig, \
		l_osigaction_t *nsa, \
		l_osigaction_t *osa); }
68	AUE_NULL	STD	{ int linux_sgetmask(void); }
69	AUE_NULL	STD	{ int linux_ssetmask(l_osigset_t mask); }
70	AUE_SETREUID	STD	{ int linux_setreuid16(l_uid16_t ruid, \
		l_uid16_t euid); }
71	AUE_SETREGID	STD	{ int linux_setregid16(l_gid16_t rgid, \
		l_gid16_t egid); }
72	AUE_NULL	STD	{ int linux_sigsuspend(l_int hist0, \
		l_int hist1, l_osigset_t mask); }
73	AUE_NULL	STD	{ int linux_sigpending(l_osigset_t *mask); }
74	AUE_SYSCTL	STD	{ int linux_sethostname(char *hostname, \
		u_int len); }
75	AUE_SETRLIMIT	STD	{ int linux_setrlimit(l_uint resource, \
		struct l_rlimit *rlim); }
76	AUE_GETRLIMIT	STD	{ int linux_old_getrlimit(l_uint resource, \
		struct l_rlimit *rlim); }
77	AUE_GETRUSAGE	STD	{ int linux_getrusage(int who, \
		struct l_rusage *rusage); }
78	AUE_NULL	STD	{ int linux_gettimeofday( \
		struct l_timeval *tp, \
		struct timezone *tzp); }
79	AUE_SETTIMEOFDAY	STD	{ int linux_settimeofday( \
		struct l_timeval *tp, \
		struct timezone *tzp); }
80	AUE_GETGROUPS	STD	{ int linux_getgroups16(l_uint gidsetsize, \
		l_gid16_t *gidset); }
81	AUE_SETGROUPS	STD	{ int linux_setgroups16(l_uint gidsetsize, \
		l_gid16_t *gidset); }
82	AUE_SELECT	STD	{ int linux_old_select( \
		struct l_old_select_argv *ptr); }
83	AUE_SYMLINK	STD	{ int linux_symlink(char *path, char *to); }
; 84: oldlstat
84	AUE_LSTAT	STD	{ int linux_lstat(char *path, struct linux_lstat *up); }
85	AUE_READLINK	STD	{ int linux_readlink(char *name, char *buf, \
		l_int count); }
86	AUE_USELIB	UNIMPL	linux_uselib
87	AUE_SWAPON	NOPROTO	{ int swapon(char *name); }
88	AUE_REBOOT	STD	{ int linux_reboot(l_int magic1, \
		l_int magic2, l_uint cmd, void *arg); }
; 89: old_readdir
89	AUE_GETDIRENTRIES	STD	{ int linux_readdir(l_uint fd, \
		struct l_dirent *dent, l_uint count); }
; 90: old_mmap
90	AUE_MMAP	STD	{ int linux_mmap(struct l_mmap_argv *ptr); }
91	AUE_MUNMAP	NOPROTO	{ int munmap(caddr_t addr, int len); }
92	AUE_TRUNCATE	STD	{ int linux_truncate(char *path, \
		l_ulong length); }
93	AUE_FTRUNCATE	STD	{ int linux_ftruncate(int fd, long length); }
94	AUE_FCHMOD	NOPROTO	{ int fchmod(int fd, int mode); }
95	AUE_FCHOWN	NOPROTO	{ int fchown(int fd, int uid, int gid); }
96	AUE_GETPRIORITY	STD	{ int linux_getpriority(int which, int who); }
97	AUE_SETPRIORITY	NOPROTO	{ int setpriority(int which, int who, \
		int prio); }
98	AUE_PROFILE	UNIMPL	profil
99	AUE_STATFS	STD	{ int linux_statfs(char *path, \
		struct l_statfs_buf *buf); }
100	AUE_FSTATFS	STD	{ int linux_fstatfs(l_uint fd, \
		struct l_statfs_buf *buf); }
101	AUE_NULL	UNIMPL	ioperm
102	AUE_NULL	STD	{ int linux_socketcall(l_int what, \
		l_ulong args); }
103	AUE_NULL	STD	{ int linux_syslog(l_int type, char *buf, \
		l_int len); }
104	AUE_SETITIMER	STD	{ int linux_setitimer(l_int which, \
		struct l_itimerval *itv, \
		struct l_itimerval *oitv); }
105	AUE_GETITIMER	STD	{ int linux_getitimer(l_int which, \
		struct l_itimerval *itv); }
106	AUE_STAT	STD	{ int linux_newstat(char *path, \
		struct l_newstat *buf); }
107	AUE_LSTAT	STD	{ int linux_newlstat(char *path, \
		struct l_newstat *buf); }
108	AUE_FSTAT	STD	{ int linux_newfstat(l_uint fd, \
		struct l_newstat *buf); }
; 109: olduname
109	AUE_NULL	STD	{ int linux_uname(void); }
110	AUE_NULL	STD	{ int linux_iopl(l_int level); }
111	AUE_NULL	STD	{ int linux_vhangup(void); }
112	AUE_NULL	UNIMPL	idle
113	AUE_NULL	UNIMPL	vm86old
114	AUE_WAIT4	STD	{ int linux_wait4(l_pid_t pid, \
		l_int *status, l_int options, \
		struct l_rusage *rusage); }
115	AUE_SWAPOFF	STD	{ int linux_swapoff(void); }
116	AUE_NULL	STD	{ int linux_sysinfo(struct l_sysinfo *info); }
117	AUE_NULL	STD	{ int linux_ipc(l_uint what, l_int arg1, \
		l_int arg2, l_int arg3, void *ptr, \
		l_long arg5); }
118	AUE_FSYNC	NOPROTO	{ int fsync(int fd); }
119	AUE_SIGRETURN	STD	{ int linux_sigreturn( \
		struct l_sigframe *sfp); }
120	AUE_RFORK	STD	{ int linux_clone(l_int flags, void *stack, \
		void *parent_tidptr, void *tls, void * child_tidptr); }
121	AUE_SYSCTL	STD	{ int linux_setdomainname(char *name, \
		int len); }
122	AUE_NULL	STD	{ int linux_newuname( \
		struct l_new_utsname *buf); }
123	AUE_NULL	UNIMPL	modify_ldt
124	AUE_ADJTIME	STD	{ int linux_adjtimex(void); }
125	AUE_MPROTECT	STD	{ int linux_mprotect(caddr_t addr, int len, \
		int prot); }
126	AUE_SIGPROCMASK	STD	{ int linux_sigprocmask(l_int how, \
		l_osigset_t *mask, l_osigset_t *omask); }
127	AUE_NULL	UNIMPL	create_module
128	AUE_NULL	STD	{ int linux_init_module(void); }
129	AUE_NULL	STD	{ int linux_delete_module(void); }
130	AUE_NULL	UNIMPL	get_kernel_syms
131	AUE_QUOTACTL	STD	{ int linux_quotactl(void); }
132	AUE_GETPGID	NOPROTO	{ int getpgid(int pid); }
133	AUE_FCHDIR	NOPROTO	{ int fchdir(int fd); }
134	AUE_BDFLUSH	STD	{ int linux_bdflush(void); }
135	AUE_NULL	STD	{ int linux_sysfs(l_int option, \
		l_ulong arg1, l_ulong arg2); }
136	AUE_PERSONALITY	STD	{ int linux_personality(l_uint per); }
137	AUE_NULL	UNIMPL	afs_syscall
138	AUE_SETFSUID	STD	{ int linux_setfsuid16(l_uid16_t uid); }
139	AUE_SETFSGID	STD	{ int linux_setfsgid16(l_gid16_t gid); }
140	AUE_LSEEK	STD	{ int linux_llseek(l_int fd, l_ulong ohigh, \
		l_ulong olow, l_loff_t *res, \
		l_uint whence); }
141	AUE_GETDIRENTRIES	STD	{ int linux_getdents(l_uint fd, void *dent, \
		l_uint count); }
; 142: newselect
142	AUE_SELECT	STD	{ int linux_select(l_int nfds, \
		l_fd_set *readfds, l_fd_set *writefds, \
		l_fd_set *exceptfds, \
		struct l_timeval *timeout); }
143	AUE_FLOCK	NOPROTO	{ int flock(int fd, int how); }
144	AUE_MSYNC	STD	{ int linux_msync(l_ulong addr, \
		l_size_t len, l_int fl); }
145	AUE_READV	STD	{ int linux_readv(l_ulong fd, struct l_iovec32 *iovp, \
		l_ulong iovcnt); }
146	AUE_WRITEV	STD	{ int linux_writev(l_ulong fd, struct l_iovec32 *iovp, \
		l_ulong iovcnt); }
147	AUE_GETSID	STD	{ int linux_getsid(l_pid_t pid); }
148	AUE_NULL	STD	{ int linux_fdatasync(l_uint fd); }
149	AUE_SYSCTL	STD	{ int linux_sysctl( \
		struct l___sysctl_args *args); }
150	AUE_MLOCK	NOPROTO	{ int mlock(const void *addr, size_t len); }
151	AUE_MUNLOCK	NOPROTO	{ int munlock(const void *addr, size_t len); }
152	AUE_MLOCKALL	NOPROTO	{ int mlockall(int how); }
153	AUE_MUNLOCKALL	NOPROTO	{ int munlockall(void); }
154	AUE_SCHED_SETPARAM	STD	{ int linux_sched_setparam(l_pid_t pid, \
		struct sched_param *param); }
155	AUE_SCHED_GETPARAM	STD	{ int linux_sched_getparam(l_pid_t pid, \
		struct sched_param *param); }
156	AUE_SCHED_SETSCHEDULER	STD	{ int linux_sched_setscheduler( \
		l_pid_t pid, l_int policy, \
		struct sched_param *param); }
157	AUE_SCHED_GETSCHEDULER	STD	{ int linux_sched_getscheduler( \
		l_pid_t pid); }
158	AUE_NULL	NOPROTO	{ int sched_yield(void); }
159	AUE_SCHED_GET_PRIORITY_MAX	STD	{ int linux_sched_get_priority_max( \
		l_int policy); }
160	AUE_SCHED_GET_PRIORITY_MIN	STD	{ int linux_sched_get_priority_min( \
		l_int policy); }
161	AUE_SCHED_RR_GET_INTERVAL	STD	{ int linux_sched_rr_get_interval(l_pid_t pid, \
		struct l_timespec *interval); }
162	AUE_NULL	STD	{ int linux_nanosleep( \
		const struct l_timespec *rqtp, \
		struct l_timespec *rmtp); }
163	AUE_NULL	STD	{ int linux_mremap(l_ulong addr, \
		l_ulong old_len, l_ulong new_len, \
		l_ulong flags, l_ulong new_addr); }
164	AUE_SETRESUID	STD	{ int linux_setresuid16(l_uid16_t ruid, \
		l_uid16_t euid, l_uid16_t suid); }
165	AUE_GETRESUID	STD	{ int linux_getresuid16(l_uid16_t *ruid, \
		l_uid16_t *euid, l_uid16_t *suid); }
166	AUE_NULL	UNIMPL	vm86
167	AUE_NULL	UNIMPL	query_module
168	AUE_POLL	NOPROTO	{ int poll(struct pollfd *fds, \
		unsigned int nfds, int timeout); }
169	AUE_NULL	UNIMPL	nfsservctl
170	AUE_SETRESGID	STD	{ int linux_setresgid16(l_gid16_t rgid, \
		l_gid16_t egid, l_gid16_t sgid); }
171	AUE_GETRESGID	STD	{ int linux_getresgid16(l_gid16_t *rgid, \
		l_gid16_t *egid, l_gid16_t *sgid); }
172	AUE_PRCTL	STD	{ int linux_prctl(l_int option, l_int arg2, l_int arg3, \
		l_int arg4, l_int arg5); }
173	AUE_NULL	STD	{ int linux_rt_sigreturn( \
		struct l_ucontext *ucp); }
174	AUE_NULL	STD	{ int linux_rt_sigaction(l_int sig, \
		l_sigaction_t *act, l_sigaction_t *oact, \
		l_size_t sigsetsize); }
175	AUE_NULL	STD	{ int linux_rt_sigprocmask(l_int how, \
		l_sigset_t *mask, l_sigset_t *omask, \
		l_size_t sigsetsize); }
176	AUE_NULL	STD	{ int linux_rt_sigpending(l_sigset_t *set, \
		l_size_t sigsetsize); }
177	AUE_NULL	STD	{ int linux_rt_sigtimedwait(l_sigset_t *mask, \
		l_siginfo_t *ptr, \
		struct l_timeval *timeout, \
		l_size_t sigsetsize); }
178	AUE_NULL	STD	{ int linux_rt_sigqueueinfo(l_pid_t pid, l_int sig, \
		l_siginfo_t *info); }
179	AUE_NULL	STD	{ int linux_rt_sigsuspend( \
		l_sigset_t *newset, \
		l_size_t sigsetsize); }
180	AUE_PREAD	STD	{ int linux_pread(l_uint fd, char *buf, \
		l_size_t nbyte, l_loff_t offset); }
181	AUE_PWRITE	STD	{ int linux_pwrite(l_uint fd, char *buf, \
		l_size_t nbyte, l_loff_t offset); }
182	AUE_CHOWN	STD	{ int linux_chown16(char *path, \
		l_uid16_t uid, l_gid16_t gid); }
183	AUE_GETCWD	STD	{ int linux_getcwd(char *buf, \
		l_ulong bufsize); }
184	AUE_CAPGET	STD	{ int linux_capget(struct l_user_cap_header *hdrp, \
		struct l_user_cap_data *datap); }
185	AUE_CAPSET	STD	{ int linux_capset(struct l_user_cap_header *hdrp, \
		struct l_user_cap_data *datap); }
186	AUE_NULL	STD	{ int linux_sigaltstack(l_stack_t *uss, \
		l_stack_t *uoss); }
187	AUE_SENDFILE	STD	{ int linux_sendfile(void); }
188	AUE_GETPMSG	UNIMPL	getpmsg
189	AUE_PUTPMSG	UNIMPL	putpmsg
190	AUE_VFORK	STD	{ int linux_vfork(void); }
; 191: ugetrlimit
191	AUE_GETRLIMIT	STD	{ int linux_getrlimit(l_uint resource, \
		struct l_rlimit *rlim); }
192	AUE_MMAP	STD	{ int linux_mmap2(l_ulong addr, l_ulong len, \
		l_ulong prot, l_ulong flags, l_ulong fd, \
		l_ulong pgoff); }
193	AUE_TRUNCATE	STD	{ int linux_truncate64(char *path, \
		l_loff_t length); }
194	AUE_FTRUNCATE	STD	{ int linux_ftruncate64(l_uint fd, \
		l_loff_t length); }
195	AUE_STAT	STD	{ int linux_stat64(const char *filename, \
		struct l_stat64 *statbuf); }
196	AUE_LSTAT	STD	{ int linux_lstat64(const char *filename, \
		struct l_stat64 *statbuf); }
197	AUE_FSTAT	STD	{ int linux_fstat64(l_int fd, \
		struct l_stat64 *statbuf); }
198	AUE_LCHOWN	STD	{ int linux_lchown(char *path, l_uid_t uid, \
		l_gid_t gid); }
199	AUE_GETUID	STD	{ int linux_getuid(void); }
200	AUE_GETGID	STD	{ int linux_getgid(void); }
201	AUE_GETEUID	NOPROTO	{ int geteuid(void); }
202	AUE_GETEGID	NOPROTO	{ int getegid(void); }
203	AUE_SETREUID	NOPROTO	{ int setreuid(uid_t ruid, uid_t euid); }
204	AUE_SETREGID	NOPROTO	{ int setregid(gid_t rgid, gid_t egid); }
205	AUE_GETGROUPS	STD	{ int linux_getgroups(l_int gidsetsize, \
		l_gid_t *grouplist); }
206	AUE_SETGROUPS	STD	{ int linux_setgroups(l_int gidsetsize, \
		l_gid_t *grouplist); }
207	AUE_FCHOWN	NODEF	fchown fchown fchown_args int
208	AUE_SETRESUID	NOPROTO	{ int setresuid(uid_t ruid, uid_t euid, \
		uid_t suid); }
209	AUE_GETRESUID	NOPROTO	{ int getresuid(uid_t *ruid, uid_t *euid, \
		uid_t *suid); }
210	AUE_SETRESGID	NOPROTO	{ int setresgid(gid_t rgid, gid_t egid, \
		gid_t sgid); }
211	AUE_GETRESGID	NOPROTO	{ int getresgid(gid_t *rgid, gid_t *egid, \
		gid_t *sgid); }
212	AUE_CHOWN	STD	{ int linux_chown(char *path, l_uid_t uid, \
		l_gid_t gid); }
213	AUE_SETUID	NOPROTO	{ int setuid(uid_t uid); }
214	AUE_SETGID	NOPROTO	{ int setgid(gid_t gid); }
215	AUE_SETFSUID	STD	{ int linux_setfsuid(l_uid_t uid); }
216	AUE_SETFSGID	STD	{ int linux_setfsgid(l_gid_t gid); }
217	AUE_PIVOT_ROOT	STD	{ int linux_pivot_root(char *new_root, \
		char *put_old); }
218	AUE_MINCORE	STD	{ int linux_mincore(l_ulong start, \
		l_size_t len, u_char *vec); }
219	AUE_MADVISE	NOPROTO	{ int madvise(void *addr, size_t len, \
		int behav); }
220	AUE_GETDIRENTRIES	STD	{ int linux_getdents64(l_uint fd, \
		void *dirent, l_uint count); }
221	AUE_FCNTL	STD	{ int linux_fcntl64(l_uint fd, l_uint cmd, \
		uintptr_t arg); }
222	AUE_NULL	UNIMPL
223	AUE_NULL	UNIMPL
224	AUE_NULL	STD	{ long linux_gettid(void); }
225	AUE_NULL	UNIMPL	linux_readahead
226	AUE_NULL	STD	{ int linux_setxattr(void); }
227	AUE_NULL	STD	{ int linux_lsetxattr(void); }
228	AUE_NULL	STD	{ int linux_fsetxattr(void); }
229	AUE_NULL	STD	{ int linux_getxattr(void); }
230	AUE_NULL	STD	{ int linux_lgetxattr(void); }
231	AUE_NULL	STD	{ int linux_fgetxattr(void); }
232	AUE_NULL	STD	{ int linux_listxattr(void); }
233	AUE_NULL	STD	{ int linux_llistxattr(void); }
234	AUE_NULL	STD	{ int linux_flistxattr(void); }
235	AUE_NULL	STD	{ int linux_removexattr(void); }
236	AUE_NULL	STD	{ int linux_lremovexattr(void); }
237	AUE_NULL	STD	{ int linux_fremovexattr(void); }
238	AUE_NULL	STD	{ int linux_tkill(int tid, int sig); }
239	AUE_SENDFILE	UNIMPL	linux_sendfile64
240	AUE_NULL	STD	{ int linux_sys_futex(void *uaddr, int op, uint32_t val, \
		struct l_timespec *timeout, uint32_t *uaddr2, uint32_t val3); }
241	AUE_NULL	STD	{ int linux_sched_setaffinity(l_pid_t pid, l_uint len, \
		l_ulong *user_mask_ptr); }
242	AUE_NULL	STD	{ int linux_sched_getaffinity(l_pid_t pid, l_uint len, \
		l_ulong *user_mask_ptr); }
243	AUE_NULL	STD	{ int linux_set_thread_area(struct l_user_desc *desc); }
244	AUE_NULL	UNIMPL	linux_get_thread_area
245	AUE_NULL	UNIMPL	linux_io_setup
246	AUE_NULL	UNIMPL	linux_io_destroy
247	AUE_NULL	UNIMPL	linux_io_getevents
248	AUE_NULL	UNIMPL	linux_io_submit
249	AUE_NULL	UNIMPL	linux_io_cancel
250	AUE_NULL	STD	{ int linux_fadvise64(int fd, l_loff_t offset, \
		l_size_t len, int advice); }
251	AUE_NULL	UNIMPL
252	AUE_EXIT	STD	{ int linux_exit_group(int error_code); }
253	AUE_NULL	STD	{ int linux_lookup_dcookie(void); }
254	AUE_NULL	STD	{ int linux_epoll_create(l_int size); }
255	AUE_NULL	STD	{ int linux_epoll_ctl(l_int epfd, l_int op, l_int fd, \
		struct epoll_event *event); }
256	AUE_NULL	STD	{ int linux_epoll_wait(l_int epfd, struct epoll_event *events, \
		l_int maxevents, l_int timeout); }
257	AUE_NULL	STD	{ int linux_remap_file_pages(void); }
258	AUE_NULL	STD	{ int linux_set_tid_address(int *tidptr); }
259	AUE_NULL	STD	{ int linux_timer_create(clockid_t clock_id, \
		struct sigevent *evp, l_timer_t *timerid); }
260	AUE_NULL	STD	{ int linux_timer_settime(l_timer_t timerid, l_int flags, \
		const struct itimerspec *new, struct itimerspec *old); }
261	AUE_NULL	STD	{ int linux_timer_gettime(l_timer_t timerid, struct itimerspec *setting); }
262	AUE_NULL	STD	{ int linux_timer_getoverrun(l_timer_t timerid); }
263	AUE_NULL	STD	{ int linux_timer_delete(l_timer_t timerid); }
264	AUE_CLOCK_SETTIME	STD	{ int linux_clock_settime(clockid_t which, struct l_timespec *tp); }
265	AUE_NULL	STD	{ int linux_clock_gettime(clockid_t which, struct l_timespec *tp); }
266	AUE_NULL	STD	{ int linux_clock_getres(clockid_t which, struct l_timespec *tp); }
267	AUE_NULL	STD	{ int linux_clock_nanosleep(clockid_t which, int flags, \
		struct l_timespec *rqtp, struct l_timespec *rmtp); }
268	AUE_STATFS	STD	{ int linux_statfs64(char *path, size_t bufsize, struct l_statfs64_buf *buf); }
269	AUE_FSTATFS	STD	{ int linux_fstatfs64(l_uint fd, size_t bufsize, struct l_statfs64_buf *buf); }
270	AUE_NULL	STD	{ int linux_tgkill(int tgid, int pid, int sig); }
271	AUE_UTIMES	STD	{ int linux_utimes(char *fname, \
		struct l_timeval *tptr); }
272	AUE_NULL	STD	{ int linux_fadvise64_64(int fd, \
		l_loff_t offset, l_loff_t len, \
		int advice); }
273	AUE_NULL	UNIMPL	vserver
274	AUE_NULL	STD	{ int linux_mbind(void); }
275	AUE_NULL	STD	{ int linux_get_mempolicy(void); }
276	AUE_NULL	STD	{ int linux_set_mempolicy(void); }
; Linux 2.6.6:
277	AUE_NULL	STD	{ int linux_mq_open(void); }
278	AUE_NULL	STD	{ int linux_mq_unlink(void); }
279	AUE_NULL	STD	{ int linux_mq_timedsend(void); }
280	AUE_NULL	STD	{ int linux_mq_timedreceive(void); }
281	AUE_NULL	STD	{ int linux_mq_notify(void); }
282	AUE_NULL	STD	{ int linux_mq_getsetattr(void); }
283	AUE_NULL	STD	{ int linux_kexec_load(void); }
284	AUE_WAIT6	STD	{ int linux_waitid(int idtype, l_pid_t id, \
		l_siginfo_t *info, int options, \
		struct l_rusage *rusage); }
285	AUE_NULL	UNIMPL
; Linux 2.6.11:
286	AUE_NULL	STD	{ int linux_add_key(void); }
287	AUE_NULL	STD	{ int linux_request_key(void); }
288	AUE_NULL	STD	{ int linux_keyctl(void); }
; Linux 2.6.13:
289	AUE_NULL	STD	{ int linux_ioprio_set(void); }
290	AUE_NULL	STD	{ int linux_ioprio_get(void); }
291	AUE_NULL	STD	{ int linux_inotify_init(void); }
292	AUE_NULL	STD	{ int linux_inotify_add_watch(void); }
293	AUE_NULL	STD	{ int linux_inotify_rm_watch(void); }
; Linux 2.6.16:
294	AUE_NULL	STD	{ int linux_migrate_pages(void); }
295	AUE_OPEN_RWTC	STD	{ int linux_openat(l_int dfd, const char *filename, \
		l_int flags, l_int mode); }
296	AUE_MKDIRAT	STD	{ int linux_mkdirat(l_int dfd, const char *pathname, \
		l_int mode); }
297	AUE_MKNODAT	STD	{ int linux_mknodat(l_int dfd, const char *filename, \
		l_int mode, l_uint dev); }
298	AUE_FCHOWNAT	STD	{ int linux_fchownat(l_int dfd, const char *filename, \
		l_uid16_t uid, l_gid16_t gid, l_int flag); }
299	AUE_FUTIMESAT	STD	{ int linux_futimesat(l_int dfd, char *filename, \
		struct l_timeval *utimes); }
300	AUE_FSTATAT	STD	{ int linux_fstatat64(l_int dfd, char *pathname, \
		struct l_stat64 *statbuf, l_int flag); }
301	AUE_UNLINKAT	STD	{ int linux_unlinkat(l_int dfd, const char *pathname, \
		l_int flag); }
302	AUE_RENAMEAT	STD	{ int linux_renameat(l_int olddfd, const char *oldname, \
		l_int newdfd, const char *newname); }
303	AUE_LINKAT	STD	{ int linux_linkat(l_int olddfd, const char *oldname, \
		l_int newdfd, const char *newname, l_int flag); }
304	AUE_SYMLINKAT	STD	{ int linux_symlinkat(const char *oldname, l_int newdfd, \
		const char *newname); }
305	AUE_READLINKAT	STD	{ int linux_readlinkat(l_int dfd, const char *path, \
		char *buf, l_int bufsiz); }
306	AUE_FCHMODAT	STD	{ int linux_fchmodat(l_int dfd, const char *filename, \
		l_mode_t mode); }
307	AUE_FACCESSAT	STD	{ int linux_faccessat(l_int dfd, const char *filename, \
		l_int amode); }
308	AUE_SELECT	STD	{ int linux_pselect6(l_int nfds, l_fd_set *readfds, \
		l_fd_set *writefds, l_fd_set *exceptfds, \
		struct l_timespec *tsp, l_uintptr_t *sig); }
309	AUE_POLL	STD	{ int linux_ppoll(struct pollfd *fds, uint32_t nfds, \
		struct l_timespec *tsp, l_sigset_t *sset, l_size_t ssize); }
310	AUE_NULL	STD	{ int linux_unshare(void); }
; Linux 2.6.17:
311	AUE_NULL	STD	{ int linux_set_robust_list(struct linux_robust_list_head *head, \
		l_size_t len); }
312	AUE_NULL	STD	{ int linux_get_robust_list(l_int pid, \
		struct linux_robust_list_head **head, l_size_t *len); }
313	AUE_NULL	STD	{ int linux_splice(void); }
314	AUE_NULL	STD	{ int linux_sync_file_range(void); }
315	AUE_NULL	STD	{ int linux_tee(void); }
316	AUE_NULL	STD	{ int linux_vmsplice(void); }
; Linux 2.6.18:
317	AUE_NULL	STD	{ int linux_move_pages(void); }
; Linux 2.6.19:
318	AUE_NULL	STD	{ int linux_getcpu(void); }
319	AUE_NULL	STD	{ int linux_epoll_pwait(l_int epfd, struct epoll_event *events, \
		l_int maxevents, l_int timeout, l_sigset_t *mask, \
		l_size_t sigsetsize); }
; Linux 2.6.22:
320	AUE_FUTIMESAT	STD	{ int linux_utimensat(l_int dfd, const char *pathname, \
		const struct l_timespec *times, l_int flags); }
321	AUE_NULL	STD	{ int linux_signalfd(void); }
322	AUE_NULL	STD	{ int linux_timerfd_create(l_int clockid, l_int flags); }
323	AUE_NULL	STD	{ int linux_eventfd(l_uint initval); }
; Linux 2.6.23:
324	AUE_NULL	STD	{ int linux_fallocate(l_int fd, l_int mode, \
		l_loff_t offset, l_loff_t len); }
; Linux 2.6.25:
325	AUE_NULL	STD	{ int linux_timerfd_settime(l_int fd, l_int flags, \
		const struct l_itimerspec *new_value, \
		struct l_itimerspec *old_value); }
326	AUE_NULL	STD	{ int linux_timerfd_gettime(l_int fd, \
		struct l_itimerspec *old_value); }
; Linux 2.6.27:
327	AUE_NULL	STD	{ int linux_signalfd4(void); }
328	AUE_NULL	STD	{ int linux_eventfd2(l_uint initval, l_int flags); }
329	AUE_NULL	STD	{ int linux_epoll_create1(l_int flags); }
330	AUE_NULL	STD	{ int linux_dup3(l_int oldfd, \
		l_int newfd, l_int flags); }
331	AUE_NULL	STD	{ int linux_pipe2(l_int *pipefds, l_int flags); }
332	AUE_NULL	STD	{ int linux_inotify_init1(void); }
; Linux 2.6.30:
333	AUE_NULL	STD	{ int linux_preadv(l_ulong fd, \
		struct iovec *vec, l_ulong vlen, \
		l_ulong pos_l, l_ulong pos_h); }
334	AUE_NULL	STD	{ int linux_pwritev(l_ulong fd, \
		struct iovec *vec, l_ulong vlen, \
		l_ulong pos_l, l_ulong pos_h); }
; Linux 2.6.31:
335	AUE_NULL	STD	{ int linux_rt_tgsigqueueinfo(l_pid_t tgid, \
		l_pid_t tid, l_int sig, l_siginfo_t *uinfo); }
336	AUE_NULL	STD	{ int linux_perf_event_open(void); }
; Linux 2.6.33:
337	AUE_NULL	STD	{ int linux_recvmmsg(l_int s, \
		struct l_mmsghdr *msg, l_uint vlen, \
		l_uint flags, struct l_timespec *timeout); }
338	AUE_NULL	STD	{ int linux_fanotify_init(void); }
339	AUE_NULL	STD	{ int linux_fanotify_mark(void); }
; Linux 2.6.36:
340	AUE_NULL	STD	{ int linux_prlimit64(l_pid_t pid, \
		l_uint resource, \
		struct rlimit *new, \
		struct rlimit *old); }
; Linux 2.6.39:
341	AUE_NULL	STD	{ int linux_name_to_handle_at(void); }
342	AUE_NULL	STD	{ int linux_open_by_handle_at(void); }
343	AUE_NULL	STD	{ int linux_clock_adjtime(void); }
344	AUE_SYNC	STD	{ int linux_syncfs(l_int fd); }
; Linux 3.0:
345	AUE_NULL	STD	{ int linux_sendmmsg(l_int s, \
		struct l_mmsghdr *msg, l_uint vlen, \
		l_uint flags); }
346	AUE_NULL	STD	{ int linux_setns(void); }
; Linux 3.2 (glibc 2.15):
347	AUE_NULL	STD	{ int linux_process_vm_readv(l_pid_t pid, \
		const struct iovec *lvec, l_ulong liovcnt, \
		const struct iovec *rvec, l_ulong riovcnt, \
		l_ulong flags); }
348	AUE_NULL	STD	{ int linux_process_vm_writev(l_pid_t pid, \
		const struct iovec *lvec, l_ulong liovcnt, \
		const struct iovec *rvec, l_ulong riovcnt, \
		l_ulong flags); }
; Linux 3.5 (no glibc wrapper):
349	AUE_NULL	STD	{ int linux_kcmp(l_pid_t pid1, l_pid_t pid2, \
		l_int type, l_ulong idx1, l_ulong idx); }
; Linux 3.8 (no glibc wrapper):
350	AUE_NULL	STD	{ int linux_finit_module(l_int fd, \
		const char *uargs, l_int flags); }
; Linux 3.14:
351	AUE_NULL	STD	{ int linux_sched_setattr(l_pid_t pid, \
		void *attr, l_uint flags); }
352	AUE_NULL	STD	{ int linux_sched_getattr(l_pid_t pid, \
		void *attr, l_uint size, l_uint flags); }
; Linux 3.15:
353	AUE_NULL	STD	{ int linux_renameat2(l_int oldfd, \
		const char *oldname, l_int newfd, \
		const char *newname, unsigned int flags); }
; Linux 3.17:
354	AUE_NULL	STD	{ int linux_seccomp(l_uint op, l_uint flags, \
		const char *uargs); }
355	AUE_NULL	STD	{ int linux_getrandom(char *buf, \
		l_size_t count, l_uint flags); }
356	AUE_NULL	STD	{ int linux_memfd_create(const char *uname_ptr, \
		l_uint flags); }
; Linux 3.18:
357	AUE_NULL	STD	{ int linux_bpf(l_int cmd, void *attr, \
		l_uint size); }
; Linux 3.19:
358	AUE_NULL	STD	{ int linux_execveat(l_int dfd, \
		const char *filename, const char **argv, \
		const char **envp, l_int flags); }
; Linux 4.3: sockets now direct system calls:
359	AUE_SOCKET	STD	{ int linux_socket(l_int domain, l_int type, \
		l_int protocol); }
360	AUE_SOCKETPAIR	STD	{ int linux_socketpair(l_int domain, \
		l_int type, l_int protocol, l_uintptr_t rsv); }
361	AUE_BIND	STD	{ int linux_bind(l_int s, l_uintptr_t name, \
		l_int namelen); }
362	AUE_CONNECT	STD	{ int linux_connect(l_int s, l_uintptr_t name, \
		l_int namelen); }
363	AUE_LISTEN	STD	{ int linux_listen(l_int s, l_int backlog); }
364	AUE_ACCEPT	STD	{ int linux_accept4(l_int s, l_uintptr_t addr, \
		l_uintptr_t namelen, l_int flags); }
365	AUE_GETSOCKOPT	STD	{ int linux_getsockopt(l_int s, l_int level, \
		l_int optname, l_uintptr_t optval, \
		l_uintptr_t optlen); }
366	AUE_SETSOCKOPT	STD	{ int linux_setsockopt(l_int s, l_int level, \
		l_int optname, l_uintptr_t optval, \
		l_int optlen); }
367	AUE_GETSOCKNAME	STD	{ int linux_getsockname(l_int s, \
		l_uintptr_t addr, l_uintptr_t namelen); }
368	AUE_GETPEERNAME	STD	{ int linux_getpeername(l_int s, \
		l_uintptr_t addr, l_uintptr_t namelen); }
369	AUE_SENDTO	STD	{ int linux_sendto(l_int s, l_uintptr_t msg, \
		l_int len, l_int flags, l_uintptr_t to, \
		l_int tolen); }
370	AUE_SENDMSG	STD	{ int linux_sendmsg(l_int s, l_uintptr_t msg, \
		l_int flags); }
371	AUE_RECVFROM	STD	{ int linux_recvfrom(l_int s, l_uintptr_t buf, \
		l_size_t len, l_int flags, l_uintptr_t from, \
		l_uintptr_t fromlen); }
372	AUE_RECVMSG	STD	{ int linux_recvmsg(l_int s, l_uintptr_t msg, \
		l_int flags); }
373	AUE_NULL	STD	{ int linux_shutdown(l_int s, l_int how); }
;
; Linux 4.2:
374	AUE_NULL	STD	{ int linux_userfaultfd(l_int flags); }
; Linux 4.3:
375	AUE_NULL	STD	{ int linux_membarrier(l_int cmd, l_int flags); }
; Linux 4.4:
376	AUE_NULL	STD	{ int linux_mlock2(l_ulong start, l_size_t len, \
		l_int flags); }
; Linux 4.5:
377	AUE_NULL	STD	{ int linux_copy_file_range(l_int fd_in, \
		l_loff_t *off_in, l_int fd_out, \
		l_loff_t *off_out, l_size_t len, \
		l_uint flags); }
; Linux 4.6:
378	AUE_NULL	STD	{ int linux_preadv2(l_ulong fd, \
		const struct iovec *vec, l_ulong vlen, \
		l_ulong pos_l, l_ulong pos_h, l_int flags); }
379	AUE_NULL	STD	{ int linux_pwritev2(l_ulong fd, \
		const struct iovec *vec, l_ulong vlen, \
		l_ulong pos_l, l_ulong pos_h, l_int flags); }
; Linux 4.8:
380	AUE_NULL	STD	{ int linux_pkey_mprotect(l_ulong start, \
		l_size_t len, l_ulong prot, l_int pkey); }
381	AUE_NULL	STD	{ int linux_pkey_alloc(l_ulong flags, \
		l_ulong init_val); }
382	AUE_NULL	STD	{ int linux_pkey_free(l_int pkey); }
; please, keep this line at the end.
383	AUE_NULL	UNIMPL	nosys
; vim: syntax=off
Index: head/sys/compat/freebsd32/syscalls.master
===================================================================
--- head/sys/compat/freebsd32/syscalls.master	(revision 329872)
+++ head/sys/compat/freebsd32/syscalls.master	(revision 329873)
@@ -1,1127 +1,1127 @@
$FreeBSD$
;	from: @(#)syscalls.master	8.2 (Berkeley) 1/13/94
;	from: src/sys/kern/syscalls.master	1.107
;
; System call name/number master file.
; Processed to created init_sysent.c, syscalls.c and syscall.h.
; Columns: number audit type name alt{name,tag,rtyp}/comments
;	number	system call number, must be in order
;	audit	the audit event associated with the system call
;		A value of AUE_NULL means no auditing, but it also means that
;		there is no audit event for the call at this time. For the
;		case where the event exists, but we don't want auditing, the
;		event should be #defined to AUE_NULL in audit_kevents.h.
;	type	one of STD, OBSOL, UNIMPL, COMPAT, COMPAT4, COMPAT6,
;		COMPAT7, COMPAT11, NODEF, NOARGS, NOPROTO, NOSTD
;		The COMPAT* options may be combined with one or more NO*
;		options separated by '|' with no spaces (e.g. COMPAT|NOARGS)
-;	name	psuedo-prototype of syscall routine
+;	name	pseudo-prototype of syscall routine
;		If one of the following alts is different, then all appear:
;	altname	name of system call if different
;	alttag	name of args struct tag if different from [o]`name'"_args"
;	altrtyp	return type if not int (bogus - syscalls always return int)
;		for UNIMPL/OBSOL, name continues with comments
; types:
;	STD	always included
;	COMPAT	included on COMPAT #ifdef
;	COMPAT4	included on COMPAT4 #ifdef (FreeBSD 4 compat)
;	COMPAT6	included on COMPAT6 #ifdef (FreeBSD 6 compat)
;	COMPAT7	included on COMPAT7 #ifdef (FreeBSD 7 compat)
;	COMPAT10	included on COMPAT10 #ifdef (FreeBSD 10 compat)
;	COMPAT11	included on COMPAT11 #ifdef (FreeBSD 11 compat)
;	OBSOL	obsolete, not included in system, only specifies name
;	UNIMPL	not implemented, placeholder only
;	NOSTD	implemented but as a lkm that can be statically
;		compiled in; sysent entry will be filled with lkmressys
;		so the SYSCALL_MODULE macro works
;	NOARGS	same as STD except do not create structure in sys/sysproto.h
;	NODEF	same as STD except only have the entry in the syscall table
;		added. Meaning - do not create structure or function
;		prototype in sys/sysproto.h
;	NOPROTO	same as STD except do not create structure or
;		function prototype in sys/sysproto.h. Does add a
;		definition to syscall.h besides adding a sysent.

; #ifdef's, etc. may be included, and are copied to the output files.

#include <sys/param.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <compat/freebsd32/freebsd32.h>
#include <compat/freebsd32/freebsd32_proto.h>

#if !defined(PAD64_REQUIRED) && (defined(__powerpc__) || defined(__mips__))
#define PAD64_REQUIRED
#endif

; Reserved/unimplemented system calls in the range 0-150 inclusive
; are reserved for use in future Berkeley releases.
; Additional system calls implemented in vendor and other
; redistributions should be placed in the reserved range at the end
; of the current calls.

0	AUE_NULL	NOPROTO	{ int nosys(void); } syscall nosys_args int
1	AUE_EXIT	NOPROTO	{ void sys_exit(int rval); } exit \
		sys_exit_args void
2	AUE_FORK	NOPROTO	{ int fork(void); }
3	AUE_READ	NOPROTO	{ ssize_t read(int fd, void *buf, \
		size_t nbyte); }
4	AUE_WRITE	NOPROTO	{ ssize_t write(int fd, const void *buf, \
		size_t nbyte); }
5	AUE_OPEN_RWTC	NOPROTO	{ int open(char *path, int flags, \
		int mode); }
6	AUE_CLOSE	NOPROTO	{ int close(int fd); }
7	AUE_WAIT4	STD	{ int freebsd32_wait4(int pid, int *status, \
		int options, struct rusage32 *rusage); }
8	AUE_CREAT	OBSOL	old creat
9	AUE_LINK	NOPROTO	{ int link(char *path, char *link); }
10	AUE_UNLINK	NOPROTO	{ int unlink(char *path); }
11	AUE_NULL	OBSOL	execv
12	AUE_CHDIR	NOPROTO	{ int chdir(char *path); }
13	AUE_FCHDIR	NOPROTO	{ int fchdir(int fd); }
14	AUE_MKNOD	COMPAT11	{ int freebsd32_mknod(char *path, \
		int mode, int dev); }
15	AUE_CHMOD	NOPROTO	{ int chmod(char *path, int mode); }
16	AUE_CHOWN	NOPROTO	{ int chown(char *path, int uid, int gid); }
17	AUE_NULL	NOPROTO	{ int obreak(char *nsize); } break \
		obreak_args int
18	AUE_GETFSSTAT	COMPAT4	{ int freebsd32_getfsstat( \
		struct statfs32 *buf, long bufsize, \
		int mode); }
19	AUE_LSEEK	COMPAT	{ int freebsd32_lseek(int fd, int offset, \
		int whence); }
20	AUE_GETPID	NOPROTO	{ pid_t getpid(void); }
21	AUE_MOUNT	NOPROTO	{ int mount(char *type, char *path, \
		int flags, caddr_t data); }
22	AUE_UMOUNT	NOPROTO	{ int unmount(char *path, int flags); }
23	AUE_SETUID	NOPROTO	{ int setuid(uid_t uid); }
24	AUE_GETUID	NOPROTO	{ uid_t getuid(void); }
25	AUE_GETEUID	NOPROTO	{ uid_t geteuid(void); }
26	AUE_PTRACE	NOPROTO	{ int ptrace(int req, pid_t pid, \
		caddr_t addr, int data); }
27	AUE_RECVMSG	STD	{ int freebsd32_recvmsg(int s, struct msghdr32 *msg, \
		int flags); }
28	AUE_SENDMSG	STD	{ int freebsd32_sendmsg(int s, struct msghdr32 *msg, \
		int flags); }
29	AUE_RECVFROM	STD	{ int freebsd32_recvfrom(int s, uint32_t buf, \
		uint32_t len, int flags, uint32_t from, \
		uint32_t fromlenaddr); }
30	AUE_ACCEPT	NOPROTO	{ int accept(int s, caddr_t name, \
		int *anamelen); }
31	AUE_GETPEERNAME	NOPROTO	{ int getpeername(int fdes, caddr_t asa, \
		int *alen); }
32	AUE_GETSOCKNAME	NOPROTO	{ int getsockname(int fdes, caddr_t asa, \
		int *alen); }
33	AUE_ACCESS	NOPROTO	{ int access(char *path, int amode); }
34	AUE_CHFLAGS	NOPROTO	{ int chflags(const char *path, u_long flags); }
35	AUE_FCHFLAGS	NOPROTO	{ int fchflags(int fd, u_long flags); }
36	AUE_SYNC	NOPROTO	{ int sync(void); }
37	AUE_KILL	NOPROTO	{ int kill(int pid, int signum); }
38	AUE_STAT	COMPAT	{ int freebsd32_stat(char *path, \
		struct ostat32 *ub); }
39	AUE_GETPPID	NOPROTO	{ pid_t getppid(void); }
40	AUE_LSTAT	COMPAT	{ int freebsd32_lstat(char *path, \
		struct ostat *ub); }
41	AUE_DUP	NOPROTO	{ int dup(u_int fd); }
42	AUE_PIPE	COMPAT10	{ int freebsd32_pipe(void); }
43	AUE_GETEGID	NOPROTO	{ gid_t getegid(void); }
44	AUE_PROFILE	NOPROTO	{ int profil(caddr_t samples, size_t size, \
		size_t offset, u_int scale); }
45	AUE_KTRACE	NOPROTO	{ int ktrace(const char *fname, int ops, \
		int facs, int pid); }
46	AUE_SIGACTION	COMPAT	{ int freebsd32_sigaction( int signum, \
		struct osigaction32 *nsa, \
		struct osigaction32 *osa); }
47	AUE_GETGID	NOPROTO	{ gid_t getgid(void); }
48	AUE_SIGPROCMASK	COMPAT	{ int freebsd32_sigprocmask(int how, \
		osigset_t mask); }
49	AUE_GETLOGIN	NOPROTO	{ int getlogin(char *namebuf, \
		u_int namelen); }
50	AUE_SETLOGIN	NOPROTO	{ int setlogin(char *namebuf); }
51	AUE_ACCT	NOPROTO	{ int acct(char *path); }
52	AUE_SIGPENDING	COMPAT	{ int freebsd32_sigpending(void); }
53	AUE_SIGALTSTACK	STD	{ int freebsd32_sigaltstack( \
		struct sigaltstack32 *ss, \
		struct sigaltstack32 *oss); }
54	AUE_IOCTL	STD	{ int freebsd32_ioctl(int fd, uint32_t com, \
		struct md_ioctl32 *data); }
55	AUE_REBOOT	NOPROTO	{ int reboot(int opt); }
56	AUE_REVOKE	NOPROTO	{ int revoke(char *path); }
57	AUE_SYMLINK	NOPROTO	{ int symlink(char *path, char *link); }
58	AUE_READLINK	NOPROTO	{ ssize_t readlink(char *path, char *buf, \
		size_t count); }
59	AUE_EXECVE	STD	{ int freebsd32_execve(char *fname, \
		uint32_t *argv, uint32_t *envv); }
60	AUE_UMASK	NOPROTO	{ int umask(int newmask); } umask \
		umask_args int
61	AUE_CHROOT	NOPROTO	{ int chroot(char *path); }
62	AUE_FSTAT	COMPAT	{ int freebsd32_fstat(int fd, \
		struct ostat32 *ub); }
63	AUE_NULL	OBSOL	ogetkerninfo
64	AUE_NULL	COMPAT	{ int freebsd32_getpagesize( \
		int32_t dummy); }
65	AUE_MSYNC	NOPROTO	{ int msync(void *addr, size_t len, \
		int flags); }
66	AUE_VFORK	NOPROTO	{ int vfork(void); }
67	AUE_NULL	OBSOL	vread
68	AUE_NULL	OBSOL	vwrite
69	AUE_SBRK	NOPROTO	{ int sbrk(int incr); }
70	AUE_SSTK	NOPROTO	{ int sstk(int incr); }
71	AUE_MMAP	COMPAT|NOPROTO	{ int mmap(void *addr, int len, \
		int prot, int flags, int fd, int pos); }
72	AUE_O_VADVISE	NOPROTO	{ int ovadvise(int anom); } vadvise \
		ovadvise_args int
73	AUE_MUNMAP	NOPROTO	{ int munmap(void *addr, size_t len); }
74	AUE_MPROTECT	STD	{ int freebsd32_mprotect(void *addr, \
		size_t len, int prot); }
75	AUE_MADVISE	NOPROTO	{ int madvise(void *addr, size_t len, \
		int behav); }
76	AUE_NULL	OBSOL	vhangup
77	AUE_NULL	OBSOL	vlimit
78	AUE_MINCORE	NOPROTO	{ int mincore(const void *addr, size_t len, \
		char *vec); }
79	AUE_GETGROUPS	NOPROTO	{ int getgroups(u_int gidsetsize, \
		gid_t *gidset); }
80	AUE_SETGROUPS	NOPROTO	{ int setgroups(u_int gidsetsize, \
		gid_t *gidset); }
81	AUE_GETPGRP	NOPROTO	{ int getpgrp(void); }
82	AUE_SETPGRP	NOPROTO	{ int setpgid(int pid, int pgid); }
83	AUE_SETITIMER	STD	{ int freebsd32_setitimer(u_int which, \
		struct itimerval32 *itv, \
		struct itimerval32 *oitv); }
84	AUE_NULL	OBSOL	owait
; XXX implement
85	AUE_SWAPON	NOPROTO	{ int swapon(char *name); }
86	AUE_GETITIMER	STD	{ int freebsd32_getitimer(u_int which, \
		struct itimerval32 *itv); }
87	AUE_O_GETHOSTNAME	OBSOL	ogethostname
88	AUE_O_SETHOSTNAME	OBSOL	osethostname
89	AUE_GETDTABLESIZE	NOPROTO	{ int getdtablesize(void); }
90	AUE_DUP2	NOPROTO	{ int dup2(u_int from, u_int to); }
91	AUE_NULL	UNIMPL	getdopt
92	AUE_FCNTL	STD	{ int freebsd32_fcntl(int fd, int cmd, \
		int arg); }
93	AUE_SELECT	STD	{ int freebsd32_select(int nd, fd_set *in, \
		fd_set *ou, fd_set *ex, \
		struct timeval32 *tv); }
94	AUE_NULL	UNIMPL	setdopt
95	AUE_FSYNC	NOPROTO	{ int fsync(int fd); }
96	AUE_SETPRIORITY	NOPROTO	{ int setpriority(int which, int who, \
		int prio); }
97	AUE_SOCKET	NOPROTO	{ int socket(int domain, int type, \
		int protocol); }
98	AUE_CONNECT	NOPROTO	{ int connect(int s, caddr_t name, \
		int namelen); }
99	AUE_NULL	OBSOL	oaccept
100	AUE_GETPRIORITY	NOPROTO	{ int getpriority(int which,
int who); } 101 AUE_NULL OBSOL osend 102 AUE_NULL OBSOL orecv 103 AUE_SIGRETURN COMPAT { int freebsd32_sigreturn( \ struct ia32_sigcontext3 *sigcntxp); } 104 AUE_BIND NOPROTO { int bind(int s, caddr_t name, \ int namelen); } 105 AUE_SETSOCKOPT NOPROTO { int setsockopt(int s, int level, \ int name, caddr_t val, int valsize); } 106 AUE_LISTEN NOPROTO { int listen(int s, int backlog); } 107 AUE_NULL OBSOL vtimes 108 AUE_O_SIGVEC COMPAT { int freebsd32_sigvec(int signum, \ struct sigvec32 *nsv, \ struct sigvec32 *osv); } 109 AUE_O_SIGBLOCK COMPAT { int freebsd32_sigblock(int mask); } 110 AUE_O_SIGSETMASK COMPAT { int freebsd32_sigsetmask( int mask); } 111 AUE_SIGSUSPEND COMPAT { int freebsd32_sigsuspend( int mask); } 112 AUE_O_SIGSTACK COMPAT { int freebsd32_sigstack( \ struct sigstack32 *nss, \ struct sigstack32 *oss); } 113 AUE_NULL OBSOL orecvmsg 114 AUE_NULL OBSOL osendmsg 115 AUE_NULL OBSOL vtrace 116 AUE_GETTIMEOFDAY STD { int freebsd32_gettimeofday( \ struct timeval32 *tp, \ struct timezone *tzp); } 117 AUE_GETRUSAGE STD { int freebsd32_getrusage(int who, \ struct rusage32 *rusage); } 118 AUE_GETSOCKOPT NOPROTO { int getsockopt(int s, int level, \ int name, caddr_t val, int *avalsize); } 119 AUE_NULL UNIMPL resuba (BSD/OS 2.x) 120 AUE_READV STD { int freebsd32_readv(int fd, \ struct iovec32 *iovp, u_int iovcnt); } 121 AUE_WRITEV STD { int freebsd32_writev(int fd, \ struct iovec32 *iovp, u_int iovcnt); } 122 AUE_SETTIMEOFDAY STD { int freebsd32_settimeofday( \ struct timeval32 *tv, \ struct timezone *tzp); } 123 AUE_FCHOWN NOPROTO { int fchown(int fd, int uid, int gid); } 124 AUE_FCHMOD NOPROTO { int fchmod(int fd, int mode); } 125 AUE_RECVFROM OBSOL orecvfrom 126 AUE_SETREUID NOPROTO { int setreuid(int ruid, int euid); } 127 AUE_SETREGID NOPROTO { int setregid(int rgid, int egid); } 128 AUE_RENAME NOPROTO { int rename(char *from, char *to); } 129 AUE_TRUNCATE COMPAT|NOPROTO { int truncate(char *path, \ int length); } 130 AUE_FTRUNCATE COMPAT|NOPROTO { int ftruncate(int fd, int length); } 131 AUE_FLOCK NOPROTO { int flock(int fd, int how); } 132 AUE_MKFIFO NOPROTO { int mkfifo(char *path, int mode); } 133 AUE_SENDTO NOPROTO { int sendto(int s, caddr_t buf, \ size_t len, int flags, caddr_t to, \ int tolen); } 134 AUE_SHUTDOWN NOPROTO { int shutdown(int s, int how); } 135 AUE_SOCKETPAIR NOPROTO { int socketpair(int domain, int type, \ int protocol, int *rsv); } 136 AUE_MKDIR NOPROTO { int mkdir(char *path, int mode); } 137 AUE_RMDIR NOPROTO { int rmdir(char *path); } 138 AUE_UTIMES STD { int freebsd32_utimes(char *path, \ struct timeval32 *tptr); } 139 AUE_NULL OBSOL 4.2 sigreturn 140 AUE_ADJTIME STD { int freebsd32_adjtime( \ struct timeval32 *delta, \ struct timeval32 *olddelta); } 141 AUE_GETPEERNAME OBSOL ogetpeername 142 AUE_SYSCTL OBSOL ogethostid 143 AUE_SYSCTL OBSOL sethostid 144 AUE_GETRLIMIT OBSOL getrlimit 145 AUE_SETRLIMIT OBSOL setrlimit 146 AUE_KILLPG OBSOL killpg 147 AUE_SETSID NOPROTO { int setsid(void); } 148 AUE_QUOTACTL NOPROTO { int quotactl(char *path, int cmd, int uid, \ caddr_t arg); } 149 AUE_O_QUOTA OBSOL oquota 150 AUE_GETSOCKNAME OBSOL ogetsockname ; Syscalls 151-180 inclusive are reserved for vendor-specific ; system calls. (This includes various calls added for compatibility ; with other Unix variants.) ; Some of these calls are now supported by BSD... 151 AUE_NULL UNIMPL sem_lock (BSD/OS 2.x) 152 AUE_NULL UNIMPL sem_wakeup (BSD/OS 2.x) 153 AUE_NULL UNIMPL asyncdaemon (BSD/OS 2.x) ; 154 is initialised by the NLM code, if present.
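; As a rough illustration of the format described in the header above (not
; part of this change): an STD entry such as the freebsd32_select line is
; turned by the generator into an argument structure and a handler
; prototype along these lines.  This is a simplified sketch; the actual
; generated freebsd32_proto.h wraps each member in register-padding
; macros, and NOPROTO entries reuse the native structure instead.
;
;	struct freebsd32_select_args {
;		int			nd;
;		fd_set			*in;
;		fd_set			*ou;
;		fd_set			*ex;
;		struct timeval32	*tv;
;	};
;	int	freebsd32_select(struct thread *td,
;		    struct freebsd32_select_args *uap);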
154 AUE_NULL UNIMPL nlm_syscall ; 155 is initialized by the NFS code, if present. ; XXX this is a problem!!! 155 AUE_NFS_SVC UNIMPL nfssvc 156 AUE_GETDIRENTRIES COMPAT { int freebsd32_getdirentries(int fd, \ char *buf, u_int count, uint32_t *basep); } 157 AUE_STATFS COMPAT4 { int freebsd32_statfs(char *path, \ struct statfs32 *buf); } 158 AUE_FSTATFS COMPAT4 { int freebsd32_fstatfs(int fd, \ struct statfs32 *buf); } 159 AUE_NULL UNIMPL nosys 160 AUE_LGETFH UNIMPL lgetfh 161 AUE_NFS_GETFH NOPROTO { int getfh(char *fname, \ struct fhandle *fhp); } 162 AUE_SYSCTL OBSOL getdomainname 163 AUE_SYSCTL OBSOL setdomainname 164 AUE_NULL OBSOL uname 165 AUE_SYSARCH STD { int freebsd32_sysarch(int op, char *parms); } 166 AUE_RTPRIO NOPROTO { int rtprio(int function, pid_t pid, \ struct rtprio *rtp); } 167 AUE_NULL UNIMPL nosys 168 AUE_NULL UNIMPL nosys 169 AUE_SEMSYS NOSTD { int freebsd32_semsys(int which, int a2, \ int a3, int a4, int a5); } 170 AUE_MSGSYS NOSTD { int freebsd32_msgsys(int which, int a2, \ int a3, int a4, int a5, int a6); } 171 AUE_SHMSYS NOSTD { int freebsd32_shmsys(uint32_t which, uint32_t a2, \ uint32_t a3, uint32_t a4); } 172 AUE_NULL UNIMPL nosys 173 AUE_PREAD COMPAT6 { ssize_t freebsd32_pread(int fd, void *buf, \ size_t nbyte, int pad, \ uint32_t offset1, uint32_t offset2); } 174 AUE_PWRITE COMPAT6 { ssize_t freebsd32_pwrite(int fd, \ const void *buf, size_t nbyte, int pad, \ uint32_t offset1, uint32_t offset2); } 175 AUE_NULL UNIMPL nosys 176 AUE_NTP_ADJTIME NOPROTO { int ntp_adjtime(struct timex *tp); } 177 AUE_NULL UNIMPL sfork (BSD/OS 2.x) 178 AUE_NULL UNIMPL getdescriptor (BSD/OS 2.x) 179 AUE_NULL UNIMPL setdescriptor (BSD/OS 2.x) 180 AUE_NULL UNIMPL nosys ; Syscalls 181-199 are used by/reserved for BSD 181 AUE_SETGID NOPROTO { int setgid(gid_t gid); } 182 AUE_SETEGID NOPROTO { int setegid(gid_t egid); } 183 AUE_SETEUID NOPROTO { int seteuid(uid_t euid); } 184 AUE_NULL UNIMPL lfs_bmapv 185 AUE_NULL UNIMPL lfs_markv 186 AUE_NULL UNIMPL lfs_segclean 187 AUE_NULL UNIMPL lfs_segwait 188 AUE_STAT COMPAT11 { int freebsd32_stat(char *path, \ struct freebsd11_stat32 *ub); } 189 AUE_FSTAT COMPAT11 { int freebsd32_fstat(int fd, \ struct freebsd11_stat32 *ub); } 190 AUE_LSTAT COMPAT11 { int freebsd32_lstat(char *path, \ struct freebsd11_stat32 *ub); } 191 AUE_PATHCONF NOPROTO { int pathconf(char *path, int name); } 192 AUE_FPATHCONF NOPROTO { int fpathconf(int fd, int name); } 193 AUE_NULL UNIMPL nosys 194 AUE_GETRLIMIT NOPROTO { int getrlimit(u_int which, \ struct rlimit *rlp); } getrlimit \ __getrlimit_args int 195 AUE_SETRLIMIT NOPROTO { int setrlimit(u_int which, \ struct rlimit *rlp); } setrlimit \ __setrlimit_args int 196 AUE_GETDIRENTRIES COMPAT11 { int freebsd32_getdirentries(int fd, \ char *buf, u_int count, int32_t *basep); } 197 AUE_MMAP COMPAT6 { caddr_t freebsd32_mmap(caddr_t addr, \ size_t len, int prot, int flags, int fd, \ int pad, uint32_t pos1, uint32_t pos2); } 198 AUE_NULL NOPROTO { int nosys(void); } __syscall \ __syscall_args int 199 AUE_LSEEK COMPAT6 { off_t freebsd32_lseek(int fd, int pad, \ uint32_t offset1, uint32_t offset2, \ int whence); } 200 AUE_TRUNCATE COMPAT6 { int freebsd32_truncate(char *path, \ int pad, uint32_t length1, \ uint32_t length2); } 201 AUE_FTRUNCATE COMPAT6 { int freebsd32_ftruncate(int fd, int pad, \ uint32_t length1, uint32_t length2); } 202 AUE_SYSCTL STD { int freebsd32_sysctl(int *name, \ u_int namelen, void *old, \ uint32_t *oldlenp, void *new, \ uint32_t newlen); } 203 AUE_MLOCK NOPROTO { int mlock(const void *addr, \ size_t len); 
} 204 AUE_MUNLOCK NOPROTO { int munlock(const void *addr, \ size_t len); } 205 AUE_UNDELETE NOPROTO { int undelete(char *path); } 206 AUE_FUTIMES STD { int freebsd32_futimes(int fd, \ struct timeval32 *tptr); } 207 AUE_GETPGID NOPROTO { int getpgid(pid_t pid); } 208 AUE_NULL UNIMPL newreboot (NetBSD) 209 AUE_POLL NOPROTO { int poll(struct pollfd *fds, u_int nfds, \ int timeout); } ; ; The following are reserved for loadable syscalls ; 210 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 211 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 212 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 213 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 214 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 215 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 216 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 217 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 218 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 219 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int ; ; The following were introduced with NetBSD/4.4Lite-2 ; They are initialized by their respective modules/sysinits ; XXX PROBLEM!! 220 AUE_SEMCTL COMPAT7|NOSTD { int freebsd32_semctl( \ int semid, int semnum, \ int cmd, union semun32 *arg); } 221 AUE_SEMGET NOSTD|NOPROTO { int semget(key_t key, int nsems, \ int semflg); } 222 AUE_SEMOP NOSTD|NOPROTO { int semop(int semid, \ struct sembuf *sops, u_int nsops); } 223 AUE_NULL UNIMPL semconfig 224 AUE_MSGCTL COMPAT7|NOSTD { int freebsd32_msgctl( \ int msqid, int cmd, \ struct msqid_ds32_old *buf); } 225 AUE_MSGGET NOSTD|NOPROTO { int msgget(key_t key, int msgflg); } 226 AUE_MSGSND NOSTD { int freebsd32_msgsnd(int msqid, void *msgp, \ size_t msgsz, int msgflg); } 227 AUE_MSGRCV NOSTD { int freebsd32_msgrcv(int msqid, void *msgp, \ size_t msgsz, long msgtyp, int msgflg); } 228 AUE_SHMAT NOSTD|NOPROTO { int shmat(int shmid, void *shmaddr, \ int shmflg); } 229 AUE_SHMCTL COMPAT7|NOSTD { int freebsd32_shmctl( \ int shmid, int cmd, \ struct shmid_ds32_old *buf); } 230 AUE_SHMDT NOSTD|NOPROTO { int shmdt(void *shmaddr); } 231 AUE_SHMGET NOSTD|NOPROTO { int shmget(key_t key, int size, \ int shmflg); } ; 232 AUE_NULL STD { int freebsd32_clock_gettime(clockid_t clock_id, \ struct timespec32 *tp); } 233 AUE_CLOCK_SETTIME STD { int freebsd32_clock_settime(clockid_t clock_id, \ const struct timespec32 *tp); } 234 AUE_NULL STD { int freebsd32_clock_getres(clockid_t clock_id, \ struct timespec32 *tp); } 235 AUE_NULL STD { int freebsd32_ktimer_create(\ clockid_t clock_id, \ struct sigevent32 *evp, int *timerid); } 236 AUE_NULL NOPROTO { int ktimer_delete(int timerid); } 237 AUE_NULL STD { int freebsd32_ktimer_settime(int timerid,\ int flags, \ const struct itimerspec32 *value, \ struct itimerspec32 *ovalue); } 238 AUE_NULL STD { int freebsd32_ktimer_gettime(int timerid,\ struct itimerspec32 *value); } 239 AUE_NULL NOPROTO { int ktimer_getoverrun(int timerid); } 240 AUE_NULL STD { int freebsd32_nanosleep( \ const struct timespec32 *rqtp, \ struct timespec32 *rmtp); } 241 AUE_NULL NOPROTO { int ffclock_getcounter(ffcounter *ffcount); } 242 AUE_NULL NOPROTO { int ffclock_setestimate( \ struct ffclock_estimate *cest); } 243 AUE_NULL NOPROTO { int ffclock_getestimate( \ struct ffclock_estimate *cest); } 244 AUE_NULL STD { int freebsd32_clock_nanosleep( \ clockid_t clock_id, int flags, \ const struct timespec32 *rqtp, \ struct timespec32 *rmtp); } 245 AUE_NULL UNIMPL nosys 246 AUE_NULL UNIMPL nosys 247 AUE_NULL STD { int 
freebsd32_clock_getcpuclockid2(\ uint32_t id1, uint32_t id2,\ int which, clockid_t *clock_id); } 248 AUE_NULL UNIMPL ntp_gettime 249 AUE_NULL UNIMPL nosys ; syscall numbers initially used in OpenBSD 250 AUE_MINHERIT NOPROTO { int minherit(void *addr, size_t len, \ int inherit); } 251 AUE_RFORK NOPROTO { int rfork(int flags); } 252 AUE_POLL OBSOL openbsd_poll 253 AUE_ISSETUGID NOPROTO { int issetugid(void); } 254 AUE_LCHOWN NOPROTO { int lchown(char *path, int uid, int gid); } 255 AUE_AIO_READ STD { int freebsd32_aio_read( \ struct aiocb32 *aiocbp); } 256 AUE_AIO_WRITE STD { int freebsd32_aio_write( \ struct aiocb32 *aiocbp); } 257 AUE_LIO_LISTIO STD { int freebsd32_lio_listio(int mode, \ struct aiocb32 * const *acb_list, \ int nent, struct sigevent32 *sig); } 258 AUE_NULL UNIMPL nosys 259 AUE_NULL UNIMPL nosys 260 AUE_NULL UNIMPL nosys 261 AUE_NULL UNIMPL nosys 262 AUE_NULL UNIMPL nosys 263 AUE_NULL UNIMPL nosys 264 AUE_NULL UNIMPL nosys 265 AUE_NULL UNIMPL nosys 266 AUE_NULL UNIMPL nosys 267 AUE_NULL UNIMPL nosys 268 AUE_NULL UNIMPL nosys 269 AUE_NULL UNIMPL nosys 270 AUE_NULL UNIMPL nosys 271 AUE_NULL UNIMPL nosys 272 AUE_O_GETDENTS COMPAT11 { int freebsd32_getdents(int fd, char *buf, \ int count); } 273 AUE_NULL UNIMPL nosys 274 AUE_LCHMOD NOPROTO { int lchmod(char *path, mode_t mode); } 275 AUE_LCHOWN NOPROTO { int lchown(char *path, uid_t uid, \ gid_t gid); } netbsd_lchown \ lchown_args int 276 AUE_LUTIMES STD { int freebsd32_lutimes(char *path, \ struct timeval32 *tptr); } 277 AUE_MSYNC NOPROTO { int msync(void *addr, size_t len, \ int flags); } netbsd_msync msync_args int 278 AUE_STAT COMPAT11|NOPROTO { int nstat(char *path, struct nstat *ub); } 279 AUE_FSTAT COMPAT11|NOPROTO { int nfstat(int fd, struct nstat *sb); } 280 AUE_LSTAT COMPAT11|NOPROTO { int nlstat(char *path, struct nstat *ub); } 281 AUE_NULL UNIMPL nosys 282 AUE_NULL UNIMPL nosys 283 AUE_NULL UNIMPL nosys 284 AUE_NULL UNIMPL nosys 285 AUE_NULL UNIMPL nosys 286 AUE_NULL UNIMPL nosys 287 AUE_NULL UNIMPL nosys 288 AUE_NULL UNIMPL nosys ; 289 and 290 from NetBSD (OpenBSD: 267 and 268) 289 AUE_PREADV STD { ssize_t freebsd32_preadv(int fd, \ struct iovec32 *iovp, \ u_int iovcnt, \ uint32_t offset1, uint32_t offset2); } 290 AUE_PWRITEV STD { ssize_t freebsd32_pwritev(int fd, \ struct iovec32 *iovp, \ u_int iovcnt, \ uint32_t offset1, uint32_t offset2); } 291 AUE_NULL UNIMPL nosys 292 AUE_NULL UNIMPL nosys 293 AUE_NULL UNIMPL nosys 294 AUE_NULL UNIMPL nosys 295 AUE_NULL UNIMPL nosys 296 AUE_NULL UNIMPL nosys ; XXX 297 is 300 in NetBSD 297 AUE_FHSTATFS COMPAT4 { int freebsd32_fhstatfs( \ const struct fhandle *u_fhp, \ struct statfs32 *buf); } 298 AUE_FHOPEN NOPROTO { int fhopen(const struct fhandle *u_fhp, \ int flags); } 299 AUE_FHSTAT COMPAT11 { int freebsd32_fhstat( \ const struct fhandle *u_fhp, \ struct freebsd11_stat32 *sb); } ; syscall numbers for FreeBSD 300 AUE_NULL NOPROTO { int modnext(int modid); } 301 AUE_NULL STD { int freebsd32_modstat(int modid, \ struct module_stat32* stat); } 302 AUE_NULL NOPROTO { int modfnext(int modid); } 303 AUE_NULL NOPROTO { int modfind(const char *name); } 304 AUE_MODLOAD NOPROTO { int kldload(const char *file); } 305 AUE_MODUNLOAD NOPROTO { int kldunload(int fileid); } 306 AUE_NULL NOPROTO { int kldfind(const char *file); } 307 AUE_NULL NOPROTO { int kldnext(int fileid); } 308 AUE_NULL STD { int freebsd32_kldstat(int fileid, \ struct kld32_file_stat* stat); } 309 AUE_NULL NOPROTO { int kldfirstmod(int fileid); } 310 AUE_GETSID NOPROTO { int getsid(pid_t pid); } 311 AUE_SETRESUID 
NOPROTO { int setresuid(uid_t ruid, uid_t euid, \ uid_t suid); } 312 AUE_SETRESGID NOPROTO { int setresgid(gid_t rgid, gid_t egid, \ gid_t sgid); } 313 AUE_NULL OBSOL signanosleep 314 AUE_AIO_RETURN STD { int freebsd32_aio_return( \ struct aiocb32 *aiocbp); } 315 AUE_AIO_SUSPEND STD { int freebsd32_aio_suspend( \ struct aiocb32 * const * aiocbp, int nent, \ const struct timespec32 *timeout); } 316 AUE_AIO_CANCEL NOPROTO { int aio_cancel(int fd, \ struct aiocb *aiocbp); } 317 AUE_AIO_ERROR STD { int freebsd32_aio_error( \ struct aiocb32 *aiocbp); } 318 AUE_AIO_READ COMPAT6 { int freebsd32_aio_read( \ struct oaiocb32 *aiocbp); } 319 AUE_AIO_WRITE COMPAT6 { int freebsd32_aio_write( \ struct oaiocb32 *aiocbp); } 320 AUE_LIO_LISTIO COMPAT6 { int freebsd32_lio_listio(int mode, \ struct oaiocb32 * const *acb_list, \ int nent, struct osigevent32 *sig); } 321 AUE_NULL NOPROTO { int yield(void); } 322 AUE_NULL OBSOL thr_sleep 323 AUE_NULL OBSOL thr_wakeup 324 AUE_MLOCKALL NOPROTO { int mlockall(int how); } 325 AUE_MUNLOCKALL NOPROTO { int munlockall(void); } 326 AUE_GETCWD NOPROTO { int __getcwd(char *buf, size_t buflen); } 327 AUE_NULL NOPROTO { int sched_setparam (pid_t pid, \ const struct sched_param *param); } 328 AUE_NULL NOPROTO { int sched_getparam (pid_t pid, \ struct sched_param *param); } 329 AUE_NULL NOPROTO { int sched_setscheduler (pid_t pid, \ int policy, \ const struct sched_param *param); } 330 AUE_NULL NOPROTO { int sched_getscheduler (pid_t pid); } 331 AUE_NULL NOPROTO { int sched_yield (void); } 332 AUE_NULL NOPROTO { int sched_get_priority_max (int policy); } 333 AUE_NULL NOPROTO { int sched_get_priority_min (int policy); } 334 AUE_NULL NOPROTO { int sched_rr_get_interval (pid_t pid, \ struct timespec *interval); } 335 AUE_NULL NOPROTO { int utrace(const void *addr, size_t len); } 336 AUE_SENDFILE COMPAT4 { int freebsd32_sendfile(int fd, int s, \ uint32_t offset1, uint32_t offset2, \ size_t nbytes, struct sf_hdtr32 *hdtr, \ off_t *sbytes, int flags); } 337 AUE_NULL NOPROTO { int kldsym(int fileid, int cmd, \ void *data); } 338 AUE_JAIL STD { int freebsd32_jail(struct jail32 *jail); } 339 AUE_NULL UNIMPL pioctl 340 AUE_SIGPROCMASK NOPROTO { int sigprocmask(int how, \ const sigset_t *set, sigset_t *oset); } 341 AUE_SIGSUSPEND NOPROTO { int sigsuspend(const sigset_t *sigmask); } 342 AUE_SIGACTION COMPAT4 { int freebsd32_sigaction(int sig, \ struct sigaction32 *act, \ struct sigaction32 *oact); } 343 AUE_SIGPENDING NOPROTO { int sigpending(sigset_t *set); } 344 AUE_SIGRETURN COMPAT4 { int freebsd32_sigreturn( \ const struct freebsd4_freebsd32_ucontext *sigcntxp); } 345 AUE_SIGWAIT STD { int freebsd32_sigtimedwait(const sigset_t *set, \ siginfo_t *info, \ const struct timespec *timeout); } 346 AUE_NULL STD { int freebsd32_sigwaitinfo(const sigset_t *set, \ siginfo_t *info); } 347 AUE_ACL_GET_FILE NOPROTO { int __acl_get_file(const char *path, \ acl_type_t type, struct acl *aclp); } 348 AUE_ACL_SET_FILE NOPROTO { int __acl_set_file(const char *path, \ acl_type_t type, struct acl *aclp); } 349 AUE_ACL_GET_FD NOPROTO { int __acl_get_fd(int filedes, \ acl_type_t type, struct acl *aclp); } 350 AUE_ACL_SET_FD NOPROTO { int __acl_set_fd(int filedes, \ acl_type_t type, struct acl *aclp); } 351 AUE_ACL_DELETE_FILE NOPROTO { int __acl_delete_file(const char *path, \ acl_type_t type); } 352 AUE_ACL_DELETE_FD NOPROTO { int __acl_delete_fd(int filedes, \ acl_type_t type); } 353 AUE_ACL_CHECK_FILE NOPROTO { int __acl_aclcheck_file(const char *path, \ acl_type_t type, struct acl *aclp); } 354 
AUE_ACL_CHECK_FD NOPROTO { int __acl_aclcheck_fd(int filedes, \ acl_type_t type, struct acl *aclp); } 355 AUE_EXTATTRCTL NOPROTO { int extattrctl(const char *path, int cmd, \ const char *filename, int attrnamespace, \ const char *attrname); } 356 AUE_EXTATTR_SET_FILE NOPROTO { ssize_t extattr_set_file( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 357 AUE_EXTATTR_GET_FILE NOPROTO { ssize_t extattr_get_file( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 358 AUE_EXTATTR_DELETE_FILE NOPROTO { int extattr_delete_file( \ const char *path, int attrnamespace, \ const char *attrname); } 359 AUE_AIO_WAITCOMPLETE STD { int freebsd32_aio_waitcomplete( \ struct aiocb32 **aiocbp, \ struct timespec32 *timeout); } 360 AUE_GETRESUID NOPROTO { int getresuid(uid_t *ruid, uid_t *euid, \ uid_t *suid); } 361 AUE_GETRESGID NOPROTO { int getresgid(gid_t *rgid, gid_t *egid, \ gid_t *sgid); } 362 AUE_KQUEUE NOPROTO { int kqueue(void); } 363 AUE_KEVENT COMPAT11 { int freebsd32_kevent(int fd, \ const struct kevent32_freebsd11 * \ changelist, \ int nchanges, \ struct kevent32_freebsd11 *eventlist, \ int nevents, \ const struct timespec32 *timeout); } 364 AUE_NULL UNIMPL __cap_get_proc 365 AUE_NULL UNIMPL __cap_set_proc 366 AUE_NULL UNIMPL __cap_get_fd 367 AUE_NULL UNIMPL __cap_get_file 368 AUE_NULL UNIMPL __cap_set_fd 369 AUE_NULL UNIMPL __cap_set_file 370 AUE_NULL UNIMPL nosys 371 AUE_EXTATTR_SET_FD NOPROTO { ssize_t extattr_set_fd(int fd, \ int attrnamespace, const char *attrname, \ void *data, size_t nbytes); } 372 AUE_EXTATTR_GET_FD NOPROTO { ssize_t extattr_get_fd(int fd, \ int attrnamespace, const char *attrname, \ void *data, size_t nbytes); } 373 AUE_EXTATTR_DELETE_FD NOPROTO { int extattr_delete_fd(int fd, \ int attrnamespace, \ const char *attrname); } 374 AUE_SETUGID NOPROTO { int __setugid(int flag); } 375 AUE_NULL UNIMPL nfsclnt 376 AUE_EACCESS NOPROTO { int eaccess(char *path, int amode); } 377 AUE_NULL UNIMPL afs_syscall 378 AUE_NMOUNT STD { int freebsd32_nmount(struct iovec32 *iovp, \ unsigned int iovcnt, int flags); } 379 AUE_NULL UNIMPL kse_exit 380 AUE_NULL UNIMPL kse_wakeup 381 AUE_NULL UNIMPL kse_create 382 AUE_NULL UNIMPL kse_thr_interrupt 383 AUE_NULL UNIMPL kse_release 384 AUE_NULL UNIMPL __mac_get_proc 385 AUE_NULL UNIMPL __mac_set_proc 386 AUE_NULL UNIMPL __mac_get_fd 387 AUE_NULL UNIMPL __mac_get_file 388 AUE_NULL UNIMPL __mac_set_fd 389 AUE_NULL UNIMPL __mac_set_file 390 AUE_NULL NOPROTO { int kenv(int what, const char *name, \ char *value, int len); } 391 AUE_LCHFLAGS NOPROTO { int lchflags(const char *path, \ u_long flags); } 392 AUE_NULL NOPROTO { int uuidgen(struct uuid *store, \ int count); } 393 AUE_SENDFILE STD { int freebsd32_sendfile(int fd, int s, \ uint32_t offset1, uint32_t offset2, \ size_t nbytes, struct sf_hdtr32 *hdtr, \ off_t *sbytes, int flags); } 394 AUE_NULL UNIMPL mac_syscall 395 AUE_GETFSSTAT COMPAT11|NOPROTO { int getfsstat( \ struct freebsd11_statfs *buf, \ long bufsize, int mode); } 396 AUE_STATFS COMPAT11|NOPROTO { int statfs(char *path, \ struct statfs *buf); } 397 AUE_FSTATFS COMPAT11|NOPROTO { int fstatfs(int fd, \ struct freebsd11_statfs *buf); } 398 AUE_FHSTATFS COMPAT11|NOPROTO { int fhstatfs( \ const struct fhandle *u_fhp, \ struct freebsd11_statfs *buf); } 399 AUE_NULL UNIMPL nosys 400 AUE_SEMCLOSE NOSTD|NOPROTO { int ksem_close(semid_t id); } 401 AUE_SEMPOST NOSTD|NOPROTO { int ksem_post(semid_t id); } 402 AUE_SEMWAIT NOSTD|NOPROTO { int ksem_wait(semid_t 
id); } 403 AUE_SEMTRYWAIT NOSTD|NOPROTO { int ksem_trywait(semid_t id); } 404 AUE_SEMINIT NOSTD { int freebsd32_ksem_init(semid_t *idp, \ unsigned int value); } 405 AUE_SEMOPEN NOSTD { int freebsd32_ksem_open(semid_t *idp, \ const char *name, int oflag, \ mode_t mode, unsigned int value); } 406 AUE_SEMUNLINK NOSTD|NOPROTO { int ksem_unlink(const char *name); } 407 AUE_SEMGETVALUE NOSTD|NOPROTO { int ksem_getvalue(semid_t id, \ int *val); } 408 AUE_SEMDESTROY NOSTD|NOPROTO { int ksem_destroy(semid_t id); } 409 AUE_NULL UNIMPL __mac_get_pid 410 AUE_NULL UNIMPL __mac_get_link 411 AUE_NULL UNIMPL __mac_set_link 412 AUE_EXTATTR_SET_LINK NOPROTO { ssize_t extattr_set_link( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 413 AUE_EXTATTR_GET_LINK NOPROTO { ssize_t extattr_get_link( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 414 AUE_EXTATTR_DELETE_LINK NOPROTO { int extattr_delete_link( \ const char *path, int attrnamespace, \ const char *attrname); } 415 AUE_NULL UNIMPL __mac_execve 416 AUE_SIGACTION STD { int freebsd32_sigaction(int sig, \ struct sigaction32 *act, \ struct sigaction32 *oact); } 417 AUE_SIGRETURN STD { int freebsd32_sigreturn( \ const struct freebsd32_ucontext *sigcntxp); } 418 AUE_NULL UNIMPL __xstat 419 AUE_NULL UNIMPL __xfstat 420 AUE_NULL UNIMPL __xlstat 421 AUE_NULL STD { int freebsd32_getcontext( \ struct freebsd32_ucontext *ucp); } 422 AUE_NULL STD { int freebsd32_setcontext( \ const struct freebsd32_ucontext *ucp); } 423 AUE_NULL STD { int freebsd32_swapcontext( \ struct freebsd32_ucontext *oucp, \ const struct freebsd32_ucontext *ucp); } 424 AUE_SWAPOFF UNIMPL swapoff 425 AUE_ACL_GET_LINK NOPROTO { int __acl_get_link(const char *path, \ acl_type_t type, struct acl *aclp); } 426 AUE_ACL_SET_LINK NOPROTO { int __acl_set_link(const char *path, \ acl_type_t type, struct acl *aclp); } 427 AUE_ACL_DELETE_LINK NOPROTO { int __acl_delete_link(const char *path, \ acl_type_t type); } 428 AUE_ACL_CHECK_LINK NOPROTO { int __acl_aclcheck_link(const char *path, \ acl_type_t type, struct acl *aclp); } 429 AUE_SIGWAIT NOPROTO { int sigwait(const sigset_t *set, \ int *sig); } 430 AUE_THR_CREATE UNIMPL thr_create; 431 AUE_THR_EXIT NOPROTO { void thr_exit(long *state); } 432 AUE_NULL NOPROTO { int thr_self(long *id); } 433 AUE_THR_KILL NOPROTO { int thr_kill(long id, int sig); } 434 AUE_NULL UNIMPL nosys 435 AUE_NULL UNIMPL nosys 436 AUE_JAIL_ATTACH NOPROTO { int jail_attach(int jid); } 437 AUE_EXTATTR_LIST_FD NOPROTO { ssize_t extattr_list_fd(int fd, \ int attrnamespace, void *data, \ size_t nbytes); } 438 AUE_EXTATTR_LIST_FILE NOPROTO { ssize_t extattr_list_file( \ const char *path, int attrnamespace, \ void *data, size_t nbytes); } 439 AUE_EXTATTR_LIST_LINK NOPROTO { ssize_t extattr_list_link( \ const char *path, int attrnamespace, \ void *data, size_t nbytes); } 440 AUE_NULL UNIMPL kse_switchin 441 AUE_SEMWAIT NOSTD { int freebsd32_ksem_timedwait(semid_t id, \ const struct timespec32 *abstime); } 442 AUE_NULL STD { int freebsd32_thr_suspend( \ const struct timespec32 *timeout); } 443 AUE_NULL NOPROTO { int thr_wake(long id); } 444 AUE_MODUNLOAD NOPROTO { int kldunloadf(int fileid, int flags); } 445 AUE_AUDIT NOPROTO { int audit(const void *record, \ u_int length); } 446 AUE_AUDITON NOPROTO { int auditon(int cmd, void *data, \ u_int length); } 447 AUE_GETAUID NOPROTO { int getauid(uid_t *auid); } 448 AUE_SETAUID NOPROTO { int setauid(uid_t *auid); } 449 AUE_GETAUDIT NOPROTO { int 
getaudit(struct auditinfo *auditinfo); } 450 AUE_SETAUDIT NOPROTO { int setaudit(struct auditinfo *auditinfo); } 451 AUE_GETAUDIT_ADDR NOPROTO { int getaudit_addr( \ struct auditinfo_addr *auditinfo_addr, \ u_int length); } 452 AUE_SETAUDIT_ADDR NOPROTO { int setaudit_addr( \ struct auditinfo_addr *auditinfo_addr, \ u_int length); } 453 AUE_AUDITCTL NOPROTO { int auditctl(char *path); } 454 AUE_NULL STD { int freebsd32_umtx_op(void *obj, int op,\ u_long val, void *uaddr, \ void *uaddr2); } 455 AUE_THR_NEW STD { int freebsd32_thr_new( \ struct thr_param32 *param, \ int param_size); } 456 AUE_NULL STD { int freebsd32_sigqueue(pid_t pid, \ int signum, int value); } 457 AUE_MQ_OPEN NOSTD { int freebsd32_kmq_open( \ const char *path, int flags, mode_t mode, \ const struct mq_attr32 *attr); } 458 AUE_MQ_SETATTR NOSTD { int freebsd32_kmq_setattr(int mqd, \ const struct mq_attr32 *attr, \ struct mq_attr32 *oattr); } 459 AUE_MQ_TIMEDRECEIVE NOSTD { int freebsd32_kmq_timedreceive(int mqd, \ char *msg_ptr, size_t msg_len, \ unsigned *msg_prio, \ const struct timespec32 *abs_timeout); } 460 AUE_MQ_TIMEDSEND NOSTD { int freebsd32_kmq_timedsend(int mqd, \ const char *msg_ptr, size_t msg_len,\ unsigned msg_prio, \ const struct timespec32 *abs_timeout);} 461 AUE_MQ_NOTIFY NOSTD { int freebsd32_kmq_notify(int mqd, \ const struct sigevent32 *sigev); } 462 AUE_MQ_UNLINK NOPROTO|NOSTD { int kmq_unlink(const char *path); } 463 AUE_NULL NOPROTO { int abort2(const char *why, int nargs, void **args); } 464 AUE_NULL NOPROTO { int thr_set_name(long id, const char *name); } 465 AUE_AIO_FSYNC STD { int freebsd32_aio_fsync(int op, \ struct aiocb32 *aiocbp); } 466 AUE_RTPRIO NOPROTO { int rtprio_thread(int function, \ lwpid_t lwpid, struct rtprio *rtp); } 467 AUE_NULL UNIMPL nosys 468 AUE_NULL UNIMPL nosys 469 AUE_NULL UNIMPL __getpath_fromfd 470 AUE_NULL UNIMPL __getpath_fromaddr 471 AUE_SCTP_PEELOFF NOPROTO|NOSTD { int sctp_peeloff(int sd, uint32_t name); } 472 AUE_SCTP_GENERIC_SENDMSG NOPROTO|NOSTD { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ caddr_t to, __socklen_t tolen, \ struct sctp_sndrcvinfo *sinfo, int flags); } 473 AUE_SCTP_GENERIC_SENDMSG_IOV NOPROTO|NOSTD { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ caddr_t to, __socklen_t tolen, \ struct sctp_sndrcvinfo *sinfo, int flags); } 474 AUE_SCTP_GENERIC_RECVMSG NOPROTO|NOSTD { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ struct sockaddr * from, __socklen_t *fromlenaddr, \ struct sctp_sndrcvinfo *sinfo, int *msg_flags); } #ifdef PAD64_REQUIRED 475 AUE_PREAD STD { ssize_t freebsd32_pread(int fd, \ void *buf,size_t nbyte, \ int pad, \ uint32_t offset1, uint32_t offset2); } 476 AUE_PWRITE STD { ssize_t freebsd32_pwrite(int fd, \ const void *buf, size_t nbyte, \ int pad, \ uint32_t offset1, uint32_t offset2); } 477 AUE_MMAP STD { caddr_t freebsd32_mmap(caddr_t addr, \ size_t len, int prot, int flags, int fd, \ int pad, \ uint32_t pos1, uint32_t pos2); } 478 AUE_LSEEK STD { off_t freebsd32_lseek(int fd, \ int pad, \ uint32_t offset1, uint32_t offset2, \ int whence); } 479 AUE_TRUNCATE STD { int freebsd32_truncate(char *path, \ int pad, \ uint32_t length1, uint32_t length2); } 480 AUE_FTRUNCATE STD { int freebsd32_ftruncate(int fd, \ int pad, \ uint32_t length1, uint32_t length2); } #else 475 AUE_PREAD STD { ssize_t freebsd32_pread(int fd, \ void *buf,size_t nbyte, \ uint32_t offset1, uint32_t offset2); } 476 AUE_PWRITE STD { ssize_t freebsd32_pwrite(int fd, \ const void *buf, size_t nbyte, \ uint32_t 
offset1, uint32_t offset2); } 477 AUE_MMAP STD { caddr_t freebsd32_mmap(caddr_t addr, \ size_t len, int prot, int flags, int fd, \ uint32_t pos1, uint32_t pos2); } 478 AUE_LSEEK STD { off_t freebsd32_lseek(int fd, \ uint32_t offset1, uint32_t offset2, \ int whence); } 479 AUE_TRUNCATE STD { int freebsd32_truncate(char *path, \ uint32_t length1, uint32_t length2); } 480 AUE_FTRUNCATE STD { int freebsd32_ftruncate(int fd, \ uint32_t length1, uint32_t length2); } #endif 481 AUE_THR_KILL2 NOPROTO { int thr_kill2(pid_t pid, long id, int sig); } 482 AUE_SHMOPEN NOPROTO { int shm_open(const char *path, int flags, \ mode_t mode); } 483 AUE_SHMUNLINK NOPROTO { int shm_unlink(const char *path); } 484 AUE_NULL NOPROTO { int cpuset(cpusetid_t *setid); } #ifdef PAD64_REQUIRED 485 AUE_NULL STD { int freebsd32_cpuset_setid(cpuwhich_t which, \ int pad, \ uint32_t id1, uint32_t id2, \ cpusetid_t setid); } #else 485 AUE_NULL STD { int freebsd32_cpuset_setid(cpuwhich_t which, \ uint32_t id1, uint32_t id2, \ cpusetid_t setid); } #endif 486 AUE_NULL STD { int freebsd32_cpuset_getid(cpulevel_t level, \ cpuwhich_t which, \ uint32_t id1, uint32_t id2, \ cpusetid_t *setid); } 487 AUE_NULL STD { int freebsd32_cpuset_getaffinity( \ cpulevel_t level, cpuwhich_t which, \ uint32_t id1, uint32_t id2, \ size_t cpusetsize, \ cpuset_t *mask); } 488 AUE_NULL STD { int freebsd32_cpuset_setaffinity( \ cpulevel_t level, cpuwhich_t which, \ uint32_t id1, uint32_t id2, \ size_t cpusetsize, \ const cpuset_t *mask); } 489 AUE_FACCESSAT NOPROTO { int faccessat(int fd, char *path, int amode, \ int flag); } 490 AUE_FCHMODAT NOPROTO { int fchmodat(int fd, const char *path, \ mode_t mode, int flag); } 491 AUE_FCHOWNAT NOPROTO { int fchownat(int fd, char *path, uid_t uid, \ gid_t gid, int flag); } 492 AUE_FEXECVE STD { int freebsd32_fexecve(int fd, \ uint32_t *argv, uint32_t *envv); } 493 AUE_FSTATAT COMPAT11 { int freebsd32_fstatat(int fd, \ char *path, struct freebsd11_stat32 *buf, \ int flag); } 494 AUE_FUTIMESAT STD { int freebsd32_futimesat(int fd, char *path, \ struct timeval *times); } 495 AUE_LINKAT NOPROTO { int linkat(int fd1, char *path1, int fd2, \ char *path2, int flag); } 496 AUE_MKDIRAT NOPROTO { int mkdirat(int fd, char *path, \ mode_t mode); } 497 AUE_MKFIFOAT NOPROTO { int mkfifoat(int fd, char *path, \ mode_t mode); } 498 AUE_MKNODAT COMPAT11 { int freebsd32_mknodat(int fd, char *path, \ mode_t mode, uint32_t dev); } 499 AUE_OPENAT_RWTC NOPROTO { int openat(int fd, char *path, int flag, \ mode_t mode); } 500 AUE_READLINKAT NOPROTO { int readlinkat(int fd, char *path, char *buf, \ size_t bufsize); } 501 AUE_RENAMEAT NOPROTO { int renameat(int oldfd, char *old, int newfd, \ const char *new); } 502 AUE_SYMLINKAT NOPROTO { int symlinkat(char *path1, int fd, \ char *path2); } 503 AUE_UNLINKAT NOPROTO { int unlinkat(int fd, char *path, \ int flag); } 504 AUE_POSIX_OPENPT NOPROTO { int posix_openpt(int flags); } ; 505 is initialised by the kgssapi code, if present. 
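; A note on the offset1/offset2 and length1/length2 pairs above: each pair
; carries one 64-bit value split across two 32-bit syscall arguments, and
; the extra pad argument appears only under PAD64_REQUIRED (powerpc/mips),
; whose ABIs align 64-bit values to even argument slots.  A minimal,
; endian-aware sketch of how the kernel side rejoins such a pair, modeled
; on the compat layer's PAIR32TO64() macro (BYTE_ORDER comes from the
; machine endian headers):
;
;	#if BYTE_ORDER == LITTLE_ENDIAN
;	#define	PAIR32TO64(type, name)	((name ## 1) | ((type)(name ## 2) << 32))
;	#else
;	#define	PAIR32TO64(type, name)	((name ## 2) | ((type)(name ## 1) << 32))
;	#endif
;
; so that a handler such as freebsd32_pread() can do, e.g.,
;	off_t offset = PAIR32TO64(off_t, uap->offset);
; before handing off to the native implementation.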
505 AUE_NULL UNIMPL gssd_syscall 506 AUE_JAIL_GET STD { int freebsd32_jail_get(struct iovec32 *iovp, \ unsigned int iovcnt, int flags); } 507 AUE_JAIL_SET STD { int freebsd32_jail_set(struct iovec32 *iovp, \ unsigned int iovcnt, int flags); } 508 AUE_JAIL_REMOVE NOPROTO { int jail_remove(int jid); } 509 AUE_CLOSEFROM NOPROTO { int closefrom(int lowfd); } 510 AUE_SEMCTL NOSTD { int freebsd32_semctl(int semid, int semnum, \ int cmd, union semun32 *arg); } 511 AUE_MSGCTL NOSTD { int freebsd32_msgctl(int msqid, int cmd, \ struct msqid_ds32 *buf); } 512 AUE_SHMCTL NOSTD { int freebsd32_shmctl(int shmid, int cmd, \ struct shmid_ds32 *buf); } 513 AUE_LPATHCONF NOPROTO { int lpathconf(char *path, int name); } 514 AUE_NULL OBSOL cap_new 515 AUE_CAP_RIGHTS_GET NOPROTO { int __cap_rights_get(int version, \ int fd, cap_rights_t *rightsp); } 516 AUE_CAP_ENTER NOPROTO { int cap_enter(void); } 517 AUE_CAP_GETMODE NOPROTO { int cap_getmode(u_int *modep); } 518 AUE_PDFORK NOPROTO { int pdfork(int *fdp, int flags); } 519 AUE_PDKILL NOPROTO { int pdkill(int fd, int signum); } 520 AUE_PDGETPID NOPROTO { int pdgetpid(int fd, pid_t *pidp); } 521 AUE_PDWAIT UNIMPL pdwait4 522 AUE_SELECT STD { int freebsd32_pselect(int nd, fd_set *in, \ fd_set *ou, fd_set *ex, \ const struct timespec32 *ts, \ const sigset_t *sm); } 523 AUE_GETLOGINCLASS NOPROTO { int getloginclass(char *namebuf, \ size_t namelen); } 524 AUE_SETLOGINCLASS NOPROTO { int setloginclass(const char *namebuf); } 525 AUE_NULL NOPROTO { int rctl_get_racct(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 526 AUE_NULL NOPROTO { int rctl_get_rules(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 527 AUE_NULL NOPROTO { int rctl_get_limits(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 528 AUE_NULL NOPROTO { int rctl_add_rule(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 529 AUE_NULL NOPROTO { int rctl_remove_rule(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } #ifdef PAD64_REQUIRED 530 AUE_POSIX_FALLOCATE STD { int freebsd32_posix_fallocate(int fd, \ int pad, \ uint32_t offset1, uint32_t offset2,\ uint32_t len1, uint32_t len2); } 531 AUE_POSIX_FADVISE STD { int freebsd32_posix_fadvise(int fd, \ int pad, \ uint32_t offset1, uint32_t offset2,\ uint32_t len1, uint32_t len2, \ int advice); } 532 AUE_WAIT6 STD { int freebsd32_wait6(int idtype, int pad, \ uint32_t id1, uint32_t id2, \ int *status, int options, \ struct wrusage32 *wrusage, \ siginfo_t *info); } #else 530 AUE_POSIX_FALLOCATE STD { int freebsd32_posix_fallocate(int fd,\ uint32_t offset1, uint32_t offset2,\ uint32_t len1, uint32_t len2); } 531 AUE_POSIX_FADVISE STD { int freebsd32_posix_fadvise(int fd, \ uint32_t offset1, uint32_t offset2,\ uint32_t len1, uint32_t len2, \ int advice); } 532 AUE_WAIT6 STD { int freebsd32_wait6(int idtype, \ uint32_t id1, uint32_t id2, \ int *status, int options, \ struct wrusage32 *wrusage, \ siginfo_t *info); } #endif 533 AUE_CAP_RIGHTS_LIMIT NOPROTO { \ int cap_rights_limit(int fd, \ cap_rights_t *rightsp); } 534 AUE_CAP_IOCTLS_LIMIT STD { \ int freebsd32_cap_ioctls_limit(int fd, \ const uint32_t *cmds, size_t ncmds); } 535 AUE_CAP_IOCTLS_GET STD { \ ssize_t freebsd32_cap_ioctls_get(int fd, \ uint32_t *cmds, size_t maxcmds); } 536 AUE_CAP_FCNTLS_LIMIT NOPROTO { int cap_fcntls_limit(int fd, \ uint32_t fcntlrights); } 537 AUE_CAP_FCNTLS_GET NOPROTO { int cap_fcntls_get(int fd, \ uint32_t *fcntlrightsp); } 538 AUE_BINDAT 
NOPROTO { int bindat(int fd, int s, caddr_t name, \ int namelen); } 539 AUE_CONNECTAT NOPROTO { int connectat(int fd, int s, caddr_t name, \ int namelen); } 540 AUE_CHFLAGSAT NOPROTO { int chflagsat(int fd, const char *path, \ u_long flags, int atflag); } 541 AUE_ACCEPT NOPROTO { int accept4(int s, \ struct sockaddr * __restrict name, \ __socklen_t * __restrict anamelen, \ int flags); } 542 AUE_PIPE NOPROTO { int pipe2(int *fildes, int flags); } 543 AUE_AIO_MLOCK STD { int freebsd32_aio_mlock( \ struct aiocb32 *aiocbp); } #ifdef PAD64_REQUIRED 544 AUE_PROCCTL STD { int freebsd32_procctl(int idtype, int pad, \ uint32_t id1, uint32_t id2, int com, \ void *data); } #else 544 AUE_PROCCTL STD { int freebsd32_procctl(int idtype, \ uint32_t id1, uint32_t id2, int com, \ void *data); } #endif 545 AUE_POLL STD { int freebsd32_ppoll(struct pollfd *fds, \ u_int nfds, const struct timespec32 *ts, \ const sigset_t *set); } 546 AUE_FUTIMES STD { int freebsd32_futimens(int fd, \ struct timespec *times); } 547 AUE_FUTIMESAT STD { int freebsd32_utimensat(int fd, \ char *path, \ struct timespec *times, int flag); } 548 AUE_NULL UNIMPL numa_getaffinity 549 AUE_NULL UNIMPL numa_setaffinity 550 AUE_FSYNC NOPROTO { int fdatasync(int fd); } 551 AUE_FSTAT STD { int freebsd32_fstat(int fd, \ struct stat32 *ub); } 552 AUE_FSTATAT STD { int freebsd32_fstatat(int fd, \ char *path, struct stat32 *buf, \ int flag); } 553 AUE_FHSTAT STD { int freebsd32_fhstat( \ const struct fhandle *u_fhp, \ struct stat32 *sb); } 554 AUE_GETDIRENTRIES NOPROTO { ssize_t getdirentries( \ int fd, char *buf, size_t count, \ off_t *basep); } 555 AUE_STATFS NOPROTO { int statfs(char *path, \ struct statfs32 *buf); } 556 AUE_FSTATFS NOPROTO { int fstatfs(int fd, struct statfs32 *buf); } 557 AUE_GETFSSTAT NOPROTO { int getfsstat(struct statfs32 *buf, \ long bufsize, int mode); } 558 AUE_FHSTATFS NOPROTO { int fhstatfs(const struct fhandle *u_fhp, \ struct statfs32 *buf); } 559 AUE_MKNODAT NOPROTO { int mknodat(int fd, char *path, mode_t mode, \ dev_t dev); } 560 AUE_KEVENT STD { int freebsd32_kevent(int fd, \ const struct kevent32 *changelist, \ int nchanges, \ struct kevent32 *eventlist, \ int nevents, \ const struct timespec32 *timeout); } 561 AUE_NULL STD { int freebsd32_cpuset_getdomain(cpulevel_t level, \ cpuwhich_t which, uint32_t id1, uint32_t id2, \ size_t domainsetsize, domainset_t *mask, \ int *policy); } 562 AUE_NULL STD { int freebsd32_cpuset_setdomain(cpulevel_t level, \ cpuwhich_t which, uint32_t id1, uint32_t id2, \ size_t domainsetsize, domainset_t *mask, \ int policy); } ; vim: syntax=off Index: head/sys/dev/aic7xxx/aic7xxx.seq =================================================================== --- head/sys/dev/aic7xxx/aic7xxx.seq (revision 329872) +++ head/sys/dev/aic7xxx/aic7xxx.seq (revision 329873) @@ -1,2403 +1,2403 @@ /*- * Adaptec 274x/284x/294x device driver firmware for Linux and FreeBSD. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $" PATCH_ARG_LIST = "struct ahc_softc *ahc" PREFIX = "ahc_" #include "aic7xxx.reg" #include "scsi_message.h" /* * A few words on the waiting SCB list: * After starting the selection hardware, we check for reconnecting targets * as well as for our selection to complete just in case the reselection wins * bus arbitration. The problem with this is that we must keep track of the * SCB that we've already pulled from the QINFIFO and started the selection * on just in case the reselection wins so that we can retry the selection at * a later time. This problem cannot be resolved by holding a single entry * in scratch ram since a reconnecting target can request sense and this will * create yet another SCB waiting for selection. The solution used here is to - * use byte 27 of the SCB as a psuedo-next pointer and to thread a list + * use byte 27 of the SCB as a pseudo-next pointer and to thread a list * of SCBs that are awaiting selection. Since 0-0xfe are valid SCB indexes, * SCB_LIST_NULL is 0xff which is out of range. An entry is also added to * this list every time a request sense occurs or after completing a non-tagged * command for which a second SCB has been queued. The sequencer will * automatically consume the entries. */ bus_free_sel: /* * Turn off the selection hardware. We need to reset the * selection request in order to perform a new selection. */ and SCSISEQ, TEMODE|ENSELI|ENRSELI|ENAUTOATNP; and SIMODE1, ~ENBUSFREE; poll_for_work: call clear_target_state; and SXFRCTL0, ~SPIOEN; if ((ahc->features & AHC_ULTRA2) != 0) { clr SCSIBUSL; } test SCSISEQ, ENSELO jnz poll_for_selection; if ((ahc->features & AHC_TWIN) != 0) { xor SBLKCTL,SELBUSB; /* Toggle to the other bus */ test SCSISEQ, ENSELO jnz poll_for_selection; } BEGIN_CRITICAL; cmp WAITING_SCBH,SCB_LIST_NULL jne start_waiting; END_CRITICAL; poll_for_work_loop: if ((ahc->features & AHC_TWIN) != 0) { xor SBLKCTL,SELBUSB; /* Toggle to the other bus */ } test SSTAT0, SELDO|SELDI jnz selection; test_queue: /* Has the driver posted any work for us? 
*/ BEGIN_CRITICAL; if ((ahc->features & AHC_QUEUE_REGS) != 0) { test QOFF_CTLSTA, SCB_AVAIL jz poll_for_work_loop; } else { mov A, QINPOS; cmp KERNEL_QINPOS, A je poll_for_work_loop; } mov ARG_1, NEXT_QUEUED_SCB; /* * We have at least one queued SCB now and we don't have any * SCBs in the list of SCBs awaiting selection. Allocate a * card SCB for the host's SCB and get to work on it. */ if ((ahc->flags & AHC_PAGESCBS) != 0) { mov ALLZEROS call get_free_or_disc_scb; } else { /* In the non-paging case, the SCBID == hardware SCB index */ mov SCBPTR, ARG_1; } or SEQ_FLAGS2, SCB_DMA; END_CRITICAL; dma_queued_scb: /* * DMA the SCB from host ram into the current SCB location. */ mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; mov ARG_1 call dma_scb; /* * Check one last time to see if this SCB was canceled * before we completed the DMA operation. If it was, * the QINFIFO next pointer will not match our saved * value. */ mov A, ARG_1; BEGIN_CRITICAL; cmp NEXT_QUEUED_SCB, A jne abort_qinscb; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { cmp SCB_TAG, A je . + 2; mvi SCB_MISMATCH call set_seqint; } mov NEXT_QUEUED_SCB, SCB_NEXT; mov SCB_NEXT,WAITING_SCBH; mov WAITING_SCBH, SCBPTR; if ((ahc->features & AHC_QUEUE_REGS) != 0) { mov NONE, SNSCB_QOFF; } else { inc QINPOS; } and SEQ_FLAGS2, ~SCB_DMA; start_waiting: /* * Start the first entry on the waiting SCB list. */ mov SCBPTR, WAITING_SCBH; call start_selection; END_CRITICAL; poll_for_selection: /* * Twin channel devices cannot handle things like SELTO * interrupts on the "background" channel. So, while * selecting, keep polling the current channel until * either a selection or reselection occurs. */ test SSTAT0, SELDO|SELDI jz poll_for_selection; selection: /* * We aren't expecting a bus free, so interrupt * the kernel driver if it happens. */ mvi CLRSINT1,CLRBUSFREE; if ((ahc->features & AHC_DT) == 0) { or SIMODE1, ENBUSFREE; } /* * Guard against a bus free after (re)selection * but prior to enabling the busfree interrupt. SELDI * and SELDO will be cleared in that case. */ test SSTAT0, SELDI|SELDO jz bus_free_sel; test SSTAT0,SELDO jnz select_out; select_in: if ((ahc->flags & AHC_TARGETROLE) != 0) { if ((ahc->flags & AHC_INITIATORROLE) != 0) { test SSTAT0, TARGET jz initiator_reselect; } mvi CLRSINT0, CLRSELDI; /* * We've just been selected. Assert BSY and * setup the phase for receiving messages * from the target. */ mvi SCSISIGO, P_MESGOUT|BSYO; /* * Setup the DMA for sending the identify and * command information. */ mvi SEQ_FLAGS, CMDPHASE_PENDING; mov A, TQINPOS; if ((ahc->features & AHC_CMD_CHAN) != 0) { mvi DINDEX, CCHADDR; mvi SHARED_DATA_ADDR call set_32byte_addr; mvi CCSCBCTL, CCSCBRESET; } else { mvi DINDEX, HADDR; mvi SHARED_DATA_ADDR call set_32byte_addr; mvi DFCNTRL, FIFORESET; } /* Initiator that selected us */ and SAVED_SCSIID, SELID_MASK, SELID; /* The Target ID we were selected at */ if ((ahc->features & AHC_MULTI_TID) != 0) { and A, OID, TARGIDIN; } else if ((ahc->features & AHC_ULTRA2) != 0) { and A, OID, SCSIID_ULTRA2; } else { and A, OID, SCSIID; } or SAVED_SCSIID, A; if ((ahc->features & AHC_TWIN) != 0) { test SBLKCTL, SELBUSB jz . + 2; or SAVED_SCSIID, TWIN_CHNLB; } if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, SAVED_SCSIID; } else { mov DFDAT, SAVED_SCSIID; } /* * If ATN isn't asserted, the target isn't interested * in talking to us. Go directly to bus free. * XXX SCSI-1 may require us to assume lun 0 if * ATN is false. 
*/ test SCSISIGI, ATNI jz target_busfree; /* * Watch ATN closely now as we pull in messages from the * initiator. We follow the guidelines from section 6.5 * of the SCSI-2 spec for what messages are allowed when. */ call target_inb; /* * Our first message must be one of IDENTIFY, ABORT, or * BUS_DEVICE_RESET. */ test DINDEX, MSG_IDENTIFYFLAG jz host_target_message_loop; /* Store for host */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, DINDEX; } else { mov DFDAT, DINDEX; } and SAVED_LUN, MSG_IDENTIFY_LUNMASK, DINDEX; /* Remember for disconnection decision */ test DINDEX, MSG_IDENTIFY_DISCFLAG jnz . + 2; /* XXX Honor per target settings too */ or SEQ_FLAGS, NO_DISCONNECT; test SCSISIGI, ATNI jz ident_messages_done; call target_inb; /* * If this is a tagged request, the tagged message must * immediately follow the identify. We test for a valid * tag message by seeing if it is >= MSG_SIMPLE_Q_TAG and * < MSG_IGN_WIDE_RESIDUE. */ add A, -MSG_SIMPLE_Q_TAG, DINDEX; jnc ident_messages_done_msg_pending; add A, -MSG_IGN_WIDE_RESIDUE, DINDEX; jc ident_messages_done_msg_pending; /* Store for host */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, DINDEX; } else { mov DFDAT, DINDEX; } /* * If the initiator doesn't feel like providing a tag number, * we've got a failed selection and must transition to bus * free. */ test SCSISIGI, ATNI jz target_busfree; /* * Store the tag for the host. */ call target_inb; if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, DINDEX; } else { mov DFDAT, DINDEX; } mov INITIATOR_TAG, DINDEX; or SEQ_FLAGS, TARGET_CMD_IS_TAGGED; ident_messages_done: /* Terminate the ident list */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mvi CCSCBRAM, SCB_LIST_NULL; } else { mvi DFDAT, SCB_LIST_NULL; } or SEQ_FLAGS, TARG_CMD_PENDING; test SEQ_FLAGS2, TARGET_MSG_PENDING jnz target_mesgout_pending; test SCSISIGI, ATNI jnz target_mesgout_continue; jmp target_ITloop; ident_messages_done_msg_pending: or SEQ_FLAGS2, TARGET_MSG_PENDING; jmp ident_messages_done; /* * Pushed message loop to allow the kernel to * run its own target mode message state engine. */ host_target_message_loop: mvi HOST_MSG_LOOP call set_seqint; cmp RETURN_1, EXIT_MSG_LOOP je target_ITloop; test SSTAT0, SPIORDY jz .; jmp host_target_message_loop; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { /* * Reselection has been initiated by a target. Make a note that we've been * reselected, but haven't seen an IDENTIFY message from the target yet. */ initiator_reselect: /* XXX test for and handle ONE BIT condition */ or SXFRCTL0, SPIOEN|CLRSTCNT|CLRCHN; and SAVED_SCSIID, SELID_MASK, SELID; if ((ahc->features & AHC_ULTRA2) != 0) { and A, OID, SCSIID_ULTRA2; } else { and A, OID, SCSIID; } or SAVED_SCSIID, A; if ((ahc->features & AHC_TWIN) != 0) { test SBLKCTL, SELBUSB jz . + 2; or SAVED_SCSIID, TWIN_CHNLB; } mvi CLRSINT0, CLRSELDI; jmp ITloop; } abort_qinscb: call add_scb_to_free_list; jmp poll_for_work_loop; BEGIN_CRITICAL; start_selection: /* * If bus reset interrupts have been disabled (from a previous * reset), re-enable them now. Resets are only of interest * when we have outstanding transactions, so we can safely * defer re-enabling the interrupt until, as an initiator, * we start sending out transactions again. */ test SIMODE1, ENSCSIRST jnz . + 3; mvi CLRSINT1, CLRSCSIRSTI; or SIMODE1, ENSCSIRST; if ((ahc->features & AHC_TWIN) != 0) { and SINDEX,~SELBUSB,SBLKCTL;/* Clear channel select bit */ test SCB_SCSIID, TWIN_CHNLB jz .
+ 2; or SINDEX, SELBUSB; mov SBLKCTL,SINDEX; /* select channel */ } initialize_scsiid: if ((ahc->features & AHC_ULTRA2) != 0) { mov SCSIID_ULTRA2, SCB_SCSIID; } else if ((ahc->features & AHC_TWIN) != 0) { and SCSIID, TWIN_TID|OID, SCB_SCSIID; } else { mov SCSIID, SCB_SCSIID; } if ((ahc->flags & AHC_TARGETROLE) != 0) { mov SINDEX, SCSISEQ_TEMPLATE; test SCB_CONTROL, TARGET_SCB jz . + 2; or SINDEX, TEMODE; mov SCSISEQ, SINDEX ret; } else { mov SCSISEQ, SCSISEQ_TEMPLATE ret; } END_CRITICAL; /* * Initialize transfer settings with SCB provided settings. */ set_transfer_settings: if ((ahc->features & AHC_ULTRA) != 0) { test SCB_CONTROL, ULTRAENB jz . + 2; or SXFRCTL0, FAST20; } /* * Initialize SCSIRATE with the appropriate value for this target. */ if ((ahc->features & AHC_ULTRA2) != 0) { bmov SCSIRATE, SCB_SCSIRATE, 2 ret; } else { mov SCSIRATE, SCB_SCSIRATE ret; } if ((ahc->flags & AHC_TARGETROLE) != 0) { /* * We carefully toggle SPIOEN to allow us to return the * message byte we receive so it can be checked prior to * driving REQ on the bus for the next byte. */ target_inb: /* * Drive REQ on the bus by enabling SCSI PIO. */ or SXFRCTL0, SPIOEN; /* Wait for the byte */ test SSTAT0, SPIORDY jz .; /* Prevent our read from triggering another REQ */ and SXFRCTL0, ~SPIOEN; /* Save latched contents */ mov DINDEX, SCSIDATL ret; } /* * After the selection, remove this SCB from the "waiting SCB" * list. This is achieved by simply moving our "next" pointer into * WAITING_SCBH. Our next pointer will be set to null the next time this * SCB is used, so don't bother with it now. */ select_out: /* Turn off the selection hardware */ and SCSISEQ, TEMODE|ENSELI|ENRSELI|ENAUTOATNP, SCSISEQ; mov SCBPTR, WAITING_SCBH; mov WAITING_SCBH,SCB_NEXT; mov SAVED_SCSIID, SCB_SCSIID; and SAVED_LUN, LID, SCB_LUN; call set_transfer_settings; if ((ahc->flags & AHC_TARGETROLE) != 0) { test SSTAT0, TARGET jz initiator_select; or SXFRCTL0, CLRSTCNT|CLRCHN; /* * Put tag in canonical location since not * all connections have an SCB. */ mov INITIATOR_TAG, SCB_TARGET_ITAG; /* * We've just re-selected an initiator. * Assert BSY and setup the phase for * sending our identify messages. */ mvi P_MESGIN|BSYO call change_phase; mvi CLRSINT0, CLRSELDO; /* * Start out with a simple identify message. */ or SAVED_LUN, MSG_IDENTIFYFLAG call target_outb; /* * If we are the result of a tagged command, send * a simple Q tag and the tag id. */ test SCB_CONTROL, TAG_ENB jz . + 3; mvi MSG_SIMPLE_Q_TAG call target_outb; mov SCB_TARGET_ITAG call target_outb; target_synccmd: /* * Now determine what phases the host wants us * to go through. */ mov SEQ_FLAGS, SCB_TARGET_PHASES; test SCB_CONTROL, MK_MESSAGE jz target_ITloop; mvi P_MESGIN|BSYO call change_phase; jmp host_target_message_loop; target_ITloop: /* * Start honoring ATN signals now that * we properly identified ourselves. */ test SCSISIGI, ATNI jnz target_mesgout; test SEQ_FLAGS, CMDPHASE_PENDING jnz target_cmdphase; test SEQ_FLAGS, DPHASE_PENDING jnz target_dphase; test SEQ_FLAGS, SPHASE_PENDING jnz target_sphase; /* * No more work to do. Either disconnect or not depending * on the state of NO_DISCONNECT.
*/ test SEQ_FLAGS, NO_DISCONNECT jz target_disconnect; mvi TARG_IMMEDIATE_SCB, SCB_LIST_NULL; call complete_target_cmd; if ((ahc->flags & AHC_PAGESCBS) != 0) { mov ALLZEROS call get_free_or_disc_scb; } cmp TARG_IMMEDIATE_SCB, SCB_LIST_NULL je .; mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; mov TARG_IMMEDIATE_SCB call dma_scb; call set_transfer_settings; or SXFRCTL0, CLRSTCNT|CLRCHN; jmp target_synccmd; target_mesgout: mvi SCSISIGO, P_MESGOUT|BSYO; target_mesgout_continue: call target_inb; target_mesgout_pending: and SEQ_FLAGS2, ~TARGET_MSG_PENDING; /* Local Processing goes here... */ jmp host_target_message_loop; target_disconnect: mvi P_MESGIN|BSYO call change_phase; test SEQ_FLAGS, DPHASE jz . + 2; mvi MSG_SAVEDATAPOINTER call target_outb; mvi MSG_DISCONNECT call target_outb; target_busfree_wait: /* Wait for preceding I/O session to complete. */ test SCSISIGI, ACKI jnz .; target_busfree: and SIMODE1, ~ENBUSFREE; if ((ahc->features & AHC_ULTRA2) != 0) { clr SCSIBUSL; } clr SCSISIGO; mvi LASTPHASE, P_BUSFREE; call complete_target_cmd; jmp poll_for_work; target_cmdphase: /* * The target has dropped ATN (doesn't want to abort or BDR) * and we believe this selection to be valid. If the ring * buffer for new commands is full, return busy or queue full. */ if ((ahc->features & AHC_HS_MAILBOX) != 0) { and A, HOST_TQINPOS, HS_MAILBOX; } else { mov A, KERNEL_TQINPOS; } cmp TQINPOS, A jne tqinfifo_has_space; mvi P_STATUS|BSYO call change_phase; test SEQ_FLAGS, TARGET_CMD_IS_TAGGED jz . + 3; mvi STATUS_QUEUE_FULL call target_outb; jmp target_busfree_wait; mvi STATUS_BUSY call target_outb; jmp target_busfree_wait; tqinfifo_has_space: mvi P_COMMAND|BSYO call change_phase; call target_inb; mov A, DINDEX; /* Store for host */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, A; } else { mov DFDAT, A; } /* * Determine the number of bytes to read * based on the command group code via table lookup. * We reuse the first 8 bytes of the TARG_SCSIRATE * BIOS array for this table. Count is one less than * the total for the command since we've already fetched * the first byte. */ shr A, CMD_GROUP_CODE_SHIFT; add SINDEX, CMDSIZE_TABLE, A; mov A, SINDIR; test A, 0xFF jz command_phase_done; or SXFRCTL0, SPIOEN; command_loop: test SSTAT0, SPIORDY jz .; cmp A, 1 jne . + 2; and SXFRCTL0, ~SPIOEN; /* Last Byte */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, SCSIDATL; } else { mov DFDAT, SCSIDATL; } dec A; test A, 0xFF jnz command_loop; command_phase_done: and SEQ_FLAGS, ~CMDPHASE_PENDING; jmp target_ITloop; target_dphase: /* * Data phases on the bus are from the * perspective of the initiator. The dma * code looks at LASTPHASE to determine the * data direction of the DMA. Toggle it for * target transfers. */ xor LASTPHASE, IOI, SCB_TARGET_DATA_DIR; or SCB_TARGET_DATA_DIR, BSYO call change_phase; jmp p_data; target_sphase: mvi P_STATUS|BSYO call change_phase; mvi LASTPHASE, P_STATUS; mov SCB_SCSI_STATUS call target_outb; /* XXX Watch for ATN or parity errors??? */ mvi SCSISIGO, P_MESGIN|BSYO; /* MSG_CMDCMPLT is 0, but we can't do an immediate of 0 */ mov ALLZEROS call target_outb; jmp target_busfree_wait; complete_target_cmd: test SEQ_FLAGS, TARG_CMD_PENDING jnz . 
+ 2; mov SCB_TAG jmp complete_post; if ((ahc->features & AHC_CMD_CHAN) != 0) { /* Set the valid byte */ mvi CCSCBADDR, 24; mov CCSCBRAM, ALLONES; mvi CCHCNT, 28; or CCSCBCTL, CCSCBEN|CCSCBRESET; test CCSCBCTL, CCSCBDONE jz .; clr CCSCBCTL; } else { /* Set the valid byte */ or DFCNTRL, FIFORESET; mvi DFWADDR, 3; /* Third 64bit word or byte 24 */ mov DFDAT, ALLONES; mvi 28 call set_hcnt; or DFCNTRL, HDMAEN|FIFOFLUSH; call dma_finish; } inc TQINPOS; mvi INTSTAT,CMDCMPLT ret; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { initiator_select: or SXFRCTL0, SPIOEN|CLRSTCNT|CLRCHN; /* * As soon as we get a successful selection, the target * should go into the message out phase since we have ATN * asserted. */ mvi MSG_OUT, MSG_IDENTIFYFLAG; mvi SEQ_FLAGS, NO_CDB_SENT; mvi CLRSINT0, CLRSELDO; /* * Main loop for information transfer phases. Wait for the * target to assert REQ before checking MSG, C/D and I/O for * the bus phase. */ mesgin_phasemis: ITloop: call phase_lock; mov A, LASTPHASE; test A, ~P_DATAIN jz p_data; cmp A,P_COMMAND je p_command; cmp A,P_MESGOUT je p_mesgout; cmp A,P_STATUS je p_status; cmp A,P_MESGIN je p_mesgin; mvi BAD_PHASE call set_seqint; jmp ITloop; /* Try reading the bus again. */ await_busfree: and SIMODE1, ~ENBUSFREE; mov NONE, SCSIDATL; /* Ack the last byte */ if ((ahc->features & AHC_ULTRA2) != 0) { clr SCSIBUSL; /* Prevent bit leakage during SELTO */ } and SXFRCTL0, ~SPIOEN; mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT; test SSTAT1,REQINIT|BUSFREE jz .; test SSTAT1, BUSFREE jnz poll_for_work; mvi MISSED_BUSFREE call set_seqint; } clear_target_state: /* * We assume that the kernel driver may reset us * at any time, even in the middle of a DMA, so * clear DFCNTRL too. */ clr DFCNTRL; or SXFRCTL0, CLRSTCNT|CLRCHN; /* * We don't know the target we will connect to, * so default to narrow transfers to avoid * parity problems. */ if ((ahc->features & AHC_ULTRA2) != 0) { bmov SCSIRATE, ALLZEROS, 2; } else { clr SCSIRATE; if ((ahc->features & AHC_ULTRA) != 0) { and SXFRCTL0, ~(FAST20); } } mvi LASTPHASE, P_BUSFREE; /* clear target specific flags */ mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT ret; sg_advance: clr A; /* add sizeof(struct scatter) */ add SCB_RESIDUAL_SGPTR[0],SG_SIZEOF; adc SCB_RESIDUAL_SGPTR[1],A; adc SCB_RESIDUAL_SGPTR[2],A; adc SCB_RESIDUAL_SGPTR[3],A ret; if ((ahc->features & AHC_CMD_CHAN) != 0) { disable_ccsgen: test CCSGCTL, CCSGEN jz return; test CCSGCTL, CCSGDONE jz .; disable_ccsgen_fetch_done: clr CCSGCTL; test CCSGCTL, CCSGEN jnz .; ret; idle_loop: /* * Do we need any more segments for this transfer? */ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz return; /* Did we just finish fetching segs? */ cmp CCSGCTL, CCSGEN|CCSGDONE je idle_sgfetch_complete; /* Are we actively fetching segments? */ test CCSGCTL, CCSGEN jnz return; /* * Do we have any prefetch left??? */ cmp CCSGADDR, SG_PREFETCH_CNT jne idle_sg_avail; /* * Need to fetch segments, but we can only do that * if the command channel is completely idle. Make * sure we don't have an SCB prefetch going on. */ test CCSCBCTL, CCSCBEN jnz return; /* * We fetch a "cacheline aligned" and sized amount of data * so we don't end up referencing a non-existent page. * Cacheline aligned is in quotes because the kernel will * set the prefetch amount to a reasonable level if the * cacheline size is unknown.
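 *
 * A C sketch of the alignment math used just below, assuming
 * SG_PREFETCH_CNT is a power of two (the two masks are derived from
 * it at build time; variable names are illustrative):
 *
 *	uint32_t sgptr  = residual_sgptr;		  // next S/G element
 *	uint32_t base   = sgptr & ~(SG_PREFETCH_CNT - 1); // SG_PREFETCH_ALIGN_MASK
 *	uint32_t offset = sgptr &  (SG_PREFETCH_CNT - 1); // SG_PREFETCH_ADDR_MASK
 *
 * SG_PREFETCH_CNT bytes are DMAed from "base" into CCSGRAM and entries
 * are consumed starting at "offset", so a fetch can never run past the
 * end of the page holding the S/G list.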
*/ mvi CCHCNT, SG_PREFETCH_CNT; and CCHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR; bmov CCHADDR[1], SCB_RESIDUAL_SGPTR[1], 3; mvi CCSGCTL, CCSGEN|CCSGRESET ret; idle_sgfetch_complete: call disable_ccsgen_fetch_done; and CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR; idle_sg_avail: if ((ahc->features & AHC_ULTRA2) != 0) { /* Does the hardware have space for another SG entry? */ test DFSTATUS, PRELOAD_AVAIL jz return; bmov HADDR, CCSGRAM, 7; bmov SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1; if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { mov SCB_RESIDUAL_DATACNT[3] call set_hhaddr; } call sg_advance; mov SINDEX, SCB_RESIDUAL_SGPTR[0]; test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 2; or SINDEX, LAST_SEG; mov SG_CACHE_PRE, SINDEX; /* Load the segment */ or DFCNTRL, PRELOADEN; } ret; } if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { /* * Calculate the trailing portion of this S/G segment that cannot * be transferred using memory write and invalidate PCI transactions. * XXX Can we optimize this for PCI writes only??? */ calc_mwi_residual: /* * If the ending address is on a cacheline boundary, * there is no need for an extra segment. */ mov A, HCNT[0]; add A, A, HADDR[0]; and A, CACHESIZE_MASK; test A, 0xFF jz return; /* * If the transfer is less than a cacheline, * there is no need for an extra segment. */ test HCNT[1], 0xFF jnz calc_mwi_residual_final; test HCNT[2], 0xFF jnz calc_mwi_residual_final; add NONE, INVERTED_CACHESIZE_MASK, HCNT[0]; jnc return; calc_mwi_residual_final: mov MWI_RESIDUAL, A; not A; inc A; add HCNT[0], A; adc HCNT[1], -1; adc HCNT[2], -1 ret; } p_data: test SEQ_FLAGS,NOT_IDENTIFIED|NO_CDB_SENT jz p_data_allowed; mvi PROTO_VIOLATION call set_seqint; p_data_allowed: if ((ahc->features & AHC_ULTRA2) != 0) { mvi DMAPARAMS, PRELOADEN|SCSIEN|HDMAEN; } else { mvi DMAPARAMS, WIDEODD|SCSIEN|SDMAEN|HDMAEN|FIFORESET; } test LASTPHASE, IOI jnz . + 2; or DMAPARAMS, DIRECTION; if ((ahc->features & AHC_CMD_CHAN) != 0) { /* We don't have any valid S/G elements */ mvi CCSGADDR, SG_PREFETCH_CNT; } test SEQ_FLAGS, DPHASE jz data_phase_initialize; /* * If we re-enter the data phase after going through another * phase, our transfer location has almost certainly been * corrupted by the intervening, non-data, transfers. Ask * the host driver to fix us up based on the transfer residual. */ mvi PDATA_REINIT call set_seqint; jmp data_phase_loop; data_phase_initialize: /* We have seen a data phase for the first time */ or SEQ_FLAGS, DPHASE; /* * Initialize the DMA address and counter from the SCB. * Also set SCB_RESIDUAL_SGPTR, including the LAST_SEG * flag in the highest byte of the data count. We cannot * modify the saved values in the SCB until we see a save * data pointers message. */ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { /* The lowest address byte must be loaded last.
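 *
 * Illustrated in C (write_haddr() is a hypothetical byte-wide register
 * accessor; the hardware acts on the whole address when byte 0 is
 * written, so the high bytes must already be in place):
 *
 *	write_haddr(3, addr >> 24);
 *	write_haddr(2, addr >> 16);
 *	write_haddr(1, addr >> 8);
 *	write_haddr(0, addr & 0xFF);	// low byte last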
*/ mov SCB_DATACNT[3] call set_hhaddr; } if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov HADDR, SCB_DATAPTR, 7; bmov SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5; } else { mvi DINDEX, HADDR; mvi SCB_DATAPTR call bcopy_7; mvi DINDEX, SCB_RESIDUAL_DATACNT + 3; mvi SCB_DATACNT + 3 call bcopy_5; } if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { call calc_mwi_residual; } and SCB_RESIDUAL_SGPTR[0], ~SG_FULL_RESID; if ((ahc->features & AHC_ULTRA2) == 0) { if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov STCNT, HCNT, 3; } else { call set_stcnt_from_hcnt; } } data_phase_loop: /* Guard against overruns */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_inbounds; /* * Turn on `Bit Bucket' mode, wait until the target takes * us to another phase, and then notify the host. */ and DMAPARAMS, DIRECTION; mov DFCNTRL, DMAPARAMS; or SXFRCTL1,BITBUCKET; if ((ahc->features & AHC_DT) == 0) { test SSTAT1,PHASEMIS jz .; } else { test SCSIPHASE, DATA_PHASE_MASK jnz .; } and SXFRCTL1, ~BITBUCKET; mvi DATA_OVERRUN call set_seqint; jmp ITloop; data_phase_inbounds: if ((ahc->features & AHC_ULTRA2) != 0) { mov SINDEX, SCB_RESIDUAL_SGPTR[0]; test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 2; or SINDEX, LAST_SEG; mov SG_CACHE_PRE, SINDEX; mov DFCNTRL, DMAPARAMS; ultra2_dma_loop: call idle_loop; /* * The transfer is complete if either the last segment * completes or the target changes phase. */ test SG_CACHE_SHADOW, LAST_SEG_DONE jnz ultra2_dmafinish; if ((ahc->features & AHC_DT) == 0) { if ((ahc->flags & AHC_TARGETROLE) != 0) { /* * As a target, we control the phases, * so ignore PHASEMIS. */ test SSTAT0, TARGET jnz ultra2_dma_loop; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { test SSTAT1,PHASEMIS jz ultra2_dma_loop; } } else { test DFCNTRL, SCSIEN jnz ultra2_dma_loop; } ultra2_dmafinish: /* * The transfer has terminated either due to a phase * change, and/or the completion of the last segment. * We have two goals here. Do as much other work * as possible while the data fifo drains on a read * and respond as quickly as possible to the standard * messages (save data pointers/disconnect and command * complete) that usually follow a data phase. */ if ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0) { /* * On chips with broken auto-flush, start * the flushing process now. We'll poke * the chip from time to time to keep the * flush process going as we complete the * data phase. */ or DFCNTRL, FIFOFLUSH; } /* * We assume that, even though data may still be * transferring to the host, that the SCSI side of * the DMA engine is now in a static state. This * allows us to update our notion of where we are * in this transfer. * * If, by chance, we stopped before being able * to fetch additional segments for this transfer, * yet the last S/G was completely exhausted, * call our idle loop until it is able to load * another segment. This will allow us to immediately * pick up on the next segment on the next data phase. * * If we happened to stop on the last segment, then * our residual information is still correct from * the idle loop and there is no need to perform * any fixups. */ ultra2_ensure_sg: test SG_CACHE_SHADOW, LAST_SEG jz ultra2_shvalid; /* Record if we've consumed all S/G entries */ test SSTAT2, SHVALID jnz residuals_correct; or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL; jmp residuals_correct; ultra2_shvalid: test SSTAT2, SHVALID jnz sgptr_fixup; call idle_loop; jmp ultra2_ensure_sg; sgptr_fixup: /* * Fixup the residual next S/G pointer (see the C sketch below).
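 *
 * (A minimal sketch of that fixup, with shadow_lo the SG_CACHE_SHADOW
 *  byte and resid[0..3] the SCB_RESIDUAL_SGPTR bytes; the variable
 *  names are illustrative only:)
 *
 *	if ((shadow_lo & 0x80) != 0 && (resid[0] & 0x80) == 0) {
 *		// stored low byte wrapped past the cached copy:
 *		// take one borrow back out of the upper bytes
 *		uint32_t hi = resid[1] | (resid[2] << 8) | (resid[3] << 16);
 *		hi--;
 *		resid[1] = hi; resid[2] = hi >> 8; resid[3] = hi >> 16;
 *	}
 *	resid[0] = shadow_lo & SG_ADDR_MASK;	// restore true low byte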
The S/G preload * feature of the chip allows us to load two elements * in addition to the currently active element. We * store the bottom byte of the next S/G pointer in * the SG_CACHEPTR register so we can restore the * correct value when the DMA completes. If the next * sg ptr value has advanced to the point where higher * bytes in the address have been affected, fix them * too. */ test SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done; test SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done; add SCB_RESIDUAL_SGPTR[1], -1; adc SCB_RESIDUAL_SGPTR[2], -1; adc SCB_RESIDUAL_SGPTR[3], -1; sgptr_fixup_done: and SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW; /* We are not the last seg */ and SCB_RESIDUAL_DATACNT[3], ~SG_LAST_SEG; residuals_correct: /* * Go ahead and shut down the DMA engine now. * In the future, we'll want to handle end of * transfer messages prior to doing this, but this * requires similar restructuring for pre-ULTRA2 * controllers. */ test DMAPARAMS, DIRECTION jnz ultra2_fifoempty; ultra2_fifoflush: if ((ahc->features & AHC_DT) == 0) { if ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0) { /* * On Rev A of the aic7890, the autoflush * feature doesn't function correctly. * Perform an explicit manual flush. During * a manual flush, the FIFOEMP bit becomes * true every time the PCI FIFO empties * regardless of the state of the SCSI FIFO. * It can take up to 4 clock cycles for the * SCSI FIFO to get data into the PCI FIFO * and for FIFOEMP to de-assert. Here we * guard against this condition by making * sure the FIFOEMP bit stays on for 5 full * clock cycles. */ or DFCNTRL, FIFOFLUSH; test DFSTATUS, FIFOEMP jz ultra2_fifoflush; test DFSTATUS, FIFOEMP jz ultra2_fifoflush; test DFSTATUS, FIFOEMP jz ultra2_fifoflush; test DFSTATUS, FIFOEMP jz ultra2_fifoflush; } test DFSTATUS, FIFOEMP jz ultra2_fifoflush; } else { /* * We enable the auto-ack feature on DT capable * controllers. This means that the controller may * have already transferred some overrun bytes into * the data FIFO and acked them on the bus. The only * way to detect this situation is to wait for * LAST_SEG_DONE to come true on a completed transfer * and then test to see if the data FIFO is non-empty. */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz ultra2_wait_fifoemp; test SG_CACHE_SHADOW, LAST_SEG_DONE jz .; /* * FIFOEMP can lag LAST_SEG_DONE. Wait a few * clocks before calling this an overrun. */ test DFSTATUS, FIFOEMP jnz ultra2_fifoempty; test DFSTATUS, FIFOEMP jnz ultra2_fifoempty; test DFSTATUS, FIFOEMP jnz ultra2_fifoempty; /* Overrun */ jmp data_phase_loop; ultra2_wait_fifoemp: test DFSTATUS, FIFOEMP jz .; } ultra2_fifoempty: /* Don't clobber an in-progress host data transfer */ test DFSTATUS, MREQPEND jnz ultra2_fifoempty; ultra2_dmahalt: and DFCNTRL, ~(SCSIEN|HDMAEN); test DFCNTRL, SCSIEN|HDMAEN jnz .; if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { /* * Keep HHADDR cleared for future, 32bit addressed * only, DMA operations. * * Due to bayonet style S/G handling, our residual * data must be "fixed up" once the transfer is halted. * Here we fixup the HSHADDR stored in the high byte * of the residual data cnt. By postponing the fixup, * we can batch the clearing of HADDR with the fixup. * If we halted on the last segment, the residual is * already correct. If we are not on the last * segment, copy the high address directly from HSHADDR. * We don't need to worry about maintaining the * SG_LAST_SEG flag as it will always be false in the * case where an update is required.
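 *
 * Roughly, in C (read_hshaddr() and write_hhaddr() are hypothetical
 * accessors for the high address byte selected via HADDLDSEL0):
 *
 *	if ((sg_cache_shadow & LAST_SEG) == 0)
 *		residual_datacnt[3] = read_hshaddr();	// fix up residual
 *	write_hhaddr(0);	// later 32-bit DMAs assume HHADDR == 0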
*/ or DSCOMMAND1, HADDLDSEL0; test SG_CACHE_SHADOW, LAST_SEG jnz . + 2; mov SCB_RESIDUAL_DATACNT[3], SHADDR; clr HADDR; and DSCOMMAND1, ~HADDLDSEL0; } } else { /* If we are the last SG block, tell the hardware. */ if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { test MWI_RESIDUAL, 0xFF jnz dma_mid_sg; } test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz dma_mid_sg; if ((ahc->flags & AHC_TARGETROLE) != 0) { test SSTAT0, TARGET jz dma_last_sg; if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0) { test DMAPARAMS, DIRECTION jz dma_mid_sg; } } dma_last_sg: and DMAPARAMS, ~WIDEODD; dma_mid_sg: /* Start DMA data transfer. */ mov DFCNTRL, DMAPARAMS; dma_loop: if ((ahc->features & AHC_CMD_CHAN) != 0) { call idle_loop; } test SSTAT0,DMADONE jnz dma_dmadone; test SSTAT1,PHASEMIS jz dma_loop; /* i.e. underrun */ dma_phasemis: /* * We will be "done" DMAing when the transfer count goes to * zero, or the target changes the phase (in light of this, * it makes sense that the DMA circuitry doesn't ACK when * PHASEMIS is active). If we are doing a SCSI->Host transfer, * the data FIFO should be flushed auto-magically on STCNT=0 * or a phase change, so just wait for FIFO empty status. */ dma_checkfifo: test DFCNTRL,DIRECTION jnz dma_fifoempty; dma_fifoflush: test DFSTATUS,FIFOEMP jz dma_fifoflush; dma_fifoempty: /* Don't clobber an in-progress host data transfer */ test DFSTATUS, MREQPEND jnz dma_fifoempty; /* * Now shut off the DMA and make sure that the DMA * hardware has actually stopped. Touching the DMA * counters, etc. while a DMA is active will result * in an ILLSADDR exception. */ dma_dmadone: and DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN); dma_halt: /* * Some revisions of the aic78XX have a problem where, if the * data fifo is full, but the PCI input latch is not empty, * HDMAEN cannot be cleared. The fix used here is to drain * the prefetched but unused data from the data fifo until * there is space for the input latch to drain. */ if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0) { mov NONE, DFDAT; } test DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz dma_halt; /* See if we have completed this last segment */ test STCNT[0], 0xff jnz data_phase_finish; test STCNT[1], 0xff jnz data_phase_finish; test STCNT[2], 0xff jnz data_phase_finish; /* * Advance the scatter-gather pointers if needed */ if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { test MWI_RESIDUAL, 0xFF jz no_mwi_resid; /* * Reload HADDR from SHADDR and setup the * count to be the size of our residual. */ if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov HADDR, SHADDR, 4; mov HCNT, MWI_RESIDUAL; bmov HCNT[1], ALLZEROS, 2; } else { mvi DINDEX, HADDR; mvi SHADDR call bcopy_4; mov MWI_RESIDUAL call set_hcnt; } clr MWI_RESIDUAL; jmp sg_load_done; no_mwi_resid: } test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz sg_load; or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL; jmp data_phase_finish; sg_load: /* * Load the next SG element's data address and length * into the DMA engine. If we don't have hardware * to perform a prefetch, we'll have to fetch the * segment from host memory first. */ if ((ahc->features & AHC_CMD_CHAN) != 0) { /* Wait for the idle loop to complete */ test CCSGCTL, CCSGEN jz . + 3; call idle_loop; test CCSGCTL, CCSGEN jnz . - 1; bmov HADDR, CCSGRAM, 7; /* * Workaround for flaky external SCB RAM * on certain aic7895 setups. It seems * unable to handle direct transfers from * S/G ram to certain SCB locations. */
*/ mov SINDEX, CCSGRAM; mov SCB_RESIDUAL_DATACNT[3], SINDEX; } else { if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { mov ALLZEROS call set_hhaddr; } mvi DINDEX, HADDR; mvi SCB_RESIDUAL_SGPTR call bcopy_4; mvi SG_SIZEOF call set_hcnt; or DFCNTRL, HDMAEN|DIRECTION|FIFORESET; call dma_finish; mvi DINDEX, HADDR; call dfdat_in_7; mov SCB_RESIDUAL_DATACNT[3], DFDAT; } if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { mov SCB_RESIDUAL_DATACNT[3] call set_hhaddr; /* * The lowest address byte must be loaded * last as it triggers the computation of * some items in the PCI block. The ULTRA2 * chips do this on PRELOAD. */ mov HADDR, HADDR; } if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { call calc_mwi_residual; } /* Point to the new next sg in memory */ call sg_advance; sg_load_done: if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov STCNT, HCNT, 3; } else { call set_stcnt_from_hcnt; } if ((ahc->flags & AHC_TARGETROLE) != 0) { test SSTAT0, TARGET jnz data_phase_loop; } } data_phase_finish: /* * If the target has left us in data phase, loop through * the dma code again. In the case of ULTRA2 adapters, * we should only loop if there is a data overrun. For * all other adapters, we'll loop after each S/G element * is loaded as well as if there is an overrun. */ if ((ahc->flags & AHC_TARGETROLE) != 0) { test SSTAT0, TARGET jnz data_phase_done; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { test SSTAT1, REQINIT jz .; if ((ahc->features & AHC_DT) == 0) { test SSTAT1,PHASEMIS jz data_phase_loop; } else { test SCSIPHASE, DATA_PHASE_MASK jnz data_phase_loop; } } data_phase_done: /* * After a DMA finishes, save the SG and STCNT residuals back into * the SCB. We use STCNT instead of HCNT, since it's a reflection * of how many bytes were transferred on the SCSI (as opposed to the * host) bus. */ if ((ahc->features & AHC_CMD_CHAN) != 0) { /* Kill off any pending prefetch */ call disable_ccsgen; } if ((ahc->features & AHC_ULTRA2) == 0) { /* * Clear the high address byte so that all other DMA * operations, which use 32bit addressing, can assume * HHADDR is 0. */ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { mov ALLZEROS call set_hhaddr; } } /* * Update our residual information before the information is * lost by some other type of SCSI I/O (e.g. PIO). If we have * transferred all data, no update is needed. * */ test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jnz residual_update_done; if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { if ((ahc->features & AHC_CMD_CHAN) != 0) { test MWI_RESIDUAL, 0xFF jz bmov_resid; } mov A, MWI_RESIDUAL; add SCB_RESIDUAL_DATACNT[0], A, STCNT[0]; clr A; adc SCB_RESIDUAL_DATACNT[1], A, STCNT[1]; adc SCB_RESIDUAL_DATACNT[2], A, STCNT[2]; clr MWI_RESIDUAL; if ((ahc->features & AHC_CMD_CHAN) != 0) { jmp . + 2; bmov_resid: bmov SCB_RESIDUAL_DATACNT, STCNT, 3; } } else if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov SCB_RESIDUAL_DATACNT, STCNT, 3; } else { mov SCB_RESIDUAL_DATACNT[0], STCNT[0]; mov SCB_RESIDUAL_DATACNT[1], STCNT[1]; mov SCB_RESIDUAL_DATACNT[2], STCNT[2]; } residual_update_done: /* * Since we've been through a data phase, the SCB_RESID* fields * are now initialized. Clear the full residual flag. 
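 *
 * With the MWI workaround active, the update above amounts to the
 * following C, on 24-bit quantities (illustrative names):
 *
 *	resid  = stcnt;			// bytes the SCSI side never moved
 *	resid += mwi_residual;		// plus the deferred non-MWI tail
 *	scb_residual_datacnt = resid;
 *	mwi_residual = 0;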
*/ and SCB_SGPTR[0], ~SG_FULL_RESID; if ((ahc->features & AHC_ULTRA2) != 0) { /* Clear the channel in case we return to data phase later */ or SXFRCTL0, CLRSTCNT|CLRCHN; or SXFRCTL0, CLRSTCNT|CLRCHN; } if ((ahc->flags & AHC_TARGETROLE) != 0) { test SEQ_FLAGS, DPHASE_PENDING jz ITloop; and SEQ_FLAGS, ~DPHASE_PENDING; /* * For data-in phases, wait for any pending acks from the * initiator before changing phase. We only need to * send Ignore Wide Residue messages for data-in phases. */ test DFCNTRL, DIRECTION jz target_ITloop; test SSTAT1, REQINIT jnz .; test SCB_LUN, SCB_XFERLEN_ODD jz target_ITloop; test SCSIRATE, WIDEXFER jz target_ITloop; /* * Issue an Ignore Wide Residue Message. */ mvi P_MESGIN|BSYO call change_phase; mvi MSG_IGN_WIDE_RESIDUE call target_outb; mvi 1 call target_outb; jmp target_ITloop; } else { jmp ITloop; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { /* * Command phase. Set up the DMA registers and let 'er rip. */ p_command: test SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay; mvi PROTO_VIOLATION call set_seqint; p_command_okay: if ((ahc->features & AHC_ULTRA2) != 0) { bmov HCNT[0], SCB_CDB_LEN, 1; bmov HCNT[1], ALLZEROS, 2; mvi SG_CACHE_PRE, LAST_SEG; } else if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov STCNT[0], SCB_CDB_LEN, 1; bmov STCNT[1], ALLZEROS, 2; } else { mov STCNT[0], SCB_CDB_LEN; clr STCNT[1]; clr STCNT[2]; } add NONE, -13, SCB_CDB_LEN; mvi SCB_CDB_STORE jnc p_command_embedded; p_command_from_host: if ((ahc->features & AHC_ULTRA2) != 0) { bmov HADDR[0], SCB_CDB_PTR, 4; mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN|DIRECTION); } else { if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov HADDR[0], SCB_CDB_PTR, 4; bmov HCNT, STCNT, 3; } else { mvi DINDEX, HADDR; mvi SCB_CDB_PTR call bcopy_4; mov SCB_CDB_LEN call set_hcnt; } mvi DFCNTRL, (SCSIEN|SDMAEN|HDMAEN|DIRECTION|FIFORESET); } jmp p_command_xfer; p_command_embedded: /* * The data fifo seems to require 4 byte aligned * transfers from the sequencer. Force this to * be the case by clearing HADDR[0] even though * we aren't going to touch host memory. */ clr HADDR[0]; if ((ahc->features & AHC_ULTRA2) != 0) { mvi DFCNTRL, (PRELOADEN|SCSIEN|DIRECTION); bmov DFDAT, SCB_CDB_STORE, 12; } else if ((ahc->features & AHC_CMD_CHAN) != 0) { if ((ahc->flags & AHC_SCB_BTT) != 0) { /* * On the 7895 the data FIFO will * get corrupted if you try to dump * data from external SCB memory into * the FIFO while it is enabled. So, * fill the fifo and then enable SCSI * transfers. */ mvi DFCNTRL, (DIRECTION|FIFORESET); } else { mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFORESET); } bmov DFDAT, SCB_CDB_STORE, 12; if ((ahc->flags & AHC_SCB_BTT) != 0) { mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFOFLUSH); } else { or DFCNTRL, FIFOFLUSH; } } else { mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFORESET); call copy_to_fifo_6; call copy_to_fifo_6; or DFCNTRL, FIFOFLUSH; } p_command_xfer: and SEQ_FLAGS, ~NO_CDB_SENT; if ((ahc->features & AHC_DT) == 0) { test SSTAT0, SDONE jnz . + 2; test SSTAT1, PHASEMIS jz . - 1; /* * Wait for our ACK to go away on its own * instead of being killed by SCSIEN getting cleared. */ test SCSISIGI, ACKI jnz .; } else { test DFCNTRL, SCSIEN jnz .; } test SSTAT0, SDONE jnz p_command_successful; /* * Don't allow a data phase if the command * was not fully transferred. */ or SEQ_FLAGS, NO_CDB_SENT; p_command_successful: and DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN); test DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz .; jmp ITloop; /* * Status phase. Wait for the data byte to appear, then read it * and store it into the SCB.
*/ p_status: test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation; p_status_okay: mov SCB_SCSI_STATUS, SCSIDATL; or SCB_CONTROL, STATUS_RCVD; jmp ITloop; /* * Message out phase. If MSG_OUT is MSG_IDENTIFYFLAG, build a full * identify message sequence and send it to the target. The host may * override this behavior by setting the MK_MESSAGE bit in the SCB * control byte. This will cause us to interrupt the host and allow * it to handle the message phase completely on its own. If the bit * associated with this target is set, we will also interrupt the host, * thereby allowing it to send a message on the next selection regardless * of the transaction being sent. * * If MSG_OUT is == HOST_MSG, also interrupt the host and take a message. * This is done to allow the host to send messages outside of an identify * sequence while protecting the sequencer from testing the MK_MESSAGE bit * on an SCB that might not be for the current nexus. (For example, a * BDR message in response to a bad reselection would leave us pointed to * an SCB that doesn't have anything to do with the current target). * * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag, * bus device reset). * * When there are no messages to send, MSG_OUT should be set to MSG_NOOP, * in case the target decides to put us in this phase for some strange * reason. */ p_mesgout_retry: /* Turn on ATN for the retry */ if ((ahc->features & AHC_DT) == 0) { or SCSISIGO, ATNO, LASTPHASE; } else { mvi SCSISIGO, ATNO; } p_mesgout: mov SINDEX, MSG_OUT; cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host; test SCB_CONTROL,MK_MESSAGE jnz host_message_loop; p_mesgout_identify: or SINDEX, MSG_IDENTIFYFLAG|DISCENB, SAVED_LUN; test SCB_CONTROL, DISCENB jnz . + 2; and SINDEX, ~DISCENB; /* * Send a tag message if TAG_ENB is set in the SCB control block. * Use SCB_TAG (the position in the kernel's SCB array) as the tag value. */ p_mesgout_tag: test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte; mov SCSIDATL, SINDEX; /* Send the identify message */ call phase_lock; cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; and SCSIDATL,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL; call phase_lock; cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; mov SCB_TAG jmp p_mesgout_onebyte; /* * Interrupt the driver, and allow it to handle this message * phase and any required retries. */ p_mesgout_from_host: cmp SINDEX, HOST_MSG jne p_mesgout_onebyte; jmp host_message_loop; p_mesgout_onebyte: mvi CLRSINT1, CLRATNO; mov SCSIDATL, SINDEX; /* * If the next bus phase after ATN drops is message out, it means * that the target is requesting that the last message(s) be resent. */ call phase_lock; cmp LASTPHASE, P_MESGOUT je p_mesgout_retry; p_mesgout_done: mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */ mov LAST_MSG, MSG_OUT; mvi MSG_OUT, MSG_NOOP; /* No message left */ jmp ITloop; /* * Message in phase. Bytes are read using Automatic PIO mode. */ p_mesgin: mvi ACCUM call inb_first; /* read the 1st message byte */ test A,MSG_IDENTIFYFLAG jnz mesgin_identify; cmp A,MSG_DISCONNECT je mesgin_disconnect; cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs; cmp ALLZEROS,A je mesgin_complete; cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs; cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_ign_wide_residue; cmp A,MSG_NOOP je mesgin_done; /* * Pushed message loop to allow the kernel to * run its own message state engine. To avoid an * extra nop instruction after signaling the kernel, * we perform the phase_lock before checking to see * if we should exit the loop and skip the phase_lock * in the ITloop.
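 *
 * In outline, the loop below is (a C sketch; set_seqint() pauses the
 * sequencer until the kernel acks, and the names mirror the labels
 * rather than any driver API):
 *
 *	do {
 *		set_seqint(HOST_MSG_LOOP);  // kernel runs its state engine
 *		phase_lock();		    // wait for REQ, latch LASTPHASE
 *	} while (return_1 != EXIT_MSG_LOOP);
 *	goto ITloop_plus_1;		    // skip ITloop's own phase_lock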
Performing back to back phase_locks * shouldn't hurt, but why do it twice... */ host_message_loop: mvi HOST_MSG_LOOP call set_seqint; call phase_lock; cmp RETURN_1, EXIT_MSG_LOOP je ITloop + 1; jmp host_message_loop; mesgin_ign_wide_residue: if ((ahc->features & AHC_WIDE) != 0) { test SCSIRATE, WIDEXFER jz mesgin_reject; /* Pull the residue byte */ mvi ARG_1 call inb_next; cmp ARG_1, 0x01 jne mesgin_reject; test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2; test SCB_LUN, SCB_XFERLEN_ODD jnz mesgin_done; mvi IGN_WIDE_RES call set_seqint; jmp mesgin_done; } mesgin_proto_violation: mvi PROTO_VIOLATION call set_seqint; jmp mesgin_done; mesgin_reject: mvi MSG_MESSAGE_REJECT call mk_mesg; mesgin_done: mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ jmp ITloop; /* * We received a "command complete" message. Put the SCB_TAG into the QOUTFIFO, * and trigger a completion interrupt. Before doing so, check to see if there * is a residual or the status byte is something other than STATUS_GOOD (0). * In either of these conditions, we upload the SCB back to the host so it can * process this information. In the case of a non-zero status byte, we * additionally interrupt the kernel driver synchronously, allowing it to * decide if sense should be retrieved. If the kernel driver wishes to request * sense, it will fill the kernel SCB with a request sense command, requeue * it to the QINFIFO and tell us not to post to the QOUTFIFO by setting * RETURN_1 to SEND_SENSE. */ mesgin_complete: /* * If ATN is raised, we still want to give the target a message. * Perhaps there was a parity error on this last message byte. * Either way, the target should take us to message out phase * and then attempt to complete the command again. We should use a * critical section here to guard against a timeout triggering * for this command and setting ATN while we are still processing * the completion. test SCSISIGI, ATNI jnz mesgin_done; */ /* * If we are identified and have successfully sent the CDB, * any status will do. Optimize this fast path. */ test SCB_CONTROL, STATUS_RCVD jz mesgin_proto_violation; test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz complete_accepted; /* * If the target never sent an identify message but instead went * to mesgin to give an invalid message, let the host abort us. */ test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation; /* * If we received good status but never successfully sent the * cdb, abort the command. */ test SCB_SCSI_STATUS,0xff jnz complete_accepted; test SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation; complete_accepted: /* * See if we attempted to deliver a message but the target ignored us. */ test SCB_CONTROL, MK_MESSAGE jz . + 2; mvi MKMSG_FAILED call set_seqint; /* * Check for residuals */ test SCB_SGPTR, SG_LIST_NULL jnz check_status;/* No xfer */ test SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */ test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb; check_status: test SCB_SCSI_STATUS,0xff jz complete; /* Good Status? */ upload_scb: or SCB_SGPTR, SG_RESID_VALID; mvi DMAPARAMS, FIFORESET; mov SCB_TAG call dma_scb; test SCB_SCSI_STATUS, 0xff jz complete; /* Just a residual?
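 * If so, the command completes normally. The overall completion
 * decision, condensed into a C sketch (illustrative predicate names;
 * the real tests are the bit tests above):
 *
 *	if (!sg_list_null && (sg_full_resid || !resid_list_null))
 *		upload_scb();			// host wants residual info
 *	if (scsi_status != 0) {
 *		set_seqint(BAD_STATUS);		// sense recovery decision
 *		if (return_1 == SEND_SENSE)
 *			goto await_busfree;	// requeued, no QOUTFIFO post
 *	}
 *	complete_post(scb_tag);			// normal completion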
*/ mvi BAD_STATUS call set_seqint; /* let driver know */ cmp RETURN_1, SEND_SENSE jne complete; call add_scb_to_free_list; jmp await_busfree; complete: mov SCB_TAG call complete_post; jmp await_busfree; } complete_post: /* Post the SCBID in SINDEX and issue an interrupt */ call add_scb_to_free_list; mov ARG_1, SINDEX; if ((ahc->features & AHC_QUEUE_REGS) != 0) { mov A, SDSCB_QOFF; } else { mov A, QOUTPOS; } mvi QOUTFIFO_OFFSET call post_byte_setup; mov ARG_1 call post_byte; if ((ahc->features & AHC_QUEUE_REGS) == 0) { inc QOUTPOS; } mvi INTSTAT,CMDCMPLT ret; if ((ahc->flags & AHC_INITIATORROLE) != 0) { /* * Is it a disconnect message? Set a flag in the SCB to remind us * and await the bus going free. If this is an untagged transaction * store the SCB id for it in our untagged target table for lookup on * a reselection. */ mesgin_disconnect: /* * If ATN is raised, we still want to give the target a message. * Perhaps there was a parity error on this last message byte * or we want to abort this command. Either way, the target * should take us to message out phase and then attempt to * disconnect again. * XXX - Wait for more testing. test SCSISIGI, ATNI jnz mesgin_done; */ test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jnz mesgin_proto_violation; or SCB_CONTROL,DISCONNECTED; if ((ahc->flags & AHC_PAGESCBS) != 0) { call add_scb_to_disc_list; } test SCB_CONTROL, TAG_ENB jnz await_busfree; mov ARG_1, SCB_TAG; and SAVED_LUN, LID, SCB_LUN; mov SCB_SCSIID call set_busy_target; jmp await_busfree; /* * Save data pointers message: * Copying RAM values back to SCB, for Save Data Pointers message, but * only if we've actually been into a data phase to change them. This * protects against bogus data in scratch ram and the residual counts * since they are only initialized when we go into data_in or data_out. * Ack the message as soon as possible. For chips without S/G pipelining, * we can only ack the message after SHADDR has been saved. On these * chips, SHADDR increments with every bus transaction, even PIO. */ mesgin_sdptrs: if ((ahc->features & AHC_ULTRA2) != 0) { mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ test SEQ_FLAGS, DPHASE jz ITloop; } else { test SEQ_FLAGS, DPHASE jz mesgin_done; } /* * If we are asked to save our position at the end of the * transfer, just mark us at the end rather than perform a * full save. */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz mesgin_sdptrs_full; or SCB_SGPTR, SG_LIST_NULL; if ((ahc->features & AHC_ULTRA2) != 0) { jmp ITloop; } else { jmp mesgin_done; } mesgin_sdptrs_full: /* * The SCB_SGPTR becomes the next one we'll download, * and the SCB_DATAPTR becomes the current SHADDR. * Use the residual number since STCNT is corrupted by * any message transfer. */ if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov SCB_DATAPTR, SHADDR, 4; if ((ahc->features & AHC_ULTRA2) == 0) { mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ } bmov SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8; } else { mvi DINDEX, SCB_DATAPTR; mvi SHADDR call bcopy_4; mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ mvi SCB_RESIDUAL_DATACNT call bcopy_8; } jmp ITloop; /* * Restore pointers message? Data pointers are recopied from the * SCB anytime we enter a data phase for the first time, so all * we need to do is clear the DPHASE flag and let the data phase * code do the rest. We also reset/reallocate the FIFO to make * sure we have a clean start for the next data or command phase. */ mesgin_rdptrs: and SEQ_FLAGS, ~DPHASE; /* * We'll reload them * the next time through * the dataphase.
*/ or SXFRCTL0, CLRSTCNT|CLRCHN; jmp mesgin_done; /* * Index into our Busy Target table. SINDEX and DINDEX are modified * upon return. SCBPTR may be modified by this action. */ set_busy_target: shr DINDEX, 4, SINDEX; if ((ahc->flags & AHC_SCB_BTT) != 0) { mov SCBPTR, SAVED_LUN; add DINDEX, SCB_64_BTT; } else { add DINDEX, BUSY_TARGETS; } mov DINDIR, ARG_1 ret; /* * Identify message? For a reconnecting target, this tells us the lun * that the reconnection is for - find the correct SCB and switch to it, * clearing the "disconnected" bit so we don't "find" it by accident later. */ mesgin_identify: /* * Determine whether a target is using tagged or non-tagged * transactions by first looking at the transaction stored in * the busy target array. If there is no untagged transaction * for this target or the transaction is for a different lun, then * this must be a tagged transaction. */ shr SINDEX, 4, SAVED_SCSIID; and SAVED_LUN, MSG_IDENTIFY_LUNMASK, A; if ((ahc->flags & AHC_SCB_BTT) != 0) { add SINDEX, SCB_64_BTT; mov SCBPTR, SAVED_LUN; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { add NONE, -SCB_64_BTT, SINDEX; jc . + 2; mvi INTSTAT, OUT_OF_RANGE; nop; add NONE, -(SCB_64_BTT + 16), SINDEX; jnc . + 2; mvi INTSTAT, OUT_OF_RANGE; nop; } } else { add SINDEX, BUSY_TARGETS; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { add NONE, -BUSY_TARGETS, SINDEX; jc . + 2; mvi INTSTAT, OUT_OF_RANGE; nop; add NONE, -(BUSY_TARGETS + 16), SINDEX; jnc . + 2; mvi INTSTAT, OUT_OF_RANGE; nop; } } mov ARG_1, SINDIR; cmp ARG_1, SCB_LIST_NULL je snoop_tag; if ((ahc->flags & AHC_PAGESCBS) != 0) { mov ARG_1 call findSCB; } else { mov SCBPTR, ARG_1; } if ((ahc->flags & AHC_SCB_BTT) != 0) { jmp setup_SCB_id_lun_okay; } else { /* * We only allow one untagged command per-target * at a time. So, if the lun doesn't match, look * for a tag message. */ and A, LID, SCB_LUN; cmp SAVED_LUN, A je setup_SCB_id_lun_okay; if ((ahc->flags & AHC_PAGESCBS) != 0) { /* * findSCB removes the SCB from the * disconnected list, so we must replace * it there should this SCB be for another * lun. */ call cleanup_scb; } } /* * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message. * If we get one, we use the tag returned to find the proper * SCB. With SCB paging, we must search for non-tagged * transactions since the SCB may exist in any slot. If we're not * using SCB paging, we can use the tag as the direct index to the * SCB. */ snoop_tag: if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x80; } mov NONE,SCSIDATL; /* ACK Identify MSG */ call phase_lock; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x1; } cmp LASTPHASE, P_MESGIN jne not_found; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x2; } cmp SCSIBUSL,MSG_SIMPLE_Q_TAG jne not_found; get_tag: if ((ahc->flags & AHC_PAGESCBS) != 0) { mvi ARG_1 call inb_next; /* tag value */ mov ARG_1 call findSCB; } else { mvi ARG_1 call inb_next; /* tag value */ mov SCBPTR, ARG_1; } /* * Ensure that the SCB the tag points to is for * an SCB transaction to the reconnecting target. 
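 *
 * The checks that follow, condensed into C (field names follow the SCB
 * layout used here; sketch only):
 *
 *	if (scb->scsiid != saved_scsiid ||
 *	    (scb->lun & LID) != saved_lun ||
 *	    (scb->control & DISCONNECTED) == 0)
 *		goto not_found_cleanup_scb;	// wrong nexus: reject it
 *	scb->control &= ~DISCONNECTED;		// reconnection accepted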
*/ setup_SCB: if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x4; } mov A, SCB_SCSIID; cmp SAVED_SCSIID, A jne not_found_cleanup_scb; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x8; } setup_SCB_id_okay: and A, LID, SCB_LUN; cmp SAVED_LUN, A jne not_found_cleanup_scb; setup_SCB_id_lun_okay: if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x10; } test SCB_CONTROL,DISCONNECTED jz not_found_cleanup_scb; and SCB_CONTROL,~DISCONNECTED; test SCB_CONTROL, TAG_ENB jnz setup_SCB_tagged; if ((ahc->flags & AHC_SCB_BTT) != 0) { mov A, SCBPTR; } mvi ARG_1, SCB_LIST_NULL; mov SAVED_SCSIID call set_busy_target; if ((ahc->flags & AHC_SCB_BTT) != 0) { mov SCBPTR, A; } setup_SCB_tagged: clr SEQ_FLAGS; /* make note of IDENTIFY */ call set_transfer_settings; /* See if the host wants to send a message upon reconnection */ test SCB_CONTROL, MK_MESSAGE jz mesgin_done; mvi HOST_MSG call mk_mesg; jmp mesgin_done; not_found_cleanup_scb: if ((ahc->flags & AHC_PAGESCBS) != 0) { call cleanup_scb; } not_found: mvi NO_MATCH call set_seqint; jmp mesgin_done; mk_mesg: if ((ahc->features & AHC_DT) == 0) { or SCSISIGO, ATNO, LASTPHASE; } else { mvi SCSISIGO, ATNO; } mov MSG_OUT,SINDEX ret; /* * Functions to read data in Automatic PIO mode. * * According to Adaptec's documentation, an ACK is not sent on input from * the target until SCSIDATL is read from. So we wait until SCSIDATL is * latched (the usual way), then read the data byte directly off the bus * using SCSIBUSL. When we have pulled the ATN line, or we just want to * acknowledge the byte, then we do a dummy read from SCSIDATL. The SCSI * spec guarantees that the target will hold the data byte on the bus until * we send our ACK. * * The assumption here is that these are called in a particular sequence, * and that REQ is already set when inb_first is called. inb_{first,next} * use the same calling convention as inb. */ inb_next_wait_perr: mvi PERR_DETECTED call set_seqint; jmp inb_next_wait; inb_next: mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ inb_next_wait: /* * If there is a parity error, wait for the kernel to * see the interrupt and prepare our message response * before continuing. */ test SSTAT1, REQINIT jz inb_next_wait; test SSTAT1, SCSIPERR jnz inb_next_wait_perr; inb_next_check_phase: and LASTPHASE, PHASE_MASK, SCSISIGI; cmp LASTPHASE, P_MESGIN jne mesgin_phasemis; inb_first: mov DINDEX,SINDEX; mov DINDIR,SCSIBUSL ret; /*read byte directly from bus*/ inb_last: mov NONE,SCSIDATL ret; /*dummy read from latch to ACK*/ } if ((ahc->flags & AHC_TARGETROLE) != 0) { /* * Change to a new phase. If we are changing the state of the I/O signal, * from out to in, wait an additional data release delay before continuing. */ change_phase: /* Wait for preceding I/O session to complete. */ test SCSISIGI, ACKI jnz .; /* Change the phase */ and DINDEX, IOI, SCSISIGI; mov SCSISIGO, SINDEX; and A, IOI, SINDEX; /* * If the data direction has changed, from * out (initiator driving) to in (target driving), * we must wait at least a data release delay plus * the normal bus settle delay. [SCSI III SPI 10.11.0] */ cmp DINDEX, A je change_phase_wait; test SINDEX, IOI jz change_phase_wait; call change_phase_wait; change_phase_wait: nop; nop; nop; nop ret; /* * Send a byte to an initiator in Automatic PIO mode. */ target_outb: or SXFRCTL0, SPIOEN; test SSTAT0, SPIORDY jz .; mov SCSIDATL, SINDEX; test SSTAT0, SPIORDY jz .; and SXFRCTL0, ~SPIOEN ret; } /* * Locate a disconnected SCB by SCBID.
Upon return, SCBPTR and SINDEX will * be set to the position of the SCB. If the SCB cannot be found locally, * it will be paged in from host memory. RETURN_2 stores the address of the * preceding SCB in the disconnected list which can be used to speed up * removal of the found SCB from the disconnected list. */ if ((ahc->flags & AHC_PAGESCBS) != 0) { BEGIN_CRITICAL; findSCB: mov A, SINDEX; /* Tag passed in SINDEX */ cmp DISCONNECTED_SCBH, SCB_LIST_NULL je findSCB_notFound; mov SCBPTR, DISCONNECTED_SCBH; /* Initialize SCBPTR */ mvi ARG_2, SCB_LIST_NULL; /* Head of list */ jmp findSCB_loop; findSCB_next: cmp SCB_NEXT, SCB_LIST_NULL je findSCB_notFound; mov ARG_2, SCBPTR; mov SCBPTR,SCB_NEXT; findSCB_loop: cmp SCB_TAG, A jne findSCB_next; rem_scb_from_disc_list: cmp ARG_2, SCB_LIST_NULL je rHead; mov DINDEX, SCB_NEXT; mov SINDEX, SCBPTR; mov SCBPTR, ARG_2; mov SCB_NEXT, DINDEX; mov SCBPTR, SINDEX ret; rHead: mov DISCONNECTED_SCBH,SCB_NEXT ret; END_CRITICAL; findSCB_notFound: /* * We didn't find it. Page in the SCB. */ mov ARG_1, A; /* Save tag */ mov ALLZEROS call get_free_or_disc_scb; mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; mov ARG_1 jmp dma_scb; } /* * Prepare the hardware to post a byte to host memory given an * index of (A + (256 * SINDEX)) and a base address of SHARED_DATA_ADDR. */ post_byte_setup: mov ARG_2, SINDEX; if ((ahc->features & AHC_CMD_CHAN) != 0) { mvi DINDEX, CCHADDR; mvi SHARED_DATA_ADDR call set_1byte_addr; mvi CCHCNT, 1; mvi CCSCBCTL, CCSCBRESET ret; } else { mvi DINDEX, HADDR; mvi SHARED_DATA_ADDR call set_1byte_addr; mvi 1 call set_hcnt; mvi DFCNTRL, FIFORESET ret; } post_byte: if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov CCSCBRAM, SINDEX, 1; or CCSCBCTL, CCSCBEN|CCSCBRESET; test CCSCBCTL, CCSCBDONE jz .; clr CCSCBCTL ret; } else { mov DFDAT, SINDEX; or DFCNTRL, HDMAEN|FIFOFLUSH; jmp dma_finish; } phase_lock_perr: mvi PERR_DETECTED call set_seqint; phase_lock: /* * If there is a parity error, wait for the kernel to * see the interrupt and prepare our message response * before continuing. */ test SSTAT1, REQINIT jz phase_lock; test SSTAT1, SCSIPERR jnz phase_lock_perr; phase_lock_latch_phase: if ((ahc->features & AHC_DT) == 0) { and SCSISIGO, PHASE_MASK, SCSISIGI; } and LASTPHASE, PHASE_MASK, SCSISIGI ret; if ((ahc->features & AHC_CMD_CHAN) == 0) { set_hcnt: mov HCNT[0], SINDEX; clear_hcnt: clr HCNT[1]; clr HCNT[2] ret; set_stcnt_from_hcnt: mov STCNT[0], HCNT[0]; mov STCNT[1], HCNT[1]; mov STCNT[2], HCNT[2] ret; bcopy_8: mov DINDIR, SINDIR; bcopy_7: mov DINDIR, SINDIR; mov DINDIR, SINDIR; bcopy_5: mov DINDIR, SINDIR; bcopy_4: mov DINDIR, SINDIR; bcopy_3: mov DINDIR, SINDIR; mov DINDIR, SINDIR; mov DINDIR, SINDIR ret; } if ((ahc->flags & AHC_TARGETROLE) != 0) { /* * Setup addr assuming that A is an index into * an array of 32byte objects, SINDEX contains * the base address of that array, and DINDEX * contains the base address of the location * to store the indexed address. */ set_32byte_addr: shr ARG_2, 3, A; shl A, 5; jmp set_1byte_addr; } /* * Setup addr assuming that A is an index into * an array of 64byte objects, SINDEX contains * the base address of that array, and DINDEX * contains the base address of the location * to store the indexed address. */ set_64byte_addr: shr ARG_2, 2, A; shl A, 6; /* * Setup addr assuming that A + (ARG_2 * 256) is an * index into an array of 1byte objects, SINDEX contains * the base address of that array, and DINDEX contains * the base address of the location to store the computed * address. 
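 *
 * Equivalently, in C: the four byte-wide adds with carry below store,
 * little-endian (illustrative types):
 *
 *	uint32_t addr = base + A + 256 * ARG_2;
 *	dst[0] = addr;       dst[1] = addr >> 8;
 *	dst[2] = addr >> 16; dst[3] = addr >> 24;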
*/ set_1byte_addr: add DINDIR, A, SINDIR; mov A, ARG_2; adc DINDIR, A, SINDIR; clr A; adc DINDIR, A, SINDIR; adc DINDIR, A, SINDIR ret; /* * Either post or fetch an SCB from host memory based on the * DIRECTION bit in DMAPARAMS. The host SCB index is in SINDEX. */ dma_scb: mov A, SINDEX; if ((ahc->features & AHC_CMD_CHAN) != 0) { mvi DINDEX, CCHADDR; mvi HSCB_ADDR call set_64byte_addr; mov CCSCBPTR, SCBPTR; test DMAPARAMS, DIRECTION jz dma_scb_tohost; if ((ahc->flags & AHC_SCB_BTT) != 0) { mvi CCHCNT, SCB_DOWNLOAD_SIZE_64; } else { mvi CCHCNT, SCB_DOWNLOAD_SIZE; } mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET; cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN|CCSCBDIR jne .; jmp dma_scb_finish; dma_scb_tohost: mvi CCHCNT, SCB_UPLOAD_SIZE; if ((ahc->features & AHC_ULTRA2) == 0) { mvi CCSCBCTL, CCSCBRESET; bmov CCSCBRAM, SCB_BASE, SCB_UPLOAD_SIZE; or CCSCBCTL, CCSCBEN|CCSCBRESET; test CCSCBCTL, CCSCBDONE jz .; } else if ((ahc->bugs & AHC_SCBCHAN_UPLOAD_BUG) != 0) { mvi CCSCBCTL, CCARREN|CCSCBRESET; cmp CCSCBCTL, ARRDONE|CCARREN jne .; mvi CCHCNT, SCB_UPLOAD_SIZE; mvi CCSCBCTL, CCSCBEN|CCSCBRESET; cmp CCSCBCTL, CCSCBDONE|CCSCBEN jne .; } else { mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBRESET; cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN jne .; } dma_scb_finish: clr CCSCBCTL; test CCSCBCTL, CCARREN|CCSCBEN jnz .; ret; } else { mvi DINDEX, HADDR; mvi HSCB_ADDR call set_64byte_addr; mvi SCB_DOWNLOAD_SIZE call set_hcnt; mov DFCNTRL, DMAPARAMS; test DMAPARAMS, DIRECTION jnz dma_scb_fromhost; /* Fill it with the SCB data */ copy_scb_tofifo: mvi SINDEX, SCB_BASE; add A, SCB_DOWNLOAD_SIZE, SINDEX; copy_scb_tofifo_loop: call copy_to_fifo_8; cmp SINDEX, A jne copy_scb_tofifo_loop; or DFCNTRL, HDMAEN|FIFOFLUSH; jmp dma_finish; dma_scb_fromhost: mvi DINDEX, SCB_BASE; if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0) { /* * The PCI module will only issue a PCI * retry if the data FIFO is empty. If the * host disconnects in the middle of a * transfer, we must empty the fifo of all * available data to force the chip to * continue the transfer. This does not * happen for SCSI transfers as the SCSI module * will drain the FIFO as data are made available. * When the hang occurs, we know that a multiple * of 8 bytes is in the FIFO because the PCI * module has an 8 byte input latch that only * dumps to the FIFO when HCNT == 0 or the * latch is full. */ clr A; /* Wait for at least 8 bytes of data to arrive. */ dma_scb_hang_fifo: test DFSTATUS, FIFOQWDEMP jnz dma_scb_hang_fifo; dma_scb_hang_wait: test DFSTATUS, MREQPEND jnz dma_scb_hang_wait; test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; /* * The PCI module no longer intends to perform * a PCI transaction. Drain the fifo. */ dma_scb_hang_dma_drain_fifo: not A, HCNT; add A, SCB_DOWNLOAD_SIZE+SCB_BASE+1; and A, ~0x7; mov DINDIR,DFDAT; cmp DINDEX, A jne . - 1; cmp DINDEX, SCB_DOWNLOAD_SIZE+SCB_BASE je dma_finish_nowait; /* Restore A as the lines left to transfer. 
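 *
 * In C, the drain loop above stops at (hcnt being the bytes the host
 * DMA still owes; the sequencer does this arithmetic mod 256, so this
 * is illustrative only):
 *
 *	stop = (SCB_BASE + SCB_DOWNLOAD_SIZE - hcnt) & ~0x7;
 *	while (dindex != stop)
 *		scb[dindex++] = dfdat;	// pull whole 8-byte latches only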
*/ add A, -SCB_BASE, DINDEX; shr A, 3; jmp dma_scb_hang_fifo; dma_scb_hang_dma_done: and DFCNTRL, ~HDMAEN; test DFCNTRL, HDMAEN jnz .; add SEQADDR0, A; } else { call dma_finish; } call dfdat_in_8; call dfdat_in_8; call dfdat_in_8; dfdat_in_8: mov DINDIR,DFDAT; dfdat_in_7: mov DINDIR,DFDAT; mov DINDIR,DFDAT; mov DINDIR,DFDAT; mov DINDIR,DFDAT; mov DINDIR,DFDAT; dfdat_in_2: mov DINDIR,DFDAT; mov DINDIR,DFDAT ret; } copy_to_fifo_8: mov DFDAT,SINDIR; mov DFDAT,SINDIR; copy_to_fifo_6: mov DFDAT,SINDIR; copy_to_fifo_5: mov DFDAT,SINDIR; copy_to_fifo_4: mov DFDAT,SINDIR; mov DFDAT,SINDIR; mov DFDAT,SINDIR; mov DFDAT,SINDIR ret; /* * Wait for DMA from host memory to data FIFO to complete, then disable * DMA and wait for it to acknowledge that it's off. */ dma_finish: test DFSTATUS,HDONE jz dma_finish; dma_finish_nowait: /* Turn off DMA */ and DFCNTRL, ~HDMAEN; test DFCNTRL, HDMAEN jnz .; ret; /* * Restore an SCB that failed to match an incoming reselection * to the correct/safe state. If the SCB is for a disconnected * transaction, it must be returned to the disconnected list. * If it is not in the disconnected state, it must be free. */ cleanup_scb: if ((ahc->flags & AHC_PAGESCBS) != 0) { test SCB_CONTROL,DISCONNECTED jnz add_scb_to_disc_list; } add_scb_to_free_list: if ((ahc->flags & AHC_PAGESCBS) != 0) { BEGIN_CRITICAL; mov SCB_NEXT, FREE_SCBH; mvi SCB_TAG, SCB_LIST_NULL; mov FREE_SCBH, SCBPTR ret; END_CRITICAL; } else { mvi SCB_TAG, SCB_LIST_NULL ret; } if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { set_hhaddr: or DSCOMMAND1, HADDLDSEL0; and HADDR, SG_HIGH_ADDR_BITS, SINDEX; and DSCOMMAND1, ~HADDLDSEL0 ret; } if ((ahc->flags & AHC_PAGESCBS) != 0) { get_free_or_disc_scb: BEGIN_CRITICAL; cmp FREE_SCBH, SCB_LIST_NULL jne dequeue_free_scb; cmp DISCONNECTED_SCBH, SCB_LIST_NULL jne dequeue_disc_scb; return_error: mvi NO_FREE_SCB call set_seqint; mvi SINDEX, SCB_LIST_NULL ret; dequeue_disc_scb: mov SCBPTR, DISCONNECTED_SCBH; mov DISCONNECTED_SCBH, SCB_NEXT; END_CRITICAL; mvi DMAPARAMS, FIFORESET; mov SCB_TAG jmp dma_scb; BEGIN_CRITICAL; dequeue_free_scb: mov SCBPTR, FREE_SCBH; mov FREE_SCBH, SCB_NEXT ret; END_CRITICAL; add_scb_to_disc_list: /* * Link this SCB into the DISCONNECTED list. This list holds the * candidates for paging out an SCB if one is needed for a new command. * Modifying the disconnected list is a critical (pause disabled) section. */ BEGIN_CRITICAL; mov SCB_NEXT, DISCONNECTED_SCBH; mov DISCONNECTED_SCBH, SCBPTR ret; END_CRITICAL; } set_seqint: mov INTSTAT, SINDEX; nop; return: ret; Index: head/sys/dev/drm2/drm_fb_helper.c =================================================================== --- head/sys/dev/drm2/drm_fb_helper.c (revision 329872) +++ head/sys/dev/drm2/drm_fb_helper.c (revision 329873) @@ -1,1490 +1,1490 @@ /* * Copyright (c) 2006-2009 Red Hat Inc. * Copyright (c) 2006-2008 Intel Corporation * Copyright (c) 2007 Dave Airlie * * DRM framebuffer helper functions * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as
It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. * * Authors: * Dave Airlie * Jesse Barnes */ #include __FBSDID("$FreeBSD$"); #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include MODULE_AUTHOR("David Airlie, Jesse Barnes"); MODULE_DESCRIPTION("DRM KMS helper"); MODULE_LICENSE("GPL and additional rights"); static DRM_LIST_HEAD(kernel_fb_helper_list); #include #include #include struct vt_kms_softc { struct drm_fb_helper *fb_helper; struct task fb_mode_task; }; /* Call restore out of vt(9) locks. */ static void vt_restore_fbdev_mode(void *arg, int pending) { struct drm_fb_helper *fb_helper; struct vt_kms_softc *sc; sc = (struct vt_kms_softc *)arg; fb_helper = sc->fb_helper; sx_xlock(&fb_helper->dev->mode_config.mutex); drm_fb_helper_restore_fbdev_mode(fb_helper); sx_xunlock(&fb_helper->dev->mode_config.mutex); } static int vt_kms_postswitch(void *arg) { struct vt_kms_softc *sc; sc = (struct vt_kms_softc *)arg; if (!kdb_active && panicstr == NULL) taskqueue_enqueue(taskqueue_thread, &sc->fb_mode_task); else drm_fb_helper_restore_fbdev_mode(sc->fb_helper); return (0); } struct fb_info * framebuffer_alloc() { struct fb_info *info; struct vt_kms_softc *sc; info = malloc(sizeof(*info), DRM_MEM_KMS, M_WAITOK | M_ZERO); sc = malloc(sizeof(*sc), DRM_MEM_KMS, M_WAITOK | M_ZERO); TASK_INIT(&sc->fb_mode_task, 0, vt_restore_fbdev_mode, sc); info->fb_priv = sc; info->enter = &vt_kms_postswitch; return (info); } void framebuffer_release(struct fb_info *info) { free(info->fb_priv, DRM_MEM_KMS); free(info, DRM_MEM_KMS); } static int fb_get_options(const char *connector_name, char **option) { char tunable[64]; /* * A user may use loader tunables to set a specific mode for the * console. Tunables are read in the following order: * 1. kern.vt.fb.modes.$connector_name * 2. kern.vt.fb.default_mode * * Example of a mode specific to the LVDS connector: * kern.vt.fb.modes.LVDS="1024x768" * * Example of a mode applied to all connectors not having a * connector-specific mode: * kern.vt.fb.default_mode="640x480" */ snprintf(tunable, sizeof(tunable), "kern.vt.fb.modes.%s", connector_name); DRM_INFO("Connector %s: get mode from tunables:\n", connector_name); DRM_INFO(" - %s\n", tunable); DRM_INFO(" - kern.vt.fb.default_mode\n"); *option = kern_getenv(tunable); if (*option == NULL) *option = kern_getenv("kern.vt.fb.default_mode"); return (*option != NULL ? 0 : -ENOENT); } /** * DOC: fbdev helpers * * The fb helper functions are useful to provide an fbdev on top of a drm kernel * mode setting driver. They can be used mostly independantely from the crtc * helper functions used by many drivers to implement the kernel mode setting * interfaces. 
*/ /* simple single crtc case helper function */ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; struct drm_connector *connector; int i; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct drm_fb_helper_connector *fb_helper_connector; fb_helper_connector = malloc(sizeof(struct drm_fb_helper_connector), DRM_MEM_KMS, M_NOWAIT | M_ZERO); if (!fb_helper_connector) goto fail; fb_helper_connector->connector = connector; fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector; } return 0; fail: for (i = 0; i < fb_helper->connector_count; i++) { free(fb_helper->connector_info[i], DRM_MEM_KMS); fb_helper->connector_info[i] = NULL; } fb_helper->connector_count = 0; return -ENOMEM; } EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) { struct drm_fb_helper_connector *fb_helper_conn; int i; for (i = 0; i < fb_helper->connector_count; i++) { struct drm_cmdline_mode *mode; struct drm_connector *connector; char *option = NULL; fb_helper_conn = fb_helper->connector_info[i]; connector = fb_helper_conn->connector; mode = &fb_helper_conn->cmdline_mode; /* do something on return - turn off connector maybe */ if (fb_get_options(drm_get_connector_name(connector), &option)) continue; if (drm_mode_parse_command_line_for_connector(option, connector, mode)) { if (mode->force) { const char *s; switch (mode->force) { case DRM_FORCE_OFF: s = "OFF"; break; case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break; default: case DRM_FORCE_ON: s = "ON"; break; } DRM_INFO("forcing %s connector %s\n", drm_get_connector_name(connector), s); connector->force = mode->force; } DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n", drm_get_connector_name(connector), mode->xres, mode->yres, mode->refresh_specified ? mode->refresh : 60, mode->rb ? " reduced blanking" : "", mode->margins ? " with margins" : "", mode->interlace ? 
" interlaced" : ""); } freeenv(option); } return 0; } #if 0 && defined(FREEBSD_NOTYET) static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper) { uint16_t *r_base, *g_base, *b_base; int i; r_base = crtc->gamma_store; g_base = r_base + crtc->gamma_size; b_base = g_base + crtc->gamma_size; for (i = 0; i < crtc->gamma_size; i++) helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i); } static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) { uint16_t *r_base, *g_base, *b_base; if (crtc->funcs->gamma_set == NULL) return; r_base = crtc->gamma_store; g_base = r_base + crtc->gamma_size; b_base = g_base + crtc->gamma_size; crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); } int drm_fb_helper_debug_enter(struct fb_info *info) { struct drm_fb_helper *helper = info->par; struct drm_crtc_helper_funcs *funcs; int i; if (list_empty(&kernel_fb_helper_list)) return false; list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { for (i = 0; i < helper->crtc_count; i++) { struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; if (!mode_set->crtc->enabled) continue; funcs = mode_set->crtc->helper_private; drm_fb_helper_save_lut_atomic(mode_set->crtc, helper); funcs->mode_set_base_atomic(mode_set->crtc, mode_set->fb, mode_set->x, mode_set->y, ENTER_ATOMIC_MODE_SET); } } return 0; } EXPORT_SYMBOL(drm_fb_helper_debug_enter); /* Find the real fb for a given fb helper CRTC */ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_crtc *c; list_for_each_entry(c, &dev->mode_config.crtc_list, head) { if (crtc->base.id == c->base.id) return c->fb; } return NULL; } int drm_fb_helper_debug_leave(struct fb_info *info) { struct drm_fb_helper *helper = info->par; struct drm_crtc *crtc; struct drm_crtc_helper_funcs *funcs; struct drm_framebuffer *fb; int i; for (i = 0; i < helper->crtc_count; i++) { struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; crtc = mode_set->crtc; funcs = crtc->helper_private; fb = drm_mode_config_fb(crtc); if (!crtc->enabled) continue; if (!fb) { DRM_ERROR("no fb to restore??\n"); continue; } drm_fb_helper_restore_lut_atomic(mode_set->crtc); funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x, crtc->y, LEAVE_ATOMIC_MODE_SET); } return 0; } EXPORT_SYMBOL(drm_fb_helper_debug_leave); #endif /* FREEBSD_NOTYET */ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper) { bool error = false; int i, ret; for (i = 0; i < fb_helper->crtc_count; i++) { struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; ret = mode_set->crtc->funcs->set_config(mode_set); if (ret) error = true; } return error; } EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode); static bool drm_fb_helper_force_kernel_mode(void) { bool ret, error = false; struct drm_fb_helper *helper; if (list_empty(&kernel_fb_helper_list)) return false; list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF) continue; ret = drm_fb_helper_restore_fbdev_mode(helper); if (ret) error = true; } return error; } #if 0 && defined(FREEBSD_NOTYET) int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed, void *panic_str) { /* * It's a waste of time and effort to switch back to text console * if the kernel should reboot before panic messages can be seen. 
*/ if (panic_timeout < 0) return 0; pr_err("panic occurred, switching back to text console\n"); return drm_fb_helper_force_kernel_mode(); } EXPORT_SYMBOL(drm_fb_helper_panic); static struct notifier_block paniced = { .notifier_call = drm_fb_helper_panic, }; #endif /* FREEBSD_NOTYET */ /** * drm_fb_helper_restore - restore the framebuffer console (kernel) config * * Restores the kernel's fbcon mode, used for lastclose & panic paths. */ void drm_fb_helper_restore(void) { bool ret; ret = drm_fb_helper_force_kernel_mode(); if (ret == true) DRM_ERROR("Failed to restore crtc configuration\n"); } EXPORT_SYMBOL(drm_fb_helper_restore); #ifdef __linux__ #ifdef CONFIG_MAGIC_SYSRQ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) { drm_fb_helper_restore(); } static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); static void drm_fb_helper_sysrq(int dummy1) { schedule_work(&drm_fb_helper_restore_work); } static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { .handler = drm_fb_helper_sysrq, .help_msg = "force-fb(V)", .action_msg = "Restore framebuffer console", }; #else static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; #endif #endif #if 0 && defined(FREEBSD_NOTYET) static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) { struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct drm_crtc *crtc; struct drm_connector *connector; int i, j; /* * For each CRTC in this fb, turn the connectors on/off. */ sx_xlock(&dev->mode_config.mutex); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; if (!crtc->enabled) continue; /* Walk the connectors & encoders on this fb turning them on/off */ for (j = 0; j < fb_helper->connector_count; j++) { connector = fb_helper->connector_info[j]->connector; connector->funcs->dpms(connector, dpms_mode); drm_object_property_set_value(&connector->base, dev->mode_config.dpms_property, dpms_mode); } } sx_xunlock(&dev->mode_config.mutex); } int drm_fb_helper_blank(int blank, struct fb_info *info) { switch (blank) { /* Display: On; HSync: On, VSync: On */ case FB_BLANK_UNBLANK: drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON); break; /* Display: Off; HSync: On, VSync: On */ case FB_BLANK_NORMAL: drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY); break; /* Display: Off; HSync: Off, VSync: On */ case FB_BLANK_HSYNC_SUSPEND: drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY); break; /* Display: Off; HSync: On, VSync: Off */ case FB_BLANK_VSYNC_SUSPEND: drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND); break; /* Display: Off; HSync: Off, VSync: Off */ case FB_BLANK_POWERDOWN: drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF); break; } return 0; } EXPORT_SYMBOL(drm_fb_helper_blank); #endif /* FREEBSD_NOTYET */ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) { int i; for (i = 0; i < helper->connector_count; i++) free(helper->connector_info[i], DRM_MEM_KMS); free(helper->connector_info, DRM_MEM_KMS); for (i = 0; i < helper->crtc_count; i++) { free(helper->crtc_info[i].mode_set.connectors, DRM_MEM_KMS); if (helper->crtc_info[i].mode_set.mode) drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode); } free(helper->crtc_info, DRM_MEM_KMS); } int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *fb_helper, int crtc_count, int max_conn_count) { struct drm_crtc *crtc; int i; fb_helper->dev = dev; INIT_LIST_HEAD(&fb_helper->kernel_fb_list); fb_helper->crtc_info = malloc(crtc_count * sizeof(struct drm_fb_helper_crtc),
DRM_MEM_KMS, M_NOWAIT | M_ZERO); if (!fb_helper->crtc_info) return -ENOMEM; fb_helper->crtc_count = crtc_count; fb_helper->connector_info = malloc(dev->mode_config.num_connector * sizeof(struct drm_fb_helper_connector *), DRM_MEM_KMS, M_NOWAIT | M_ZERO); if (!fb_helper->connector_info) { free(fb_helper->crtc_info, DRM_MEM_KMS); return -ENOMEM; } fb_helper->connector_count = 0; for (i = 0; i < crtc_count; i++) { fb_helper->crtc_info[i].mode_set.connectors = malloc(max_conn_count * sizeof(struct drm_connector *), DRM_MEM_KMS, M_NOWAIT | M_ZERO); if (!fb_helper->crtc_info[i].mode_set.connectors) goto out_free; fb_helper->crtc_info[i].mode_set.num_connectors = 0; } i = 0; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { fb_helper->crtc_info[i].mode_set.crtc = crtc; i++; } return 0; out_free: drm_fb_helper_crtc_free(fb_helper); return -ENOMEM; } EXPORT_SYMBOL(drm_fb_helper_init); void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) { if (!list_empty(&fb_helper->kernel_fb_list)) { list_del(&fb_helper->kernel_fb_list); #if 0 && defined(FREEBSD_NOTYET) if (list_empty(&kernel_fb_helper_list)) { pr_info("drm: unregistered panic notifier\n"); atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); } #endif /* FREEBSD_NOTYET */ } drm_fb_helper_crtc_free(fb_helper); } EXPORT_SYMBOL(drm_fb_helper_fini); #if 0 && defined(FREEBSD_NOTYET) static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, u16 regno, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; int pindex; if (info->fix.visual == FB_VISUAL_TRUECOLOR) { u32 *palette; u32 value; - /* place color in psuedopalette */ + /* place color in pseudopalette */ if (regno > 16) return -EINVAL; palette = (u32 *)info->pseudo_palette; red >>= (16 - info->var.red.length); green >>= (16 - info->var.green.length); blue >>= (16 - info->var.blue.length); value = (red << info->var.red.offset) | (green << info->var.green.offset) | (blue << info->var.blue.offset); if (info->var.transp.length > 0) { u32 mask = (1 << info->var.transp.length) - 1; mask <<= info->var.transp.offset; value |= mask; } palette[regno] = value; return 0; } pindex = regno; if (fb->bits_per_pixel == 16) { pindex = regno << 3; if (fb->depth == 16 && regno > 63) return -EINVAL; if (fb->depth == 15 && regno > 31) return -EINVAL; if (fb->depth == 16) { u16 r, g, b; int i; if (regno < 32) { for (i = 0; i < 8; i++) fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex + i); } fb_helper->funcs->gamma_get(crtc, &r, &g, &b, pindex >> 1); for (i = 0; i < 4; i++) fb_helper->funcs->gamma_set(crtc, r, green, b, (pindex >> 1) + i); } } if (fb->depth != 16) fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex); return 0; } int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_crtc_helper_funcs *crtc_funcs; u16 *red, *green, *blue, *transp; struct drm_crtc *crtc; int i, j, rc = 0; int start; for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; crtc_funcs = crtc->helper_private; red = cmap->red; green = cmap->green; blue = cmap->blue; transp = cmap->transp; start = cmap->start; for (j = 0; j < cmap->len; j++) { u16 hred, hgreen, hblue, htransp = 0xffff; hred = *red++; hgreen = *green++; hblue = *blue++; if (transp) htransp = *transp++; rc = setcolreg(crtc, hred, hgreen, hblue, start++, info); if (rc) return rc; } 
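/* Flush the palette entries programmed above out to the hardware LUT. */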
crtc_funcs->load_lut(crtc); } return rc; } EXPORT_SYMBOL(drm_fb_helper_setcmap); int drm_fb_helper_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; int depth; if (var->pixclock != 0 || in_dbg_master()) return -EINVAL; /* Need to resize the fb object !!! */ if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height || var->xres_virtual > fb->width || var->yres_virtual > fb->height) { DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb " "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel, var->xres_virtual, var->yres_virtual, fb->width, fb->height, fb->bits_per_pixel); return -EINVAL; } switch (var->bits_per_pixel) { case 16: depth = (var->green.length == 6) ? 16 : 15; break; case 32: depth = (var->transp.length > 0) ? 32 : 24; break; default: depth = var->bits_per_pixel; break; } switch (depth) { case 8: var->red.offset = 0; var->green.offset = 0; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.length = 0; var->transp.offset = 0; break; case 15: var->red.offset = 10; var->green.offset = 5; var->blue.offset = 0; var->red.length = 5; var->green.length = 5; var->blue.length = 5; var->transp.length = 1; var->transp.offset = 15; break; case 16: var->red.offset = 11; var->green.offset = 5; var->blue.offset = 0; var->red.length = 5; var->green.length = 6; var->blue.length = 5; var->transp.length = 0; var->transp.offset = 0; break; case 24: var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.length = 0; var->transp.offset = 0; break; case 32: var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.length = 8; var->transp.offset = 24; break; default: return -EINVAL; } return 0; } EXPORT_SYMBOL(drm_fb_helper_check_var); /* this will let fbcon do the mode init */ int drm_fb_helper_set_par(struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct fb_var_screeninfo *var = &info->var; struct drm_crtc *crtc; int ret; int i; if (var->pixclock != 0) { DRM_ERROR("PIXEL CLOCK SET\n"); return -EINVAL; } sx_xlock(&dev->mode_config.mutex); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set); if (ret) { sx_xunlock(&dev->mode_config.mutex); return ret; } } sx_xunlock(&dev->mode_config.mutex); if (fb_helper->delayed_hotplug) { fb_helper->delayed_hotplug = false; drm_fb_helper_hotplug_event(fb_helper); } return 0; } EXPORT_SYMBOL(drm_fb_helper_set_par); int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct drm_mode_set *modeset; struct drm_crtc *crtc; int ret = 0; int i; sx_xlock(&dev->mode_config.mutex); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; modeset = &fb_helper->crtc_info[i].mode_set; modeset->x = var->xoffset; modeset->y = var->yoffset; if (modeset->num_connectors) { ret = crtc->funcs->set_config(modeset); if (!ret) { info->var.xoffset = var->xoffset; info->var.yoffset = var->yoffset; } } } sx_xunlock(&dev->mode_config.mutex); return ret; } 
EXPORT_SYMBOL(drm_fb_helper_pan_display); #endif /* FREEBSD_NOTYET */ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, int preferred_bpp) { int new_fb = 0; int crtc_count = 0; int i; struct fb_info *info; struct drm_fb_helper_surface_size sizes; int gamma_size = 0; #if defined(__FreeBSD__) struct drm_crtc *crtc; struct drm_device *dev; int ret; device_t kdev; #endif memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size)); sizes.surface_depth = 24; sizes.surface_bpp = 32; sizes.fb_width = (unsigned)-1; sizes.fb_height = (unsigned)-1; /* if driver picks 8 or 16 by default use that for both depth/bpp */ if (preferred_bpp != sizes.surface_bpp) sizes.surface_depth = sizes.surface_bpp = preferred_bpp; /* first up get a count of crtcs now in use and new min/maxes width/heights */ for (i = 0; i < fb_helper->connector_count; i++) { struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; struct drm_cmdline_mode *cmdline_mode; cmdline_mode = &fb_helper_conn->cmdline_mode; if (cmdline_mode->bpp_specified) { switch (cmdline_mode->bpp) { case 8: sizes.surface_depth = sizes.surface_bpp = 8; break; case 15: sizes.surface_depth = 15; sizes.surface_bpp = 16; break; case 16: sizes.surface_depth = sizes.surface_bpp = 16; break; case 24: sizes.surface_depth = sizes.surface_bpp = 24; break; case 32: sizes.surface_depth = 24; sizes.surface_bpp = 32; break; } break; } } crtc_count = 0; for (i = 0; i < fb_helper->crtc_count; i++) { struct drm_display_mode *desired_mode; desired_mode = fb_helper->crtc_info[i].desired_mode; if (desired_mode) { if (gamma_size == 0) gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; if (desired_mode->hdisplay < sizes.fb_width) sizes.fb_width = desired_mode->hdisplay; if (desired_mode->vdisplay < sizes.fb_height) sizes.fb_height = desired_mode->vdisplay; if (desired_mode->hdisplay > sizes.surface_width) sizes.surface_width = desired_mode->hdisplay; if (desired_mode->vdisplay > sizes.surface_height) sizes.surface_height = desired_mode->vdisplay; crtc_count++; } } if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { /* hmm everyone went away - assume VGA cable just fell out and will come back later. 
*/ DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n"); sizes.fb_width = sizes.surface_width = 1024; sizes.fb_height = sizes.surface_height = 768; } /* push down into drivers */ new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); if (new_fb < 0) return new_fb; info = fb_helper->fbdev; kdev = fb_helper->dev->dev; info->fb_video_dev = device_get_parent(kdev); /* set the fb pointer */ for (i = 0; i < fb_helper->crtc_count; i++) fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb; #if defined(__FreeBSD__) if (new_fb) { int ret; info->fb_fbd_dev = device_add_child(kdev, "fbd", device_get_unit(kdev)); if (info->fb_fbd_dev != NULL) ret = device_probe_and_attach(info->fb_fbd_dev); else ret = ENODEV; #ifdef DEV_VT if (ret != 0) DRM_ERROR("Failed to attach fbd device: %d\n", ret); #endif } else { /* Modified version of drm_fb_helper_set_par() */ dev = fb_helper->dev; sx_xlock(&dev->mode_config.mutex); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set); if (ret) { sx_xunlock(&dev->mode_config.mutex); return ret; } } sx_xunlock(&dev->mode_config.mutex); if (fb_helper->delayed_hotplug) { fb_helper->delayed_hotplug = false; drm_fb_helper_hotplug_event(fb_helper); } } #else if (new_fb) { info->var.pixclock = 0; if (register_framebuffer(info) < 0) return -EINVAL; dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n", info->node, info->fix.id); } else { drm_fb_helper_set_par(info); } #endif #if 0 && defined(FREEBSD_NOTYET) /* Switch back to kernel console on panic */ /* multi card linked list maybe */ if (list_empty(&kernel_fb_helper_list)) { dev_info(fb_helper->dev->dev, "registered panic notifier\n"); atomic_notifier_chain_register(&panic_notifier_list, &paniced); register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); } #endif /* FREEBSD_NOTYET */ if (new_fb) list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); return 0; } EXPORT_SYMBOL(drm_fb_helper_single_fb_probe); void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, uint32_t depth) { info->fb_stride = pitch; return; } EXPORT_SYMBOL(drm_fb_helper_fill_fix); void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, uint32_t fb_width, uint32_t fb_height) { struct drm_framebuffer *fb = fb_helper->fb; struct vt_kms_softc *sc; info->fb_name = device_get_nameunit(fb_helper->dev->dev); info->fb_width = fb->width; info->fb_height = fb->height; info->fb_depth = fb->bits_per_pixel; sc = (struct vt_kms_softc *)info->fb_priv; sc->fb_helper = fb_helper; } EXPORT_SYMBOL(drm_fb_helper_fill_var); static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper, uint32_t maxX, uint32_t maxY) { struct drm_connector *connector; int count = 0; int i; for (i = 0; i < fb_helper->connector_count; i++) { connector = fb_helper->connector_info[i]->connector; count += connector->funcs->fill_modes(connector, maxX, maxY); } return count; } static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height) { struct drm_display_mode *mode; list_for_each_entry(mode, &fb_connector->connector->modes, head) { if (drm_mode_width(mode) > width || drm_mode_height(mode) > height) continue; if (mode->type & DRM_MODE_TYPE_PREFERRED) return mode; } return NULL; } static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector) { struct drm_cmdline_mode *cmdline_mode; cmdline_mode = &fb_connector->cmdline_mode; return 
cmdline_mode->specified; } static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, int width, int height) { struct drm_cmdline_mode *cmdline_mode; struct drm_display_mode *mode = NULL; cmdline_mode = &fb_helper_conn->cmdline_mode; if (cmdline_mode->specified == false) return mode; /* attempt to find a matching mode in the list of modes * we have gotten so far, if not add a CVT mode that conforms */ if (cmdline_mode->rb || cmdline_mode->margins) goto create_mode; list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { /* check width/height */ if (mode->hdisplay != cmdline_mode->xres || mode->vdisplay != cmdline_mode->yres) continue; if (cmdline_mode->refresh_specified) { if (mode->vrefresh != cmdline_mode->refresh) continue; } if (cmdline_mode->interlace) { if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) continue; } return mode; } create_mode: mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev, cmdline_mode); list_add(&mode->head, &fb_helper_conn->connector->modes); return mode; } static bool drm_connector_enabled(struct drm_connector *connector, bool strict) { bool enable; if (strict) enable = connector->status == connector_status_connected; else enable = connector->status != connector_status_disconnected; return enable; } static void drm_enable_connectors(struct drm_fb_helper *fb_helper, bool *enabled) { bool any_enabled = false; struct drm_connector *connector; int i = 0; for (i = 0; i < fb_helper->connector_count; i++) { connector = fb_helper->connector_info[i]->connector; enabled[i] = drm_connector_enabled(connector, true); DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id, enabled[i] ? "yes" : "no"); any_enabled |= enabled[i]; } if (any_enabled) return; for (i = 0; i < fb_helper->connector_count; i++) { connector = fb_helper->connector_info[i]->connector; enabled[i] = drm_connector_enabled(connector, false); } } static bool drm_target_cloned(struct drm_fb_helper *fb_helper, struct drm_display_mode **modes, bool *enabled, int width, int height) { int count, i, j; bool can_clone = false; struct drm_fb_helper_connector *fb_helper_conn; struct drm_display_mode *dmt_mode, *mode; /* only contemplate cloning in the single crtc case */ if (fb_helper->crtc_count > 1) return false; count = 0; for (i = 0; i < fb_helper->connector_count; i++) { if (enabled[i]) count++; } /* only contemplate cloning if more than one connector is enabled */ if (count <= 1) return false; /* check the command line or if nothing common pick 1024x768 */ can_clone = true; for (i = 0; i < fb_helper->connector_count; i++) { if (!enabled[i]) continue; fb_helper_conn = fb_helper->connector_info[i]; modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); if (!modes[i]) { can_clone = false; break; } for (j = 0; j < i; j++) { if (!enabled[j]) continue; if (!drm_mode_equal(modes[j], modes[i])) can_clone = false; } } if (can_clone) { DRM_DEBUG_KMS("can clone using command line\n"); return true; } /* try and find a 1024x768 mode on each connector */ can_clone = true; dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false); for (i = 0; i < fb_helper->connector_count; i++) { if (!enabled[i]) continue; fb_helper_conn = fb_helper->connector_info[i]; list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { if (drm_mode_equal(mode, dmt_mode)) modes[i] = mode; } if (!modes[i]) can_clone = false; } if (can_clone) { DRM_DEBUG_KMS("can clone using 1024x768\n"); return true; } DRM_INFO("kms: can't enable cloning when we 
probably wanted to.\n"); return false; } static bool drm_target_preferred(struct drm_fb_helper *fb_helper, struct drm_display_mode **modes, bool *enabled, int width, int height) { struct drm_fb_helper_connector *fb_helper_conn; int i; for (i = 0; i < fb_helper->connector_count; i++) { fb_helper_conn = fb_helper->connector_info[i]; if (enabled[i] == false) continue; DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", fb_helper_conn->connector->base.id); /* go for command line mode first */ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); if (!modes[i]) { DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", fb_helper_conn->connector->base.id); modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height); } /* No preferred modes, pick one off the list */ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) { list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head) break; } DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name : "none"); } return true; } static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, struct drm_fb_helper_crtc **best_crtcs, struct drm_display_mode **modes, int n, int width, int height) { int c, o; struct drm_device *dev = fb_helper->dev; struct drm_connector *connector; struct drm_connector_helper_funcs *connector_funcs; struct drm_encoder *encoder; struct drm_fb_helper_crtc *best_crtc; int my_score, best_score, score; struct drm_fb_helper_crtc **crtcs, *crtc; struct drm_fb_helper_connector *fb_helper_conn; if (n == fb_helper->connector_count) return 0; fb_helper_conn = fb_helper->connector_info[n]; connector = fb_helper_conn->connector; best_crtcs[n] = NULL; best_crtc = NULL; best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height); if (modes[n] == NULL) return best_score; crtcs = malloc(dev->mode_config.num_connector * sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS, M_NOWAIT | M_ZERO); if (!crtcs) return best_score; my_score = 1; if (connector->status == connector_status_connected) my_score++; if (drm_has_cmdline_mode(fb_helper_conn)) my_score++; if (drm_has_preferred_mode(fb_helper_conn, width, height)) my_score++; connector_funcs = connector->helper_private; encoder = connector_funcs->best_encoder(connector); if (!encoder) goto out; /* select a crtc for this connector and then attempt to configure remaining connectors */ for (c = 0; c < fb_helper->crtc_count; c++) { crtc = &fb_helper->crtc_info[c]; if ((encoder->possible_crtcs & (1 << c)) == 0) continue; for (o = 0; o < n; o++) if (best_crtcs[o] == crtc) break; if (o < n) { /* ignore cloning unless only a single crtc */ if (fb_helper->crtc_count > 1) continue; if (!drm_mode_equal(modes[o], modes[n])) continue; } crtcs[n] = crtc; memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *)); score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1, width, height); if (score > best_score) { best_crtc = crtc; best_score = score; memcpy(best_crtcs, crtcs, dev->mode_config.num_connector * sizeof(struct drm_fb_helper_crtc *)); } } out: free(crtcs, DRM_MEM_KMS); return best_score; } static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; struct drm_fb_helper_crtc **crtcs; struct drm_display_mode **modes; struct drm_mode_set *modeset; bool *enabled; int width, height; int i, ret; DRM_DEBUG_KMS("\n"); width = dev->mode_config.max_width; height = dev->mode_config.max_height; crtcs = malloc(dev->mode_config.num_connector * sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS,
M_NOWAIT | M_ZERO); modes = malloc(dev->mode_config.num_connector * sizeof(struct drm_display_mode *), DRM_MEM_KMS, M_NOWAIT | M_ZERO); enabled = malloc(dev->mode_config.num_connector * sizeof(bool), DRM_MEM_KMS, M_NOWAIT | M_ZERO); if (!crtcs || !modes || !enabled) { DRM_ERROR("Memory allocation failed\n"); goto out; } drm_enable_connectors(fb_helper, enabled); ret = drm_target_cloned(fb_helper, modes, enabled, width, height); if (!ret) { ret = drm_target_preferred(fb_helper, modes, enabled, width, height); if (!ret) DRM_ERROR("Unable to find initial modes\n"); } DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height); drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height); /* need to set the modesets up here for use later */ /* fill out the connector<->crtc mappings into the modesets */ for (i = 0; i < fb_helper->crtc_count; i++) { modeset = &fb_helper->crtc_info[i].mode_set; modeset->num_connectors = 0; } for (i = 0; i < fb_helper->connector_count; i++) { struct drm_display_mode *mode = modes[i]; struct drm_fb_helper_crtc *fb_crtc = crtcs[i]; modeset = &fb_crtc->mode_set; if (mode && fb_crtc) { DRM_DEBUG_KMS("desired mode %s set on crtc %d\n", mode->name, fb_crtc->mode_set.crtc->base.id); fb_crtc->desired_mode = mode; if (modeset->mode) drm_mode_destroy(dev, modeset->mode); modeset->mode = drm_mode_duplicate(dev, fb_crtc->desired_mode); modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector; } } out: free(crtcs, DRM_MEM_KMS); free(modes, DRM_MEM_KMS); free(enabled, DRM_MEM_KMS); } /** * drm_fb_helper_initial_config - setup a sane initial connector configuration * @fb_helper: fb_helper device struct * @bpp_sel: bpp value to use for the framebuffer configuration * * LOCKING: * Called at init time by the driver to set up the @fb_helper initial * configuration, must take the mode config lock. * * Scans the CRTCs and connectors and tries to put together an initial setup. * At the moment, this is a cloned configuration across all heads with * a new framebuffer object as the backing store. * * RETURNS: * Zero if everything went ok, nonzero otherwise. */ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) { struct drm_device *dev = fb_helper->dev; int count = 0; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(fb_helper->dev); drm_fb_helper_parse_command_line(fb_helper); count = drm_fb_helper_probe_connector_modes(fb_helper, dev->mode_config.max_width, dev->mode_config.max_height); /* * we shouldn't end up with no modes here. */ if (count == 0) dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n"); drm_setup_crtcs(fb_helper); return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); } EXPORT_SYMBOL(drm_fb_helper_initial_config); /** * drm_fb_helper_hotplug_event - respond to a hotplug notification by * probing all the outputs attached to the fb * @fb_helper: the drm_fb_helper * * LOCKING: * Called at runtime, must take mode config lock. * * Scan the connectors attached to the fb_helper and try to put together a * setup after *notification of a change in output configuration. * * RETURNS: * 0 on success and a non-zero error code otherwise.
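 *
 * (Editor's note: the expected caller, an assumption not shown in this
 * file, is a driver's hotplug interrupt or output-poll path, invoked
 * once the driver has updated its own connector state.)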
*/ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; int count = 0; u32 max_width, max_height, bpp_sel; int bound = 0, crtcs_bound = 0; struct drm_crtc *crtc; if (!fb_helper->fb) return 0; sx_xlock(&dev->mode_config.mutex); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (crtc->fb) crtcs_bound++; if (crtc->fb == fb_helper->fb) bound++; } if (bound < crtcs_bound) { fb_helper->delayed_hotplug = true; sx_xunlock(&dev->mode_config.mutex); return 0; } DRM_DEBUG_KMS("\n"); max_width = fb_helper->fb->width; max_height = fb_helper->fb->height; bpp_sel = fb_helper->fb->bits_per_pixel; count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height); drm_setup_crtcs(fb_helper); sx_xunlock(&dev->mode_config.mutex); return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); } EXPORT_SYMBOL(drm_fb_helper_hotplug_event); Index: head/sys/dev/xen/blkback/blkback.c =================================================================== --- head/sys/dev/xen/blkback/blkback.c (revision 329872) +++ head/sys/dev/xen/blkback/blkback.c (revision 329873) @@ -1,3938 +1,3938 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2012 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * Authors: Justin T. Gibbs (Spectra Logic Corporation) * Ken Merry (Spectra Logic Corporation) */ #include __FBSDID("$FreeBSD$"); /** * \file blkback.c * * \brief Device driver supporting the vending of block storage from * a FreeBSD domain to other domains. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /*--------------------------- Compile-time Tunables --------------------------*/ /** * The maximum number of shared memory ring pages we will allow in a * negotiated block-front/back communication channel. Allow enough * ring space for all requests to be XBB_MAX_REQUEST_SIZE'd. 
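 *
 * Rough sizing sketch (editor's assumptions about the Xen ring macros):
 * with 4 KiB pages, 32 ring pages give 128 KiB of shared ring space, and
 * __CONST_RING_SIZE() rounds the usable entry count down to a power of
 * two, so XBB_MAX_REQUESTS below comes out as the largest power-of-two
 * number of requests that fits in that space.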
*/ #define XBB_MAX_RING_PAGES 32 /** * The maximum number of outstanding request blocks (request headers plus * additional segment blocks) we will allow in a negotiated block-front/back * communication channel. */ #define XBB_MAX_REQUESTS \ __CONST_RING_SIZE(blkif, PAGE_SIZE * XBB_MAX_RING_PAGES) /** * \brief Define to force all I/O to be performed on memory owned by the * backend device, with a copy-in/out to the remote domain's memory. * * \note This option is currently required when this driver's domain is * operating in HVM mode on a system using an IOMMU. * * This driver uses Xen's grant table API to gain access to the memory of * the remote domains it serves. When our domain is operating in PV mode, * the grant table mechanism directly updates our domain's page table entries * to point to the physical pages of the remote domain. This scheme guarantees * that blkback and the backing devices it uses can safely perform DMA * operations to satisfy requests. In HVM mode, Xen may use a HW IOMMU to * ensure that our domain cannot DMA to pages owned by another domain. As * of Xen 4.0, IOMMU mappings for HVM guests are not updated via the grant * table API. For this reason, in HVM mode, we must bounce all requests into * memory that is mapped into our domain at domain startup and thus has * valid IOMMU mappings. */ #define XBB_USE_BOUNCE_BUFFERS /** * \brief Define to enable rudimentary request logging to the console. */ #undef XBB_DEBUG /*---------------------------------- Macros ----------------------------------*/ /** * Custom malloc type for all driver allocations. */ static MALLOC_DEFINE(M_XENBLOCKBACK, "xbbd", "Xen Block Back Driver Data"); #ifdef XBB_DEBUG #define DPRINTF(fmt, args...) \ printf("xbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) #else #define DPRINTF(fmt, args...) do {} while(0) #endif /** * The maximum mapped region size per request we will allow in a negotiated * block-front/back communication channel. */ #define XBB_MAX_REQUEST_SIZE \ MIN(MAXPHYS, BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) /** * The maximum number of segments (within a request header and accompanying * segment blocks) per request we will allow in a negotiated block-front/back * communication channel. */ #define XBB_MAX_SEGMENTS_PER_REQUEST \ (MIN(UIO_MAXIOV, \ MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST, \ (XBB_MAX_REQUEST_SIZE / PAGE_SIZE) + 1))) /** * The maximum number of ring pages that we can allow per request list. * We limit this to the maximum number of segments per request, because * that is already a reasonable number of segments to aggregate. This * number should never be smaller than XBB_MAX_SEGMENTS_PER_REQUEST, * because that would leave situations where we can't dispatch even one * large request. */ #define XBB_MAX_SEGMENTS_PER_REQLIST XBB_MAX_SEGMENTS_PER_REQUEST /*--------------------------- Forward Declarations ---------------------------*/ struct xbb_softc; struct xbb_xen_req; static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...) __attribute__((format(printf, 3, 4))); static int xbb_shutdown(struct xbb_softc *xbb); /*------------------------------ Data Structures -----------------------------*/ STAILQ_HEAD(xbb_xen_req_list, xbb_xen_req); typedef enum { XBB_REQLIST_NONE = 0x00, XBB_REQLIST_MAPPED = 0x01 } xbb_reqlist_flags; struct xbb_xen_reqlist { /** * Back reference to the parent block back instance for this * request. Used during bio_done handling. */ struct xbb_softc *xbb; /** * BLKIF_OP code for this request.
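 * (For example BLKIF_OP_READ, BLKIF_OP_WRITE or BLKIF_OP_FLUSH_DISKCACHE;
 * which of these actually appear depends on the features negotiated
 * with the front-end.)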
*/ int operation; /** * Set to BLKIF_RSP_* to indicate request status. * * This field allows an error status to be recorded even if the * delivery of this status must be deferred. Deferred reporting * is necessary, for example, when an error is detected during * completion processing of one bio when other bios for this * request are still outstanding. */ int status; /** * Number of 512 byte sectors not transferred. */ int residual_512b_sectors; /** * Starting sector number of the first request in the list. */ off_t starting_sector_number; /** * If we're going to coalesce, the next contiguous sector would be * this one. */ off_t next_contig_sector; /** * Number of child requests in the list. */ int num_children; /** * Number of I/O requests still pending on the backend. */ int pendcnt; /** * Total number of segments for requests in the list. */ int nr_segments; /** * Flags for this particular request list. */ xbb_reqlist_flags flags; /** * Kernel virtual address space reserved for this request * list structure and used to map the remote domain's pages for * this I/O, into our domain's address space. */ uint8_t *kva; /** - * Base, psuedo-physical address, corresponding to the start + * Base, pseudo-physical address, corresponding to the start * of this request's kva region. */ uint64_t gnt_base; #ifdef XBB_USE_BOUNCE_BUFFERS /** * Pre-allocated domain local memory used to proxy remote * domain memory during I/O operations. */ uint8_t *bounce; #endif /** * Array of grant handles (one per page) used to map this request. */ grant_handle_t *gnt_handles; /** * Device statistics request ordering type (ordered or simple). */ devstat_tag_type ds_tag_type; /** * Device statistics request type (read, write, no_data). */ devstat_trans_flags ds_trans_type; /** * The start time for this request. */ struct bintime ds_t0; /** * Linked list of contiguous requests with the same operation type. */ struct xbb_xen_req_list contig_req_list; /** * Linked list links used to aggregate idle requests in the * request list free pool (xbb->reqlist_free_stailq) and pending * requests waiting for execution (xbb->reqlist_pending_stailq). */ STAILQ_ENTRY(xbb_xen_reqlist) links; }; STAILQ_HEAD(xbb_xen_reqlist_list, xbb_xen_reqlist); /** * \brief Object tracking an in-flight I/O from a Xen VBD consumer. */ struct xbb_xen_req { /** * Linked list links used to aggregate requests into a reqlist * and to store them in the request free pool. */ STAILQ_ENTRY(xbb_xen_req) links; /** * The remote domain's identifier for this I/O request. */ uint64_t id; /** * The number of pages currently mapped for this request. */ int nr_pages; /** * The number of 512 byte sectors comprising this request. */ int nr_512b_sectors; /** * BLKIF_OP code for this request. */ int operation; /** * Storage used for non-native ring requests. */ blkif_request_t ring_req_storage; /** * Pointer to the Xen request in the ring. */ blkif_request_t *ring_req; /** * Consumer index for this request. */ RING_IDX req_ring_idx; /** * The start time for this request. */ struct bintime ds_t0; /** * Pointer back to our parent request list. */ struct xbb_xen_reqlist *reqlist; }; SLIST_HEAD(xbb_xen_req_slist, xbb_xen_req); /** * \brief Configuration data for the shared memory request ring * used to communicate with the front-end client of * this driver. */ struct xbb_ring_config { /** KVA address where ring memory is mapped.
*/ vm_offset_t va; /** The pseudo-physical address where ring memory is mapped.*/ uint64_t gnt_addr; /** * Grant table handles, one per-ring page, returned by the * hypervisor upon mapping of the ring and required to * unmap it when a connection is torn down. */ grant_handle_t handle[XBB_MAX_RING_PAGES]; /** * The device bus address returned by the hypervisor when * mapping the ring and required to unmap it when a connection * is torn down. */ uint64_t bus_addr[XBB_MAX_RING_PAGES]; /** The number of ring pages mapped for the current connection. */ u_int ring_pages; /** * The grant references, one per-ring page, supplied by the * front-end, allowing us to reference the ring pages in the * front-end's domain and to map these pages into our own domain. */ grant_ref_t ring_ref[XBB_MAX_RING_PAGES]; /** The interrupt driven event channel used to signal ring events. */ evtchn_port_t evtchn; }; /** * Per-instance connection state flags. */ typedef enum { /** * The front-end requested a read-only mount of the * back-end device/file. */ XBBF_READ_ONLY = 0x01, /** Communication with the front-end has been established. */ XBBF_RING_CONNECTED = 0x02, /** * Front-end requests exist in the ring and are waiting for * xbb_xen_req objects to free up. */ XBBF_RESOURCE_SHORTAGE = 0x04, /** Connection teardown in progress. */ XBBF_SHUTDOWN = 0x08, /** A thread is already performing shutdown processing. */ XBBF_IN_SHUTDOWN = 0x10 } xbb_flag_t; /** Backend device type. */ typedef enum { /** Backend type unknown. */ XBB_TYPE_NONE = 0x00, /** * Backend type disk (access via cdev switch * strategy routine). */ XBB_TYPE_DISK = 0x01, /** Backend type file (access via vnode operations). */ XBB_TYPE_FILE = 0x02 } xbb_type; /** * \brief Structure used to memoize information about a per-request * scatter-gather list. * * The chief benefit of using this data structure is it avoids having * to reparse the possibly discontiguous S/G list in the original * request. Due to the way that the mapping of the memory backing an * I/O transaction is handled by Xen, a second pass is unavoidable. * At least this way the second walk is a simple array traversal. * * \note A single Scatter/Gather element in the block interface covers * at most 1 machine page. In this context a sector (blkif * nomenclature, not what I'd choose) is a 512b aligned unit * of mapping within the machine page referenced by an S/G * element. */ struct xbb_sg { /** The number of 512b data chunks mapped in this S/G element. */ int16_t nsect; /** * The index (0 based) of the first 512b data chunk mapped * in this S/G element. */ uint8_t first_sect; /** * The index (0 based) of the last 512b data chunk mapped * in this S/G element. */ uint8_t last_sect; }; /** * Character device backend specific configuration data. */ struct xbb_dev_data { /** Cdev used for device backend access. */ struct cdev *cdev; /** Cdev switch used for device backend access. */ struct cdevsw *csw; /** Used to hold a reference on opened cdev backend devices. */ int dev_ref; }; /** * File backend specific configuration data. */ struct xbb_file_data { /** Credentials to use for vnode backed (file based) I/O. */ struct ucred *cred; /** * \brief Array of io vectors used to process file based I/O. * * Only a single file based request is outstanding per-xbb instance, * so we only need one of these. */ struct iovec xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST]; #ifdef XBB_USE_BOUNCE_BUFFERS /** * \brief Array of io vectors used to handle bouncing of file reads.
* * Vnode operations are free to modify uio data during their * execution. In the case of a read with bounce buffering active, * we need some of the data from the original uio in order to * bounce-out the read data. This array serves as the temporary * storage for this saved data. */ struct iovec saved_xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST]; /** * \brief Array of memoized bounce buffer kva offsets used * in the file based backend. * * Due to the way that the mapping of the memory backing an * I/O transaction is handled by Xen, a second pass through * the request sg elements is unavoidable. We memoize the computed * bounce address here to reduce the cost of the second walk. */ void *xiovecs_vaddr[XBB_MAX_SEGMENTS_PER_REQLIST]; #endif /* XBB_USE_BOUNCE_BUFFERS */ }; /** * Collection of backend type specific data. */ union xbb_backend_data { struct xbb_dev_data dev; struct xbb_file_data file; }; /** * Function signature of backend specific I/O handlers. */ typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, int operation, int flags); /** * Per-instance configuration data. */ struct xbb_softc { /** * Task-queue used to process I/O requests. */ struct taskqueue *io_taskqueue; /** * Single "run the request queue" task enqueued * on io_taskqueue. */ struct task io_task; /** Device type for this instance. */ xbb_type device_type; /** NewBus device corresponding to this instance. */ device_t dev; /** Backend specific dispatch routine for this instance. */ xbb_dispatch_t dispatch_io; /** The number of requests outstanding on the backend device/file. */ int active_request_count; /** Free pool of request tracking structures. */ struct xbb_xen_req_list request_free_stailq; /** Array, sized at connection time, of request tracking structures. */ struct xbb_xen_req *requests; /** Free pool of request list structures. */ struct xbb_xen_reqlist_list reqlist_free_stailq; /** List of pending request lists awaiting execution. */ struct xbb_xen_reqlist_list reqlist_pending_stailq; /** Array, sized at connection time, of request list structures. */ struct xbb_xen_reqlist *request_lists; /** * Global pool of kva used for mapping remote domain ring * and I/O transaction data. */ vm_offset_t kva; - /** Psuedo-physical address corresponding to kva. */ + /** Pseudo-physical address corresponding to kva. */ uint64_t gnt_base_addr; /** The size of the global kva pool. */ int kva_size; /** The size of the KVA area used for request lists. */ int reqlist_kva_size; /** The number of pages of KVA used for request lists */ int reqlist_kva_pages; /** Bitmap of free KVA pages */ bitstr_t *kva_free; /** * \brief Cached value of the front-end's domain id. * * This value is used once for each mapped page in * a transaction. We cache it to avoid incurring the * cost of an ivar access every time this is needed. */ domid_t otherend_id; /** * \brief The blkif protocol abi in effect. * * There are situations where the back and front ends can * have a different, native abi (e.g. intel x86_64 and * 32bit x86 domains on the same machine). The back-end * always accommodates the front-end's native abi. That * value is pulled from the XenStore and recorded here. */ int abi; /** * \brief The maximum number of requests and request lists allowed * to be in flight at a time. * * This value is negotiated via the XenStore. */ u_int max_requests; /** * \brief The maximum number of segments (1 page per segment) * that can be mapped by a request. * * This value is negotiated via the XenStore.
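 *
 * (Editor's sketch of the negotiation, an assumption rather than
 * something shown here: the back-end advertises its limit in XenStore
 * and the effective value is the smaller of what the two ends support.)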
*/ u_int max_request_segments; /** * \brief Maximum number of segments per request list. * * This value is derived from and will generally be larger than * max_request_segments. */ u_int max_reqlist_segments; /** * The maximum size of any request to this back-end * device. * * This value is negotiated via the XenStore. */ u_int max_request_size; /** * The maximum size of any request list. This is derived directly * from max_reqlist_segments. */ u_int max_reqlist_size; /** Various configuration and state bit flags. */ xbb_flag_t flags; /** Ring mapping and interrupt configuration data. */ struct xbb_ring_config ring_config; /** Runtime, cross-abi safe, structures for ring access. */ blkif_back_rings_t rings; /** IRQ mapping for the communication ring event channel. */ xen_intr_handle_t xen_intr_handle; /** * \brief Backend access mode flags (e.g. write, or read-only). * * This value is passed to us by the front-end via the XenStore. */ char *dev_mode; /** * \brief Backend device type (e.g. "disk", "cdrom", "floppy"). * * This value is passed to us by the front-end via the XenStore. * Currently unused. */ char *dev_type; /** * \brief Backend device/file identifier. * * This value is passed to us by the front-end via the XenStore. * We expect this to be a POSIX path indicating the file or * device to open. */ char *dev_name; /** * Vnode corresponding to the backend device node or file * we are accessing. */ struct vnode *vn; union xbb_backend_data backend; /** The native sector size of the backend. */ u_int sector_size; /** log2 of sector_size. */ u_int sector_size_shift; /** Size in bytes of the backend device or file. */ off_t media_size; /** * \brief media_size expressed in terms of the backend native * sector size. * * (e.g. xbb->media_size >> xbb->sector_size_shift). */ uint64_t media_num_sectors; /** * \brief Array of memoized scatter gather data computed during the * conversion of blkif ring requests to internal xbb_xen_req * structures. * * Ring processing is serialized so we only need one of these. */ struct xbb_sg xbb_sgs[XBB_MAX_SEGMENTS_PER_REQLIST]; /** * Temporary grant table map used in xbb_dispatch_io(). When * XBB_MAX_SEGMENTS_PER_REQLIST gets large, keeping this on the * stack could cause a stack overflow. */ struct gnttab_map_grant_ref maps[XBB_MAX_SEGMENTS_PER_REQLIST]; /** Mutex protecting per-instance data. */ struct mtx lock; /** * Resource representing allocated physical address space * associated with our per-instance kva region. */ struct resource *pseudo_phys_res; /** Resource id for allocated physical address space. */ int pseudo_phys_res_id; /** * I/O statistics from BlockBack dispatch down. These are * coalesced requests, and we start them right before execution. */ struct devstat *xbb_stats; /** * I/O statistics coming into BlockBack. These are the requests as * we get them from BlockFront. They are started as soon as we * receive a request, and completed when the I/O is complete.
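 *
 * (Contrast with xbb_stats above, which tracks the coalesced requests
 * we actually dispatch; when coalescing is active the two views will
 * legitimately differ.)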
*/ struct devstat *xbb_stats_in; /** Disable sending flush to the backend */ int disable_flush; /** Send a real flush for every N flush requests */ int flush_interval; /** Count of flush requests in the interval */ int flush_count; /** Don't coalesce requests if this is set */ int no_coalesce_reqs; /** Number of requests we have received */ uint64_t reqs_received; /** Number of requests we have completed */ uint64_t reqs_completed; /** Number of requests we queued but not pushed */ uint64_t reqs_queued_for_completion; /** Number of requests we completed with an error status */ uint64_t reqs_completed_with_error; /** How many forced dispatches (i.e. without coalescing) have happened */ uint64_t forced_dispatch; /** How many normal dispatches have happened */ uint64_t normal_dispatch; /** How many total dispatches have happened */ uint64_t total_dispatch; /** How many times we have run out of KVA */ uint64_t kva_shortages; /** How many times we have run out of request structures */ uint64_t request_shortages; /** Watch to wait for hotplug script execution */ struct xs_watch hotplug_watch; }; /*---------------------------- Request Processing ----------------------------*/ /** * Allocate an internal transaction tracking structure from the free pool. * * \param xbb Per-instance xbb configuration structure. * * \return On success, a pointer to the allocated xbb_xen_req structure. * Otherwise NULL. */ static inline struct xbb_xen_req * xbb_get_req(struct xbb_softc *xbb) { struct xbb_xen_req *req; req = NULL; mtx_assert(&xbb->lock, MA_OWNED); if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) { STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links); xbb->active_request_count++; } return (req); } /** * Return an allocated transaction tracking structure to the free pool. * * \param xbb Per-instance xbb configuration structure. * \param req The request structure to free. */ static inline void xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req) { mtx_assert(&xbb->lock, MA_OWNED); STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links); xbb->active_request_count--; KASSERT(xbb->active_request_count >= 0, ("xbb_release_req: negative active count")); } /** * Return an xbb_xen_req_list of allocated xbb_xen_reqs to the free pool. * * \param xbb Per-instance xbb configuration structure. * \param req_list The list of requests to free. * \param nreqs The number of items in the list. */ static inline void xbb_release_reqs(struct xbb_softc *xbb, struct xbb_xen_req_list *req_list, int nreqs) { mtx_assert(&xbb->lock, MA_OWNED); STAILQ_CONCAT(&xbb->request_free_stailq, req_list); xbb->active_request_count -= nreqs; KASSERT(xbb->active_request_count >= 0, ("xbb_release_reqs: negative active count")); } /** * Given a page index and 512b sector offset within that page, * calculate an offset into a request's kva region. * * \param reqlist The request structure whose kva region will be accessed. * \param pagenr The page index used to compute the kva offset. * \param sector The 512b sector index used to compute the page relative * kva offset. * * \return The computed global KVA offset. */ static inline uint8_t * xbb_reqlist_vaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector) { return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9)); } #ifdef XBB_USE_BOUNCE_BUFFERS /** * Given a page index and 512b sector offset within that page, * calculate an offset into a request's local bounce memory region. * * \param reqlist The request structure whose bounce region will be accessed.
* \param pagenr The page index used to compute the bounce offset. * \param sector The 512b sector index used to compute the page relative * bounce offset. * * \return The computed global bounce buffer address. */ static inline uint8_t * xbb_reqlist_bounce_addr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector) { return (reqlist->bounce + (PAGE_SIZE * pagenr) + (sector << 9)); } #endif /** * Given a page number and 512b sector offset within that page, * calculate an offset into the request's memory region that the * underlying backend device/file should use for I/O. * * \param reqlist The request structure whose I/O region will be accessed. * \param pagenr The page index used to compute the I/O offset. * \param sector The 512b sector index used to compute the page relative * I/O offset. * * \return The computed global I/O address. * * Depending on configuration, this will either be a local bounce buffer * or a pointer to the memory mapped in from the front-end domain for * this request. */ static inline uint8_t * xbb_reqlist_ioaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector) { #ifdef XBB_USE_BOUNCE_BUFFERS return (xbb_reqlist_bounce_addr(reqlist, pagenr, sector)); #else return (xbb_reqlist_vaddr(reqlist, pagenr, sector)); #endif } /** * Given a page index and 512b sector offset within that page, calculate - * an offset into the local psuedo-physical address space used to map a + * an offset into the local pseudo-physical address space used to map a * front-end's request data into a request. * * \param reqlist The request list structure whose pseudo-physical region * will be accessed. * \param pagenr The page index used to compute the pseudo-physical offset. * \param sector The 512b sector index used to compute the page relative * pseudo-physical offset. * * \return The computed global pseudo-physical address. * * Depending on configuration, this will either be a local bounce buffer * or a pointer to the memory mapped in from the front-end domain for * this request. */ static inline uintptr_t xbb_get_gntaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector) { struct xbb_softc *xbb; xbb = reqlist->xbb; return ((uintptr_t)(xbb->gnt_base_addr + (uintptr_t)(reqlist->kva - xbb->kva) + (PAGE_SIZE * pagenr) + (sector << 9))); } /** * Get Kernel Virtual Address space for mapping requests. * * \param xbb Per-instance xbb configuration structure. * \param nr_pages Number of pages needed. * * \return On success, a pointer to the allocated KVA region. Otherwise NULL. * * Note: This should be unnecessary once we have either chaining or * scatter/gather support for struct bio. At that point we'll be able to * put multiple addresses and lengths in one bio/bio chain and won't need * to map everything into one virtual segment. */ static uint8_t * xbb_get_kva(struct xbb_softc *xbb, int nr_pages) { int first_clear; int num_clear; uint8_t *free_kva; int i; KASSERT(nr_pages != 0, ("xbb_get_kva of zero length")); first_clear = 0; free_kva = NULL; mtx_lock(&xbb->lock); /* * Look for the first available page. If there are none, we're done. */ bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear); if (first_clear == -1) goto bailout; /* * Starting at the first available page, look for consecutive free * pages that will satisfy the user's request.
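 *
 * Worked example (added by the editor): with nr_pages == 3 and a
 * kva_free bitmap of 1 0 0 1 0 0 0 ... (a set bit meaning "page in
 * use"), the free run at pages 1-2 is too short, the scan resets at
 * the used page 3, and pages 4-6 are finally claimed with bit_nset().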
*/ for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) { /* * If this is true, the page is used, so we have to reset * the number of clear pages and the first clear page * (since it pointed to a region with an insufficient number * of clear pages). */ if (bit_test(xbb->kva_free, i)) { num_clear = 0; first_clear = -1; continue; } if (first_clear == -1) first_clear = i; /* * If this is true, we've found a large enough free region * to satisfy the request. */ if (++num_clear == nr_pages) { bit_nset(xbb->kva_free, first_clear, first_clear + nr_pages - 1); free_kva = xbb->kva + (uint8_t *)((intptr_t)first_clear * PAGE_SIZE); KASSERT(free_kva >= (uint8_t *)xbb->kva && free_kva + (nr_pages * PAGE_SIZE) <= (uint8_t *)xbb->ring_config.va, ("Free KVA %p len %d out of range, " "kva = %#jx, ring VA = %#jx\n", free_kva, nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva, (uintmax_t)xbb->ring_config.va)); break; } } bailout: if (free_kva == NULL) { xbb->flags |= XBBF_RESOURCE_SHORTAGE; xbb->kva_shortages++; } mtx_unlock(&xbb->lock); return (free_kva); } /** * Free allocated KVA. * * \param xbb Per-instance xbb configuration structure. * \param kva_ptr Pointer to allocated KVA region. * \param nr_pages Number of pages in the KVA region. */ static void xbb_free_kva(struct xbb_softc *xbb, uint8_t *kva_ptr, int nr_pages) { intptr_t start_page; mtx_assert(&xbb->lock, MA_OWNED); start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT; bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1); } /** * Unmap the front-end pages associated with this I/O request. * * \param reqlist The request list structure to unmap. */ static void xbb_unmap_reqlist(struct xbb_xen_reqlist *reqlist) { struct gnttab_unmap_grant_ref unmap[XBB_MAX_SEGMENTS_PER_REQLIST]; u_int i; u_int invcount; int error; invcount = 0; for (i = 0; i < reqlist->nr_segments; i++) { if (reqlist->gnt_handles[i] == GRANT_REF_INVALID) continue; unmap[invcount].host_addr = xbb_get_gntaddr(reqlist, i, 0); unmap[invcount].dev_bus_addr = 0; unmap[invcount].handle = reqlist->gnt_handles[i]; reqlist->gnt_handles[i] = GRANT_REF_INVALID; invcount++; } error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, invcount); KASSERT(error == 0, ("Grant table operation failed")); } /** * Allocate an internal transaction tracking structure from the free pool. * * \param xbb Per-instance xbb configuration structure. * * \return On success, a pointer to the allocated xbb_xen_reqlist structure. * Otherwise NULL. */ static inline struct xbb_xen_reqlist * xbb_get_reqlist(struct xbb_softc *xbb) { struct xbb_xen_reqlist *reqlist; reqlist = NULL; mtx_assert(&xbb->lock, MA_OWNED); if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) { STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links); reqlist->flags = XBB_REQLIST_NONE; reqlist->kva = NULL; reqlist->status = BLKIF_RSP_OKAY; reqlist->residual_512b_sectors = 0; reqlist->num_children = 0; reqlist->nr_segments = 0; STAILQ_INIT(&reqlist->contig_req_list); } return (reqlist); } /** * Return an allocated transaction tracking structure to the free pool. * * \param xbb Per-instance xbb configuration structure. * \param reqlist The request list structure to free. * \param wakeup If set, wakeup the work thread if freeing this reqlist * during a resource shortage condition.
*/ static inline void xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, int wakeup) { mtx_assert(&xbb->lock, MA_OWNED); if (wakeup) { wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE; xbb->flags &= ~XBBF_RESOURCE_SHORTAGE; } if (reqlist->kva != NULL) xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments); xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children); STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links); if ((xbb->flags & XBBF_SHUTDOWN) != 0) { /* * Shutdown is in progress. See if we can * progress further now that one more request * has completed and been returned to the * free pool. */ xbb_shutdown(xbb); } if (wakeup != 0) taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); } /** * Request resources and do basic request setup. * * \param xbb Per-instance xbb configuration structure. * \param reqlist Pointer to reqlist pointer. * \param ring_req Pointer to a block ring request. * \param ring_idx The ring index of this request. * * \return 0 for success, non-zero for failure. */ static int xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist, blkif_request_t *ring_req, RING_IDX ring_idx) { struct xbb_xen_reqlist *nreqlist; struct xbb_xen_req *nreq; nreqlist = NULL; nreq = NULL; mtx_lock(&xbb->lock); /* * We don't allow new resources to be allocated if we're in the * process of shutting down. */ if ((xbb->flags & XBBF_SHUTDOWN) != 0) { mtx_unlock(&xbb->lock); return (1); } /* * Allocate a reqlist if the caller doesn't have one already. */ if (*reqlist == NULL) { nreqlist = xbb_get_reqlist(xbb); if (nreqlist == NULL) goto bailout_error; } /* We always allocate a request. */ nreq = xbb_get_req(xbb); if (nreq == NULL) goto bailout_error; mtx_unlock(&xbb->lock); if (*reqlist == NULL) { *reqlist = nreqlist; nreqlist->operation = ring_req->operation; nreqlist->starting_sector_number = ring_req->sector_number; STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist, links); } nreq->reqlist = *reqlist; nreq->req_ring_idx = ring_idx; nreq->id = ring_req->id; nreq->operation = ring_req->operation; if (xbb->abi != BLKIF_PROTOCOL_NATIVE) { bcopy(ring_req, &nreq->ring_req_storage, sizeof(*ring_req)); nreq->ring_req = &nreq->ring_req_storage; } else { nreq->ring_req = ring_req; } binuptime(&nreq->ds_t0); devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0); STAILQ_INSERT_TAIL(&(*reqlist)->contig_req_list, nreq, links); (*reqlist)->num_children++; (*reqlist)->nr_segments += ring_req->nr_segments; return (0); bailout_error: /* * We're out of resources, so set the shortage flag. The next time * a request is released, we'll try waking up the work thread to * see if we can allocate more resources. */ xbb->flags |= XBBF_RESOURCE_SHORTAGE; xbb->request_shortages++; if (nreq != NULL) xbb_release_req(xbb, nreq); if (nreqlist != NULL) xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0); mtx_unlock(&xbb->lock); return (1); } /** * Create and queue a response to a blkif request. * * \param xbb Per-instance xbb configuration structure. * \param req The request structure to which to respond. * \param status The status code to report. See BLKIF_RSP_* * in sys/xen/interface/io/blkif.h. */ static void xbb_queue_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status) { blkif_response_t *resp; /* * The mutex is required here, and should be held across this call * until after the subsequent call to xbb_push_responses(). This * is to guarantee that another context won't queue responses and * push them while we're active.
* * That could lead to the other end being notified of responses * before the resources have been freed on this end. The other end * would then be able to queue additional I/O, and we may run out * of resources because we haven't freed them all yet. */ mtx_assert(&xbb->lock, MA_OWNED); /* * Place on the response ring for the relevant domain. * For now, only the spacing between entries is different * in the different ABIs, not the response entry layout. */ switch (xbb->abi) { case BLKIF_PROTOCOL_NATIVE: resp = RING_GET_RESPONSE(&xbb->rings.native, xbb->rings.native.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_32: resp = (blkif_response_t *) RING_GET_RESPONSE(&xbb->rings.x86_32, xbb->rings.x86_32.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_64: resp = (blkif_response_t *) RING_GET_RESPONSE(&xbb->rings.x86_64, xbb->rings.x86_64.rsp_prod_pvt); break; default: panic("Unexpected blkif protocol ABI."); } resp->id = req->id; resp->operation = req->operation; resp->status = status; if (status != BLKIF_RSP_OKAY) xbb->reqs_completed_with_error++; xbb->rings.common.rsp_prod_pvt++; xbb->reqs_queued_for_completion++; } /** * Send queued responses to blkif requests. * * \param xbb Per-instance xbb configuration structure. * \param run_taskqueue Flag that is set to 1 if the taskqueue * should be run, 0 if it does not need to be run. * \param notify Flag that is set to 1 if the other end should be * notified via irq, 0 if the other end should not be * notified. */ static void xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify) { int more_to_do; /* * The mutex is required here. */ mtx_assert(&xbb->lock, MA_OWNED); more_to_do = 0; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify); if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) { /* * Tail check for pending requests. Allows frontend to avoid * notifications if requests are already in flight (lower * overheads and promotes batching). */ RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do); } else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) { more_to_do = 1; } xbb->reqs_completed += xbb->reqs_queued_for_completion; xbb->reqs_queued_for_completion = 0; *run_taskqueue = more_to_do; } /** * Complete a request list. * * \param xbb Per-instance xbb configuration structure. * \param reqlist Allocated internal request list structure. */ static void xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist) { struct xbb_xen_req *nreq; off_t sectors_sent; int notify, run_taskqueue; sectors_sent = 0; if (reqlist->flags & XBB_REQLIST_MAPPED) xbb_unmap_reqlist(reqlist); mtx_lock(&xbb->lock); /* * All I/O is done, send the response. A lock is not necessary * to protect the request list, because all requests have * completed. Therefore this is the only context accessing this * reqlist right now. However, in order to make sure that no one * else queues responses onto the queue or pushes them to the other * side while we're active, we need to hold the lock across the * calls to xbb_queue_response() and xbb_push_responses(). */ STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) { off_t cur_sectors_sent; /* Put this response on the ring, but don't push yet */ xbb_queue_response(xbb, nreq, reqlist->status); /* We don't report bytes sent if there is an error. 
*/ if (reqlist->status == BLKIF_RSP_OKAY) cur_sectors_sent = nreq->nr_512b_sectors; else cur_sectors_sent = 0; sectors_sent += cur_sectors_sent; devstat_end_transaction(xbb->xbb_stats_in, /*bytes*/cur_sectors_sent << 9, reqlist->ds_tag_type, reqlist->ds_trans_type, /*now*/NULL, /*then*/&nreq->ds_t0); } /* * Take out any sectors not sent. If we wind up negative (which * might happen if an error is reported as well as a residual), just * report 0 sectors sent. */ sectors_sent -= reqlist->residual_512b_sectors; if (sectors_sent < 0) sectors_sent = 0; devstat_end_transaction(xbb->xbb_stats, /*bytes*/ sectors_sent << 9, reqlist->ds_tag_type, reqlist->ds_trans_type, /*now*/NULL, /*then*/&reqlist->ds_t0); xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1); xbb_push_responses(xbb, &run_taskqueue, &notify); mtx_unlock(&xbb->lock); if (run_taskqueue) taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); if (notify) xen_intr_signal(xbb->xen_intr_handle); } /** * Completion handler for buffer I/O requests issued by the device * backend driver. * * \param bio The buffer I/O request on which to perform completion * processing. */ static void xbb_bio_done(struct bio *bio) { struct xbb_softc *xbb; struct xbb_xen_reqlist *reqlist; reqlist = bio->bio_caller1; xbb = reqlist->xbb; reqlist->residual_512b_sectors += bio->bio_resid >> 9; /* * This is a bit imprecise. With aggregated I/O a single * request list can contain multiple front-end requests and * multiple bios may point to a single request. By carefully * walking the request list, we could map residuals and errors * back to the original front-end request, but the interface * isn't sufficiently rich for us to properly report the error. * So, we just treat the entire request list as having failed if an * error occurs on any part. And, if an error occurs, we treat * the amount of data transferred as 0. * * For residuals, we report them on the overall aggregated device, * but not on the individual requests, since we don't currently * do the work to determine the front-end request to which the * residual applies. */ if (bio->bio_error) { DPRINTF("BIO returned error %d for operation on device %s\n", bio->bio_error, xbb->dev_name); reqlist->status = BLKIF_RSP_ERROR; if (bio->bio_error == ENXIO && xenbus_get_state(xbb->dev) == XenbusStateConnected) { /* * Backend device has disappeared. Signal the * front-end that we (the device proxy) want to * go away. */ xenbus_set_state(xbb->dev, XenbusStateClosing); } } #ifdef XBB_USE_BOUNCE_BUFFERS if (bio->bio_cmd == BIO_READ) { vm_offset_t kva_offset; kva_offset = (vm_offset_t)bio->bio_data - (vm_offset_t)reqlist->bounce; memcpy((uint8_t *)reqlist->kva + kva_offset, bio->bio_data, bio->bio_bcount); } #endif /* XBB_USE_BOUNCE_BUFFERS */ /* * Decrement the pending count for the request list. When we're * done with the requests, send status back for all of them. */ if (atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1) xbb_complete_reqlist(xbb, reqlist); g_destroy_bio(bio); } /** * Parse a blkif request into an internal request structure and send * it to the backend for processing. * * \param xbb Per-instance xbb configuration structure. * \param reqlist Allocated internal request list structure. * * \return On success, 0. For resource shortages, non-zero. * * This routine performs the backend common aspects of request parsing * including compiling an internal request structure, parsing the S/G * list and any secondary ring requests in which they may reside, and * the mapping of front-end I/O pages into our domain.
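 *
 * In outline (an illustrative summary of the code below, not extra
 * driver logic), the routine proceeds as:
 *
 *	reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
 *	    (on failure, return ENOMEM so the caller can sleep via
 *	     the resource-shortage machinery)
 *	for each nreq on reqlist->contig_req_list:
 *	    validate its S/G entries and build grant map operations
 *	HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, ...);
 *	xbb->dispatch_io(xbb, reqlist, operation, bio_flags);
 *
 * with any validation or mapping failure routed to send_response,
 * which completes the whole list with BLKIF_RSP_ERROR.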
*/ static int xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist) { struct xbb_sg *xbb_sg; struct gnttab_map_grant_ref *map; struct blkif_request_segment *sg; struct blkif_request_segment *last_block_sg; struct xbb_xen_req *nreq; u_int nseg; u_int seg_idx; u_int block_segs; int nr_sects; int total_sects; int operation; uint8_t bio_flags; int error; reqlist->ds_tag_type = DEVSTAT_TAG_SIMPLE; bio_flags = 0; total_sects = 0; nr_sects = 0; /* * First determine whether we have enough free KVA to satisfy this * request list. If not, tell xbb_run_queue() so it can go to * sleep until we have more KVA. */ reqlist->kva = NULL; if (reqlist->nr_segments != 0) { reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments); if (reqlist->kva == NULL) { /* * If we're out of KVA, return ENOMEM. */ return (ENOMEM); } } binuptime(&reqlist->ds_t0); devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0); switch (reqlist->operation) { case BLKIF_OP_WRITE_BARRIER: bio_flags |= BIO_ORDERED; reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED; /* FALLTHROUGH */ case BLKIF_OP_WRITE: operation = BIO_WRITE; reqlist->ds_trans_type = DEVSTAT_WRITE; if ((xbb->flags & XBBF_READ_ONLY) != 0) { DPRINTF("Attempt to write to read only device %s\n", xbb->dev_name); reqlist->status = BLKIF_RSP_ERROR; goto send_response; } break; case BLKIF_OP_READ: operation = BIO_READ; reqlist->ds_trans_type = DEVSTAT_READ; break; case BLKIF_OP_FLUSH_DISKCACHE: /* * If this is true, the user has requested that we disable * flush support. So we just complete the requests * successfully. */ if (xbb->disable_flush != 0) { goto send_response; } /* * The user has requested that we only send a real flush * for every N flush requests. So keep count, and either * complete the request immediately or queue it for the * backend. */ if (xbb->flush_interval != 0) { if (++(xbb->flush_count) < xbb->flush_interval) { goto send_response; } else xbb->flush_count = 0; } operation = BIO_FLUSH; reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED; reqlist->ds_trans_type = DEVSTAT_NO_DATA; goto do_dispatch; /*NOTREACHED*/ default: DPRINTF("error: unknown block io operation [%d]\n", reqlist->operation); reqlist->status = BLKIF_RSP_ERROR; goto send_response; } reqlist->xbb = xbb; xbb_sg = xbb->xbb_sgs; map = xbb->maps; seg_idx = 0; STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) { blkif_request_t *ring_req; RING_IDX req_ring_idx; u_int req_seg_idx; ring_req = nreq->ring_req; req_ring_idx = nreq->req_ring_idx; nr_sects = 0; nseg = ring_req->nr_segments; nreq->nr_pages = nseg; nreq->nr_512b_sectors = 0; req_seg_idx = 0; sg = NULL; /* Check that number of segments is sane. 
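 * A request must carry at least one segment and no more than the
 * negotiated maximum. For the classic protocol that maximum is
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11), so with 4KiB pages a single
 * ring request can describe at most 11 * 8 = 88 512b sectors, i.e.
 * 44KiB of data.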
*/ if (__predict_false(nseg == 0) || __predict_false(nseg > xbb->max_request_segments)) { DPRINTF("Bad number of segments in request (%d)\n", nseg); reqlist->status = BLKIF_RSP_ERROR; goto send_response; } block_segs = nseg; sg = ring_req->seg; last_block_sg = sg + block_segs; while (sg < last_block_sg) { KASSERT(seg_idx < XBB_MAX_SEGMENTS_PER_REQLIST, ("seg_idx %d is too large, max " "segs %d\n", seg_idx, XBB_MAX_SEGMENTS_PER_REQLIST)); xbb_sg->first_sect = sg->first_sect; xbb_sg->last_sect = sg->last_sect; xbb_sg->nsect = (int8_t)(sg->last_sect - sg->first_sect + 1); if ((sg->last_sect >= (PAGE_SIZE >> 9)) || (xbb_sg->nsect <= 0)) { reqlist->status = BLKIF_RSP_ERROR; goto send_response; } nr_sects += xbb_sg->nsect; map->host_addr = xbb_get_gntaddr(reqlist, seg_idx, /*sector*/0); KASSERT(map->host_addr + PAGE_SIZE <= xbb->ring_config.gnt_addr, ("Host address %#jx len %d overlaps " "ring address %#jx\n", (uintmax_t)map->host_addr, PAGE_SIZE, (uintmax_t)xbb->ring_config.gnt_addr)); map->flags = GNTMAP_host_map; map->ref = sg->gref; map->dom = xbb->otherend_id; if (operation == BIO_WRITE) map->flags |= GNTMAP_readonly; sg++; map++; xbb_sg++; seg_idx++; req_seg_idx++; } /* Convert to the disk's sector size */ nreq->nr_512b_sectors = nr_sects; nr_sects = (nr_sects << 9) >> xbb->sector_size_shift; total_sects += nr_sects; if ((nreq->nr_512b_sectors & ((xbb->sector_size >> 9) - 1)) != 0) { device_printf(xbb->dev, "%s: I/O size (%d) is not " "a multiple of the backing store sector " "size (%d)\n", __func__, nreq->nr_512b_sectors << 9, xbb->sector_size); reqlist->status = BLKIF_RSP_ERROR; goto send_response; } } error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, xbb->maps, reqlist->nr_segments); if (error != 0) panic("Grant table operation failed (%d)", error); reqlist->flags |= XBB_REQLIST_MAPPED; for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments; seg_idx++, map++) { if (__predict_false(map->status != 0)) { DPRINTF("invalid buffer -- could not remap " "it (%d)\n", map->status); DPRINTF("Mapping(%d): Host Addr 0x%"PRIx64", flags " "0x%x ref 0x%x, dom %d\n", seg_idx, map->host_addr, map->flags, map->ref, map->dom); reqlist->status = BLKIF_RSP_ERROR; goto send_response; } reqlist->gnt_handles[seg_idx] = map->handle; } if (reqlist->starting_sector_number + total_sects > xbb->media_num_sectors) { DPRINTF("%s of [%" PRIu64 ",%" PRIu64 "] " "extends past end of device %s\n", operation == BIO_READ ? "read" : "write", reqlist->starting_sector_number, reqlist->starting_sector_number + total_sects, xbb->dev_name); reqlist->status = BLKIF_RSP_ERROR; goto send_response; } do_dispatch: error = xbb->dispatch_io(xbb, reqlist, operation, bio_flags); if (error != 0) { reqlist->status = BLKIF_RSP_ERROR; goto send_response; } return (0); send_response: xbb_complete_reqlist(xbb, reqlist); return (0); } static __inline int xbb_count_sects(blkif_request_t *ring_req) { int i; int cur_size = 0; for (i = 0; i < ring_req->nr_segments; i++) { int nsect; nsect = (int8_t)(ring_req->seg[i].last_sect - ring_req->seg[i].first_sect + 1); if (nsect <= 0) break; cur_size += nsect; } return (cur_size); } /** * Process incoming requests from the shared communication ring in response * to a signal on the ring's event channel. * * \param context Callback argument registered during task initialization - * the xbb_softc for this instance. * \param pending The number of taskqueue_enqueue events that have * occurred since this handler was last run.
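 *
 * For example (illustrative numbers only): if the front-end queues
 * three 4KiB writes at 512b sectors 0, 8 and 16, the gather loop
 * below folds them into one request list whose
 * starting_sector_number is 0, advancing next_contig_sector by 8
 * after each request, so the backend sees a single 12KiB dispatch
 * instead of three 4KiB ones.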
*/ static void xbb_run_queue(void *context, int pending) { struct xbb_softc *xbb; blkif_back_rings_t *rings; RING_IDX rp; uint64_t cur_sector; int cur_operation; struct xbb_xen_reqlist *reqlist; xbb = (struct xbb_softc *)context; rings = &xbb->rings; /* * Work gather and dispatch loop. Note that we have a bias here * towards gathering I/O sent by blockfront. We first gather up * everything in the ring, as long as we have resources. Then we * dispatch one request, and then attempt to gather up any * additional requests that have come in while we were dispatching * the request. * * This allows us to get a clearer picture (via devstat) of how * many requests blockfront is queueing to us at any given time. */ for (;;) { int retval; /* * Initialize reqlist to the last element in the pending * queue, if there is one. This allows us to add more * requests to that request list, if we have room. */ reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq, xbb_xen_reqlist, links); if (reqlist != NULL) { cur_sector = reqlist->next_contig_sector; cur_operation = reqlist->operation; } else { cur_operation = 0; cur_sector = 0; } /* * Cache req_prod to avoid accessing a cache line shared * with the frontend. */ rp = rings->common.sring->req_prod; /* Ensure we see queued requests up to 'rp'. */ rmb(); /** * Run so long as there is work to consume and the generation * of a response will not overflow the ring. * * @note There's a 1 to 1 relationship between requests and * responses, so an overflow should never occur. This * test is to protect our domain from digesting bogus * data. Shouldn't we log this? */ while (rings->common.req_cons != rp && RING_REQUEST_CONS_OVERFLOW(&rings->common, rings->common.req_cons) == 0) { blkif_request_t ring_req_storage; blkif_request_t *ring_req; int cur_size; switch (xbb->abi) { case BLKIF_PROTOCOL_NATIVE: ring_req = RING_GET_REQUEST(&xbb->rings.native, rings->common.req_cons); break; case BLKIF_PROTOCOL_X86_32: { struct blkif_x86_32_request *ring_req32; ring_req32 = RING_GET_REQUEST( &xbb->rings.x86_32, rings->common.req_cons); blkif_get_x86_32_req(&ring_req_storage, ring_req32); ring_req = &ring_req_storage; break; } case BLKIF_PROTOCOL_X86_64: { struct blkif_x86_64_request *ring_req64; ring_req64 = RING_GET_REQUEST(&xbb->rings.x86_64, rings->common.req_cons); blkif_get_x86_64_req(&ring_req_storage, ring_req64); ring_req = &ring_req_storage; break; } default: panic("Unexpected blkif protocol ABI."); /* NOTREACHED */ } /* * Check for situations that would require closing * off this I/O for further coalescing: * - Coalescing is turned off. * - Current I/O is out of sequence with the previous * I/O. * - Coalesced I/O would be too large. */ if ((reqlist != NULL) && ((xbb->no_coalesce_reqs != 0) || ((xbb->no_coalesce_reqs == 0) && ((ring_req->sector_number != cur_sector) || (ring_req->operation != cur_operation) || ((ring_req->nr_segments + reqlist->nr_segments) > xbb->max_reqlist_segments))))) { reqlist = NULL; } /* * Grab and check for all resources in one shot. * If we can't get all of the resources we need, * the shortage is noted and the thread will get * woken up when more resources are available. */ retval = xbb_get_resources(xbb, &reqlist, ring_req, xbb->rings.common.req_cons); if (retval != 0) { /* * Resource shortage has been recorded. * We'll be scheduled to run once a request * object frees up due to a completion. */ break; } /* * Signify that we can overwrite this request with * a response by incrementing our consumer index.
* The response won't be generated until after * we've already consumed all necessary data out * of the version of the request in the ring buffer * (for native mode). We must update the consumer * index before issuing back-end I/O so there is * no possibility that it will complete and a * response be generated before we make room in * the queue for that response. */ xbb->rings.common.req_cons++; xbb->reqs_received++; cur_size = xbb_count_sects(ring_req); cur_sector = ring_req->sector_number + cur_size; reqlist->next_contig_sector = cur_sector; cur_operation = ring_req->operation; } /* Check for I/O to dispatch */ reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq); if (reqlist == NULL) { /* * We're out of work to do, put the task queue to * sleep. */ break; } /* * Grab the first request off the queue and attempt * to dispatch it. */ STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links); retval = xbb_dispatch_io(xbb, reqlist); if (retval != 0) { /* * xbb_dispatch_io() returns non-zero only when * there is a resource shortage. If that's the * case, re-queue this request on the head of the * queue, and go to sleep until we have more * resources. */ STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq, reqlist, links); break; } else { /* * If we still have anything on the queue after * removing the head entry, that is because we * met one of the criteria to create a new * request list (outlined above), and we'll call * that a forced dispatch for statistical purposes. * * Otherwise, if there is only one element on the * queue, we coalesced everything available on * the ring and we'll call that a normal dispatch. */ reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq); if (reqlist != NULL) xbb->forced_dispatch++; else xbb->normal_dispatch++; xbb->total_dispatch++; } } } /** * Interrupt handler bound to the shared ring's event channel. * * \param arg Callback argument registered during event channel * binding - the xbb_softc for this instance. */ static int xbb_filter(void *arg) { struct xbb_softc *xbb; /* Defer to taskqueue thread. */ xbb = (struct xbb_softc *)arg; taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); return (FILTER_HANDLED); } SDT_PROVIDER_DEFINE(xbb); SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_dev, flush, "int"); SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, read, "int", "uint64_t", "uint64_t"); SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, write, "int", "uint64_t", "uint64_t"); /*----------------------------- Backend Handlers -----------------------------*/ /** * Backend handler for character device access. * * \param xbb Per-instance xbb configuration structure. * \param reqlist Allocated internal request list structure. * \param operation BIO_* I/O operation code. * \param bio_flags Additional bio_flag data to pass to any generated * bios (e.g. BIO_ORDERED). * * \return 0 for success, errno codes for failure.
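 *
 * As a rough sketch (field values are illustrative, not taken from a
 * real trace): a two-page contiguous read starting at 512b sector
 * 128 of a 512b-sector device becomes a single bio built along the
 * lines of:
 *
 *	bio = g_new_bio();
 *	bio->bio_cmd     = BIO_READ;
 *	bio->bio_offset  = (off_t)128 << 9;
 *	bio->bio_data    = xbb_reqlist_ioaddr(reqlist, 0, 0);
 *	bio->bio_length  = 2 * PAGE_SIZE;
 *	bio->bio_done    = xbb_bio_done;
 *	bio->bio_caller1 = reqlist;
 *	(*dev_data->csw->d_strategy)(bio);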
*/ static int xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, int operation, int bio_flags) { struct xbb_dev_data *dev_data; struct bio *bios[XBB_MAX_SEGMENTS_PER_REQLIST]; off_t bio_offset; struct bio *bio; struct xbb_sg *xbb_sg; u_int nbio; u_int bio_idx; u_int nseg; u_int seg_idx; int error; dev_data = &xbb->backend.dev; bio_offset = (off_t)reqlist->starting_sector_number << xbb->sector_size_shift; error = 0; nbio = 0; bio_idx = 0; if (operation == BIO_FLUSH) { bio = g_new_bio(); if (__predict_false(bio == NULL)) { DPRINTF("Unable to allocate bio for BIO_FLUSH\n"); error = ENOMEM; return (error); } bio->bio_cmd = BIO_FLUSH; bio->bio_flags |= BIO_ORDERED; bio->bio_dev = dev_data->cdev; bio->bio_offset = 0; bio->bio_data = 0; bio->bio_done = xbb_bio_done; bio->bio_caller1 = reqlist; bio->bio_pblkno = 0; reqlist->pendcnt = 1; SDT_PROBE1(xbb, kernel, xbb_dispatch_dev, flush, device_get_unit(xbb->dev)); (*dev_data->csw->d_strategy)(bio); return (0); } xbb_sg = xbb->xbb_sgs; bio = NULL; nseg = reqlist->nr_segments; for (seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) { /* * KVA will not be contiguous, so any additional * I/O will need to be represented in a new bio. */ if ((bio != NULL) && (xbb_sg->first_sect != 0)) { if ((bio->bio_length & (xbb->sector_size - 1)) != 0) { printf("%s: Discontiguous I/O request " "from domain %d ends on " "non-sector boundary\n", __func__, xbb->otherend_id); error = EINVAL; goto fail_free_bios; } bio = NULL; } if (bio == NULL) { /* * Make sure that the start of this bio is * aligned to a device sector. */ if ((bio_offset & (xbb->sector_size - 1)) != 0){ printf("%s: Misaligned I/O request " "from domain %d\n", __func__, xbb->otherend_id); error = EINVAL; goto fail_free_bios; } bio = bios[nbio++] = g_new_bio(); if (__predict_false(bio == NULL)) { error = ENOMEM; goto fail_free_bios; } bio->bio_cmd = operation; bio->bio_flags |= bio_flags; bio->bio_dev = dev_data->cdev; bio->bio_offset = bio_offset; bio->bio_data = xbb_reqlist_ioaddr(reqlist, seg_idx, xbb_sg->first_sect); bio->bio_done = xbb_bio_done; bio->bio_caller1 = reqlist; bio->bio_pblkno = bio_offset >> xbb->sector_size_shift; } bio->bio_length += xbb_sg->nsect << 9; bio->bio_bcount = bio->bio_length; bio_offset += xbb_sg->nsect << 9; if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) { if ((bio->bio_length & (xbb->sector_size - 1)) != 0) { printf("%s: Discontiguous I/O request " "from domain %d ends on " "non-sector boundary\n", __func__, xbb->otherend_id); error = EINVAL; goto fail_free_bios; } /* * KVA will not be contiguous, so any additional * I/O will need to be represented in a new bio. 
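 *
 * (With 4KiB pages, (PAGE_SIZE - 512) >> 9 is sector 7, the last
 * 512b sector in a page; a segment that ends on any earlier sector
 * leaves a hole in KVA, which is why the current bio is closed off
 * here.)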
*/ bio = NULL; } } reqlist->pendcnt = nbio; for (bio_idx = 0; bio_idx < nbio; bio_idx++) { #ifdef XBB_USE_BOUNCE_BUFFERS vm_offset_t kva_offset; kva_offset = (vm_offset_t)bios[bio_idx]->bio_data - (vm_offset_t)reqlist->bounce; if (operation == BIO_WRITE) { memcpy(bios[bio_idx]->bio_data, (uint8_t *)reqlist->kva + kva_offset, bios[bio_idx]->bio_bcount); } #endif if (operation == BIO_READ) { SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, read, device_get_unit(xbb->dev), bios[bio_idx]->bio_offset, bios[bio_idx]->bio_length); } else if (operation == BIO_WRITE) { SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, write, device_get_unit(xbb->dev), bios[bio_idx]->bio_offset, bios[bio_idx]->bio_length); } (*dev_data->csw->d_strategy)(bios[bio_idx]); } return (error); fail_free_bios: for (bio_idx = 0; bio_idx < (nbio-1); bio_idx++) g_destroy_bio(bios[bio_idx]); return (error); } SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_file, flush, "int"); SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, read, "int", "uint64_t", "uint64_t"); SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, write, "int", "uint64_t", "uint64_t"); /** * Backend handler for file access. * * \param xbb Per-instance xbb configuration structure. * \param reqlist Allocated internal request list. * \param operation BIO_* I/O operation code. * \param flags Additional bio_flag data to pass to any generated bios * (e.g. BIO_ORDERED). * * \return 0 for success, errno codes for failure. */ static int xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, int operation, int flags) { struct xbb_file_data *file_data; u_int seg_idx; u_int nseg; struct uio xuio; struct xbb_sg *xbb_sg; struct iovec *xiovec; #ifdef XBB_USE_BOUNCE_BUFFERS void **p_vaddr; int saved_uio_iovcnt; #endif /* XBB_USE_BOUNCE_BUFFERS */ int error; file_data = &xbb->backend.file; error = 0; bzero(&xuio, sizeof(xuio)); switch (operation) { case BIO_READ: xuio.uio_rw = UIO_READ; break; case BIO_WRITE: xuio.uio_rw = UIO_WRITE; break; case BIO_FLUSH: { struct mount *mountpoint; SDT_PROBE1(xbb, kernel, xbb_dispatch_file, flush, device_get_unit(xbb->dev)); (void) vn_start_write(xbb->vn, &mountpoint, V_WAIT); vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); error = VOP_FSYNC(xbb->vn, MNT_WAIT, curthread); VOP_UNLOCK(xbb->vn, 0); vn_finished_write(mountpoint); goto bailout_send_response; /* NOTREACHED */ } default: panic("invalid operation %d", operation); /* NOTREACHED */ } xuio.uio_offset = (vm_offset_t)reqlist->starting_sector_number << xbb->sector_size_shift; xuio.uio_segflg = UIO_SYSSPACE; xuio.uio_iov = file_data->xiovecs; xuio.uio_iovcnt = 0; xbb_sg = xbb->xbb_sgs; nseg = reqlist->nr_segments; for (xiovec = NULL, seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) { /* * If the first sector is not 0, the KVA will * not be contiguous and we'll need to go on * to another segment. */ if (xbb_sg->first_sect != 0) xiovec = NULL; if (xiovec == NULL) { xiovec = &file_data->xiovecs[xuio.uio_iovcnt]; xiovec->iov_base = xbb_reqlist_ioaddr(reqlist, seg_idx, xbb_sg->first_sect); #ifdef XBB_USE_BOUNCE_BUFFERS /* * Store the address of the incoming * buffer at this particular offset * as well, so we can do the copy * later without having to do more * work to recalculate this address.
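 *
 * (Copy direction, for reference: for BIO_WRITE the guest's data is
 * copied from the mapped front-end pages into the bounce buffer
 * before VOP_WRITE(); for BIO_READ it is copied back out to the
 * mapped pages only after VOP_READ() completes, using the iovec
 * copy saved below.)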
*/ p_vaddr = &file_data->xiovecs_vaddr[xuio.uio_iovcnt]; *p_vaddr = xbb_reqlist_vaddr(reqlist, seg_idx, xbb_sg->first_sect); #endif /* XBB_USE_BOUNCE_BUFFERS */ xiovec->iov_len = 0; xuio.uio_iovcnt++; } xiovec->iov_len += xbb_sg->nsect << 9; xuio.uio_resid += xbb_sg->nsect << 9; /* * If the last sector is not the full page * size count, the next segment will not be * contiguous in KVA and we need a new iovec. */ if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) xiovec = NULL; } xuio.uio_td = curthread; #ifdef XBB_USE_BOUNCE_BUFFERS saved_uio_iovcnt = xuio.uio_iovcnt; if (operation == BIO_WRITE) { /* Copy the write data to the local buffer. */ for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr, xiovec = xuio.uio_iov; seg_idx < xuio.uio_iovcnt; seg_idx++, xiovec++, p_vaddr++) { memcpy(xiovec->iov_base, *p_vaddr, xiovec->iov_len); } } else { /* * We only need to save off the iovecs in the case of a * read, because the copy for the read happens after the * VOP_READ(). (The uio will get modified in that call * sequence.) */ memcpy(file_data->saved_xiovecs, xuio.uio_iov, xuio.uio_iovcnt * sizeof(xuio.uio_iov[0])); } #endif /* XBB_USE_BOUNCE_BUFFERS */ switch (operation) { case BIO_READ: SDT_PROBE3(xbb, kernel, xbb_dispatch_file, read, device_get_unit(xbb->dev), xuio.uio_offset, xuio.uio_resid); vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); /* * UFS pays attention to IO_DIRECT for reads. If the * DIRECTIO option is configured into the kernel, it calls * ffs_rawread(). But that only works for single-segment * uios with user space addresses. In our case, with a * kernel uio, it still reads into the buffer cache, but it * will just try to release the buffer from the cache later * on in ffs_read(). * * ZFS does not pay attention to IO_DIRECT for reads. * * UFS does not pay attention to IO_SYNC for reads. * * ZFS pays attention to IO_SYNC (which translates into the * Solaris define FRSYNC for zfs_read()) for reads. It * attempts to sync the file before reading. * * So, to attempt to provide some barrier semantics in the * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC. */ error = VOP_READ(xbb->vn, &xuio, (flags & BIO_ORDERED) ? (IO_DIRECT|IO_SYNC) : 0, file_data->cred); VOP_UNLOCK(xbb->vn, 0); break; case BIO_WRITE: { struct mount *mountpoint; SDT_PROBE3(xbb, kernel, xbb_dispatch_file, write, device_get_unit(xbb->dev), xuio.uio_offset, xuio.uio_resid); (void)vn_start_write(xbb->vn, &mountpoint, V_WAIT); vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); /* * UFS pays attention to IO_DIRECT for writes. The write * is done asynchronously. (Normally the write would just * get put into cache.) * * UFS pays attention to IO_SYNC for writes. It will * attempt to write the buffer out synchronously if that * flag is set. * * ZFS does not pay attention to IO_DIRECT for writes. * * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) * for writes. It will flush the transaction from the * cache before returning. * * So if we've got the BIO_ORDERED flag set, we want * IO_SYNC in either the UFS or ZFS case. */ error = VOP_WRITE(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
IO_SYNC : 0, file_data->cred); VOP_UNLOCK(xbb->vn, 0); vn_finished_write(mountpoint); break; } default: panic("invalid operation %d", operation); /* NOTREACHED */ } #ifdef XBB_USE_BOUNCE_BUFFERS /* We only need to copy here for read operations */ if (operation == BIO_READ) { for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr, xiovec = file_data->saved_xiovecs; seg_idx < saved_uio_iovcnt; seg_idx++, xiovec++, p_vaddr++) { /* * Note that we have to use the copy of the * io vector we made above. uiomove() modifies * the uio and its referenced vector as uiomove * performs the copy, so we can't rely on any * state from the original uio. */ memcpy(*p_vaddr, xiovec->iov_base, xiovec->iov_len); } } #endif /* XBB_USE_BOUNCE_BUFFERS */ bailout_send_response: if (error != 0) reqlist->status = BLKIF_RSP_ERROR; xbb_complete_reqlist(xbb, reqlist); return (0); } /*--------------------------- Backend Configuration --------------------------*/ /** * Close and cleanup any backend device/file specific state for this * block back instance. * * \param xbb Per-instance xbb configuration structure. */ static void xbb_close_backend(struct xbb_softc *xbb) { DROP_GIANT(); DPRINTF("closing dev=%s\n", xbb->dev_name); if (xbb->vn) { int flags = FREAD; if ((xbb->flags & XBBF_READ_ONLY) == 0) flags |= FWRITE; switch (xbb->device_type) { case XBB_TYPE_DISK: if (xbb->backend.dev.csw) { dev_relthread(xbb->backend.dev.cdev, xbb->backend.dev.dev_ref); xbb->backend.dev.csw = NULL; xbb->backend.dev.cdev = NULL; } break; case XBB_TYPE_FILE: break; case XBB_TYPE_NONE: default: panic("Unexpected backend type."); break; } (void)vn_close(xbb->vn, flags, NOCRED, curthread); xbb->vn = NULL; switch (xbb->device_type) { case XBB_TYPE_DISK: break; case XBB_TYPE_FILE: if (xbb->backend.file.cred != NULL) { crfree(xbb->backend.file.cred); xbb->backend.file.cred = NULL; } break; case XBB_TYPE_NONE: default: panic("Unexpected backend type."); break; } } PICKUP_GIANT(); } /** * Open a character device to be used for backend I/O. * * \param xbb Per-instance xbb configuration structure. * * \return 0 for success, errno codes for failure. */ static int xbb_open_dev(struct xbb_softc *xbb) { struct vattr vattr; struct cdev *dev; struct cdevsw *devsw; int error; xbb->device_type = XBB_TYPE_DISK; xbb->dispatch_io = xbb_dispatch_dev; xbb->backend.dev.cdev = xbb->vn->v_rdev; xbb->backend.dev.csw = dev_refthread(xbb->backend.dev.cdev, &xbb->backend.dev.dev_ref); if (xbb->backend.dev.csw == NULL) panic("Unable to retrieve device switch"); error = VOP_GETATTR(xbb->vn, &vattr, NOCRED); if (error) { xenbus_dev_fatal(xbb->dev, error, "error getting " "vnode attributes for device %s", xbb->dev_name); return (error); } dev = xbb->vn->v_rdev; devsw = dev->si_devsw; if (!devsw->d_ioctl) { xenbus_dev_fatal(xbb->dev, ENODEV, "no d_ioctl for " "device %s!", xbb->dev_name); return (ENODEV); } error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, (caddr_t)&xbb->sector_size, FREAD, curthread); if (error) { xenbus_dev_fatal(xbb->dev, error, "error calling ioctl DIOCGSECTORSIZE " "for device %s", xbb->dev_name); return (error); } error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, (caddr_t)&xbb->media_size, FREAD, curthread); if (error) { xenbus_dev_fatal(xbb->dev, error, "error calling ioctl DIOCGMEDIASIZE " "for device %s", xbb->dev_name); return (error); } return (0); } /** * Open a file to be used for backend I/O. * * \param xbb Per-instance xbb configuration structure. * * \return 0 for success, errno codes for failure. 
*/ static int xbb_open_file(struct xbb_softc *xbb) { struct xbb_file_data *file_data; struct vattr vattr; int error; file_data = &xbb->backend.file; xbb->device_type = XBB_TYPE_FILE; xbb->dispatch_io = xbb_dispatch_file; error = VOP_GETATTR(xbb->vn, &vattr, curthread->td_ucred); if (error != 0) { xenbus_dev_fatal(xbb->dev, error, "error calling VOP_GETATTR() " "for file %s", xbb->dev_name); return (error); } /* * Verify that we have the ability to upgrade to exclusive * access on this file so we can trap errors at open instead * of reporting them during first access. */ if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) { vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY); if (xbb->vn->v_iflag & VI_DOOMED) { error = EBADF; xenbus_dev_fatal(xbb->dev, error, "error locking file %s", xbb->dev_name); return (error); } } file_data->cred = crhold(curthread->td_ucred); xbb->media_size = vattr.va_size; /* * XXX KDM vattr.va_blocksize may be larger than 512 bytes here. * With ZFS, it is 131072 bytes. Block sizes that large don't work * with disklabel and UFS on FreeBSD at least. Large block sizes * may not work with other OSes as well. So just export a sector * size of 512 bytes, which should work with any OS or * application. Since our backing is a file, any block size will * work fine for the backing store. */ #if 0 xbb->sector_size = vattr.va_blocksize; #endif xbb->sector_size = 512; /* * Sanity check. The media size has to be at least one * sector long. */ if (xbb->media_size < xbb->sector_size) { error = EINVAL; xenbus_dev_fatal(xbb->dev, error, "file %s size %ju < block size %u", xbb->dev_name, (uintmax_t)xbb->media_size, xbb->sector_size); } return (error); } /** * Open the backend provider for this connection. * * \param xbb Per-instance xbb configuration structure. * * \return 0 for success, errno codes for failure. */ static int xbb_open_backend(struct xbb_softc *xbb) { struct nameidata nd; int flags; int error; flags = FREAD; error = 0; DPRINTF("opening dev=%s\n", xbb->dev_name); if (rootvnode == NULL) { xenbus_dev_fatal(xbb->dev, ENOENT, "Root file system not mounted"); return (ENOENT); } if ((xbb->flags & XBBF_READ_ONLY) == 0) flags |= FWRITE; pwd_ensure_dirs(); again: NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, xbb->dev_name, curthread); error = vn_open(&nd, &flags, 0, NULL); if (error) { /* * This is the only reasonable guess we can make for the * path if the user doesn't give us a fully qualified one. * If they want to specify a file, they need to specify the * full path. */ if (xbb->dev_name[0] != '/') { char *dev_path = "/dev/"; char *dev_name; /* Try adding device path at beginning of name */ dev_name = malloc(strlen(xbb->dev_name) + strlen(dev_path) + 1, M_XENBLOCKBACK, M_NOWAIT); if (dev_name) { sprintf(dev_name, "%s%s", dev_path, xbb->dev_name); free(xbb->dev_name, M_XENBLOCKBACK); xbb->dev_name = dev_name; goto again; } } xenbus_dev_fatal(xbb->dev, error, "error opening device %s", xbb->dev_name); return (error); } NDFREE(&nd, NDF_ONLY_PNBUF); xbb->vn = nd.ni_vp; /* We only support disks and files.
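 *
 * (For example, a backend path such as /dev/ada0s1 resolves to a
 * disk vnode and is handled by xbb_open_dev(), while a path such as
 * /storage/guest.img resolves to a VREG vnode and is handled by
 * xbb_open_file(); the example paths are illustrative, nothing in
 * the driver special-cases them.)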
*/ if (vn_isdisk(xbb->vn, &error)) { error = xbb_open_dev(xbb); } else if (xbb->vn->v_type == VREG) { error = xbb_open_file(xbb); } else { error = EINVAL; xenbus_dev_fatal(xbb->dev, error, "%s is not a disk " "or file", xbb->dev_name); } VOP_UNLOCK(xbb->vn, 0); if (error != 0) { xbb_close_backend(xbb); return (error); } xbb->sector_size_shift = fls(xbb->sector_size) - 1; xbb->media_num_sectors = xbb->media_size >> xbb->sector_size_shift; DPRINTF("opened %s=%s sector_size=%u media_size=%" PRId64 "\n", (xbb->device_type == XBB_TYPE_DISK) ? "dev" : "file", xbb->dev_name, xbb->sector_size, xbb->media_size); return (0); } /*------------------------ Inter-Domain Communication ------------------------*/ /** * Free dynamically allocated KVA or pseudo-physical address allocations. * * \param xbb Per-instance xbb configuration structure. */ static void xbb_free_communication_mem(struct xbb_softc *xbb) { if (xbb->kva != 0) { if (xbb->pseudo_phys_res != NULL) { xenmem_free(xbb->dev, xbb->pseudo_phys_res_id, xbb->pseudo_phys_res); xbb->pseudo_phys_res = NULL; } } xbb->kva = 0; xbb->gnt_base_addr = 0; if (xbb->kva_free != NULL) { free(xbb->kva_free, M_XENBLOCKBACK); xbb->kva_free = NULL; } } /** * Cleanup all inter-domain communication mechanisms. * * \param xbb Per-instance xbb configuration structure. */ static int xbb_disconnect(struct xbb_softc *xbb) { struct gnttab_unmap_grant_ref ops[XBB_MAX_RING_PAGES]; struct gnttab_unmap_grant_ref *op; u_int ring_idx; int error; DPRINTF("\n"); if ((xbb->flags & XBBF_RING_CONNECTED) == 0) return (0); xen_intr_unbind(&xbb->xen_intr_handle); mtx_unlock(&xbb->lock); taskqueue_drain(xbb->io_taskqueue, &xbb->io_task); mtx_lock(&xbb->lock); /* * No new interrupts can generate work, but we must wait * for all currently active requests to drain. */ if (xbb->active_request_count != 0) return (EAGAIN); for (ring_idx = 0, op = ops; ring_idx < xbb->ring_config.ring_pages; ring_idx++, op++) { op->host_addr = xbb->ring_config.gnt_addr + (ring_idx * PAGE_SIZE); op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx]; op->handle = xbb->ring_config.handle[ring_idx]; } error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, ops, xbb->ring_config.ring_pages); if (error != 0) panic("Grant table op failed (%d)", error); xbb_free_communication_mem(xbb); if (xbb->requests != NULL) { free(xbb->requests, M_XENBLOCKBACK); xbb->requests = NULL; } if (xbb->request_lists != NULL) { struct xbb_xen_reqlist *reqlist; int i; /* There is one request list for every allocated request. */ for (i = 0, reqlist = xbb->request_lists; i < xbb->max_requests; i++, reqlist++) { #ifdef XBB_USE_BOUNCE_BUFFERS if (reqlist->bounce != NULL) { free(reqlist->bounce, M_XENBLOCKBACK); reqlist->bounce = NULL; } #endif if (reqlist->gnt_handles != NULL) { free(reqlist->gnt_handles, M_XENBLOCKBACK); reqlist->gnt_handles = NULL; } } free(xbb->request_lists, M_XENBLOCKBACK); xbb->request_lists = NULL; } xbb->flags &= ~XBBF_RING_CONNECTED; return (0); } /** * Map shared memory ring into domain local address space, initialize * ring control structures, and bind an interrupt to the event channel * used to notify us of ring changes. * * \param xbb Per-instance xbb configuration structure. */ static int xbb_connect_ring(struct xbb_softc *xbb) { struct gnttab_map_grant_ref gnts[XBB_MAX_RING_PAGES]; struct gnttab_map_grant_ref *gnt; u_int ring_idx; int error; if ((xbb->flags & XBBF_RING_CONNECTED) != 0) return (0); /* * KVA for our ring is at the tail of the region of KVA allocated * by xbb_alloc_communication_mem().
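 *
 * Layout of the communication region (widths illustrative):
 *
 *	xbb->kva                                     kva + kva_size
 *	|<------- reqlist_kva_size ------->|<---- ring pages ---->|
 *	|     request data mappings        |     shared ring      |
 *	                                   ^ ring_config.va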
*/ xbb->ring_config.va = xbb->kva + (xbb->kva_size - (xbb->ring_config.ring_pages * PAGE_SIZE)); xbb->ring_config.gnt_addr = xbb->gnt_base_addr + (xbb->kva_size - (xbb->ring_config.ring_pages * PAGE_SIZE)); for (ring_idx = 0, gnt = gnts; ring_idx < xbb->ring_config.ring_pages; ring_idx++, gnt++) { gnt->host_addr = xbb->ring_config.gnt_addr + (ring_idx * PAGE_SIZE); gnt->flags = GNTMAP_host_map; gnt->ref = xbb->ring_config.ring_ref[ring_idx]; gnt->dom = xbb->otherend_id; } error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, gnts, xbb->ring_config.ring_pages); if (error) panic("blkback: Ring page grant table op failed (%d)", error); for (ring_idx = 0, gnt = gnts; ring_idx < xbb->ring_config.ring_pages; ring_idx++, gnt++) { if (gnt->status != 0) { xbb->ring_config.va = 0; xenbus_dev_fatal(xbb->dev, EACCES, "Ring shared page mapping failed. " "Status %d.", gnt->status); return (EACCES); } xbb->ring_config.handle[ring_idx] = gnt->handle; xbb->ring_config.bus_addr[ring_idx] = gnt->dev_bus_addr; } /* Initialize the ring based on ABI. */ switch (xbb->abi) { case BLKIF_PROTOCOL_NATIVE: { blkif_sring_t *sring; sring = (blkif_sring_t *)xbb->ring_config.va; BACK_RING_INIT(&xbb->rings.native, sring, xbb->ring_config.ring_pages * PAGE_SIZE); break; } case BLKIF_PROTOCOL_X86_32: { blkif_x86_32_sring_t *sring_x86_32; sring_x86_32 = (blkif_x86_32_sring_t *)xbb->ring_config.va; BACK_RING_INIT(&xbb->rings.x86_32, sring_x86_32, xbb->ring_config.ring_pages * PAGE_SIZE); break; } case BLKIF_PROTOCOL_X86_64: { blkif_x86_64_sring_t *sring_x86_64; sring_x86_64 = (blkif_x86_64_sring_t *)xbb->ring_config.va; BACK_RING_INIT(&xbb->rings.x86_64, sring_x86_64, xbb->ring_config.ring_pages * PAGE_SIZE); break; } default: panic("Unexpected blkif protocol ABI."); } xbb->flags |= XBBF_RING_CONNECTED; error = xen_intr_bind_remote_port(xbb->dev, xbb->otherend_id, xbb->ring_config.evtchn, xbb_filter, /*ithread_handler*/NULL, /*arg*/xbb, INTR_TYPE_BIO | INTR_MPSAFE, &xbb->xen_intr_handle); if (error) { (void)xbb_disconnect(xbb); xenbus_dev_fatal(xbb->dev, error, "binding event channel"); return (error); } DPRINTF("rings connected!\n"); return 0; } /** * Size KVA and pseudo-physical address allocations based on negotiated * values for the size and number of I/O requests, and the size of our * communication ring. * * \param xbb Per-instance xbb configuration structure. * * These address spaces are used to dynamically map pages in the * front-end's domain into our own. */ static int xbb_alloc_communication_mem(struct xbb_softc *xbb) { xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments; xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE; xbb->kva_size = xbb->reqlist_kva_size + (xbb->ring_config.ring_pages * PAGE_SIZE); xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages, M_XENBLOCKBACK, M_NOWAIT); if (xbb->kva_free == NULL) return (ENOMEM); DPRINTF("%s: kva_size = %d, reqlist_kva_size = %d\n", device_get_nameunit(xbb->dev), xbb->kva_size, xbb->reqlist_kva_size); /* * Reserve a range of pseudo physical memory that we can map * into kva. These pages will only be backed by machine * pages ("real memory") during the lifetime of front-end requests * via grant table operations. 
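 *
 * (In other words, xenmem_alloc() below only reserves address
 * space; a page in this range gains real backing when a grant map
 * operation issued from xbb_dispatch_io() installs a front-end page
 * there, and loses it again when xbb_unmap_reqlist() unmaps the
 * grant.)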
*/ xbb->pseudo_phys_res_id = 0; xbb->pseudo_phys_res = xenmem_alloc(xbb->dev, &xbb->pseudo_phys_res_id, xbb->kva_size); if (xbb->pseudo_phys_res == NULL) { xbb->kva = 0; return (ENOMEM); } xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res); xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res); DPRINTF("%s: kva: %#jx, gnt_base_addr: %#jx\n", device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva, (uintmax_t)xbb->gnt_base_addr); return (0); } /** * Collect front-end information from the XenStore. * * \param xbb Per-instance xbb configuration structure. */ static int xbb_collect_frontend_info(struct xbb_softc *xbb) { char protocol_abi[64]; const char *otherend_path; int error; u_int ring_idx; u_int ring_page_order; size_t ring_size; otherend_path = xenbus_get_otherend_path(xbb->dev); /* * Protocol defaults valid even if all negotiation fails. */ xbb->ring_config.ring_pages = 1; xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; xbb->max_request_size = xbb->max_request_segments * PAGE_SIZE; /* * Mandatory data (used in all versions of the protocol) first. */ error = xs_scanf(XST_NIL, otherend_path, "event-channel", NULL, "%" PRIu32, &xbb->ring_config.evtchn); if (error != 0) { xenbus_dev_fatal(xbb->dev, error, "Unable to retrieve event-channel information " "from frontend %s. Unable to connect.", xenbus_get_otherend_path(xbb->dev)); return (error); } /* * These fields are initialized to legacy protocol defaults * so we only need to fail if reading the updated value succeeds * and the new value is outside of its allowed range. * * \note xs_gather() returns on the first encountered error, so * we must use independent calls in order to guarantee * we don't miss information in a sparsely populated front-end * tree. * * \note xs_scanf() does not update variables for unmatched * fields. */ ring_page_order = 0; xbb->max_requests = 32; (void)xs_scanf(XST_NIL, otherend_path, "ring-page-order", NULL, "%u", &ring_page_order); xbb->ring_config.ring_pages = 1 << ring_page_order; ring_size = PAGE_SIZE * xbb->ring_config.ring_pages; xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size); if (xbb->ring_config.ring_pages > XBB_MAX_RING_PAGES) { xenbus_dev_fatal(xbb->dev, EINVAL, "Front-end specified ring-pages of %u " "exceeds backend limit of %u. " "Unable to connect.", xbb->ring_config.ring_pages, XBB_MAX_RING_PAGES); return (EINVAL); } if (xbb->ring_config.ring_pages == 1) { error = xs_gather(XST_NIL, otherend_path, "ring-ref", "%" PRIu32, &xbb->ring_config.ring_ref[0], NULL); if (error != 0) { xenbus_dev_fatal(xbb->dev, error, "Unable to retrieve ring information " "from frontend %s. Unable to " "connect.", xenbus_get_otherend_path(xbb->dev)); return (error); } } else { /* Multi-page ring format. */ for (ring_idx = 0; ring_idx < xbb->ring_config.ring_pages; ring_idx++) { char ring_ref_name[] = "ring_refXX"; snprintf(ring_ref_name, sizeof(ring_ref_name), "ring-ref%u", ring_idx); error = xs_scanf(XST_NIL, otherend_path, ring_ref_name, NULL, "%" PRIu32, &xbb->ring_config.ring_ref[ring_idx]); if (error != 0) { xenbus_dev_fatal(xbb->dev, error, "Failed to retrieve grant " "reference for page %u of " "shared ring. Unable " "to connect.", ring_idx); return (error); } } } error = xs_gather(XST_NIL, otherend_path, "protocol", "%63s", protocol_abi, NULL); if (error != 0 || !strcmp(protocol_abi, XEN_IO_PROTO_ABI_NATIVE)) { /* * Assume native if the frontend has not * published ABI data or it has published and * matches our own ABI.
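 *
 * (For reference, the published strings are the XEN_IO_PROTO_ABI_*
 * values from xen/interface/io/protocols.h; e.g. a 32-bit front-end
 * running against a 64-bit back-end publishes "x86_32-abi" and is
 * handled by the X86_32 case below.)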
*/ xbb->abi = BLKIF_PROTOCOL_NATIVE; } else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_32)) { xbb->abi = BLKIF_PROTOCOL_X86_32; } else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_64)) { xbb->abi = BLKIF_PROTOCOL_X86_64; } else { xenbus_dev_fatal(xbb->dev, EINVAL, "Unknown protocol ABI (%s) published by " "frontend. Unable to connect.", protocol_abi); return (EINVAL); } return (0); } /** * Allocate per-request data structures given request size and number * information negotiated with the front-end. * * \param xbb Per-instance xbb configuration structure. */ static int xbb_alloc_requests(struct xbb_softc *xbb) { struct xbb_xen_req *req; struct xbb_xen_req *last_req; /* * Allocate request bookkeeping data structures. */ xbb->requests = malloc(xbb->max_requests * sizeof(*xbb->requests), M_XENBLOCKBACK, M_NOWAIT|M_ZERO); if (xbb->requests == NULL) { xenbus_dev_fatal(xbb->dev, ENOMEM, "Unable to allocate request structures"); return (ENOMEM); } req = xbb->requests; last_req = &xbb->requests[xbb->max_requests - 1]; STAILQ_INIT(&xbb->request_free_stailq); while (req <= last_req) { STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links); req++; } return (0); } static int xbb_alloc_request_lists(struct xbb_softc *xbb) { struct xbb_xen_reqlist *reqlist; int i; /* * If no requests can be merged, we need 1 request list per * in-flight request. */ xbb->request_lists = malloc(xbb->max_requests * sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO); if (xbb->request_lists == NULL) { xenbus_dev_fatal(xbb->dev, ENOMEM, "Unable to allocate request list structures"); return (ENOMEM); } STAILQ_INIT(&xbb->reqlist_free_stailq); STAILQ_INIT(&xbb->reqlist_pending_stailq); for (i = 0; i < xbb->max_requests; i++) { int seg; reqlist = &xbb->request_lists[i]; reqlist->xbb = xbb; #ifdef XBB_USE_BOUNCE_BUFFERS reqlist->bounce = malloc(xbb->max_reqlist_size, M_XENBLOCKBACK, M_NOWAIT); if (reqlist->bounce == NULL) { xenbus_dev_fatal(xbb->dev, ENOMEM, "Unable to allocate request " "bounce buffers"); return (ENOMEM); } #endif /* XBB_USE_BOUNCE_BUFFERS */ reqlist->gnt_handles = malloc(xbb->max_reqlist_segments * sizeof(*reqlist->gnt_handles), M_XENBLOCKBACK, M_NOWAIT|M_ZERO); if (reqlist->gnt_handles == NULL) { xenbus_dev_fatal(xbb->dev, ENOMEM, "Unable to allocate request " "grant references"); return (ENOMEM); } for (seg = 0; seg < xbb->max_reqlist_segments; seg++) reqlist->gnt_handles[seg] = GRANT_REF_INVALID; STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links); } return (0); } /** * Supply information about the physical device to the frontend * via XenBus. * * \param xbb Per-instance xbb configuration structure. */ static int xbb_publish_backend_info(struct xbb_softc *xbb) { struct xs_transaction xst; const char *our_path; const char *leaf; int error; our_path = xenbus_get_node(xbb->dev); while (1) { error = xs_transaction_start(&xst); if (error != 0) { xenbus_dev_fatal(xbb->dev, error, "Error publishing backend info " "(start transaction)"); return (error); } leaf = "sectors"; error = xs_printf(xst, our_path, leaf, "%"PRIu64, xbb->media_num_sectors); if (error != 0) break; /* XXX Support all VBD attributes here. */ leaf = "info"; error = xs_printf(xst, our_path, leaf, "%u", xbb->flags & XBBF_READ_ONLY ?
VDISK_READONLY : 0); if (error != 0) break; leaf = "sector-size"; error = xs_printf(xst, our_path, leaf, "%u", xbb->sector_size); if (error != 0) break; error = xs_transaction_end(xst, 0); if (error == 0) { return (0); } else if (error != EAGAIN) { xenbus_dev_fatal(xbb->dev, error, "ending transaction"); return (error); } } xenbus_dev_fatal(xbb->dev, error, "writing %s/%s", our_path, leaf); xs_transaction_end(xst, 1); return (error); } /** * Connect to our blkfront peer now that it has completed publishing * its configuration into the XenStore. * * \param xbb Per-instance xbb configuration structure. */ static void xbb_connect(struct xbb_softc *xbb) { int error; if (xenbus_get_state(xbb->dev) != XenbusStateInitialised) return; if (xbb_collect_frontend_info(xbb) != 0) return; xbb->flags &= ~XBBF_SHUTDOWN; /* * We limit the maximum number of reqlist segments to the maximum * number of segments in the ring, or our absolute maximum, * whichever is smaller. */ xbb->max_reqlist_segments = MIN(xbb->max_request_segments * xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST); /* * The maximum size is simply a function of the number of segments * we can handle. */ xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE; /* Allocate resources whose size depends on front-end configuration. */ error = xbb_alloc_communication_mem(xbb); if (error != 0) { xenbus_dev_fatal(xbb->dev, error, "Unable to allocate communication memory"); return; } error = xbb_alloc_requests(xbb); if (error != 0) { /* Specific errors are reported by xbb_alloc_requests(). */ return; } error = xbb_alloc_request_lists(xbb); if (error != 0) { /* Specific errors are reported by xbb_alloc_request_lists(). */ return; } /* * Connect communication channel. */ error = xbb_connect_ring(xbb); if (error != 0) { /* Specific errors are reported by xbb_connect_ring(). */ return; } if (xbb_publish_backend_info(xbb) != 0) { /* * If we can't publish our data, we cannot participate * in this connection, and waiting for a front-end state * change will not help the situation. */ (void)xbb_disconnect(xbb); return; } /* Ready for I/O. */ xenbus_set_state(xbb->dev, XenbusStateConnected); } /*-------------------------- Device Teardown Support -------------------------*/ /** * Perform device shutdown functions. * * \param xbb Per-instance xbb configuration structure. * * Mark this instance as shutting down, wait for any active I/O on the * backend device/file to drain, disconnect from the front-end, and notify * any waiters (e.g. a thread invoking our detach method) that detach can * now proceed. */ static int xbb_shutdown(struct xbb_softc *xbb) { XenbusState frontState; int error; DPRINTF("\n"); /* * Due to the need to drop our mutex during some * xenbus operations, it is possible for two threads * to attempt to close out shutdown processing at * the same time. Tell the caller that hits this * race to try back later. */ if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0) return (EAGAIN); xbb->flags |= XBBF_IN_SHUTDOWN; mtx_unlock(&xbb->lock); if (xbb->hotplug_watch.node != NULL) { xs_unregister_watch(&xbb->hotplug_watch); free(xbb->hotplug_watch.node, M_XENBLOCKBACK); xbb->hotplug_watch.node = NULL; } if (xenbus_get_state(xbb->dev) < XenbusStateClosing) xenbus_set_state(xbb->dev, XenbusStateClosing); frontState = xenbus_get_otherend_state(xbb->dev); mtx_lock(&xbb->lock); xbb->flags &= ~XBBF_IN_SHUTDOWN; /* Wait for the frontend to disconnect (if it's connected).
*/ if (frontState == XenbusStateConnected) return (EAGAIN); DPRINTF("\n"); /* Indicate shutdown is in progress. */ xbb->flags |= XBBF_SHUTDOWN; /* Disconnect from the front-end. */ error = xbb_disconnect(xbb); if (error != 0) { /* * Requests still outstanding. We'll be called again * once they complete. */ KASSERT(error == EAGAIN, ("%s: Unexpected xbb_disconnect() failure %d", __func__, error)); return (error); } DPRINTF("\n"); /* Indicate to xbb_detach() that it is safe to proceed. */ wakeup(xbb); return (0); } /** * Report an attach time error to the console and Xen, and clean up * this instance by forcing immediate detach processing. * * \param xbb Per-instance xbb configuration structure. * \param err Errno describing the error. * \param fmt Printf style format and arguments */ static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...) { va_list ap; va_list ap_hotplug; va_start(ap, fmt); va_copy(ap_hotplug, ap); xs_vprintf(XST_NIL, xenbus_get_node(xbb->dev), "hotplug-error", fmt, ap_hotplug); va_end(ap_hotplug); xs_printf(XST_NIL, xenbus_get_node(xbb->dev), "hotplug-status", "error"); xenbus_dev_vfatal(xbb->dev, err, fmt, ap); va_end(ap); xs_printf(XST_NIL, xenbus_get_node(xbb->dev), "online", "0"); mtx_lock(&xbb->lock); xbb_shutdown(xbb); mtx_unlock(&xbb->lock); } /*---------------------------- NewBus Entrypoints ----------------------------*/ /** * Inspect a XenBus device and claim it if it is of the appropriate type. * * \param dev NewBus device object representing a candidate XenBus device. * * \return 0 for success, errno codes for failure. */ static int xbb_probe(device_t dev) { if (!strcmp(xenbus_get_type(dev), "vbd")) { device_set_desc(dev, "Backend Virtual Block Device"); device_quiet(dev); return (0); } return (ENXIO); } /** * Setup sysctl variables to control various Block Back parameters. * * \param xbb Xen Block Back softc.
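 *
 * As a usage illustration (assuming the driver's "xbbd" device name and
 * unit 0; the actual unit varies per instance), the nodes created here
 * can be read or tuned from userland with sysctl(8):
 *
 *   sysctl dev.xbbd.0.reqs_completed
 *   sysctl dev.xbbd.0.disable_flush=1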
* */ static void xbb_setup_sysctl(struct xbb_softc *xbb) { struct sysctl_ctx_list *sysctl_ctx = NULL; struct sysctl_oid *sysctl_tree = NULL; sysctl_ctx = device_get_sysctl_ctx(xbb->dev); if (sysctl_ctx == NULL) return; sysctl_tree = device_get_sysctl_tree(xbb->dev); if (sysctl_tree == NULL) return; SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0, "fake the flush command"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0, "send a real flush for N flush requests"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs, 0, "Don't coalesce contiguous requests"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reqs_received", CTLFLAG_RW, &xbb->reqs_received, "how many I/O requests we have received"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reqs_completed", CTLFLAG_RW, &xbb->reqs_completed, "how many I/O requests have been completed"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reqs_queued_for_completion", CTLFLAG_RW, &xbb->reqs_queued_for_completion, "how many I/O requests queued but not yet pushed"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reqs_completed_with_error", CTLFLAG_RW, &xbb->reqs_completed_with_error, "how many I/O requests completed with error status"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch, "how many I/O dispatches were forced"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch, "how many I/O dispatches were normal"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "total_dispatch", CTLFLAG_RW, &xbb->total_dispatch, "total number of I/O dispatches"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "kva_shortages", CTLFLAG_RW, &xbb->kva_shortages, "how many times we have run out of KVA"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "request_shortages", CTLFLAG_RW, &xbb->request_shortages, "how many times we have run out of requests"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_requests", CTLFLAG_RD, &xbb->max_requests, 0, "maximum outstanding requests (negotiated)"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_request_segments", CTLFLAG_RD, &xbb->max_request_segments, 0, "maximum number of pages per request (negotiated)"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_request_size", CTLFLAG_RD, &xbb->max_request_size, 0, "maximum size in bytes of a request (negotiated)"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "ring_pages", CTLFLAG_RD, &xbb->ring_config.ring_pages, 0, "communication channel pages (negotiated)"); }
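/*
 * xbb_attach_disk() below is the callback of a one-shot XenStore watch
 * registered by xbb_attach(): it fires once the hotplug script writes
 * the "physical-device-path" node, then tears the watch down before
 * continuing. A condensed sketch of that one-shot pattern (illustrative
 * only; example_watch_cb is hypothetical and error handling is omitted):
 *
 *   static void
 *   example_watch_cb(struct xs_watch *watch, const char **vec,
 *       unsigned int len)
 *   {
 *           device_t dev = (device_t)watch->callback_data;
 *
 *           if (the node of interest is not yet present)
 *                   return;
 *           xs_unregister_watch(watch);
 *           free(watch->node, M_XENBLOCKBACK);
 *           watch->node = NULL;
 *           ... continue attach processing using dev ...
 *   }
 */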
static void xbb_attach_disk(struct xs_watch *watch, const char **vec, unsigned int len) { device_t dev; struct xbb_softc *xbb; int error; dev = (device_t) watch->callback_data; xbb = device_get_softc(dev); error = xs_gather(XST_NIL, xenbus_get_node(dev), "physical-device-path", NULL, &xbb->dev_name, NULL); if (error != 0) return; xs_unregister_watch(watch); free(watch->node, M_XENBLOCKBACK); watch->node = NULL; /* Collect physical device information. */ error = xs_gather(XST_NIL, xenbus_get_otherend_path(xbb->dev), "device-type", NULL, &xbb->dev_type, NULL); if (error != 0) xbb->dev_type = NULL; error = xs_gather(XST_NIL, xenbus_get_node(dev), "mode", NULL, &xbb->dev_mode, NULL); if (error != 0) { xbb_attach_failed(xbb, error, "reading backend fields at %s", xenbus_get_node(dev)); return; } /* Parse fopen style mode flags. */ if (strchr(xbb->dev_mode, 'w') == NULL) xbb->flags |= XBBF_READ_ONLY; /* * Verify the physical device is present and can support * the desired I/O mode. */ error = xbb_open_backend(xbb); if (error != 0) { xbb_attach_failed(xbb, error, "Unable to open %s", xbb->dev_name); return; } /* Use devstat(9) for recording statistics. */ xbb->xbb_stats = devstat_new_entry("xbb", device_get_unit(xbb->dev), xbb->sector_size, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_OTHER, DEVSTAT_PRIORITY_OTHER); xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev), xbb->sector_size, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_OTHER, DEVSTAT_PRIORITY_OTHER); /* * Setup sysctl variables. */ xbb_setup_sysctl(xbb); /* * Create a taskqueue for doing work that must occur from a * thread context. */ xbb->io_taskqueue = taskqueue_create_fast(device_get_nameunit(dev), M_NOWAIT, taskqueue_thread_enqueue, /*context*/&xbb->io_taskqueue); if (xbb->io_taskqueue == NULL) { xbb_attach_failed(xbb, error, "Unable to create taskqueue"); return; } taskqueue_start_threads(&xbb->io_taskqueue, /*num threads*/1, /*priority*/PWAIT, /*thread name*/ "%s taskq", device_get_nameunit(dev)); /* Update hot-plug status to satisfy xend. */ error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), "hotplug-status", "connected"); if (error) { xbb_attach_failed(xbb, error, "writing %s/hotplug-status", xenbus_get_node(xbb->dev)); return; } /* Tell the front end that we are ready to connect. */ xenbus_set_state(dev, XenbusStateInitialised); } /** * Attach to a XenBus device that has been claimed by our probe routine. * * \param dev NewBus device object representing this Xen Block Back instance. * * \return 0 for success, errno codes for failure. */ static int xbb_attach(device_t dev) { struct xbb_softc *xbb; int error; u_int max_ring_page_order; struct sbuf *watch_path; DPRINTF("Attaching to %s\n", xenbus_get_node(dev)); /* * Basic initialization. * After this block it is safe to call xbb_detach() * to clean up any allocated data for this instance. */ xbb = device_get_softc(dev); xbb->dev = dev; xbb->otherend_id = xenbus_get_otherend_id(dev); TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb); mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF); /* * Publish protocol capabilities for consumption by the * front-end. */ error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), "feature-barrier", "1"); if (error) { xbb_attach_failed(xbb, error, "writing %s/feature-barrier", xenbus_get_node(xbb->dev)); return (error); } error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), "feature-flush-cache", "1"); if (error) { xbb_attach_failed(xbb, error, "writing %s/feature-flush-cache", xenbus_get_node(xbb->dev)); return (error); } max_ring_page_order = flsl(XBB_MAX_RING_PAGES) - 1; error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), "max-ring-page-order", "%u", max_ring_page_order); if (error) { xbb_attach_failed(xbb, error, "writing %s/max-ring-page-order", xenbus_get_node(xbb->dev)); return (error); } /* * We need to wait for hotplug script execution before * moving forward.
*/ watch_path = xs_join(xenbus_get_node(xbb->dev), "physical-device-path"); xbb->hotplug_watch.callback_data = (uintptr_t)dev; xbb->hotplug_watch.callback = xbb_attach_disk; KASSERT(xbb->hotplug_watch.node == NULL, ("watch node already setup")); xbb->hotplug_watch.node = strdup(sbuf_data(watch_path), M_XENBLOCKBACK); sbuf_delete(watch_path); error = xs_register_watch(&xbb->hotplug_watch); if (error != 0) { xbb_attach_failed(xbb, error, "failed to create watch on %s", xbb->hotplug_watch.node); free(xbb->hotplug_watch.node, M_XENBLOCKBACK); return (error); } /* Tell the toolstack that blkback has attached. */ xenbus_set_state(dev, XenbusStateInitWait); return (0); } /** * Detach from a block back device instance. * * \param dev NewBus device object representing this Xen Block Back instance. * * \return 0 for success, errno codes for failure. * * \note A block back device may be detached at any time in its life-cycle, * including part way through the attach process. For this reason, * initialization order and the initialization state checks in this * routine must be carefully coupled so that attach time failures * are gracefully handled. */ static int xbb_detach(device_t dev) { struct xbb_softc *xbb; DPRINTF("\n"); xbb = device_get_softc(dev); mtx_lock(&xbb->lock); while (xbb_shutdown(xbb) == EAGAIN) { msleep(xbb, &xbb->lock, /*wakeup prio unchanged*/0, "xbb_shutdown", 0); } mtx_unlock(&xbb->lock); DPRINTF("\n"); if (xbb->io_taskqueue != NULL) taskqueue_free(xbb->io_taskqueue); if (xbb->xbb_stats != NULL) devstat_remove_entry(xbb->xbb_stats); if (xbb->xbb_stats_in != NULL) devstat_remove_entry(xbb->xbb_stats_in); xbb_close_backend(xbb); if (xbb->dev_mode != NULL) { free(xbb->dev_mode, M_XENSTORE); xbb->dev_mode = NULL; } if (xbb->dev_type != NULL) { free(xbb->dev_type, M_XENSTORE); xbb->dev_type = NULL; } if (xbb->dev_name != NULL) { free(xbb->dev_name, M_XENSTORE); xbb->dev_name = NULL; } mtx_destroy(&xbb->lock); return (0); } /** * Prepare this block back device for suspension of this VM. * * \param dev NewBus device object representing this Xen Block Back instance. * * \return 0 for success, errno codes for failure. */ static int xbb_suspend(device_t dev) { #ifdef NOT_YET struct xbb_softc *sc = device_get_softc(dev); /* Prevent new requests being issued until we fix things up. */ mtx_lock(&sc->xb_io_lock); sc->connected = BLKIF_STATE_SUSPENDED; mtx_unlock(&sc->xb_io_lock); #endif return (0); } /** * Perform any processing required to recover from a suspended state. * * \param dev NewBus device object representing this Xen Block Back instance. * * \return 0 for success, errno codes for failure. */ static int xbb_resume(device_t dev) { return (0); } /** * Handle state changes expressed via the XenStore by our front-end peer. * * \param dev NewBus device object representing this Xen * Block Back instance. * \param frontend_state The new state of the front-end.
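 *
 * For orientation, a typical successful lifecycle as observed by this
 * backend is (illustrative; the frontend drives the transitions):
 *
 *   Initialising -> Initialised (xbb_connect() maps rings)
 *                -> Connected   (I/O proceeds)
 *                -> Closing     (xbb_shutdown() begins)
 *                -> Closed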
*/ static void xbb_frontend_changed(device_t dev, XenbusState frontend_state) { struct xbb_softc *xbb = device_get_softc(dev); DPRINTF("frontend_state=%s, xbb_state=%s\n", xenbus_strstate(frontend_state), xenbus_strstate(xenbus_get_state(xbb->dev))); switch (frontend_state) { case XenbusStateInitialising: break; case XenbusStateInitialised: case XenbusStateConnected: xbb_connect(xbb); break; case XenbusStateClosing: case XenbusStateClosed: mtx_lock(&xbb->lock); xbb_shutdown(xbb); mtx_unlock(&xbb->lock); if (frontend_state == XenbusStateClosed) xenbus_set_state(xbb->dev, XenbusStateClosed); break; default: xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend", frontend_state); break; } } /*---------------------------- NewBus Registration ---------------------------*/ static device_method_t xbb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xbb_probe), DEVMETHOD(device_attach, xbb_attach), DEVMETHOD(device_detach, xbb_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, xbb_suspend), DEVMETHOD(device_resume, xbb_resume), /* Xenbus interface */ DEVMETHOD(xenbus_otherend_changed, xbb_frontend_changed), { 0, 0 } }; static driver_t xbb_driver = { "xbbd", xbb_methods, sizeof(struct xbb_softc), }; devclass_t xbb_devclass; DRIVER_MODULE(xbbd, xenbusb_back, xbb_driver, xbb_devclass, 0, 0); Index: head/sys/dev/xen/netback/netback.c =================================================================== --- head/sys/dev/xen/netback/netback.c (revision 329872) +++ head/sys/dev/xen/netback/netback.c (revision 329873) @@ -1,2517 +1,2517 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2011 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * Authors: Justin T. Gibbs (Spectra Logic Corporation) * Alan Somers (Spectra Logic Corporation) * John Suykerbuyk (Spectra Logic Corporation) */ #include __FBSDID("$FreeBSD$"); /** * \file netback.c * * \brief Device driver supporting the vending of network access * from this FreeBSD domain to other domains. 
*/ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_sctp.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 700000 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include /*--------------------------- Compile-time Tunables --------------------------*/ /*---------------------------------- Macros ----------------------------------*/ /** * Custom malloc type for all driver allocations. */ static MALLOC_DEFINE(M_XENNETBACK, "xnb", "Xen Net Back Driver Data"); #define XNB_SG 1 /* netback driver supports feature-sg */ #define XNB_GSO_TCPV4 0 /* netback driver supports feature-gso-tcpv4 */ #define XNB_RX_COPY 1 /* netback driver supports feature-rx-copy */ #define XNB_RX_FLIP 0 /* netback driver does not support feature-rx-flip */ #undef XNB_DEBUG #define XNB_DEBUG /* hardcode on during development */ #ifdef XNB_DEBUG #define DPRINTF(fmt, args...) \ printf("xnb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) #else #define DPRINTF(fmt, args...) do {} while (0) #endif /* Default length for stack-allocated grant tables */ #define GNTTAB_LEN (64) /* Features supported by all backends. TSO and LRO can be negotiated */ #define XNB_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE) /** * Two argument version of the standard macro. Second argument is a tentative * value of req_cons */ #define RING_HAS_UNCONSUMED_REQUESTS_2(_r, cons) ({ \ unsigned int req = (_r)->sring->req_prod - cons; \ unsigned int rsp = RING_SIZE(_r) - \ (cons - (_r)->rsp_prod_pvt); \ req < rsp ? req : rsp; \ }) #define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT) #define virt_to_offset(x) ((x) & (PAGE_SIZE - 1)) /** * Predefined array type of grant table copy descriptors. Used to pass around * statically allocated memory structures. */ typedef struct gnttab_copy gnttab_copy_table[GNTTAB_LEN]; /*--------------------------- Forward Declarations ---------------------------*/ struct xnb_softc; struct xnb_pkt; static void xnb_attach_failed(struct xnb_softc *xnb, int err, const char *fmt, ...) 
__printflike(3,4); static int xnb_shutdown(struct xnb_softc *xnb); static int create_netdev(device_t dev); static int xnb_detach(device_t dev); static int xnb_ifmedia_upd(struct ifnet *ifp); static void xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static void xnb_intr(void *arg); static int xnb_send(netif_rx_back_ring_t *rxb, domid_t otherend, const struct mbuf *mbufc, gnttab_copy_table gnttab); static int xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, struct mbuf **mbufc, struct ifnet *ifnet, gnttab_copy_table gnttab); static int xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring, RING_IDX start); static void xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring, int error); static struct mbuf *xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp); static int xnb_txpkt2gnttab(const struct xnb_pkt *pkt, struct mbuf *mbufc, gnttab_copy_table gnttab, const netif_tx_back_ring_t *txb, domid_t otherend_id); static void xnb_update_mbufc(struct mbuf *mbufc, const gnttab_copy_table gnttab, int n_entries); static int xnb_mbufc2pkt(const struct mbuf *mbufc, struct xnb_pkt *pkt, RING_IDX start, int space); static int xnb_rxpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc, gnttab_copy_table gnttab, const netif_rx_back_ring_t *rxb, domid_t otherend_id); static int xnb_rxpkt2rsp(const struct xnb_pkt *pkt, const gnttab_copy_table gnttab, int n_entries, netif_rx_back_ring_t *ring); static void xnb_stop(struct xnb_softc*); static int xnb_ioctl(struct ifnet*, u_long, caddr_t); static void xnb_start_locked(struct ifnet*); static void xnb_start(struct ifnet*); static void xnb_ifinit_locked(struct xnb_softc*); static void xnb_ifinit(void*); #ifdef XNB_DEBUG static int xnb_unit_test_main(SYSCTL_HANDLER_ARGS); static int xnb_dump_rings(SYSCTL_HANDLER_ARGS); #endif #if defined(INET) || defined(INET6) static void xnb_add_mbuf_cksum(struct mbuf *mbufc); #endif /*------------------------------ Data Structures -----------------------------*/ /** * Representation of a xennet packet. Simplified version of a packet as * stored in the Xen tx ring. Applicable to both RX and TX packets */ struct xnb_pkt{ /** * Array index of the first data-bearing (eg, not extra info) entry * for this packet */ RING_IDX car; /** * Array index of the second data-bearing entry for this packet. * Invalid if the packet has only one data-bearing entry. If the * packet has more than two data-bearing entries, then the second * through the last will be sequential modulo the ring size */ RING_IDX cdr; /** * Optional extra info. Only valid if flags contains * NETTXF_extra_info. Note that extra.type will always be * XEN_NETIF_EXTRA_TYPE_GSO. Currently, no known netfront or netback * driver will ever set XEN_NETIF_EXTRA_TYPE_MCAST_* */ netif_extra_info_t extra; /** Size of entire packet in bytes. */ uint16_t size; /** The size of the first entry's data in bytes */ uint16_t car_size; /** * Either NETTXF_ or NETRXF_ flags. Note that the flag values are * not the same for TX and RX packets */ uint16_t flags; /** * The number of valid data-bearing entries (either netif_tx_request's * or netif_rx_response's) in the packet. If this is 0, it means the * entire packet is invalid. 
*/ uint16_t list_len; /** There was an error processing the packet */ uint8_t error; }; /** xnb_pkt method: initialize it */ static inline void xnb_pkt_initialize(struct xnb_pkt *pxnb) { bzero(pxnb, sizeof(*pxnb)); } /** xnb_pkt method: mark the packet as valid */ static inline void xnb_pkt_validate(struct xnb_pkt *pxnb) { pxnb->error = 0; }; /** xnb_pkt method: mark the packet as invalid */ static inline void xnb_pkt_invalidate(struct xnb_pkt *pxnb) { pxnb->error = 1; }; /** xnb_pkt method: Check whether the packet is valid */ static inline int xnb_pkt_is_valid(const struct xnb_pkt *pxnb) { return (! pxnb->error); } #ifdef XNB_DEBUG /** xnb_pkt method: print the packet's contents in human-readable format */ static void __unused xnb_dump_pkt(const struct xnb_pkt *pkt) { if (pkt == NULL) { DPRINTF("Was passed a null pointer.\n"); return; } DPRINTF("pkt address= %p\n", pkt); DPRINTF("pkt->size=%d\n", pkt->size); DPRINTF("pkt->car_size=%d\n", pkt->car_size); DPRINTF("pkt->flags=0x%04x\n", pkt->flags); DPRINTF("pkt->list_len=%d\n", pkt->list_len); /* DPRINTF("pkt->extra"); TODO */ DPRINTF("pkt->car=%d\n", pkt->car); DPRINTF("pkt->cdr=%d\n", pkt->cdr); DPRINTF("pkt->error=%d\n", pkt->error); } #endif /* XNB_DEBUG */ static void xnb_dump_txreq(RING_IDX idx, const struct netif_tx_request *txreq) { if (txreq != NULL) { DPRINTF("netif_tx_request index =%u\n", idx); DPRINTF("netif_tx_request.gref =%u\n", txreq->gref); DPRINTF("netif_tx_request.offset=%hu\n", txreq->offset); DPRINTF("netif_tx_request.flags =%hu\n", txreq->flags); DPRINTF("netif_tx_request.id =%hu\n", txreq->id); DPRINTF("netif_tx_request.size =%hu\n", txreq->size); } } /** * \brief Configuration data for a shared memory request ring * used to communicate with the front-end client of * this driver. */ struct xnb_ring_config { /** * Runtime structures for ring access. Unfortunately, TX and RX rings * use different data structures, and that cannot be changed since it * is part of the interdomain protocol. */ union{ netif_rx_back_ring_t rx_ring; netif_tx_back_ring_t tx_ring; } back_ring; /** * The device bus address returned by the hypervisor when * mapping the ring and required to unmap it when a connection * is torn down. */ uint64_t bus_addr; /** The pseudo-physical address where ring memory is mapped.*/ uint64_t gnt_addr; /** KVA address where ring memory is mapped. */ vm_offset_t va; /** * Grant table handles, one per-ring page, returned by the * hypervisor upon mapping of the ring and required to * unmap it when a connection is torn down. */ grant_handle_t handle; /** The number of ring pages mapped for the current connection. */ unsigned ring_pages; /** * The grant references, one per-ring page, supplied by the * front-end, allowing us to reference the ring pages in the * front-end's domain and to map these pages into our own domain. */ grant_ref_t ring_ref; }; /** * Per-instance connection state flags. */ typedef enum { /** Communication with the front-end has been established. */ XNBF_RING_CONNECTED = 0x01, /** * Front-end requests exist in the ring and are waiting for * xnb_xen_req objects to free up. */ XNBF_RESOURCE_SHORTAGE = 0x02, /** Connection teardown has started. */ XNBF_SHUTDOWN = 0x04, /** A thread is already performing shutdown processing. */ XNBF_IN_SHUTDOWN = 0x08 } xnb_flag_t; /** * Types of rings.
Used for array indices and to identify a ring's control * data structure type */ typedef enum{ XNB_RING_TYPE_TX = 0, /* ID of TX rings, used for array indices */ XNB_RING_TYPE_RX = 1, /* ID of RX rings, used for array indices */ XNB_NUM_RING_TYPES } xnb_ring_type_t; /** * Per-instance configuration data. */ struct xnb_softc { /** NewBus device corresponding to this instance. */ device_t dev; /* Media related fields */ /** Generic network media state */ struct ifmedia sc_media; /** Media carrier info */ struct ifnet *xnb_ifp; /** Our own private carrier state */ unsigned carrier; /** Device MAC Address */ uint8_t mac[ETHER_ADDR_LEN]; /* Xen related fields */ /** * \brief The netif protocol abi in effect. * * There are situations where the back and front ends can * have a different, native abi (e.g. intel x86_64 and * 32bit x86 domains on the same machine). The back-end * always accommodates the front-end's native abi. That * value is pulled from the XenStore and recorded here. */ int abi; /** * Name of the bridge to which this VIF is connected, if any. * This field is dynamically allocated by xenbus and must be free()ed * when no longer needed. */ char *bridge; /** The interrupt-driven event channel used to signal ring events. */ evtchn_port_t evtchn; /** Xen device handle.*/ long handle; /** Handle to the communication ring event channel. */ xen_intr_handle_t xen_intr_handle; /** * \brief Cached value of the front-end's domain id. * * This value is used at once for each mapped page in * a transaction. We cache it to avoid incurring the * cost of an ivar access every time this is needed. */ domid_t otherend_id; /** * Undocumented frontend feature. Has something to do with * scatter/gather IO */ uint8_t can_sg; /** Undocumented frontend feature */ uint8_t gso; /** Undocumented frontend feature */ uint8_t gso_prefix; /** Can checksum TCP/UDP over IPv4 */ uint8_t ip_csum; /* Implementation related fields */ /** * Preallocated grant table copy descriptor for RX operations. * Access must be protected by rx_lock */ gnttab_copy_table rx_gnttab; /** * Preallocated grant table copy descriptor for TX operations. * Access must be protected by tx_lock */ gnttab_copy_table tx_gnttab; /** * Resource representing allocated physical address space * associated with our per-instance kva region. */ struct resource *pseudo_phys_res; /** Resource id for allocated physical address space. */ int pseudo_phys_res_id; /** Ring mapping and interrupt configuration data. */ struct xnb_ring_config ring_configs[XNB_NUM_RING_TYPES]; /** * Global pool of kva used for mapping remote domain ring * and I/O transaction data. */ vm_offset_t kva; - /** Psuedo-physical address corresponding to kva. */ + /** Pseudo-physical address corresponding to kva. */ uint64_t gnt_base_addr; /** Various configuration and state bit flags. */ xnb_flag_t flags; /** Mutex protecting per-instance data in the receive path. */ struct mtx rx_lock; /** Mutex protecting per-instance data in the softc structure. */ struct mtx sc_lock; /** Mutex protecting per-instance data in the transmit path. */ struct mtx tx_lock; /** The size of the global kva pool.
*/ int kva_size; /** Name of the interface */ char if_name[IFNAMSIZ]; }; /*---------------------------- Debugging functions ---------------------------*/ #ifdef XNB_DEBUG static void __unused xnb_dump_gnttab_copy(const struct gnttab_copy *entry) { if (entry == NULL) { printf("NULL grant table pointer\n"); return; } if (entry->flags & GNTCOPY_dest_gref) printf("gnttab dest ref=\t%u\n", entry->dest.u.ref); else printf("gnttab dest gmfn=\t%"PRI_xen_pfn"\n", entry->dest.u.gmfn); printf("gnttab dest offset=\t%hu\n", entry->dest.offset); printf("gnttab dest domid=\t%hu\n", entry->dest.domid); if (entry->flags & GNTCOPY_source_gref) printf("gnttab source ref=\t%u\n", entry->source.u.ref); else printf("gnttab source gmfn=\t%"PRI_xen_pfn"\n", entry->source.u.gmfn); printf("gnttab source offset=\t%hu\n", entry->source.offset); printf("gnttab source domid=\t%hu\n", entry->source.domid); printf("gnttab len=\t%hu\n", entry->len); printf("gnttab flags=\t%hu\n", entry->flags); printf("gnttab status=\t%hd\n", entry->status); } static int xnb_dump_rings(SYSCTL_HANDLER_ARGS) { static char results[720]; struct xnb_softc const* xnb = (struct xnb_softc*)arg1; netif_rx_back_ring_t const* rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring; netif_tx_back_ring_t const* txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring; /* empty the result strings */ results[0] = 0; if ( !txb || !txb->sring || !rxb || !rxb->sring ) return (SYSCTL_OUT(req, results, strnlen(results, 720))); snprintf(results, 720, "\n\t%35s %18s\n" /* TX, RX */ "\t%16s %18d %18d\n" /* req_cons */ "\t%16s %18d %18d\n" /* nr_ents */ "\t%16s %18d %18d\n" /* rsp_prod_pvt */ "\t%16s %18p %18p\n" /* sring */ "\t%16s %18d %18d\n" /* req_prod */ "\t%16s %18d %18d\n" /* req_event */ "\t%16s %18d %18d\n" /* rsp_prod */ "\t%16s %18d %18d\n", /* rsp_event */ "TX", "RX", "req_cons", txb->req_cons, rxb->req_cons, "nr_ents", txb->nr_ents, rxb->nr_ents, "rsp_prod_pvt", txb->rsp_prod_pvt, rxb->rsp_prod_pvt, "sring", txb->sring, rxb->sring, "sring->req_prod", txb->sring->req_prod, rxb->sring->req_prod, "sring->req_event", txb->sring->req_event, rxb->sring->req_event, "sring->rsp_prod", txb->sring->rsp_prod, rxb->sring->rsp_prod, "sring->rsp_event", txb->sring->rsp_event, rxb->sring->rsp_event); return (SYSCTL_OUT(req, results, strnlen(results, 720))); } static void __unused xnb_dump_mbuf(const struct mbuf *m) { int len; uint8_t *d; if (m == NULL) return; printf("xnb_dump_mbuf:\n"); if (m->m_flags & M_PKTHDR) { printf(" flowid=%10d, csum_flags=%#8x, csum_data=%#8x, " "tso_segsz=%5hd\n", m->m_pkthdr.flowid, (int)m->m_pkthdr.csum_flags, m->m_pkthdr.csum_data, m->m_pkthdr.tso_segsz); printf(" rcvif=%16p, len=%19d\n", m->m_pkthdr.rcvif, m->m_pkthdr.len); } printf(" m_next=%16p, m_nextpk=%16p, m_data=%16p\n", m->m_next, m->m_nextpkt, m->m_data); printf(" m_len=%17d, m_flags=%#15x, m_type=%18u\n", m->m_len, m->m_flags, m->m_type); len = m->m_len; d = mtod(m, uint8_t*); while (len > 0) { int i; printf(" "); for (i = 0; (i < 16) && (len > 0); i++, len--) { printf("%02hhx ", *(d++)); } printf("\n"); } } #endif /* XNB_DEBUG */ /*------------------------ Inter-Domain Communication ------------------------*/ /** * Free dynamically allocated KVA or pseudo-physical address allocations. * * \param xnb Per-instance xnb configuration structure. 
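 *
 * The routine is idempotent: each resource is released at most once
 * and the bookkeeping fields are zeroed afterwards, so repeated calls
 * are harmless. A condensed sketch of the guard pattern (illustrative
 * only; release() is a hypothetical stand-in):
 *
 *   if (res != NULL) {
 *           release(res);
 *           res = NULL;     (a second teardown is then a no-op)
 *   }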
*/ static void xnb_free_communication_mem(struct xnb_softc *xnb) { if (xnb->kva != 0) { if (xnb->pseudo_phys_res != NULL) { xenmem_free(xnb->dev, xnb->pseudo_phys_res_id, xnb->pseudo_phys_res); xnb->pseudo_phys_res = NULL; } } xnb->kva = 0; xnb->gnt_base_addr = 0; } /** * Cleanup all inter-domain communication mechanisms. * * \param xnb Per-instance xnb configuration structure. */ static int xnb_disconnect(struct xnb_softc *xnb) { struct gnttab_unmap_grant_ref gnts[XNB_NUM_RING_TYPES]; int error; int i; if (xnb->xen_intr_handle != NULL) xen_intr_unbind(&xnb->xen_intr_handle); /* * We may still have another thread currently processing requests. We * must acquire the rx and tx locks to make sure those threads are done, * but we can release those locks as soon as we acquire them, because no * more interrupts will be arriving. */ mtx_lock(&xnb->tx_lock); mtx_unlock(&xnb->tx_lock); mtx_lock(&xnb->rx_lock); mtx_unlock(&xnb->rx_lock); /* Free malloc'd softc member variables */ if (xnb->bridge != NULL) { free(xnb->bridge, M_XENSTORE); xnb->bridge = NULL; } /* All request processing has stopped, so unmap the rings */ for (i=0; i < XNB_NUM_RING_TYPES; i++) { gnts[i].host_addr = xnb->ring_configs[i].gnt_addr; gnts[i].dev_bus_addr = xnb->ring_configs[i].bus_addr; gnts[i].handle = xnb->ring_configs[i].handle; } error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, gnts, XNB_NUM_RING_TYPES); KASSERT(error == 0, ("Grant table unmap op failed (%d)", error)); xnb_free_communication_mem(xnb); /* * Zero the ring config structs because the pointers, handles, and * grant refs contained therein are no longer valid. */ bzero(&xnb->ring_configs[XNB_RING_TYPE_TX], sizeof(struct xnb_ring_config)); bzero(&xnb->ring_configs[XNB_RING_TYPE_RX], sizeof(struct xnb_ring_config)); xnb->flags &= ~XNBF_RING_CONNECTED; return (0); } /** * Map a single shared memory ring into domain local address space and * initialize its control structure * * \param xnb Per-instance xnb configuration structure * \param ring_type Array index of this ring in the xnb's array of rings * \return An errno */ static int xnb_connect_ring(struct xnb_softc *xnb, xnb_ring_type_t ring_type) { struct gnttab_map_grant_ref gnt; struct xnb_ring_config *ring = &xnb->ring_configs[ring_type]; int error; /* TX ring type = 0, RX =1 */ ring->va = xnb->kva + ring_type * PAGE_SIZE; ring->gnt_addr = xnb->gnt_base_addr + ring_type * PAGE_SIZE; gnt.host_addr = ring->gnt_addr; gnt.flags = GNTMAP_host_map; gnt.ref = ring->ring_ref; gnt.dom = xnb->otherend_id; error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &gnt, 1); if (error != 0) panic("netback: Ring page grant table op failed (%d)", error); if (gnt.status != 0) { ring->va = 0; error = EACCES; xenbus_dev_fatal(xnb->dev, error, "Ring shared page mapping failed. " "Status %d.", gnt.status); } else { ring->handle = gnt.handle; ring->bus_addr = gnt.dev_bus_addr; if (ring_type == XNB_RING_TYPE_TX) { BACK_RING_INIT(&ring->back_ring.tx_ring, (netif_tx_sring_t*)ring->va, ring->ring_pages * PAGE_SIZE); } else if (ring_type == XNB_RING_TYPE_RX) { BACK_RING_INIT(&ring->back_ring.rx_ring, (netif_rx_sring_t*)ring->va, ring->ring_pages * PAGE_SIZE); } else { xenbus_dev_fatal(xnb->dev, error, "Unknown ring type %d", ring_type); } } return error; } /** * Setup the shared memory rings and bind an interrupt to the event channel * used to notify us of ring changes. * * \param xnb Per-instance xnb configuration structure. 
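 *
 * The ordering below is deliberate: the rings are mapped before the
 * event channel is bound, so no interrupt can arrive that references
 * an unmapped ring, and a bind failure rolls the rings back via
 * xnb_disconnect(). A condensed sketch (illustrative only):
 *
 *   map rings;
 *   bind event channel;
 *   if (bind failed) { unmap rings; return error; }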
*/ static int xnb_connect_comms(struct xnb_softc *xnb) { int error; xnb_ring_type_t i; if ((xnb->flags & XNBF_RING_CONNECTED) != 0) return (0); /* * Kva for our rings are at the tail of the region of kva allocated * by xnb_alloc_communication_mem(). */ for (i=0; i < XNB_NUM_RING_TYPES; i++) { error = xnb_connect_ring(xnb, i); if (error != 0) return error; } xnb->flags |= XNBF_RING_CONNECTED; error = xen_intr_bind_remote_port(xnb->dev, xnb->otherend_id, xnb->evtchn, /*filter*/NULL, xnb_intr, /*arg*/xnb, INTR_TYPE_BIO | INTR_MPSAFE, &xnb->xen_intr_handle); if (error != 0) { (void)xnb_disconnect(xnb); xenbus_dev_fatal(xnb->dev, error, "binding event channel"); return (error); } DPRINTF("rings connected!\n"); return (0); } /** * Size KVA and pseudo-physical address allocations based on negotiated * values for the size and number of I/O requests, and the size of our * communication ring. * * \param xnb Per-instance xnb configuration structure. * * These address spaces are used to dynamically map pages in the * front-end's domain into our own. */ static int xnb_alloc_communication_mem(struct xnb_softc *xnb) { xnb_ring_type_t i; xnb->kva_size = 0; for (i=0; i < XNB_NUM_RING_TYPES; i++) { xnb->kva_size += xnb->ring_configs[i].ring_pages * PAGE_SIZE; } /* * Reserve a range of pseudo physical memory that we can map * into kva. These pages will only be backed by machine * pages ("real memory") during the lifetime of front-end requests * via grant table operations. We will map the netif tx and rx rings * into this space. */ xnb->pseudo_phys_res_id = 0; xnb->pseudo_phys_res = xenmem_alloc(xnb->dev, &xnb->pseudo_phys_res_id, xnb->kva_size); if (xnb->pseudo_phys_res == NULL) { xnb->kva = 0; return (ENOMEM); } xnb->kva = (vm_offset_t)rman_get_virtual(xnb->pseudo_phys_res); xnb->gnt_base_addr = rman_get_start(xnb->pseudo_phys_res); return (0); } /** * Collect information from the XenStore related to our device and its frontend * * \param xnb Per-instance xnb configuration structure. */ static int xnb_collect_xenstore_info(struct xnb_softc *xnb) { /** * \todo Linux collects the following info. We should collect most * of this, too: * "feature-rx-notify" */ const char *otherend_path; const char *our_path; int err; unsigned int rx_copy, bridge_len; uint8_t no_csum_offload; otherend_path = xenbus_get_otherend_path(xnb->dev); our_path = xenbus_get_node(xnb->dev); /* Collect the critical communication parameters */ err = xs_gather(XST_NIL, otherend_path, "tx-ring-ref", "%l" PRIu32, &xnb->ring_configs[XNB_RING_TYPE_TX].ring_ref, "rx-ring-ref", "%l" PRIu32, &xnb->ring_configs[XNB_RING_TYPE_RX].ring_ref, "event-channel", "%" PRIu32, &xnb->evtchn, NULL); if (err != 0) { xenbus_dev_fatal(xnb->dev, err, "Unable to retrieve ring information from " "frontend %s. Unable to connect.", otherend_path); return (err); } /* Collect the handle from xenstore */ err = xs_scanf(XST_NIL, our_path, "handle", NULL, "%li", &xnb->handle); if (err != 0) { xenbus_dev_fatal(xnb->dev, err, "Error reading handle from frontend %s. " "Unable to connect.", otherend_path); } /* * Collect the bridgename, if any. We do not need bridge_len; we just * throw it away */ err = xs_read(XST_NIL, our_path, "bridge", &bridge_len, (void**)&xnb->bridge); if (err != 0) xnb->bridge = NULL; /* * Does the frontend request that we use rx copy? If not, return an * error because this driver only supports rx copy. 
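 *
 * As an illustrative example (the path layout is the conventional
 * XenStore one; the domid and handle vary), a frontend that supports
 * rx copy publishes:
 *
 *   /local/domain/<domid>/device/vif/<handle>/request-rx-copy = "1"
 *
 * which can be inspected from the control domain with a tool such as
 * xenstore-read.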
*/ err = xs_scanf(XST_NIL, otherend_path, "request-rx-copy", NULL, "%" PRIu32, &rx_copy); if (err == ENOENT) { err = 0; rx_copy = 0; } if (err < 0) { xenbus_dev_fatal(xnb->dev, err, "reading %s/request-rx-copy", otherend_path); return err; } /** * \todo: figure out the exact meaning of this feature, and when * the frontend will set it to true. It should be set to true * at some point */ /* if (!rx_copy)*/ /* return EOPNOTSUPP;*/ /** \todo Collect the rx notify feature */ /* Collect the feature-sg. */ if (xs_scanf(XST_NIL, otherend_path, "feature-sg", NULL, "%hhu", &xnb->can_sg) < 0) xnb->can_sg = 0; /* Collect remaining frontend features */ if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4", NULL, "%hhu", &xnb->gso) < 0) xnb->gso = 0; if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4-prefix", NULL, "%hhu", &xnb->gso_prefix) < 0) xnb->gso_prefix = 0; if (xs_scanf(XST_NIL, otherend_path, "feature-no-csum-offload", NULL, "%hhu", &no_csum_offload) < 0) no_csum_offload = 0; xnb->ip_csum = (no_csum_offload == 0); return (0); } /** * Supply information about the physical device to the frontend * via XenBus. * * \param xnb Per-instance xnb configuration structure. */ static int xnb_publish_backend_info(struct xnb_softc *xnb) { struct xs_transaction xst; const char *our_path; int error; our_path = xenbus_get_node(xnb->dev); do { error = xs_transaction_start(&xst); if (error != 0) { xenbus_dev_fatal(xnb->dev, error, "Error publishing backend info " "(start transaction)"); break; } error = xs_printf(xst, our_path, "feature-sg", "%d", XNB_SG); if (error != 0) break; error = xs_printf(xst, our_path, "feature-gso-tcpv4", "%d", XNB_GSO_TCPV4); if (error != 0) break; error = xs_printf(xst, our_path, "feature-rx-copy", "%d", XNB_RX_COPY); if (error != 0) break; error = xs_printf(xst, our_path, "feature-rx-flip", "%d", XNB_RX_FLIP); if (error != 0) break; error = xs_transaction_end(xst, 0); if (error != 0 && error != EAGAIN) { xenbus_dev_fatal(xnb->dev, error, "ending transaction"); break; } } while (error == EAGAIN); return (error); } /** * Connect to our netfront peer now that it has completed publishing * its configuration into the XenStore. * * \param xnb Per-instance xnb configuration structure. */ static void xnb_connect(struct xnb_softc *xnb) { int error; if (xenbus_get_state(xnb->dev) == XenbusStateConnected) return; if (xnb_collect_xenstore_info(xnb) != 0) return; xnb->flags &= ~XNBF_SHUTDOWN; /* Read front end configuration. */ /* Allocate resources whose size depends on front-end configuration. */ error = xnb_alloc_communication_mem(xnb); if (error != 0) { xenbus_dev_fatal(xnb->dev, error, "Unable to allocate communication memory"); return; } /* * Connect communication channel. */ error = xnb_connect_comms(xnb); if (error != 0) { /* Specific errors are reported by xnb_connect_comms(). */ return; } xnb->carrier = 1; /* Ready for I/O. */ xenbus_set_state(xnb->dev, XenbusStateConnected); } /*-------------------------- Device Teardown Support -------------------------*/ /** * Perform device shutdown functions. * * \param xnb Per-instance xnb configuration structure. * * Mark this instance as shutting down, wait for any active requests * to drain, disconnect from the front-end, and notify any waiters (e.g. * a thread invoking our detach method) that detach can now proceed. */ static int xnb_shutdown(struct xnb_softc *xnb) { /* * Due to the need to drop our mutex during some * xenbus operations, it is possible for two threads * to attempt to close out shutdown processing at * the same time. 
Tell the caller that hits this * race to try back later. */ if ((xnb->flags & XNBF_IN_SHUTDOWN) != 0) return (EAGAIN); xnb->flags |= XNBF_SHUTDOWN; xnb->flags |= XNBF_IN_SHUTDOWN; mtx_unlock(&xnb->sc_lock); /* Free the network interface */ xnb->carrier = 0; if (xnb->xnb_ifp != NULL) { ether_ifdetach(xnb->xnb_ifp); if_free(xnb->xnb_ifp); xnb->xnb_ifp = NULL; } mtx_lock(&xnb->sc_lock); xnb_disconnect(xnb); mtx_unlock(&xnb->sc_lock); if (xenbus_get_state(xnb->dev) < XenbusStateClosing) xenbus_set_state(xnb->dev, XenbusStateClosing); mtx_lock(&xnb->sc_lock); xnb->flags &= ~XNBF_IN_SHUTDOWN; /* Indicate to xnb_detach() that it is safe to proceed. */ wakeup(xnb); return (0); } /** * Report an attach time error to the console and Xen, and clean up * this instance by forcing immediate detach processing. * * \param xnb Per-instance xnb configuration structure. * \param err Errno describing the error. * \param fmt Printf style format and arguments */ static void xnb_attach_failed(struct xnb_softc *xnb, int err, const char *fmt, ...) { va_list ap; va_list ap_hotplug; va_start(ap, fmt); va_copy(ap_hotplug, ap); xs_vprintf(XST_NIL, xenbus_get_node(xnb->dev), "hotplug-error", fmt, ap_hotplug); va_end(ap_hotplug); (void)xs_printf(XST_NIL, xenbus_get_node(xnb->dev), "hotplug-status", "error"); xenbus_dev_vfatal(xnb->dev, err, fmt, ap); va_end(ap); (void)xs_printf(XST_NIL, xenbus_get_node(xnb->dev), "online", "0"); xnb_detach(xnb->dev); } /*---------------------------- NewBus Entrypoints ----------------------------*/ /** * Inspect a XenBus device and claim it if it is of the appropriate type. * * \param dev NewBus device object representing a candidate XenBus device. * * \return 0 for success, errno codes for failure. */ static int xnb_probe(device_t dev) { if (!strcmp(xenbus_get_type(dev), "vif")) { DPRINTF("Claiming device %d, %s\n", device_get_unit(dev), devclass_get_name(device_get_devclass(dev))); device_set_desc(dev, "Backend Virtual Network Device"); device_quiet(dev); return (0); } return (ENXIO); } /** * Setup sysctl variables to control various Network Back parameters. * * \param xnb Xen Net Back softc. * */ static void xnb_setup_sysctl(struct xnb_softc *xnb) { struct sysctl_ctx_list *sysctl_ctx = NULL; struct sysctl_oid *sysctl_tree = NULL; sysctl_ctx = device_get_sysctl_ctx(xnb->dev); if (sysctl_ctx == NULL) return; sysctl_tree = device_get_sysctl_tree(xnb->dev); if (sysctl_tree == NULL) return; #ifdef XNB_DEBUG SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "unit_test_results", CTLTYPE_STRING | CTLFLAG_RD, xnb, 0, xnb_unit_test_main, "A", "Results of builtin unit tests"); SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "dump_rings", CTLTYPE_STRING | CTLFLAG_RD, xnb, 0, xnb_dump_rings, "A", "Xennet Back Rings"); #endif /* XNB_DEBUG */ } /** * Create a network device.
* @param dev NewBus device object representing this instance */ int create_netdev(device_t dev) { struct ifnet *ifp; struct xnb_softc *xnb; int err = 0; uint32_t handle; xnb = device_get_softc(dev); mtx_init(&xnb->sc_lock, "xnb_softc", "xen netback softc lock", MTX_DEF); mtx_init(&xnb->tx_lock, "xnb_tx", "xen netback tx lock", MTX_DEF); mtx_init(&xnb->rx_lock, "xnb_rx", "xen netback rx lock", MTX_DEF); xnb->dev = dev; ifmedia_init(&xnb->sc_media, 0, xnb_ifmedia_upd, xnb_ifmedia_sts); ifmedia_add(&xnb->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); ifmedia_set(&xnb->sc_media, IFM_ETHER|IFM_MANUAL); /* * Set the MAC address to a dummy value (00:00:00:00:00:00): * if the MAC address of the host-facing interface is set * to the same as the guest-facing one (the value found in * xenstore), the bridge would stop delivering packets to * us because it would see that the destination address of * the packet is the same as the interface, and so the bridge * would expect the packet has already been delivered locally * (and just drop it). */ bzero(&xnb->mac[0], sizeof(xnb->mac)); /* The interface will be named using the following nomenclature: * * xnb<domid>.<handle> * * Where handle is the order of the interface as referred to the guest. */ err = xs_scanf(XST_NIL, xenbus_get_node(xnb->dev), "handle", NULL, "%" PRIu32, &handle); if (err != 0) return (err); snprintf(xnb->if_name, IFNAMSIZ, "xnb%" PRIu16 ".%" PRIu32, xenbus_get_otherend_id(dev), handle); if (err == 0) { /* Set up ifnet structure */ ifp = xnb->xnb_ifp = if_alloc(IFT_ETHER); ifp->if_softc = xnb; if_initname(ifp, xnb->if_name, IF_DUNIT_NONE); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = xnb_ioctl; ifp->if_start = xnb_start; ifp->if_init = xnb_ifinit; ifp->if_mtu = ETHERMTU; ifp->if_snd.ifq_maxlen = NET_RX_RING_SIZE - 1; ifp->if_hwassist = XNB_CSUM_FEATURES; ifp->if_capabilities = IFCAP_HWCSUM; ifp->if_capenable = IFCAP_HWCSUM; ether_ifattach(ifp, xnb->mac); xnb->carrier = 0; } return err; } /** * Attach to a XenBus device that has been claimed by our probe routine. * * \param dev NewBus device object representing this Xen Net Back instance. * * \return 0 for success, errno codes for failure. */ static int xnb_attach(device_t dev) { struct xnb_softc *xnb; int error; xnb_ring_type_t i; error = create_netdev(dev); if (error != 0) { xenbus_dev_fatal(dev, error, "creating netdev"); return (error); } DPRINTF("Attaching to %s\n", xenbus_get_node(dev)); /* * Basic initialization. * After this block it is safe to call xnb_detach() * to clean up any allocated data for this instance. */ xnb = device_get_softc(dev); xnb->otherend_id = xenbus_get_otherend_id(dev); for (i=0; i < XNB_NUM_RING_TYPES; i++) { xnb->ring_configs[i].ring_pages = 1; } /* * Setup sysctl variables. */ xnb_setup_sysctl(xnb); /* Update hot-plug status to satisfy xend. */ error = xs_printf(XST_NIL, xenbus_get_node(xnb->dev), "hotplug-status", "connected"); if (error != 0) { xnb_attach_failed(xnb, error, "writing %s/hotplug-status", xenbus_get_node(xnb->dev)); return (error); } if ((error = xnb_publish_backend_info(xnb)) != 0) { /* * If we can't publish our data, we cannot participate * in this connection, and waiting for a front-end state * change will not help the situation. */ xnb_attach_failed(xnb, error, "Publishing backend status for %s", xenbus_get_node(xnb->dev)); return error; } /* Tell the front end that we are ready to connect. */ xenbus_set_state(dev, XenbusStateInitWait); return (0); }
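/*
 * After a successful attach, the backend's XenStore subtree contains,
 * roughly (paths are illustrative; the frontend domid and handle vary,
 * and the values reflect the XNB_* feature constants defined above):
 *
 *   backend/vif/<domid>/<handle>/feature-sg = "1"
 *   backend/vif/<domid>/<handle>/feature-gso-tcpv4 = "0"
 *   backend/vif/<domid>/<handle>/feature-rx-copy = "1"
 *   backend/vif/<domid>/<handle>/feature-rx-flip = "0"
 *   backend/vif/<domid>/<handle>/hotplug-status = "connected"
 */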
/** * Detach from a net back device instance. * * \param dev NewBus device object representing this Xen Net Back instance. * * \return 0 for success, errno codes for failure. * * \note A net back device may be detached at any time in its life-cycle, * including part way through the attach process. For this reason, * initialization order and the initialization state checks in this * routine must be carefully coupled so that attach time failures * are gracefully handled. */ static int xnb_detach(device_t dev) { struct xnb_softc *xnb; DPRINTF("\n"); xnb = device_get_softc(dev); mtx_lock(&xnb->sc_lock); while (xnb_shutdown(xnb) == EAGAIN) { msleep(xnb, &xnb->sc_lock, /*wakeup prio unchanged*/0, "xnb_shutdown", 0); } mtx_unlock(&xnb->sc_lock); DPRINTF("\n"); mtx_destroy(&xnb->tx_lock); mtx_destroy(&xnb->rx_lock); mtx_destroy(&xnb->sc_lock); return (0); } /** * Prepare this net back device for suspension of this VM. * * \param dev NewBus device object representing this Xen net Back instance. * * \return 0 for success, errno codes for failure. */ static int xnb_suspend(device_t dev) { return (0); } /** * Perform any processing required to recover from a suspended state. * * \param dev NewBus device object representing this Xen Net Back instance. * * \return 0 for success, errno codes for failure. */ static int xnb_resume(device_t dev) { return (0); } /** * Handle state changes expressed via the XenStore by our front-end peer. * * \param dev NewBus device object representing this Xen * Net Back instance. * \param frontend_state The new state of the front-end. */ static void xnb_frontend_changed(device_t dev, XenbusState frontend_state) { struct xnb_softc *xnb; xnb = device_get_softc(dev); DPRINTF("frontend_state=%s, xnb_state=%s\n", xenbus_strstate(frontend_state), xenbus_strstate(xenbus_get_state(xnb->dev))); switch (frontend_state) { case XenbusStateInitialising: break; case XenbusStateInitialised: case XenbusStateConnected: xnb_connect(xnb); break; case XenbusStateClosing: case XenbusStateClosed: mtx_lock(&xnb->sc_lock); xnb_shutdown(xnb); mtx_unlock(&xnb->sc_lock); if (frontend_state == XenbusStateClosed) xenbus_set_state(xnb->dev, XenbusStateClosed); break; default: xenbus_dev_fatal(xnb->dev, EINVAL, "saw state %d at frontend", frontend_state); break; } } /*---------------------------- Request Processing ----------------------------*/ /** * Interrupt handler bound to the shared ring's event channel. * Entry point for the xennet transmit path in netback. * Transfers packets from the Xen ring to the host's generic networking stack. * * \param arg Callback argument registered during event channel * binding - the xnb_softc for this instance.
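 *
 * The handler uses the standard Xen shared-ring idiom: consume all
 * pending requests, re-arm req_event, then re-check req_prod to catch
 * requests that raced in while responses were being pushed. A
 * condensed sketch (illustrative, not the exact code below):
 *
 *   do {
 *           prod = sring->req_prod;
 *           xen_rmb();              (order reads after fetching prod)
 *           consume and respond up to prod;
 *           sring->req_event = req_cons + 1;
 *           xen_mb();
 *   } while (sring->req_prod != prod);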
*/ static void xnb_intr(void *arg) { struct xnb_softc *xnb; struct ifnet *ifp; netif_tx_back_ring_t *txb; RING_IDX req_prod_local; xnb = (struct xnb_softc *)arg; ifp = xnb->xnb_ifp; txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring; mtx_lock(&xnb->tx_lock); do { int notify; req_prod_local = txb->sring->req_prod; xen_rmb(); for (;;) { struct mbuf *mbufc; int err; err = xnb_recv(txb, xnb->otherend_id, &mbufc, ifp, xnb->tx_gnttab); if (err || (mbufc == NULL)) break; /* Send the packet to the generic network stack */ (*xnb->xnb_ifp->if_input)(xnb->xnb_ifp, mbufc); } RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(txb, notify); if (notify != 0) xen_intr_signal(xnb->xen_intr_handle); txb->sring->req_event = txb->req_cons + 1; xen_mb(); } while (txb->sring->req_prod != req_prod_local); mtx_unlock(&xnb->tx_lock); xnb_start(ifp); } /** * Build a struct xnb_pkt based on netif_tx_request's from a netif tx ring. * Will read exactly 0 or 1 packets from the ring; never a partial packet. * \param[out] pkt The returned packet. If there is an error building * the packet, pkt.list_len will be set to 0. * \param[in] tx_ring Pointer to the Ring that is the input to this function * \param[in] start The ring index of the first potential request * \return The number of requests consumed to build this packet */ static int xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring, RING_IDX start) { /* * Outline: * 1) Initialize pkt * 2) Read the first request of the packet * 3) Read the extras * 4) Set cdr * 5) Loop on the remainder of the packet * 6) Finalize pkt (stuff like car_size and list_len) */ int idx = start; int discard = 0; /* whether to discard the packet */ int more_data = 0; /* there are more requests past the last one */ uint16_t cdr_size = 0; /* accumulated size of requests 2 through n */ xnb_pkt_initialize(pkt); /* Read the first request */ if (RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) { netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx); pkt->size = tx->size; pkt->flags = tx->flags & ~NETTXF_more_data; more_data = tx->flags & NETTXF_more_data; pkt->list_len++; pkt->car = idx; idx++; } /* Read the extra info */ if ((pkt->flags & NETTXF_extra_info) && RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) { netif_extra_info_t *ext = (netif_extra_info_t*) RING_GET_REQUEST(tx_ring, idx); pkt->extra.type = ext->type; switch (pkt->extra.type) { case XEN_NETIF_EXTRA_TYPE_GSO: pkt->extra.u.gso = ext->u.gso; break; default: /* * The reference Linux netfront driver will * never set any other extra.type. So we don't * know what to do with it. Let's print an * error, then consume and discard the packet */ printf("xnb(%s:%d): Unknown extra info type %d." " Discarding packet\n", __func__, __LINE__, pkt->extra.type); xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start)); xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx)); discard = 1; break; } pkt->extra.flags = ext->flags; if (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE) { /* * The reference Linux netfront driver never sets this * flag (nor does any other known netfront). So we * will discard the packet. */ printf("xnb(%s:%d): Request sets " "XEN_NETIF_EXTRA_FLAG_MORE, but we can't handle " "that\n", __func__, __LINE__); xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start)); xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx)); discard = 1; } idx++; } /* Set cdr. If there is not more data, cdr is invalid */ pkt->cdr = idx; /* Loop on remainder of packet */ while (more_data && RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) { netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx); pkt->list_len++; cdr_size += tx->size; if (tx->flags & ~NETTXF_more_data) { /* There should be no other flags set at this point */ printf("xnb(%s:%d): Request sets unknown flags %d " "after the 1st request in the packet.\n", __func__, __LINE__, tx->flags); xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start)); xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx)); } more_data = tx->flags & NETTXF_more_data; idx++; } /* Finalize packet */ if (more_data != 0) { /* The ring ran out of requests before finishing the packet */ xnb_pkt_invalidate(pkt); idx = start; /* tell caller that we consumed no requests */ } else { /* Calculate car_size */ pkt->car_size = pkt->size - cdr_size; } if (discard != 0) { xnb_pkt_invalidate(pkt); } return idx - start; }
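/*
 * For example (indices are illustrative), a three-request packet whose
 * first request sits in ring slot 10 and that carries no extra info
 * would be described as:
 *
 *   pkt.car = 10;        first data-bearing request
 *   pkt.cdr = 11;        second request; slot 12 follows sequentially
 *   pkt.list_len = 3;    total data-bearing requests
 *   pkt.size = total bytes; pkt.car_size = bytes in slot 10
 */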
/** * Respond to all the requests that constituted pkt. Builds the responses and * writes them to the ring, but doesn't push them to the shared ring. * \param[in] pkt the packet that needs a response * \param[in] error true if there was an error handling the packet, such * as in the hypervisor copy op or mbuf allocation * \param[out] ring Responses go here */ static void xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring, int error) { /* * Outline: * 1) Respond to the first request * 2) Respond to the extra info request * 3) Loop through every remaining request in the packet, generating * responses that copy those requests' ids and set the status * appropriately. */ netif_tx_request_t *tx; netif_tx_response_t *rsp; int i; uint16_t status; status = (xnb_pkt_is_valid(pkt) == 0) || error ? NETIF_RSP_ERROR : NETIF_RSP_OKAY; KASSERT((pkt->list_len == 0) || (ring->rsp_prod_pvt == pkt->car), ("Cannot respond to ring requests out of order")); if (pkt->list_len >= 1) { uint16_t id; tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt); id = tx->id; rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); rsp->id = id; rsp->status = status; ring->rsp_prod_pvt++; if (pkt->flags & NETRXF_extra_info) { rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); rsp->status = NETIF_RSP_NULL; ring->rsp_prod_pvt++; } } for (i=0; i < pkt->list_len - 1; i++) { uint16_t id; tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt); id = tx->id; rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); rsp->id = id; rsp->status = status; ring->rsp_prod_pvt++; } } /** * Create an mbuf chain to represent a packet. Initializes all of the headers * in the mbuf chain, but does not copy the data. The returned chain must be * free()'d when no longer needed * \param[in] pkt A packet to model the mbuf chain after * \return A newly allocated mbuf chain, possibly with clusters attached. * NULL on failure */ static struct mbuf* xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp) { /** * \todo consider using a memory pool for mbufs instead of * reallocating them for every packet */ /** \todo handle extra data */ struct mbuf *m; m = m_getm(NULL, pkt->size, M_NOWAIT, MT_DATA); if (m != NULL) { m->m_pkthdr.rcvif = ifp; if (pkt->flags & NETTXF_data_validated) { /* * We lie to the host OS and always tell it that the * checksums are ok, because the packet is unlikely to * get corrupted going across domains.
*/ m->m_pkthdr.csum_flags = ( CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR ); m->m_pkthdr.csum_data = 0xffff; } } return m; } /** * Build a gnttab_copy table that can be used to copy data from a pkt * to an mbufc. Does not actually perform the copy. Always uses gref's on * the packet side. * \param[in] pkt pkt's associated requests form the src for * the copy operation * \param[in] mbufc mbufc's storage forms the dest for the copy operation * \param[out] gnttab Storage for the returned grant table * \param[in] txb Pointer to the backend ring structure * \param[in] otherend_id The domain ID of the other end of the copy * \return The number of gnttab entries filled */ static int xnb_txpkt2gnttab(const struct xnb_pkt *pkt, struct mbuf *mbufc, gnttab_copy_table gnttab, const netif_tx_back_ring_t *txb, domid_t otherend_id) { struct mbuf *mbuf = mbufc;/* current mbuf within the chain */ int gnt_idx = 0; /* index into grant table */ RING_IDX r_idx = pkt->car; /* index into tx ring buffer */ int r_ofs = 0; /* offset of next data within tx request's data area */ int m_ofs = 0; /* offset of next data within mbuf's data area */ /* size in bytes that still needs to be represented in the table */ uint16_t size_remaining = pkt->size; while (size_remaining > 0) { const netif_tx_request_t *txq = RING_GET_REQUEST(txb, r_idx); const size_t mbuf_space = M_TRAILINGSPACE(mbuf) - m_ofs; const size_t req_size = r_idx == pkt->car ? pkt->car_size : txq->size; const size_t pkt_space = req_size - r_ofs; /* * space is the largest amount of data that can be copied in the * grant table's next entry */ const size_t space = MIN(pkt_space, mbuf_space); /* TODO: handle this error condition without panicking */ KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short")); gnttab[gnt_idx].source.u.ref = txq->gref; gnttab[gnt_idx].source.domid = otherend_id; gnttab[gnt_idx].source.offset = txq->offset + r_ofs; gnttab[gnt_idx].dest.u.gmfn = virt_to_mfn( mtod(mbuf, vm_offset_t) + m_ofs); gnttab[gnt_idx].dest.offset = virt_to_offset( mtod(mbuf, vm_offset_t) + m_ofs); gnttab[gnt_idx].dest.domid = DOMID_SELF; gnttab[gnt_idx].len = space; gnttab[gnt_idx].flags = GNTCOPY_source_gref; gnt_idx++; r_ofs += space; m_ofs += space; size_remaining -= space; if (req_size - r_ofs <= 0) { /* Must move to the next tx request */ r_ofs = 0; r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1; } if (M_TRAILINGSPACE(mbuf) - m_ofs <= 0) { /* Must move to the next mbuf */ m_ofs = 0; mbuf = mbuf->m_next; } } return gnt_idx; } /** * Check the status of the grant copy operations, and update the mbuf chain's * various non-data fields to reflect the data present. * \param[in,out] mbufc mbuf chain to update. The chain must be valid and of * the correct length, and data should already be present * \param[in] gnttab A grant table for a just completed copy op * \param[in] n_entries The number of valid entries in the grant table */ static void xnb_update_mbufc(struct mbuf *mbufc, const gnttab_copy_table gnttab, int n_entries) { struct mbuf *mbuf = mbufc; int i; size_t total_size = 0; for (i = 0; i < n_entries; i++) { KASSERT(gnttab[i].status == GNTST_okay, ("Some gnttab_copy entry had error status %hd\n", gnttab[i].status)); mbuf->m_len += gnttab[i].len; total_size += gnttab[i].len; if (M_TRAILINGSPACE(mbuf) <= 0) { mbuf = mbuf->m_next; } } mbufc->m_pkthdr.len = total_size; #if defined(INET) || defined(INET6) xnb_add_mbuf_cksum(mbufc); #endif } /** * Dequeue at most one packet from the shared ring * \param[in,out] txb Netif tx ring.
A packet will be removed from it, and * its private indices will be updated. But the indices
* will not be pushed to the shared ring. * \param[in] ifnet Interface to which the packet
will be sent * \param[in] otherend Domain ID of the other end of the ring * \param[out] mbufc
The assembled mbuf chain, ready to send to the generic * networking stack * \param[in,out]
gnttab Pointer to enough memory for a grant table. We make * this a function parameter so
that we will take less * stack space. * \return An error code */ static int
xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, struct mbuf **mbufc, struct ifnet
*ifnet, gnttab_copy_table gnttab) { struct xnb_pkt pkt; /* number of tx requests consumed to
build the last packet */ int num_consumed; int nr_ents; *mbufc = NULL; num_consumed =
xnb_ring2pkt(&pkt, txb, txb->req_cons); if (num_consumed == 0) return 0; /* Nothing to
receive */ /* update statistics independent of errors */ if_inc_counter(ifnet,
IFCOUNTER_IPACKETS, 1); /* * if we got here, then one or more requests were consumed, but the
packet * is not necessarily valid. */ if (xnb_pkt_is_valid(&pkt) == 0) { /* got a garbage
packet, respond and drop it */ xnb_txpkt2rsp(&pkt, txb, 1); txb->req_cons += num_consumed;
DPRINTF("xnb_intr: garbage packet, num_consumed=%d\n", num_consumed);
if_inc_counter(ifnet, IFCOUNTER_IERRORS, 1); return EINVAL; } *mbufc = xnb_pkt2mbufc(&pkt,
ifnet); if (*mbufc == NULL) { /* * Couldn't allocate mbufs. Respond and drop the packet. Do *
not consume the requests */ xnb_txpkt2rsp(&pkt, txb, 1); DPRINTF("xnb_intr: Couldn't allocate
mbufs, num_consumed=%d\n", num_consumed); if_inc_counter(ifnet, IFCOUNTER_IQDROPS, 1); return
ENOMEM; } nr_ents = xnb_txpkt2gnttab(&pkt, *mbufc, gnttab, txb, otherend); if (nr_ents > 0) {
int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, gnttab, nr_ents);
KASSERT(hv_ret == 0, ("HYPERVISOR_grant_table_op returned %d\n", hv_ret));
xnb_update_mbufc(*mbufc, gnttab, nr_ents); } xnb_txpkt2rsp(&pkt, txb, 0); txb->req_cons +=
num_consumed; return 0; } /** * Create an xnb_pkt based on the contents of an mbuf chain. *
\param[in] mbufc mbuf chain to transform into a packet * \param[out] pkt Storage for the
newly generated xnb_pkt * \param[in] start The ring index of the first available slot in the
rx * ring * \param[in] space The number of free slots in the rx ring * \retval 0 Success *
\retval EINVAL mbufc was corrupt or not convertible into a pkt * \retval EAGAIN There was not
enough space in the ring to queue the * packet */ static int xnb_mbufc2pkt(const struct mbuf
*mbufc, struct xnb_pkt *pkt, RING_IDX start, int space) { int retval = 0; if ((mbufc == NULL)
|| ((mbufc->m_flags & M_PKTHDR) == 0) || (mbufc->m_pkthdr.len == 0)) {
xnb_pkt_invalidate(pkt); retval = EINVAL; } else { int slots_required;
xnb_pkt_validate(pkt); pkt->flags = 0; pkt->size = mbufc->m_pkthdr.len; pkt->car = start;
pkt->car_size = mbufc->m_len; if (mbufc->m_pkthdr.csum_flags & CSUM_TSO) { pkt->flags |=
NETRXF_extra_info; pkt->extra.u.gso.size = mbufc->m_pkthdr.tso_segsz; pkt->extra.u.gso.type =
XEN_NETIF_GSO_TYPE_TCPV4; pkt->extra.u.gso.pad = 0; pkt->extra.u.gso.features = 0;
pkt->extra.type = XEN_NETIF_EXTRA_TYPE_GSO; pkt->extra.flags = 0; pkt->cdr = start + 2; }
else { pkt->cdr = start + 1; } if (mbufc->m_pkthdr.csum_flags & (CSUM_TSO |
CSUM_DELAY_DATA)) { pkt->flags |= (NETRXF_csum_blank | NETRXF_data_validated); } /* * Each
ring response can have up to PAGE_SIZE of data.
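 * (For example, with 4 KiB pages a 9000-byte packet would need
 * howmany(9000, PAGE_SIZE) == 3 responses.)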
* Assume that we can defragment the mbuf chain efficiently * into responses so that each
response but the last uses all * PAGE_SIZE bytes. */ pkt->list_len = howmany(pkt->size,
PAGE_SIZE); if (pkt->list_len > 1) { pkt->flags |= NETRXF_more_data; } slots_required =
pkt->list_len + (pkt->flags & NETRXF_extra_info ? 1 : 0); if (slots_required > space) {
xnb_pkt_invalidate(pkt); retval = EAGAIN; } } return retval; } /** * Build a gnttab_copy
table that can be used to copy data from an mbuf chain * to the frontend's shared buffers.
Does not actually perform the copy. * Always uses grefs on the other end's side. * \param[in]
pkt pkt's associated responses form the dest for the copy * operation * \param[in] mbufc The
source for the copy operation * \param[out] gnttab Storage for the returned grant table *
\param[in] rxb Pointer to the backend ring structure * \param[in] otherend_id The domain ID
of the other end of the copy * \return The number of gnttab entries filled */ static int
xnb_rxpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc, gnttab_copy_table
gnttab, const netif_rx_back_ring_t *rxb, domid_t otherend_id) { const struct mbuf *mbuf =
mbufc; /* current mbuf within the chain */ int gnt_idx = 0; /* index into grant table */
RING_IDX r_idx = pkt->car; /* index into rx ring buffer */ int r_ofs = 0; /* offset of next
data within rx request's data area */ int m_ofs = 0; /* offset of next data within mbuf's
data area */ /* size in bytes that still needs to be represented in the table */ uint16_t
size_remaining; size_remaining = (xnb_pkt_is_valid(pkt) != 0) ? pkt->size : 0; while
(size_remaining > 0) { const netif_rx_request_t *rxq = RING_GET_REQUEST(rxb, r_idx); const
size_t mbuf_space = mbuf->m_len - m_ofs; /* Xen shared pages have an implied size of
PAGE_SIZE */ const size_t req_size = PAGE_SIZE; const size_t pkt_space = req_size - r_ofs; /*
* space is the largest amount of data that can be copied in the * grant table's next entry
*/ const size_t space = MIN(pkt_space, mbuf_space); /* TODO: handle this error condition
without panicking */ KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));
gnttab[gnt_idx].dest.u.ref = rxq->gref; gnttab[gnt_idx].dest.domid = otherend_id;
gnttab[gnt_idx].dest.offset = r_ofs; gnttab[gnt_idx].source.u.gmfn = virt_to_mfn( mtod(mbuf,
vm_offset_t) + m_ofs); gnttab[gnt_idx].source.offset = virt_to_offset( mtod(mbuf,
vm_offset_t) + m_ofs); gnttab[gnt_idx].source.domid = DOMID_SELF; gnttab[gnt_idx].len =
space; gnttab[gnt_idx].flags = GNTCOPY_dest_gref; gnt_idx++; r_ofs += space; m_ofs += space;
size_remaining -= space; if (req_size - r_ofs <= 0) { /* Must move to the next rx request */
r_ofs = 0; r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1; } if (mbuf->m_len - m_ofs <=
0) { /* Must move to the next mbuf */ m_ofs = 0; mbuf = mbuf->m_next; } } return gnt_idx; }
/** * Generates responses for all the requests that constituted pkt. Builds * responses and
writes them to the ring, but doesn't push the shared ring * indices. * \param[in] pkt the
packet that needs a response * \param[in] gnttab The grant copy table corresponding to this
packet. * Used to determine how many netif_rx_response_t's to * generate.
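 * (Response boundaries are found by looking for distinct grefs in
 * the table; entries that share a gref were split only on mbuf
 * boundaries.)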
* \param[in] n_entries Number of relevant entries in the grant table * \param[out] ring
Responses go here * \return The number of RX requests that were consumed to generate * the
responses */ static int xnb_rxpkt2rsp(const struct xnb_pkt *pkt, const gnttab_copy_table
gnttab, int n_entries, netif_rx_back_ring_t *ring) { /* * This code makes the following
assumptions: * * All entries in gnttab set GNTCOPY_dest_gref * * The entries in gnttab are
grouped by their grefs: any two * entries with the same gref must be adjacent */ int error =
0; int gnt_idx, i; int n_responses = 0; grant_ref_t last_gref = GRANT_REF_INVALID; RING_IDX
r_idx; KASSERT(gnttab != NULL, ("Received a null grant table copy")); /* * In the event of an
error, we only need to send one response to the * netfront. In that case, we mustn't write
any data to the responses * after the one we send. So we must loop all the way through gnttab
* looking for errors before we generate any responses. * * Since we're looping through the
grant table anyway, we'll count the * number of different grefs in it, which will tell us how
many * responses to generate */ for (gnt_idx = 0; gnt_idx < n_entries; gnt_idx++) { int16_t
status = gnttab[gnt_idx].status; if (status != GNTST_okay) { DPRINTF( "Got error %d for
hypervisor gnttab_copy status\n", status); error = 1; break; } if
(gnttab[gnt_idx].dest.u.ref != last_gref) { n_responses++; last_gref =
gnttab[gnt_idx].dest.u.ref; } } if (error != 0) { uint16_t id; netif_rx_response_t *rsp; id =
RING_GET_REQUEST(ring, ring->rsp_prod_pvt)->id; rsp = RING_GET_RESPONSE(ring,
ring->rsp_prod_pvt); rsp->id = id; rsp->status = NETIF_RSP_ERROR; n_responses = 1; } else {
gnt_idx = 0; const int has_extra = pkt->flags & NETRXF_extra_info; if (has_extra != 0)
n_responses++; for (i = 0; i < n_responses; i++) { netif_rx_request_t rxq;
netif_rx_response_t *rsp; r_idx = ring->rsp_prod_pvt + i; /* * We copy the structure of rxq
instead of making a * pointer because it shares the same memory as rsp. */ rxq =
*(RING_GET_REQUEST(ring, r_idx)); rsp = RING_GET_RESPONSE(ring, r_idx); if (has_extra && (i
== 1)) { netif_extra_info_t *ext = (netif_extra_info_t*)rsp; ext->type =
XEN_NETIF_EXTRA_TYPE_GSO; ext->flags = 0; ext->u.gso.size = pkt->extra.u.gso.size;
ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; ext->u.gso.pad = 0; ext->u.gso.features = 0; }
else { rsp->id = rxq.id; rsp->status = GNTST_okay; rsp->offset = 0; rsp->flags = 0; if (i <
pkt->list_len - 1) rsp->flags |= NETRXF_more_data; if ((i == 0) && has_extra) rsp->flags |=
NETRXF_extra_info; if ((i == 0) && (pkt->flags & NETRXF_data_validated)) { rsp->flags |=
NETRXF_data_validated; rsp->flags |= NETRXF_csum_blank; } rsp->status = 0; for (;
gnttab[gnt_idx].dest.u.ref == rxq.gref; gnt_idx++) { rsp->status += gnttab[gnt_idx].len; } }
} } ring->req_cons += n_responses; ring->rsp_prod_pvt += n_responses; return n_responses; }
#if defined(INET) || defined(INET6) /** * Add IP, TCP, and/or UDP checksums to every mbuf in
a chain. The first mbuf * in the chain must start with a struct ether_header. * * XXX This
function will perform incorrectly on UDP packets that are split up * into multiple ethernet
frames.
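 * (Each IP fragment arrives as its own frame, so the checksum computed
 * here would only cover the first fragment, not the whole datagram.)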
*/ static void xnb_add_mbuf_cksum(struct mbuf *mbufc) { struct ether_header *eh; struct ip
*iph; uint16_t ether_type; eh = mtod(mbufc, struct ether_header*); ether_type =
ntohs(eh->ether_type); if (ether_type != ETHERTYPE_IP) { /* Nothing to calculate */ return; }
iph = (struct ip*)(eh + 1); if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { iph->ip_sum =
0; iph->ip_sum = in_cksum_hdr(iph); } switch (iph->ip_p) { case IPPROTO_TCP: if
(mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { size_t tcplen = ntohs(iph->ip_len) -
sizeof(struct ip); struct tcphdr *th = (struct tcphdr*)(iph + 1); th->th_sum =
in_pseudo(iph->ip_src.s_addr, iph->ip_dst.s_addr, htons(IPPROTO_TCP + tcplen)); th->th_sum =
in_cksum_skip(mbufc, sizeof(struct ether_header) + ntohs(iph->ip_len), sizeof(struct
ether_header) + (iph->ip_hl << 2)); } break; case IPPROTO_UDP: if
(mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { size_t udplen = ntohs(iph->ip_len) -
sizeof(struct ip); struct udphdr *uh = (struct udphdr*)(iph + 1); uh->uh_sum =
in_pseudo(iph->ip_src.s_addr, iph->ip_dst.s_addr, htons(IPPROTO_UDP + udplen)); uh->uh_sum =
in_cksum_skip(mbufc, sizeof(struct ether_header) + ntohs(iph->ip_len), sizeof(struct
ether_header) + (iph->ip_hl << 2)); } break; default: break; } } #endif /* INET || INET6 */
static void xnb_stop(struct xnb_softc *xnb) { struct ifnet *ifp; mtx_assert(&xnb->sc_lock,
MA_OWNED); ifp = xnb->xnb_ifp; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
if_link_state_change(ifp, LINK_STATE_DOWN); } static int xnb_ioctl(struct ifnet *ifp, u_long
cmd, caddr_t data) { struct xnb_softc *xnb = ifp->if_softc; struct ifreq *ifr = (struct
ifreq*) data; #ifdef INET struct ifaddr *ifa = (struct ifaddr*)data; #endif int error = 0;
switch (cmd) { case SIOCSIFFLAGS: mtx_lock(&xnb->sc_lock); if (ifp->if_flags & IFF_UP) {
xnb_ifinit_locked(xnb); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { xnb_stop(xnb); }
} /* * Note: netfront sets a variable named xn_if_flags * here, but that variable is never
read */ mtx_unlock(&xnb->sc_lock); break; case SIOCSIFADDR: #ifdef INET
mtx_lock(&xnb->sc_lock); if (ifa->ifa_addr->sa_family == AF_INET) { ifp->if_flags |= IFF_UP;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { ifp->if_drv_flags &= ~(IFF_DRV_RUNNING |
IFF_DRV_OACTIVE); if_link_state_change(ifp, LINK_STATE_DOWN); ifp->if_drv_flags |=
IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if_link_state_change(ifp,
LINK_STATE_UP); } arp_ifinit(ifp, ifa); mtx_unlock(&xnb->sc_lock); } else {
mtx_unlock(&xnb->sc_lock); #endif error = ether_ioctl(ifp, cmd, data); #ifdef INET } #endif
break; case SIOCSIFCAP: mtx_lock(&xnb->sc_lock); if (ifr->ifr_reqcap & IFCAP_TXCSUM) {
ifp->if_capenable |= IFCAP_TXCSUM; ifp->if_hwassist |= XNB_CSUM_FEATURES; } else {
ifp->if_capenable &= ~(IFCAP_TXCSUM); ifp->if_hwassist &= ~(XNB_CSUM_FEATURES); } if
((ifr->ifr_reqcap & IFCAP_RXCSUM)) { ifp->if_capenable |= IFCAP_RXCSUM; } else {
ifp->if_capenable &= ~(IFCAP_RXCSUM); } /* * TODO enable TSO4 and LRO once we no longer need
* to calculate checksums in software */ #if 0 if (ifr->ifr_reqcap & IFCAP_TSO4) { if
(IFCAP_TXCSUM & ifp->if_capenable) { printf("xnb: Xen netif requires that " "TXCSUM be
enabled in order " "to use TSO4\n"); error = EINVAL; } else { ifp->if_capenable |=
IFCAP_TSO4; ifp->if_hwassist |= CSUM_TSO; } } else { ifp->if_capenable &= ~(IFCAP_TSO4);
ifp->if_hwassist &= ~(CSUM_TSO); } if (ifr->ifr_reqcap & IFCAP_LRO) { ifp->if_capenable |=
IFCAP_LRO; } else { ifp->if_capenable &= ~(IFCAP_LRO); } #endif mtx_unlock(&xnb->sc_lock);
break; case
SIOCSIFMTU: ifp->if_mtu = ifr->ifr_mtu; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xnb_ifinit(xnb); break; case SIOCADDMULTI: case SIOCDELMULTI: case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &xnb->sc_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void xnb_start_locked(struct ifnet *ifp) { netif_rx_back_ring_t *rxb; struct xnb_softc *xnb; struct mbuf *mbufc; RING_IDX req_prod_local; xnb = ifp->if_softc; rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring; if (!xnb->carrier) return; do { int out_of_space = 0; int notify; req_prod_local = rxb->sring->req_prod; xen_rmb(); for (;;) { int error; IF_DEQUEUE(&ifp->if_snd, mbufc); if (mbufc == NULL) break; error = xnb_send(rxb, xnb->otherend_id, mbufc, xnb->rx_gnttab); switch (error) { case EAGAIN: /* * Insufficient space in the ring. * Requeue pkt and send when space is * available. */ IF_PREPEND(&ifp->if_snd, mbufc); /* * Perhaps the frontend missed an IRQ * and went to sleep. Notify it to wake * it up. */ out_of_space = 1; break; case EINVAL: /* OS gave a corrupt packet. Drop it.*/ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* FALLTHROUGH */ default: /* Send succeeded, or packet had error. * Free the packet */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if (mbufc) m_freem(mbufc); break; } if (out_of_space != 0) break; } RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(rxb, notify); if ((notify != 0) || (out_of_space != 0)) xen_intr_signal(xnb->xen_intr_handle); rxb->sring->req_event = req_prod_local + 1; xen_mb(); } while (rxb->sring->req_prod != req_prod_local) ; } /** * Sends one packet to the ring. Blocks until the packet is on the ring * \param[in] mbufc Contains one packet to send. Caller must free * \param[in,out] rxb The packet will be pushed onto this ring, but the * otherend will not be notified. * \param[in] otherend The domain ID of the other end of the connection * \retval EAGAIN The ring did not have enough space for the packet. * The ring has not been modified * \param[in,out] gnttab Pointer to enough memory for a grant table. We make * this a function parameter so that we will take less * stack space. 
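 * \retval	0	Success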
* \retval EINVAL mbufc was corrupt or not convertible into a pkt */ static int
xnb_send(netif_rx_back_ring_t *ring, domid_t otherend, const struct mbuf *mbufc,
gnttab_copy_table gnttab) { struct xnb_pkt pkt; int error, n_entries, n_reqs; RING_IDX space;
space = ring->sring->req_prod - ring->req_cons; error = xnb_mbufc2pkt(mbufc, &pkt,
ring->rsp_prod_pvt, space); if (error != 0) return error; n_entries = xnb_rxpkt2gnttab(&pkt,
mbufc, gnttab, ring, otherend); if (n_entries != 0) { int __unused hv_ret =
HYPERVISOR_grant_table_op(GNTTABOP_copy, gnttab, n_entries); KASSERT(hv_ret == 0,
("HYPERVISOR_grant_table_op returned %d\n", hv_ret)); } n_reqs = xnb_rxpkt2rsp(&pkt, gnttab,
n_entries, ring); return 0; } static void xnb_start(struct ifnet *ifp) { struct xnb_softc
*xnb; xnb = ifp->if_softc; mtx_lock(&xnb->rx_lock); xnb_start_locked(ifp);
mtx_unlock(&xnb->rx_lock); } /* equivalent of network_open() in Linux */ static void
xnb_ifinit_locked(struct xnb_softc *xnb) { struct ifnet *ifp; ifp = xnb->xnb_ifp;
mtx_assert(&xnb->sc_lock, MA_OWNED); if (ifp->if_drv_flags & IFF_DRV_RUNNING) return;
xnb_stop(xnb); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
if_link_state_change(ifp, LINK_STATE_UP); } static void xnb_ifinit(void *xsc) { struct
xnb_softc *xnb = xsc; mtx_lock(&xnb->sc_lock); xnb_ifinit_locked(xnb);
mtx_unlock(&xnb->sc_lock); } /** * Callback used by the generic networking code to tell us
when our carrier * state has changed. Since we don't have a physical carrier, we don't care.
*/ static int xnb_ifmedia_upd(struct ifnet *ifp) { return (0); } /** * Callback used by the
generic networking code to ask us what our carrier * state is. Since we don't have a physical
carrier, this is very simple. */ static void xnb_ifmedia_sts(struct ifnet *ifp, struct
ifmediareq *ifmr) { ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE; ifmr->ifm_active =
IFM_ETHER|IFM_MANUAL; } /*---------------------------- NewBus Registration
---------------------------*/ static device_method_t xnb_methods[] = { /* Device interface */
DEVMETHOD(device_probe, xnb_probe), DEVMETHOD(device_attach, xnb_attach),
DEVMETHOD(device_detach, xnb_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown),
DEVMETHOD(device_suspend, xnb_suspend), DEVMETHOD(device_resume, xnb_resume), /* Xenbus
interface */ DEVMETHOD(xenbus_otherend_changed, xnb_frontend_changed), { 0, 0 } }; static
driver_t xnb_driver = { "xnb", xnb_methods, sizeof(struct xnb_softc), }; devclass_t
xnb_devclass; DRIVER_MODULE(xnb, xenbusb_back, xnb_driver, xnb_devclass, 0, 0);
/*-------------------------- Unit Tests -------------------------------------*/ #ifdef
XNB_DEBUG #include "netback_unit_tests.c" #endif Index: head/sys/i386/ibcs2/syscalls.master
=================================================================== ---
head/sys/i386/ibcs2/syscalls.master (revision 329872) +++ head/sys/i386/ibcs2/syscalls.master
(revision 329873) @@ -1,205 +1,205 @@ $FreeBSD$ ; @(#)syscalls.master 8.1 (Berkeley) 7/19/93
; System call name/number master file (or rather, slave, from IBCS2). ; Processed to create
ibcs2_sysent.c, ibcs2_syscalls.c and ibcs2_syscall.h. ; Columns: number audit type nargs name
alt{name,tag,rtyp}/comments ; number system call number, must be in order ; audit the audit
event associated with the system call ; A value of AUE_NULL means no auditing, but it also
means that ; there is no audit event for the call at this time.
For the ; case where the event exists, but we don't want auditing, the ; event should be #defined to AUE_NULL in audit_kevents.h. ; type one of STD, OBSOL, UNIMPL, COMPAT -; name psuedo-prototype of syscall routine +; name pseudo-prototype of syscall routine ; If one of the following alts is different, then all appear: ; altname name of system call if different ; alttag name of args struct tag if different from [o]`name'"_args" ; altrtyp return type if not int (bogus - syscalls always return int) ; for UNIMPL/OBSOL, name continues with comments ; types: ; STD always included ; COMPAT included on COMPAT #ifdef ; OBSOL obsolete, not included in system, only specifies name ; UNIMPL not implemented, placeholder only #include #include #include #include #include #include ; #ifdef's, etc. may be included, and are copied to the output files. 0 AUE_NULL NOPROTO { int nosys(void); } syscall nosys_args int 1 AUE_EXIT NOPROTO { void sys_exit(int rval); } exit \ sys_exit_args void 2 AUE_FORK NOPROTO { int fork(void); } 3 AUE_NULL STD { int ibcs2_read(int fd, char *buf, \ u_int nbytes); } 4 AUE_NULL NOPROTO { int write(int fd, char *buf, \ u_int nbytes); } 5 AUE_OPEN_RWTC STD { int ibcs2_open(char *path, int flags, \ int mode); } 6 AUE_CLOSE NOPROTO { int close(int fd); } 7 AUE_WAIT4 STD { int ibcs2_wait(int a1, int a2, int a3); } 8 AUE_CREAT STD { int ibcs2_creat(char *path, int mode); } 9 AUE_LINK NOPROTO { int link(char *path, char *link); } 10 AUE_UNLINK STD { int ibcs2_unlink(char *path); } 11 AUE_EXECVE STD { int ibcs2_execv(char *path, char **argp); } 12 AUE_CHDIR STD { int ibcs2_chdir(char *path); } 13 AUE_NULL STD { int ibcs2_time(ibcs2_time_t *tp); } 14 AUE_MKNOD STD { int ibcs2_mknod(char* path, int mode, \ int dev); } 15 AUE_CHMOD STD { int ibcs2_chmod(char *path, int mode); } 16 AUE_CHOWN STD { int ibcs2_chown(char *path, int uid, \ int gid); } 17 AUE_NULL NOPROTO { int obreak(caddr_t nsize); } 18 AUE_STAT STD { int ibcs2_stat(char* path, \ struct ibcs2_stat *st); } 19 AUE_LSEEK STD { long ibcs2_lseek(int fd, long offset, \ int whence); } 20 AUE_NULL NOPROTO { pid_t getpid(void); } 21 AUE_MOUNT STD { int ibcs2_mount(char *special, char *dir, \ int flags, int fstype, char *data, \ int len); } 22 AUE_UMOUNT STD { int ibcs2_umount(char *name); } 23 AUE_SETUID STD { int ibcs2_setuid(int uid); } 24 AUE_GETUID NOPROTO { uid_t getuid(void); } 25 AUE_SETTIMEOFDAY STD { int ibcs2_stime(long *timep); } 26 AUE_PTRACE NOPROTO { int ptrace(int req, pid_t pid, \ caddr_t addr, int data); } 27 AUE_NULL STD { int ibcs2_alarm(unsigned sec); } 28 AUE_FSTAT STD { int ibcs2_fstat(int fd, \ struct ibcs2_stat *st); } 29 AUE_NULL STD { int ibcs2_pause(void); } 30 AUE_NULL STD { int ibcs2_utime(char *path, \ struct ibcs2_utimbuf *buf); } 31 AUE_NULL UNIMPL ibcs2_stty 32 AUE_NULL UNIMPL ibcs2_gtty 33 AUE_ACCESS STD { int ibcs2_access(char *path, int amode); } 34 AUE_NICE STD { int ibcs2_nice(int incr); } 35 AUE_STATFS STD { int ibcs2_statfs(char *path, \ struct ibcs2_statfs *buf, int len, \ int fstype); } 36 AUE_NULL NOPROTO { int sync(void); } 37 AUE_KILL STD { int ibcs2_kill(int pid, int signo); } 38 AUE_FSTATFS STD { int ibcs2_fstatfs(int fd, \ struct ibcs2_statfs *buf, int len, \ int fstype); } 39 AUE_NULL STD { int ibcs2_pgrpsys(int type, caddr_t dummy, \ int pid, int pgid); } 40 AUE_NULL STD { int ibcs2_xenix(int a1, int a2, int a3, \ int a4, int a5); } 41 AUE_NULL NOPROTO { int dup(u_int fd); } 42 AUE_PIPE NOPROTO { int pipe(void); } 43 AUE_NULL STD { int ibcs2_times(struct tms *tp); } 44 AUE_PROFILE NOPROTO 
{ int profil(caddr_t samples, u_int size, \ u_int offset, u_int scale); } 45 AUE_NULL STD { int ibcs2_plock(int cmd); } 46 AUE_SETGID STD { int ibcs2_setgid(int gid); } 47 AUE_GETGID NOPROTO { gid_t getgid(void); } 48 AUE_NULL STD { int ibcs2_sigsys(int sig, ibcs2_sig_t fp); } 49 AUE_MSGSYS STD { int ibcs2_msgsys(int which, int a2, \ int a3, int a4, int a5, int a6); } 50 AUE_NULL STD { int ibcs2_sysi86(int cmd, int *arg); } 51 AUE_NULL UNIMPL ibcs2_acct 52 AUE_SHMSYS STD { int ibcs2_shmsys(int which, int a2, \ int a3, int a4); } 53 AUE_SEMSYS STD { int ibcs2_semsys(int which, int a2, \ int a3, int a4, int a5); } 54 AUE_IOCTL STD { int ibcs2_ioctl(int fd, int cmd, \ caddr_t data); } 55 AUE_NULL STD { int ibcs2_uadmin(int cmd, int func, \ caddr_t data); } 56 AUE_NULL UNIMPL nosys 57 AUE_NULL STD { int ibcs2_utssys(int a1, int a2, \ int flag); } 58 AUE_FSYNC NOPROTO { int fsync(int fd); } 59 AUE_EXECVE STD { int ibcs2_execve(char *path, char **argp, \ char **envp); } 60 AUE_UMASK NOPROTO { int umask(int newmask); } 61 AUE_CHROOT NOPROTO { int chroot(char *path); } 62 AUE_FCNTL STD { int ibcs2_fcntl(int fd, int cmd, \ char *arg); } 63 AUE_NULL STD { long ibcs2_ulimit(int cmd, int newlimit); } 64 AUE_NULL UNIMPL reserved for unix/pc 65 AUE_NULL UNIMPL reserved for unix/pc 66 AUE_NULL UNIMPL reserved for unix/pc 67 AUE_NULL UNIMPL reserved for unix/pc 68 AUE_NULL UNIMPL reserved for unix/pc 69 AUE_NULL UNIMPL reserved for unix/pc 70 AUE_NULL OBSOL rfs_advfs 71 AUE_NULL OBSOL rfs_unadvfs 72 AUE_NULL OBSOL rfs_rmount 73 AUE_NULL OBSOL rfs_rumount 74 AUE_NULL OBSOL rfs_rfstart 75 AUE_NULL OBSOL rfs_sigret 76 AUE_NULL OBSOL rfs_rdebug 77 AUE_NULL OBSOL rfs_rfstop 78 AUE_NULL UNIMPL rfs_rfsys 79 AUE_RMDIR STD { int ibcs2_rmdir(char *path); } 80 AUE_MKDIR STD { int ibcs2_mkdir(char *path, int mode); } 81 AUE_GETDIRENTRIES STD { int ibcs2_getdents(int fd, char *buf, \ int nbytes); } 82 AUE_NULL UNIMPL nosys 83 AUE_NULL UNIMPL nosys 84 AUE_NULL STD { int ibcs2_sysfs(int cmd, caddr_t d1, \ char *buf); } 85 AUE_GETMSG STD { int ibcs2_getmsg(int fd, \ struct ibcs2_stropts *ctl, \ struct ibcs2_stropts *dat, int *flags); } 86 AUE_PUTMSG STD { int ibcs2_putmsg(int fd, \ struct ibcs2_stropts *ctl, \ struct ibcs2_stropts *dat, int flags); } 87 AUE_POLL NOPROTO { int poll(struct pollfd *fds, u_int nfds, \ int timeout); } 88 AUE_NULL UNIMPL nosys 89 AUE_NULL STD { int ibcs2_secure(int cmd, int a1, int a2, \ int a3, int a4, int a5); } 90 AUE_SYMLINK STD { int ibcs2_symlink(char *path, \ char *link); } 91 AUE_LSTAT STD { int ibcs2_lstat(char *path, \ struct ibcs2_stat *st); } 92 AUE_READLINK STD { int ibcs2_readlink(char *path, char *buf, \ int count); } 93 AUE_NULL UNIMPL nosys 94 AUE_NULL UNIMPL nosys 95 AUE_NULL UNIMPL nosys 96 AUE_NULL UNIMPL nosys 97 AUE_NULL UNIMPL nosys 98 AUE_NULL UNIMPL nosys 99 AUE_NULL UNIMPL nosys 100 AUE_NULL UNIMPL nosys 101 AUE_NULL UNIMPL nosys 102 AUE_NULL UNIMPL nosys 103 AUE_NULL NOPROTO { int sigreturn( \ struct sigcontext *sigcntxp); } 104 AUE_NULL UNIMPL nosys 105 AUE_NULL STD { int ibcs2_isc(void); } 106 AUE_NULL UNIMPL nosys 107 AUE_NULL UNIMPL nosys 108 AUE_NULL UNIMPL nosys 109 AUE_NULL UNIMPL nosys 110 AUE_NULL UNIMPL nosys 111 AUE_NULL UNIMPL nosys 112 AUE_NULL UNIMPL nosys 113 AUE_NULL UNIMPL nosys 114 AUE_NULL UNIMPL nosys 115 AUE_NULL UNIMPL nosys 116 AUE_NULL UNIMPL nosys 117 AUE_NULL UNIMPL nosys 118 AUE_NULL UNIMPL nosys 119 AUE_NULL UNIMPL nosys 120 AUE_NULL UNIMPL nosys 121 AUE_NULL UNIMPL nosys 122 AUE_NULL UNIMPL nosys 123 AUE_NULL UNIMPL nosys 124 AUE_NULL 
UNIMPL nosys 125 AUE_NULL UNIMPL nosys 126 AUE_NULL UNIMPL nosys 127 AUE_NULL UNIMPL nosys ; vim: syntax=off Index: head/sys/i386/linux/syscalls.master =================================================================== --- head/sys/i386/linux/syscalls.master (revision 329872) +++ head/sys/i386/linux/syscalls.master (revision 329873) @@ -1,700 +1,700 @@ $FreeBSD$ ; @(#)syscalls.master 8.1 (Berkeley) 7/19/93 ; System call name/number master file (or rather, slave, from LINUX). ; Processed to create linux_sysent.c, linux_proto.h and linux_syscall.h. ; Columns: number audit type nargs name alt{name,tag,rtyp}/comments ; number system call number, must be in order ; audit the audit event associated with the system call ; A value of AUE_NULL means no auditing, but it also means that ; there is no audit event for the call at this time. For the ; case where the event exists, but we don't want auditing, the ; event should be #defined to AUE_NULL in audit_kevents.h. ; type one of STD, NOPROTO, UNIMPL -; name psuedo-prototype of syscall routine +; name pseudo-prototype of syscall routine ; If one of the following alts is different, then all appear: ; altname name of system call if different ; alttag name of args struct tag if different from [o]`name'"_args" ; altrtyp return type if not int (bogus - syscalls always return int) ; for UNIMPL, name continues with comments ; types: ; STD always included ; UNIMPL not implemented, placeholder only ; NOPROTO same as STD except do not create structure or ; function prototype in sys/sysproto.h. Does add a ; definition to syscall.h besides adding a sysent. #include #include #include #include #include #include ; Isn't pretty, but there seems to be no other way to trap nosys #define nosys linux_nosys ; #ifdef's, etc. may be included, and are copied to the output files. 
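; Illustration (names follow the generator's usual conventions, not shown
; in this file): the STD entry for linux_open below produces a
; LINUX_SYS_linux_open number in linux_syscall.h, a struct linux_open_args
; in linux_proto.h, and a sysent slot dispatching to linux_open() in
; linux_sysent.c.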
0 AUE_NULL UNIMPL setup 1 AUE_EXIT STD { void linux_exit(int rval); } 2 AUE_FORK STD { int linux_fork(void); } 3 AUE_NULL NOPROTO { int read(int fd, char *buf, \ u_int nbyte); } 4 AUE_NULL NOPROTO { int write(int fd, char *buf, \ u_int nbyte); } 5 AUE_OPEN_RWTC STD { int linux_open(char *path, l_int flags, \ l_int mode); } 6 AUE_CLOSE NOPROTO { int close(int fd); } 7 AUE_WAIT4 STD { int linux_waitpid(l_pid_t pid, \ l_int *status, l_int options); } 8 AUE_CREAT STD { int linux_creat(char *path, \ l_int mode); } 9 AUE_LINK STD { int linux_link(char *path, char *to); } 10 AUE_UNLINK STD { int linux_unlink(char *path); } 11 AUE_EXECVE STD { int linux_execve(char *path, char **argp, \ char **envp); } 12 AUE_CHDIR STD { int linux_chdir(char *path); } 13 AUE_NULL STD { int linux_time(l_time_t *tm); } 14 AUE_MKNOD STD { int linux_mknod(char *path, l_int mode, \ l_dev_t dev); } 15 AUE_CHMOD STD { int linux_chmod(char *path, \ l_mode_t mode); } 16 AUE_LCHOWN STD { int linux_lchown16(char *path, \ l_uid16_t uid, l_gid16_t gid); } 17 AUE_NULL UNIMPL break 18 AUE_STAT STD { int linux_stat(char *path, \ struct linux_stat *up); } 19 AUE_LSEEK STD { int linux_lseek(l_uint fdes, l_off_t off, \ l_int whence); } 20 AUE_GETPID STD { int linux_getpid(void); } 21 AUE_MOUNT STD { int linux_mount(char *specialfile, \ char *dir, char *filesystemtype, \ l_ulong rwflag, void *data); } 22 AUE_UMOUNT STD { int linux_oldumount(char *path); } 23 AUE_SETUID STD { int linux_setuid16(l_uid16_t uid); } 24 AUE_GETUID STD { int linux_getuid16(void); } 25 AUE_SETTIMEOFDAY STD { int linux_stime(void); } 26 AUE_PTRACE STD { int linux_ptrace(l_long req, l_long pid, \ l_long addr, l_long data); } 27 AUE_NULL STD { int linux_alarm(l_uint secs); } 28 AUE_FSTAT STD { int linux_fstat(l_uint fd, \ struct linux_stat *up); } 29 AUE_NULL STD { int linux_pause(void); } 30 AUE_UTIME STD { int linux_utime(char *fname, \ struct l_utimbuf *times); } 31 AUE_NULL UNIMPL stty 32 AUE_NULL UNIMPL gtty 33 AUE_ACCESS STD { int linux_access(char *path, l_int amode); } 34 AUE_NICE STD { int linux_nice(l_int inc); } 35 AUE_NULL UNIMPL ftime 36 AUE_SYNC NOPROTO { int sync(void); } 37 AUE_KILL STD { int linux_kill(l_int pid, l_int signum); } 38 AUE_RENAME STD { int linux_rename(char *from, char *to); } 39 AUE_MKDIR STD { int linux_mkdir(char *path, l_int mode); } 40 AUE_RMDIR STD { int linux_rmdir(char *path); } 41 AUE_DUP NOPROTO { int dup(u_int fd); } 42 AUE_PIPE STD { int linux_pipe(l_int *pipefds); } 43 AUE_NULL STD { int linux_times(struct l_times_argv *buf); } 44 AUE_NULL UNIMPL prof 45 AUE_NULL STD { int linux_brk(l_ulong dsend); } 46 AUE_SETGID STD { int linux_setgid16(l_gid16_t gid); } 47 AUE_GETGID STD { int linux_getgid16(void); } 48 AUE_NULL STD { int linux_signal(l_int sig, \ void *handler); } 49 AUE_GETEUID STD { int linux_geteuid16(void); } 50 AUE_GETEGID STD { int linux_getegid16(void); } 51 AUE_ACCT NOPROTO { int acct(char *path); } 52 AUE_UMOUNT STD { int linux_umount(char *path, l_int flags); } 53 AUE_NULL UNIMPL lock 54 AUE_IOCTL STD { int linux_ioctl(l_uint fd, l_uint cmd, \ l_ulong arg); } 55 AUE_FCNTL STD { int linux_fcntl(l_uint fd, l_uint cmd, \ l_ulong arg); } 56 AUE_NULL UNIMPL mpx 57 AUE_SETPGRP NOPROTO { int setpgid(int pid, int pgid); } 58 AUE_NULL UNIMPL ulimit 59 AUE_NULL STD { int linux_olduname(void); } 60 AUE_UMASK NOPROTO { int umask(int newmask); } 61 AUE_CHROOT NOPROTO { int chroot(char *path); } 62 AUE_NULL STD { int linux_ustat(l_dev_t dev, \ struct l_ustat *ubuf); } 63 AUE_DUP2 NOPROTO { int dup2(u_int from, u_int 
to); } 64 AUE_GETPPID STD { int linux_getppid(void); } 65 AUE_GETPGRP NOPROTO { int getpgrp(void); } 66 AUE_SETSID NOPROTO { int setsid(void); } 67 AUE_NULL STD { int linux_sigaction(l_int sig, \ l_osigaction_t *nsa, \ l_osigaction_t *osa); } 68 AUE_NULL STD { int linux_sgetmask(void); } 69 AUE_NULL STD { int linux_ssetmask(l_osigset_t mask); } 70 AUE_SETREUID STD { int linux_setreuid16(l_uid16_t ruid, \ l_uid16_t euid); } 71 AUE_SETREGID STD { int linux_setregid16(l_gid16_t rgid, \ l_gid16_t egid); } 72 AUE_NULL STD { int linux_sigsuspend(l_int hist0, \ l_int hist1, l_osigset_t mask); } 73 AUE_NULL STD { int linux_sigpending(l_osigset_t *mask); } 74 AUE_SYSCTL STD { int linux_sethostname(char *hostname, \ u_int len); } 75 AUE_SETRLIMIT STD { int linux_setrlimit(l_uint resource, \ struct l_rlimit *rlim); } 76 AUE_GETRLIMIT STD { int linux_old_getrlimit(l_uint resource, \ struct l_rlimit *rlim); } 77 AUE_GETRUSAGE NOPROTO { int getrusage(int who, \ struct rusage *rusage); } 78 AUE_NULL NOPROTO { int gettimeofday( \ struct timeval *tp, \ struct timezone *tzp); } 79 AUE_SETTIMEOFDAY NOPROTO { int settimeofday( \ struct timeval *tv, \ struct timezone *tzp); } 80 AUE_GETGROUPS STD { int linux_getgroups16(l_uint gidsetsize, \ l_gid16_t *gidset); } 81 AUE_SETGROUPS STD { int linux_setgroups16(l_uint gidsetsize, \ l_gid16_t *gidset); } 82 AUE_SELECT STD { int linux_old_select( \ struct l_old_select_argv *ptr); } 83 AUE_SYMLINK STD { int linux_symlink(char *path, char *to); } ; 84: oldlstat 84 AUE_LSTAT STD { int linux_lstat(char *path, struct l_stat *up); } 85 AUE_READLINK STD { int linux_readlink(char *name, char *buf, \ l_int count); } 86 AUE_USELIB STD { int linux_uselib(char *library); } 87 AUE_SWAPON NOPROTO { int swapon(char *name); } 88 AUE_REBOOT STD { int linux_reboot(l_int magic1, \ l_int magic2, l_uint cmd, void *arg); } ; 89: old_readdir 89 AUE_GETDIRENTRIES STD { int linux_readdir(l_uint fd, \ struct l_dirent *dent, l_uint count); } ; 90: old_mmap 90 AUE_MMAP STD { int linux_mmap(struct l_mmap_argv *ptr); } 91 AUE_MUNMAP NOPROTO { int munmap(caddr_t addr, int len); } 92 AUE_TRUNCATE STD { int linux_truncate(char *path, \ l_ulong length); } 93 AUE_FTRUNCATE STD { int linux_ftruncate(int fd, long length); } 94 AUE_FCHMOD NOPROTO { int fchmod(int fd, int mode); } 95 AUE_FCHOWN NOPROTO { int fchown(int fd, int uid, int gid); } 96 AUE_GETPRIORITY STD { int linux_getpriority(int which, int who); } 97 AUE_SETPRIORITY NOPROTO { int setpriority(int which, int who, \ int prio); } 98 AUE_PROFILE UNIMPL profil 99 AUE_STATFS STD { int linux_statfs(char *path, \ struct l_statfs_buf *buf); } 100 AUE_FSTATFS STD { int linux_fstatfs(l_uint fd, \ struct l_statfs_buf *buf); } 101 AUE_NULL STD { int linux_ioperm(l_ulong start, \ l_ulong length, l_int enable); } 102 AUE_NULL STD { int linux_socketcall(l_int what, \ l_ulong args); } 103 AUE_NULL STD { int linux_syslog(l_int type, char *buf, \ l_int len); } 104 AUE_SETITIMER STD { int linux_setitimer(l_int which, \ struct l_itimerval *itv, \ struct l_itimerval *oitv); } 105 AUE_GETITIMER STD { int linux_getitimer(l_int which, \ struct l_itimerval *itv); } 106 AUE_STAT STD { int linux_newstat(char *path, \ struct l_newstat *buf); } 107 AUE_LSTAT STD { int linux_newlstat(char *path, \ struct l_newstat *buf); } 108 AUE_FSTAT STD { int linux_newfstat(l_uint fd, \ struct l_newstat *buf); } ; 109: olduname 109 AUE_NULL STD { int linux_uname(void); } 110 AUE_NULL STD { int linux_iopl(l_int level); } 111 AUE_NULL STD { int linux_vhangup(void); } 112 AUE_NULL UNIMPL 
idle 113 AUE_NULL STD { int linux_vm86old(void); } 114 AUE_WAIT4 STD { int linux_wait4(l_pid_t pid, \ l_int *status, l_int options, \ void *rusage); } 115 AUE_SWAPOFF STD { int linux_swapoff(void); } 116 AUE_NULL STD { int linux_sysinfo(struct l_sysinfo *info); } 117 AUE_NULL STD { int linux_ipc(l_uint what, l_int arg1, \ l_int arg2, l_int arg3, void *ptr, \ l_long arg5); } 118 AUE_FSYNC NOPROTO { int fsync(int fd); } 119 AUE_SIGRETURN STD { int linux_sigreturn( \ struct l_sigframe *sfp); } 120 AUE_RFORK STD { int linux_clone(l_int flags, void *stack, \ void *parent_tidptr, void *tls, void * child_tidptr); } 121 AUE_SYSCTL STD { int linux_setdomainname(char *name, \ int len); } 122 AUE_NULL STD { int linux_newuname( \ struct l_new_utsname *buf); } 123 AUE_NULL STD { int linux_modify_ldt(l_int func, \ void *ptr, l_ulong bytecount); } 124 AUE_ADJTIME STD { int linux_adjtimex(void); } 125 AUE_MPROTECT STD { int linux_mprotect(caddr_t addr, int len, \ int prot); } 126 AUE_SIGPROCMASK STD { int linux_sigprocmask(l_int how, \ l_osigset_t *mask, l_osigset_t *omask); } 127 AUE_NULL UNIMPL create_module 128 AUE_NULL STD { int linux_init_module(void); } 129 AUE_NULL STD { int linux_delete_module(void); } 130 AUE_NULL UNIMPL get_kernel_syms 131 AUE_QUOTACTL STD { int linux_quotactl(void); } 132 AUE_GETPGID NOPROTO { int getpgid(int pid); } 133 AUE_FCHDIR NOPROTO { int fchdir(int fd); } 134 AUE_BDFLUSH STD { int linux_bdflush(void); } 135 AUE_NULL STD { int linux_sysfs(l_int option, \ l_ulong arg1, l_ulong arg2); } 136 AUE_PERSONALITY STD { int linux_personality(l_uint per); } 137 AUE_NULL UNIMPL afs_syscall 138 AUE_SETFSUID STD { int linux_setfsuid16(l_uid16_t uid); } 139 AUE_SETFSGID STD { int linux_setfsgid16(l_gid16_t gid); } 140 AUE_LSEEK STD { int linux_llseek(l_int fd, l_ulong ohigh, \ l_ulong olow, l_loff_t *res, \ l_uint whence); } 141 AUE_GETDIRENTRIES STD { int linux_getdents(l_uint fd, \ void *dent, l_uint count); } ; 142: newselect 142 AUE_SELECT STD { int linux_select(l_int nfds, \ l_fd_set *readfds, l_fd_set *writefds, \ l_fd_set *exceptfds, \ struct l_timeval *timeout); } 143 AUE_FLOCK NOPROTO { int flock(int fd, int how); } 144 AUE_MSYNC STD { int linux_msync(l_ulong addr, \ l_size_t len, l_int fl); } 145 AUE_READV NOPROTO { int readv(int fd, struct iovec *iovp, \ u_int iovcnt); } 146 AUE_WRITEV NOPROTO { int writev(int fd, struct iovec *iovp, \ u_int iovcnt); } 147 AUE_GETSID STD { int linux_getsid(l_pid_t pid); } 148 AUE_NULL STD { int linux_fdatasync(l_uint fd); } 149 AUE_SYSCTL STD { int linux_sysctl( \ struct l___sysctl_args *args); } 150 AUE_MLOCK NOPROTO { int mlock(const void *addr, size_t len); } 151 AUE_MUNLOCK NOPROTO { int munlock(const void *addr, size_t len); } 152 AUE_MLOCKALL NOPROTO { int mlockall(int how); } 153 AUE_MUNLOCKALL NOPROTO { int munlockall(void); } 154 AUE_SCHED_SETPARAM STD { int linux_sched_setparam(l_pid_t pid, \ struct sched_param *param); } 155 AUE_SCHED_GETPARAM STD { int linux_sched_getparam(l_pid_t pid, \ struct sched_param *param); } 156 AUE_SCHED_SETSCHEDULER STD { int linux_sched_setscheduler( \ l_pid_t pid, l_int policy, \ struct sched_param *param); } 157 AUE_SCHED_GETSCHEDULER STD { int linux_sched_getscheduler( \ l_pid_t pid); } 158 AUE_NULL NOPROTO { int sched_yield(void); } 159 AUE_SCHED_GET_PRIORITY_MAX STD { int linux_sched_get_priority_max( \ l_int policy); } 160 AUE_SCHED_GET_PRIORITY_MIN STD { int linux_sched_get_priority_min( \ l_int policy); } 161 AUE_SCHED_RR_GET_INTERVAL STD { int linux_sched_rr_get_interval( \ l_pid_t pid, 
struct l_timespec *interval); } 162 AUE_NULL STD { int linux_nanosleep( \ const struct l_timespec *rqtp, \ struct l_timespec *rmtp); } 163 AUE_NULL STD { int linux_mremap(l_ulong addr, \ l_ulong old_len, l_ulong new_len, \ l_ulong flags, l_ulong new_addr); } 164 AUE_SETRESUID STD { int linux_setresuid16(l_uid16_t ruid, \ l_uid16_t euid, l_uid16_t suid); } 165 AUE_GETRESUID STD { int linux_getresuid16(l_uid16_t *ruid, \ l_uid16_t *euid, l_uid16_t *suid); } 166 AUE_NULL STD { int linux_vm86(void); } 167 AUE_NULL UNIMPL query_module 168 AUE_POLL NOPROTO { int poll(struct pollfd* fds, \ unsigned int nfds, long timeout); } 169 AUE_NULL UNIMPL nfsservctl 170 AUE_SETRESGID STD { int linux_setresgid16(l_gid16_t rgid, \ l_gid16_t egid, l_gid16_t sgid); } 171 AUE_GETRESGID STD { int linux_getresgid16(l_gid16_t *rgid, \ l_gid16_t *egid, l_gid16_t *sgid); } 172 AUE_PRCTL STD { int linux_prctl(l_int option, l_int arg2, l_int arg3, \ l_int arg4, l_int arg5); } 173 AUE_NULL STD { int linux_rt_sigreturn( \ struct l_ucontext *ucp); } 174 AUE_NULL STD { int linux_rt_sigaction(l_int sig, \ l_sigaction_t *act, l_sigaction_t *oact, \ l_size_t sigsetsize); } 175 AUE_NULL STD { int linux_rt_sigprocmask(l_int how, \ l_sigset_t *mask, l_sigset_t *omask, \ l_size_t sigsetsize); } 176 AUE_NULL STD { int linux_rt_sigpending(l_sigset_t *set, \ l_size_t sigsetsize); } 177 AUE_NULL STD { int linux_rt_sigtimedwait(l_sigset_t *mask, \ l_siginfo_t *ptr, \ struct l_timeval *timeout, \ l_size_t sigsetsize); } 178 AUE_NULL STD { int linux_rt_sigqueueinfo(l_pid_t pid, l_int sig, \ l_siginfo_t *info); } 179 AUE_NULL STD { int linux_rt_sigsuspend( \ l_sigset_t *newset, \ l_size_t sigsetsize); } 180 AUE_PREAD STD { int linux_pread(l_uint fd, char *buf, \ l_size_t nbyte, l_loff_t offset); } 181 AUE_PWRITE STD { int linux_pwrite(l_uint fd, char *buf, \ l_size_t nbyte, l_loff_t offset); } 182 AUE_CHOWN STD { int linux_chown16(char *path, \ l_uid16_t uid, l_gid16_t gid); } 183 AUE_GETCWD STD { int linux_getcwd(char *buf, \ l_ulong bufsize); } 184 AUE_CAPGET STD { int linux_capget(struct l_user_cap_header *hdrp, \ struct l_user_cap_data *datap); } 185 AUE_CAPSET STD { int linux_capset(struct l_user_cap_header *hdrp, \ struct l_user_cap_data *datap); } 186 AUE_NULL STD { int linux_sigaltstack(l_stack_t *uss, \ l_stack_t *uoss); } 187 AUE_SENDFILE STD { int linux_sendfile(void); } 188 AUE_GETPMSG UNIMPL getpmsg 189 AUE_PUTPMSG UNIMPL putpmsg 190 AUE_VFORK STD { int linux_vfork(void); } ; 191: ugetrlimit 191 AUE_GETRLIMIT STD { int linux_getrlimit(l_uint resource, \ struct l_rlimit *rlim); } 192 AUE_MMAP STD { int linux_mmap2(l_ulong addr, l_ulong len, \ l_ulong prot, l_ulong flags, l_ulong fd, \ l_ulong pgoff); } 193 AUE_TRUNCATE STD { int linux_truncate64(char *path, \ l_loff_t length); } 194 AUE_FTRUNCATE STD { int linux_ftruncate64(l_uint fd, \ l_loff_t length); } 195 AUE_STAT STD { int linux_stat64(const char *filename, \ struct l_stat64 *statbuf); } 196 AUE_LSTAT STD { int linux_lstat64(const char *filename, \ struct l_stat64 *statbuf); } 197 AUE_FSTAT STD { int linux_fstat64(l_int fd, \ struct l_stat64 *statbuf); } 198 AUE_LCHOWN STD { int linux_lchown(char *path, l_uid_t uid, \ l_gid_t gid); } 199 AUE_GETUID STD { int linux_getuid(void); } 200 AUE_GETGID STD { int linux_getgid(void); } 201 AUE_GETEUID NOPROTO { int geteuid(void); } 202 AUE_GETEGID NOPROTO { int getegid(void); } 203 AUE_SETREUID NOPROTO { int setreuid(uid_t ruid, uid_t euid); } 204 AUE_SETREGID NOPROTO { int setregid(gid_t rgid, gid_t egid); } 205 AUE_GETGROUPS 
STD { int linux_getgroups(l_int gidsetsize, \ l_gid_t *grouplist); } 206 AUE_SETGROUPS STD { int linux_setgroups(l_int gidsetsize, \ l_gid_t *grouplist); } 207 AUE_FCHOWN NODEF fchown fchown fchown_args int 208 AUE_SETRESUID NOPROTO { int setresuid(uid_t ruid, uid_t euid, \ uid_t suid); } 209 AUE_GETRESUID NOPROTO { int getresuid(uid_t *ruid, uid_t *euid, \ uid_t *suid); } 210 AUE_SETRESGID NOPROTO { int setresgid(gid_t rgid, gid_t egid, \ gid_t sgid); } 211 AUE_GETRESGID NOPROTO { int getresgid(gid_t *rgid, gid_t *egid, \ gid_t *sgid); } 212 AUE_CHOWN STD { int linux_chown(char *path, l_uid_t uid, \ l_gid_t gid); } 213 AUE_SETUID NOPROTO { int setuid(uid_t uid); } 214 AUE_SETGID NOPROTO { int setgid(gid_t gid); } 215 AUE_SETFSUID STD { int linux_setfsuid(l_uid_t uid); } 216 AUE_SETFSGID STD { int linux_setfsgid(l_gid_t gid); } 217 AUE_PIVOT_ROOT STD { int linux_pivot_root(char *new_root, \ char *put_old); } 218 AUE_MINCORE STD { int linux_mincore(l_ulong start, \ l_size_t len, u_char *vec); } 219 AUE_MADVISE NOPROTO { int madvise(void *addr, size_t len, \ int behav); } 220 AUE_GETDIRENTRIES STD { int linux_getdents64(l_uint fd, \ void *dirent, l_uint count); } 221 AUE_FCNTL STD { int linux_fcntl64(l_uint fd, l_uint cmd, \ l_ulong arg); } 222 AUE_NULL UNIMPL 223 AUE_NULL UNIMPL 224 AUE_NULL STD { long linux_gettid(void); } 225 AUE_NULL UNIMPL linux_readahead 226 AUE_NULL STD { int linux_setxattr(void); } 227 AUE_NULL STD { int linux_lsetxattr(void); } 228 AUE_NULL STD { int linux_fsetxattr(void); } 229 AUE_NULL STD { int linux_getxattr(void); } 230 AUE_NULL STD { int linux_lgetxattr(void); } 231 AUE_NULL STD { int linux_fgetxattr(void); } 232 AUE_NULL STD { int linux_listxattr(void); } 233 AUE_NULL STD { int linux_llistxattr(void); } 234 AUE_NULL STD { int linux_flistxattr(void); } 235 AUE_NULL STD { int linux_removexattr(void); } 236 AUE_NULL STD { int linux_lremovexattr(void); } 237 AUE_NULL STD { int linux_fremovexattr(void); } 238 AUE_NULL STD { int linux_tkill(int tid, int sig); } 239 AUE_SENDFILE UNIMPL linux_sendfile64 240 AUE_NULL STD { int linux_sys_futex(void *uaddr, int op, uint32_t val, \ struct l_timespec *timeout, uint32_t *uaddr2, uint32_t val3); } 241 AUE_NULL STD { int linux_sched_setaffinity(l_pid_t pid, l_uint len, \ l_ulong *user_mask_ptr); } 242 AUE_NULL STD { int linux_sched_getaffinity(l_pid_t pid, l_uint len, \ l_ulong *user_mask_ptr); } 243 AUE_NULL STD { int linux_set_thread_area(struct l_user_desc *desc); } 244 AUE_NULL STD { int linux_get_thread_area(struct l_user_desc *desc); } 245 AUE_NULL UNIMPL linux_io_setup 246 AUE_NULL UNIMPL linux_io_destroy 247 AUE_NULL UNIMPL linux_io_getevents 248 AUE_NULL UNIMPL linux_io_submit 249 AUE_NULL UNIMPL linux_io_cancel 250 AUE_NULL STD { int linux_fadvise64(int fd, l_loff_t offset, \ l_size_t len, int advice); } 251 AUE_NULL UNIMPL 252 AUE_EXIT STD { int linux_exit_group(int error_code); } 253 AUE_NULL STD { int linux_lookup_dcookie(void); } 254 AUE_NULL STD { int linux_epoll_create(l_int size); } 255 AUE_NULL STD { int linux_epoll_ctl(l_int epfd, l_int op, l_int fd, \ struct epoll_event *event); } 256 AUE_NULL STD { int linux_epoll_wait(l_int epfd, struct epoll_event *events, \ l_int maxevents, l_int timeout); } 257 AUE_NULL STD { int linux_remap_file_pages(void); } 258 AUE_NULL STD { int linux_set_tid_address(int *tidptr); } 259 AUE_NULL STD { int linux_timer_create(clockid_t clock_id, \ struct sigevent *evp, l_timer_t *timerid); } 260 AUE_NULL STD { int linux_timer_settime(l_timer_t timerid, l_int flags, \ const struct 
itimerspec *new, struct itimerspec *old); } 261 AUE_NULL STD { int linux_timer_gettime(l_timer_t timerid, struct itimerspec *setting); } 262 AUE_NULL STD { int linux_timer_getoverrun(l_timer_t timerid); } 263 AUE_NULL STD { int linux_timer_delete(l_timer_t timerid); } 264 AUE_CLOCK_SETTIME STD { int linux_clock_settime(clockid_t which, struct l_timespec *tp); } 265 AUE_NULL STD { int linux_clock_gettime(clockid_t which, struct l_timespec *tp); } 266 AUE_NULL STD { int linux_clock_getres(clockid_t which, struct l_timespec *tp); } 267 AUE_NULL STD { int linux_clock_nanosleep(clockid_t which, int flags, \ struct l_timespec *rqtp, struct l_timespec *rmtp); } 268 AUE_STATFS STD { int linux_statfs64(char *path, size_t bufsize, struct l_statfs64_buf *buf); } 269 AUE_FSTATFS STD { int linux_fstatfs64(l_uint fd, size_t bufsize, struct l_statfs64_buf *buf); } 270 AUE_NULL STD { int linux_tgkill(int tgid, int pid, int sig); } 271 AUE_UTIMES STD { int linux_utimes(char *fname, \ struct l_timeval *tptr); } 272 AUE_NULL STD { int linux_fadvise64_64(int fd, \ l_loff_t offset, l_loff_t len, \ int advice); } 273 AUE_NULL UNIMPL vserver 274 AUE_NULL STD { int linux_mbind(void); } 275 AUE_NULL STD { int linux_get_mempolicy(void); } 276 AUE_NULL STD { int linux_set_mempolicy(void); } ; Linux 2.6.6: 277 AUE_NULL STD { int linux_mq_open(const char *name, int oflag, mode_t mode, \ struct mq_attr *attr); } 278 AUE_NULL STD { int linux_mq_unlink(const char *name); } 279 AUE_NULL STD { int linux_mq_timedsend(l_mqd_t mqd, const char *msg_ptr, \ size_t msg_len, unsigned int msg_prio, const struct \ l_timespec *abs_timeout); } 280 AUE_NULL STD { int linux_mq_timedreceive(l_mqd_t mqd, char *msg_ptr, \ size_t msg_len, unsigned int msg_prio, const struct \ l_timespec *abs_timeout); } 281 AUE_NULL STD { int linux_mq_notify(l_mqd_t mqd, const struct l_timespec *abs_timeout); } 282 AUE_NULL STD { int linux_mq_getsetattr(l_mqd_t mqd, const struct mq_attr *attr, \ struct mq_attr *oattr); } 283 AUE_NULL STD { int linux_kexec_load(void); } 284 AUE_WAIT6 STD { int linux_waitid(int idtype, l_pid_t id, \ l_siginfo_t *info, int options, \ void *rusage); } 285 AUE_NULL UNIMPL ; Linux 2.6.11: 286 AUE_NULL STD { int linux_add_key(void); } 287 AUE_NULL STD { int linux_request_key(void); } 288 AUE_NULL STD { int linux_keyctl(void); } ; Linux 2.6.13: 289 AUE_NULL STD { int linux_ioprio_set(void); } 290 AUE_NULL STD { int linux_ioprio_get(void); } 291 AUE_NULL STD { int linux_inotify_init(void); } 292 AUE_NULL STD { int linux_inotify_add_watch(void); } 293 AUE_NULL STD { int linux_inotify_rm_watch(void); } ; Linux 2.6.16: 294 AUE_NULL STD { int linux_migrate_pages(void); } 295 AUE_OPEN_RWTC STD { int linux_openat(l_int dfd, const char *filename, \ l_int flags, l_int mode); } 296 AUE_MKDIRAT STD { int linux_mkdirat(l_int dfd, const char *pathname, \ l_int mode); } 297 AUE_MKNODAT STD { int linux_mknodat(l_int dfd, const char *filename, \ l_int mode, l_uint dev); } 298 AUE_FCHOWNAT STD { int linux_fchownat(l_int dfd, const char *filename, \ l_uid16_t uid, l_gid16_t gid, l_int flag); } 299 AUE_FUTIMESAT STD { int linux_futimesat(l_int dfd, char *filename, \ struct l_timeval *utimes); } 300 AUE_FSTATAT STD { int linux_fstatat64(l_int dfd, char *pathname, \ struct l_stat64 *statbuf, l_int flag); } 301 AUE_UNLINKAT STD { int linux_unlinkat(l_int dfd, const char *pathname, \ l_int flag); } 302 AUE_RENAMEAT STD { int linux_renameat(l_int olddfd, const char *oldname, \ l_int newdfd, const char *newname); } 303 AUE_LINKAT STD { int 
linux_linkat(l_int olddfd, const char *oldname, \ l_int newdfd, const char *newname, l_int flag); } 304 AUE_SYMLINKAT STD { int linux_symlinkat(const char *oldname, l_int newdfd, \ const char *newname); } 305 AUE_READLINKAT STD { int linux_readlinkat(l_int dfd, const char *path, \ char *buf, l_int bufsiz); } 306 AUE_FCHMODAT STD { int linux_fchmodat(l_int dfd, const char *filename, \ l_mode_t mode); } 307 AUE_FACCESSAT STD { int linux_faccessat(l_int dfd, const char *filename, \ l_int amode); } 308 AUE_SELECT STD { int linux_pselect6(l_int nfds, l_fd_set *readfds, \ l_fd_set *writefds, l_fd_set *exceptfds, \ struct l_timespec *tsp, l_uintptr_t *sig); } 309 AUE_POLL STD { int linux_ppoll(struct pollfd *fds, uint32_t nfds, \ struct l_timespec *tsp, l_sigset_t *sset, l_size_t ssize); } 310 AUE_NULL STD { int linux_unshare(void); } ; Linux 2.6.17: 311 AUE_NULL STD { int linux_set_robust_list(struct linux_robust_list_head *head, \ l_size_t len); } 312 AUE_NULL STD { int linux_get_robust_list(l_int pid, \ struct linux_robust_list_head **head, l_size_t *len); } 313 AUE_NULL STD { int linux_splice(void); } 314 AUE_NULL STD { int linux_sync_file_range(void); } 315 AUE_NULL STD { int linux_tee(void); } 316 AUE_NULL STD { int linux_vmsplice(void); } ; Linux 2.6.18: 317 AUE_NULL STD { int linux_move_pages(void); } ; Linux 2.6.19: 318 AUE_NULL STD { int linux_getcpu(void); } 319 AUE_NULL STD { int linux_epoll_pwait(l_int epfd, struct epoll_event *events, \ l_int maxevents, l_int timeout, l_sigset_t *mask, \ l_size_t sigsetsize); } ; Linux 2.6.22: 320 AUE_FUTIMESAT STD { int linux_utimensat(l_int dfd, const char *pathname, \ const struct l_timespec *times, l_int flags); } 321 AUE_NULL STD { int linux_signalfd(void); } 322 AUE_NULL STD { int linux_timerfd_create(l_int clockid, l_int flags); } 323 AUE_NULL STD { int linux_eventfd(l_uint initval); } ; Linux 2.6.23: 324 AUE_NULL STD { int linux_fallocate(l_int fd, l_int mode, \ l_loff_t offset, l_loff_t len); } ; Linux 2.6.25: 325 AUE_NULL STD { int linux_timerfd_settime(l_int fd, l_int flags, \ const struct l_itimerspec *new_value, \ struct l_itimerspec *old_value); } 326 AUE_NULL STD { int linux_timerfd_gettime(l_int fd, \ struct l_itimerspec *old_value); } ; Linux 2.6.27: 327 AUE_NULL STD { int linux_signalfd4(void); } 328 AUE_NULL STD { int linux_eventfd2(l_uint initval, l_int flags); } 329 AUE_NULL STD { int linux_epoll_create1(l_int flags); } 330 AUE_NULL STD { int linux_dup3(l_int oldfd, \ l_int newfd, l_int flags); } 331 AUE_NULL STD { int linux_pipe2(l_int *pipefds, l_int flags); } 332 AUE_NULL STD { int linux_inotify_init1(void); } ; Linux 2.6.30: 333 AUE_NULL STD { int linux_preadv(l_ulong fd, \ struct iovec *vec, l_ulong vlen, \ l_ulong pos_l, l_ulong pos_h); } 334 AUE_NULL STD { int linux_pwritev(l_ulong fd, \ struct iovec *vec, l_ulong vlen, \ l_ulong pos_l, l_ulong pos_h); } ; Linux 2.6.31: 335 AUE_NULL STD { int linux_rt_tgsigqueueinfo(l_pid_t tgid, \ l_pid_t tid, l_int sig, l_siginfo_t *uinfo); } 336 AUE_NULL STD { int linux_perf_event_open(void); } ; Linux 2.6.33: 337 AUE_NULL STD { int linux_recvmmsg(l_int s, \ struct l_mmsghdr *msg, l_uint vlen, \ l_uint flags, struct l_timespec *timeout); } 338 AUE_NULL STD { int linux_fanotify_init(void); } 339 AUE_NULL STD { int linux_fanotify_mark(void); } ; Linux 2.6.36: 340 AUE_NULL STD { int linux_prlimit64(l_pid_t pid, \ l_uint resource, \ struct rlimit *new, \ struct rlimit *old); } ; Linux 2.6.39: 341 AUE_NULL STD { int linux_name_to_handle_at(void); } 342 AUE_NULL STD { int 
linux_open_by_handle_at(void); } 343 AUE_NULL STD { int linux_clock_adjtime(void); } 344 AUE_SYNC STD { int linux_syncfs(l_int fd); } ; Linux 3.0: 345 AUE_NULL STD { int linux_sendmmsg(l_int s, \ struct l_mmsghdr *msg, l_uint vlen, \ l_uint flags); } 346 AUE_NULL STD { int linux_setns(void); } ; Linux 3.2 (glibc 2.15): 347 AUE_NULL STD { int linux_process_vm_readv(l_pid_t pid, \ const struct iovec *lvec, l_ulong liovcnt, \ const struct iovec *rvec, l_ulong riovcnt, \ l_ulong flags); } 348 AUE_NULL STD { int linux_process_vm_writev(l_pid_t pid, \ const struct iovec *lvec, l_ulong liovcnt, \ const struct iovec *rvec, l_ulong riovcnt, \ l_ulong flags); } ; Linux 3.5 (no glibc wrapper): 349 AUE_NULL STD { int linux_kcmp(l_pid_t pid1, l_pid_t pid2, \ l_int type, l_ulong idx1, l_ulong idx); } ; Linux 3.8 (no glibc wrapper): 350 AUE_NULL STD { int linux_finit_module(l_int fd, \ const char *uargs, l_int flags); } ; Linux 3.14: 351 AUE_NULL STD { int linux_sched_setattr(l_pid_t pid, \ void *attr, l_uint flags); } 352 AUE_NULL STD { int linux_sched_getattr(l_pid_t pid, \ void *attr, l_uint size, l_uint flags); } ; Linux 3.15: 353 AUE_NULL STD { int linux_renameat2(l_int oldfd, \ const char *oldname, l_int newfd, \ const char *newname, unsigned int flags); } ; Linux 3.17: 354 AUE_NULL STD { int linux_seccomp(l_uint op, l_uint flags, \ const char *uargs); } 355 AUE_NULL STD { int linux_getrandom(char *buf, \ l_size_t count, l_uint flags); } 356 AUE_NULL STD { int linux_memfd_create(const char *uname_ptr, \ l_uint flags); } ; Linux 3.18: 357 AUE_NULL STD { int linux_bpf(l_int cmd, void *attr, \ l_uint size); } ; Linux 3.19: 358 AUE_NULL STD { int linux_execveat(l_int dfd, \ const char *filename, const char **argv, \ const char **envp, l_int flags); } ; Linux 4.3: sockets now direct system calls: 359 AUE_SOCKET STD { int linux_socket(l_int domain, l_int type, \ l_int protocol); } 360 AUE_SOCKETPAIR STD { int linux_socketpair(l_int domain, \ l_int type, l_int protocol, l_uintptr_t rsv); } 361 AUE_BIND STD { int linux_bind(l_int s, l_uintptr_t name, \ l_int namelen); } 362 AUE_CONNECT STD { int linux_connect(l_int s, l_uintptr_t name, \ l_int namelen); } 363 AUE_LISTEN STD { int linux_listen(l_int s, l_int backlog); } 364 AUE_ACCEPT STD { int linux_accept4(l_int s, l_uintptr_t addr, \ l_uintptr_t namelen, l_int flags); } 365 AUE_GETSOCKOPT STD { int linux_getsockopt(l_int s, l_int level, \ l_int optname, l_uintptr_t optval, \ l_uintptr_t optlen); } 366 AUE_SETSOCKOPT STD { int linux_setsockopt(l_int s, l_int level, \ l_int optname, l_uintptr_t optval, \ l_int optlen); } 367 AUE_GETSOCKNAME STD { int linux_getsockname(l_int s, \ l_uintptr_t addr, l_uintptr_t namelen); } 368 AUE_GETPEERNAME STD { int linux_getpeername(l_int s, \ l_uintptr_t addr, l_uintptr_t namelen); } 369 AUE_SENDTO STD { int linux_sendto(l_int s, l_uintptr_t msg, \ l_int len, l_int flags, l_uintptr_t to, \ l_int tolen); } 370 AUE_SENDMSG STD { int linux_sendmsg(l_int s, l_uintptr_t msg, \ l_int flags); } 371 AUE_RECVFROM STD { int linux_recvfrom(l_int s, l_uintptr_t buf, \ l_size_t len, l_int flags, l_uintptr_t from, \ l_uintptr_t fromlen); } 372 AUE_RECVMSG STD { int linux_recvmsg(l_int s, l_uintptr_t msg, \ l_int flags); } 373 AUE_NULL STD { int linux_shutdown(l_int s, l_int how); } ; Linux 4.2: 374 AUE_NULL STD { int linux_userfaultfd(l_int flags); } ; Linux 4.3: 375 AUE_NULL STD { int linux_membarrier(l_int cmd, l_int flags); } ; Linux 4.4: 376 AUE_NULL STD { int linux_mlock2(l_ulong start, l_size_t len, \ l_int flags); } ; Linux 
4.5: 377 AUE_NULL STD { int linux_copy_file_range(l_int fd_in, \ l_loff_t *off_in, l_int fd_out, \ l_loff_t *off_out, l_size_t len, \ l_uint flags); } ; Linux 4.6: 378 AUE_NULL STD { int linux_preadv2(l_ulong fd, \ const struct iovec *vec, l_ulong vlen, \ l_ulong pos_l, l_ulong pos_h, l_int flags); } 379 AUE_NULL STD { int linux_pwritev2(l_ulong fd, \ const struct iovec *vec, l_ulong vlen, \ l_ulong pos_l, l_ulong pos_h, l_int flags); } ; Linux 4.8: 380 AUE_NULL STD { int linux_pkey_mprotect(l_ulong start, \ l_size_t len, l_ulong prot, l_int pkey); } 381 AUE_NULL STD { int linux_pkey_alloc(l_ulong flags, \ l_ulong init_val); } 382 AUE_NULL STD { int linux_pkey_free(l_int pkey); } ; please, keep this line at the end. 383 AUE_NULL UNIMPL nosys ; vim: syntax=off Index: head/sys/i386/pci/pci_pir.c =================================================================== --- head/sys/i386/pci/pci_pir.c (revision 329872) +++ head/sys/i386/pci/pci_pir.c (revision 329873) @@ -1,744 +1,744 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * Copyright (c) 2004, John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define NUM_ISA_INTERRUPTS 16 /* * A link device. Loosely based on the ACPI PCI link device. This doesn't * try to support priorities for different ISA interrupts. 
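 * Each link stands for one interrupt-router input shared by one or more
 * PCI intpins: pl_irqmask is the set of ISA IRQs the link may be routed
 * to, and pl_references counts the intpins that use it.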
*/ struct pci_link { TAILQ_ENTRY(pci_link) pl_links; uint8_t pl_id; uint8_t pl_irq; uint16_t pl_irqmask; int pl_references; int pl_routed; }; struct pci_link_lookup { struct pci_link **pci_link_ptr; int bus; int device; int pin; }; struct pci_dev_lookup { uint8_t link; int bus; int device; int pin; }; typedef void pir_entry_handler(struct PIR_entry *entry, struct PIR_intpin* intpin, void *arg); static void pci_print_irqmask(u_int16_t irqs); static int pci_pir_biosroute(int bus, int device, int func, int pin, int irq); static int pci_pir_choose_irq(struct pci_link *pci_link, int irqmask); static void pci_pir_create_links(struct PIR_entry *entry, struct PIR_intpin *intpin, void *arg); static void pci_pir_dump_links(void); static struct pci_link *pci_pir_find_link(uint8_t link_id); static void pci_pir_find_link_handler(struct PIR_entry *entry, struct PIR_intpin *intpin, void *arg); static void pci_pir_initial_irqs(struct PIR_entry *entry, struct PIR_intpin *intpin, void *arg); static void pci_pir_parse(void); static uint8_t pci_pir_search_irq(int bus, int device, int pin); static int pci_pir_valid_irq(struct pci_link *pci_link, int irq); static void pci_pir_walk_table(pir_entry_handler *handler, void *arg); static MALLOC_DEFINE(M_PIR, "$PIR", "$PIR structures"); static struct PIR_table *pci_route_table; static device_t pir_device; static int pci_route_count, pir_bios_irqs, pir_parsed; static TAILQ_HEAD(, pci_link) pci_links; static int pir_interrupt_weight[NUM_ISA_INTERRUPTS]; /* sysctl vars */ SYSCTL_DECL(_hw_pci); /* XXX this likely should live in a header file */ /* IRQs 3, 4, 5, 6, 7, 9, 10, 11, 12, 14, 15 */ #define PCI_IRQ_OVERRIDE_MASK 0xdef8 static uint32_t pci_irq_override_mask = PCI_IRQ_OVERRIDE_MASK; SYSCTL_INT(_hw_pci, OID_AUTO, irq_override_mask, CTLFLAG_RDTUN, &pci_irq_override_mask, PCI_IRQ_OVERRIDE_MASK, "Mask of allowed irqs to try to route when it has no good clue about\n" "which irqs it should use."); /* * Look for the interrupt routing table. * * We use PCI BIOS's PIR table if it's available. $PIR is the standard way * to do this. Sadly, some machines are not standards conforming and have * _PIR instead. We shrug and cope by looking for both. */ void pci_pir_open(void) { struct PIR_table *pt; uint32_t sigaddr; int i; uint8_t ck, *cv; /* Don't try if we've already found a table. */ if (pci_route_table != NULL) return; /* Look for $PIR and then _PIR. */ sigaddr = bios_sigsearch(0, "$PIR", 4, 16, 0); if (sigaddr == 0) sigaddr = bios_sigsearch(0, "_PIR", 4, 16, 0); if (sigaddr == 0) return; /* If we found something, check the checksum and length. */ /* XXX - Use pmap_mapdev()? */ pt = (struct PIR_table *)(uintptr_t)BIOS_PADDRTOVADDR(sigaddr); if (pt->pt_header.ph_length <= sizeof(struct PIR_header)) return; for (cv = (u_int8_t *)pt, ck = 0, i = 0; i < (pt->pt_header.ph_length); i++) ck += cv[i]; if (ck != 0) return; /* Ok, we've got a valid table. */ pci_route_table = pt; pci_route_count = (pt->pt_header.ph_length - sizeof(struct PIR_header)) / sizeof(struct PIR_entry); } /* * Find the pci_link structure for a given link ID. */ static struct pci_link * pci_pir_find_link(uint8_t link_id) { struct pci_link *pci_link; TAILQ_FOREACH(pci_link, &pci_links, pl_links) { if (pci_link->pl_id == link_id) return (pci_link); } return (NULL); } /* * Find the link device associated with a PCI device in the table. 
*/ static void pci_pir_find_link_handler(struct PIR_entry *entry, struct PIR_intpin *intpin, void *arg) { struct pci_link_lookup *lookup; lookup = (struct pci_link_lookup *)arg; if (entry->pe_bus == lookup->bus && entry->pe_device == lookup->device && intpin - entry->pe_intpin == lookup->pin) *lookup->pci_link_ptr = pci_pir_find_link(intpin->link); } /* * Check to see if a possible IRQ setting is valid. */ static int pci_pir_valid_irq(struct pci_link *pci_link, int irq) { if (!PCI_INTERRUPT_VALID(irq)) return (0); return (pci_link->pl_irqmask & (1 << irq)); } /* * Walk the $PIR executing the worker function for each valid intpin entry * in the table. The handler is passed a pointer to both the entry and * the intpin in the entry. */ static void pci_pir_walk_table(pir_entry_handler *handler, void *arg) { struct PIR_entry *entry; struct PIR_intpin *intpin; int i, pin; entry = &pci_route_table->pt_entry[0]; for (i = 0; i < pci_route_count; i++, entry++) { intpin = &entry->pe_intpin[0]; for (pin = 0; pin < 4; pin++, intpin++) if (intpin->link != 0) handler(entry, intpin, arg); } } static void pci_pir_create_links(struct PIR_entry *entry, struct PIR_intpin *intpin, void *arg) { struct pci_link *pci_link; pci_link = pci_pir_find_link(intpin->link); if (pci_link != NULL) { pci_link->pl_references++; if (intpin->irqs != pci_link->pl_irqmask) { if (bootverbose) printf( "$PIR: Entry %d.%d.INT%c has different mask for link %#x, merging\n", entry->pe_bus, entry->pe_device, (intpin - entry->pe_intpin) + 'A', pci_link->pl_id); pci_link->pl_irqmask &= intpin->irqs; } } else { pci_link = malloc(sizeof(struct pci_link), M_PIR, M_WAITOK); pci_link->pl_id = intpin->link; pci_link->pl_irqmask = intpin->irqs; pci_link->pl_irq = PCI_INVALID_IRQ; pci_link->pl_references = 1; pci_link->pl_routed = 0; TAILQ_INSERT_TAIL(&pci_links, pci_link, pl_links); } } /* * Look to see if any of the functions on the PCI device at bus/device have * an interrupt routed to intpin 'pin' by the BIOS. */ static uint8_t pci_pir_search_irq(int bus, int device, int pin) { uint32_t value; uint8_t func, maxfunc; /* See if we have a valid device at function 0. */ value = pci_cfgregread(bus, device, 0, PCIR_HDRTYPE, 1); if ((value & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) return (PCI_INVALID_IRQ); if (value & PCIM_MFDEV) maxfunc = PCI_FUNCMAX; else maxfunc = 0; /* Scan all possible functions at this device. */ for (func = 0; func <= maxfunc; func++) { value = pci_cfgregread(bus, device, func, PCIR_DEVVENDOR, 4); if (value == 0xffffffff) continue; value = pci_cfgregread(bus, device, func, PCIR_INTPIN, 1); /* * See if it uses the pin in question. Note that the passed * in pin uses 0 for A, .. 3 for D whereas the intpin * register uses 0 for no interrupt, 1 for A, .. 4 for D. */ if (value != pin + 1) continue; value = pci_cfgregread(bus, device, func, PCIR_INTLINE, 1); if (bootverbose) printf( "$PIR: Found matching pin for %d.%d.INT%c at func %d: %d\n", bus, device, pin + 'A', func, value); if (value != PCI_INVALID_IRQ) return (value); } return (PCI_INVALID_IRQ); } /* * Try to initialize IRQ based on this device's IRQ. */ static void pci_pir_initial_irqs(struct PIR_entry *entry, struct PIR_intpin *intpin, void *arg) { struct pci_link *pci_link; uint8_t irq, pin; pin = intpin - entry->pe_intpin; pci_link = pci_pir_find_link(intpin->link); irq = pci_pir_search_irq(entry->pe_bus, entry->pe_device, pin); if (irq == PCI_INVALID_IRQ || irq == pci_link->pl_irq) return; /* Don't trust any BIOS IRQs greater than 15.
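 * (Editorial aside, not in the original source: the $PIR intpin IRQ
 * bitmap is a 16-bit field, so only ISA IRQs 0-15 are expressible here
 * and anything larger can never be a routable ISA IRQ.)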
*/ if (irq >= NUM_ISA_INTERRUPTS) { printf( "$PIR: Ignoring invalid BIOS IRQ %d from %d.%d.INT%c for link %#x\n", irq, entry->pe_bus, entry->pe_device, pin + 'A', pci_link->pl_id); return; } /* * If we don't have an IRQ for this link yet, then we trust the * BIOS, even if it seems invalid from the $PIR entries. */ if (pci_link->pl_irq == PCI_INVALID_IRQ) { if (!pci_pir_valid_irq(pci_link, irq)) printf( "$PIR: Using invalid BIOS IRQ %d from %d.%d.INT%c for link %#x\n", irq, entry->pe_bus, entry->pe_device, pin + 'A', pci_link->pl_id); pci_link->pl_irq = irq; pci_link->pl_routed = 1; return; } /* * We have an IRQ and it doesn't match the current IRQ for this * link. If the new IRQ is invalid, then warn about it and ignore * it. If the old IRQ is invalid and the new IRQ is valid, then * prefer the new IRQ instead. If both IRQs are valid, then just * use the first one. Note that if we ever get into this situation * we are having to guess which setting the BIOS actually routed. * Perhaps we should just give up instead. */ if (!pci_pir_valid_irq(pci_link, irq)) { printf( "$PIR: BIOS IRQ %d for %d.%d.INT%c is not valid for link %#x\n", irq, entry->pe_bus, entry->pe_device, pin + 'A', pci_link->pl_id); } else if (!pci_pir_valid_irq(pci_link, pci_link->pl_irq)) { printf( "$PIR: Preferring valid BIOS IRQ %d from %d.%d.INT%c for link %#x to IRQ %d\n", irq, entry->pe_bus, entry->pe_device, pin + 'A', pci_link->pl_id, pci_link->pl_irq); pci_link->pl_irq = irq; pci_link->pl_routed = 1; } else printf( "$PIR: BIOS IRQ %d for %d.%d.INT%c does not match link %#x irq %d\n", irq, entry->pe_bus, entry->pe_device, pin + 'A', pci_link->pl_id, pci_link->pl_irq); } /* * Parse $PIR to enumerate link devices and attempt to determine their * initial state. This could perhaps be cleaner if we had drivers for the * various interrupt routers as they could read the initial IRQ for each * link. */ static void pci_pir_parse(void) { char tunable_buffer[64]; struct pci_link *pci_link; int i, irq; /* Only parse once. */ if (pir_parsed) return; pir_parsed = 1; /* Enumerate link devices. */ TAILQ_INIT(&pci_links); pci_pir_walk_table(pci_pir_create_links, NULL); if (bootverbose) { printf("$PIR: Links after initial probe:\n"); pci_pir_dump_links(); } /* * Check to see if the BIOS has already routed any of the links by * checking each device connected to each link to see if it has a * valid IRQ. */ pci_pir_walk_table(pci_pir_initial_irqs, NULL); if (bootverbose) { printf("$PIR: Links after initial IRQ discovery:\n"); pci_pir_dump_links(); } /* * Allow the user to override the IRQ for a given link device. We * allow invalid IRQs to be specified but warn about them. An IRQ * of 255 or 0 clears any preset IRQ. */ i = 0; TAILQ_FOREACH(pci_link, &pci_links, pl_links) { snprintf(tunable_buffer, sizeof(tunable_buffer), "hw.pci.link.%#x.irq", pci_link->pl_id); if (getenv_int(tunable_buffer, &irq) == 0) continue; if (irq == 0) irq = PCI_INVALID_IRQ; if (irq != PCI_INVALID_IRQ && !pci_pir_valid_irq(pci_link, irq) && bootverbose) printf( "$PIR: Warning, IRQ %d for link %#x is not listed as valid\n", irq, pci_link->pl_id); pci_link->pl_routed = 0; pci_link->pl_irq = irq; i = 1; } if (bootverbose && i) { printf("$PIR: Links after tunable overrides:\n"); pci_pir_dump_links(); } /* * Build initial interrupt weights as well as bitmap of "known-good" * IRQs that the BIOS has already used for PCI link devices. 
*/ TAILQ_FOREACH(pci_link, &pci_links, pl_links) { if (!PCI_INTERRUPT_VALID(pci_link->pl_irq)) continue; pir_bios_irqs |= 1 << pci_link->pl_irq; pir_interrupt_weight[pci_link->pl_irq] += pci_link->pl_references; } if (bootverbose) { printf("$PIR: IRQs used by BIOS: "); pci_print_irqmask(pir_bios_irqs); printf("\n"); printf("$PIR: Interrupt Weights:\n[ "); for (i = 0; i < NUM_ISA_INTERRUPTS; i++) printf(" %3d", i); printf(" ]\n[ "); for (i = 0; i < NUM_ISA_INTERRUPTS; i++) printf(" %3d", pir_interrupt_weight[i]); printf(" ]\n"); } } /* * Use the PCI BIOS to route an interrupt for a given device. * * Input: * AX = PCIBIOS_ROUTE_INTERRUPT * BH = bus * BL = device [7:3] / function [2:0] * CH = IRQ * CL = Interrupt Pin (0x0A = A, ... 0x0D = D) */ static int pci_pir_biosroute(int bus, int device, int func, int pin, int irq) { struct bios_regs args; args.eax = PCIBIOS_ROUTE_INTERRUPT; args.ebx = (bus << 8) | (device << 3) | func; args.ecx = (irq << 8) | (0xa + pin); return (bios32(&args, PCIbios.ventry, GSEL(GCODE_SEL, SEL_KPL))); } /* * Route a PCI interrupt using a link device from the $PIR. */ int pci_pir_route_interrupt(int bus, int device, int func, int pin) { struct pci_link_lookup lookup; struct pci_link *pci_link; int error, irq; if (pci_route_table == NULL) return (PCI_INVALID_IRQ); /* Lookup link device for this PCI device/pin. */ pci_link = NULL; lookup.bus = bus; lookup.device = device; lookup.pin = pin - 1; lookup.pci_link_ptr = &pci_link; pci_pir_walk_table(pci_pir_find_link_handler, &lookup); if (pci_link == NULL) { printf("$PIR: No matching entry for %d.%d.INT%c\n", bus, device, pin - 1 + 'A'); return (PCI_INVALID_IRQ); } /* * Pick a new interrupt if we don't have one already. We look * for an interrupt from several different sets. First, if * this link only has one valid IRQ, use that. Second, we * check the set of PCI only interrupts from the $PIR. Third, * we check the set of known-good interrupts that the BIOS has * already used. Lastly, we check the "all possible valid * IRQs" set. */ if (!PCI_INTERRUPT_VALID(pci_link->pl_irq)) { if (pci_link->pl_irqmask != 0 && powerof2(pci_link->pl_irqmask)) irq = ffs(pci_link->pl_irqmask) - 1; else irq = pci_pir_choose_irq(pci_link, pci_route_table->pt_header.ph_pci_irqs); if (!PCI_INTERRUPT_VALID(irq)) irq = pci_pir_choose_irq(pci_link, pir_bios_irqs); if (!PCI_INTERRUPT_VALID(irq)) irq = pci_pir_choose_irq(pci_link, pci_irq_override_mask); if (!PCI_INTERRUPT_VALID(irq)) { if (bootverbose) printf( "$PIR: Failed to route interrupt for %d:%d INT%c\n", bus, device, pin - 1 + 'A'); return (PCI_INVALID_IRQ); } pci_link->pl_irq = irq; } /* Ask the BIOS to route this IRQ if we haven't done so already. */ if (!pci_link->pl_routed) { error = pci_pir_biosroute(bus, device, func, pin - 1, pci_link->pl_irq); /* Ignore errors when routing a unique interrupt. */ if (error && !powerof2(pci_link->pl_irqmask)) { printf("$PIR: ROUTE_INTERRUPT failed.\n"); return (PCI_INVALID_IRQ); } pci_link->pl_routed = 1; /* Ensure the interrupt is set to level/low trigger. */ KASSERT(pir_device != NULL, ("missing pir device")); BUS_CONFIG_INTR(pir_device, pci_link->pl_irq, INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW); } if (bootverbose) printf("$PIR: %d:%d INT%c routed to irq %d\n", bus, device, pin - 1 + 'A', pci_link->pl_irq); return (pci_link->pl_irq); } /* * Try to pick an interrupt for the specified link from the interrupts * set in the mask. 
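 * As a worked illustration (editorial aside with hypothetical numbers):
 * if pl_irqmask covers IRQs 5, 10 and 11 (0x0c20) and their accumulated
 * weights are 2, 0 and 1 respectively, the scan below settles on IRQ 10,
 * the candidate shared by the fewest other links.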
*/ static int pci_pir_choose_irq(struct pci_link *pci_link, int irqmask) { int i, irq, realmask; /* XXX: Need to have a #define of known bad IRQs to also mask out? */ realmask = pci_link->pl_irqmask & irqmask; if (realmask == 0) return (PCI_INVALID_IRQ); /* Find IRQ with lowest weight. */ irq = PCI_INVALID_IRQ; for (i = 0; i < NUM_ISA_INTERRUPTS; i++) { if (!(realmask & 1 << i)) continue; if (irq == PCI_INVALID_IRQ || pir_interrupt_weight[i] < pir_interrupt_weight[irq]) irq = i; } if (bootverbose && PCI_INTERRUPT_VALID(irq)) { printf("$PIR: Found IRQ %d for link %#x from ", irq, pci_link->pl_id); pci_print_irqmask(realmask); printf("\n"); } return (irq); } static void pci_print_irqmask(u_int16_t irqs) { int i, first; if (irqs == 0) { printf("none"); return; } first = 1; for (i = 0; i < 16; i++, irqs >>= 1) if (irqs & 1) { if (!first) printf(" "); else first = 0; printf("%d", i); } } /* * Display link devices. */ static void pci_pir_dump_links(void) { struct pci_link *pci_link; printf("Link IRQ Rtd Ref IRQs\n"); TAILQ_FOREACH(pci_link, &pci_links, pl_links) { printf("%#4x %3d %c %3d ", pci_link->pl_id, pci_link->pl_irq, pci_link->pl_routed ? 'Y' : 'N', pci_link->pl_references); pci_print_irqmask(pci_link->pl_irqmask); printf("\n"); } } /* * See if any interrupts for a given PCI bus are routed in the PIR. Don't * even bother looking if the BIOS doesn't support routing anyways. If we * are probing a PCI-PCI bridge, then require_parse will be true and we should * only succeed if a host-PCI bridge has already attached and parsed the PIR. */ int pci_pir_probe(int bus, int require_parse) { int i; if (pci_route_table == NULL || (require_parse && !pir_parsed)) return (0); for (i = 0; i < pci_route_count; i++) if (pci_route_table->pt_entry[i].pe_bus == bus) return (1); return (0); } /* - * The driver for the new-bus psuedo device pir0 for the $PIR table. + * The driver for the new-bus pseudo device pir0 for the $PIR table. */ static int pir_probe(device_t dev) { char buf[64]; snprintf(buf, sizeof(buf), "PCI Interrupt Routing Table: %d Entries", pci_route_count); device_set_desc_copy(dev, buf); return (0); } static int pir_attach(device_t dev) { pci_pir_parse(); KASSERT(pir_device == NULL, ("Multiple pir devices")); pir_device = dev; return (0); } static void pir_resume_find_device(struct PIR_entry *entry, struct PIR_intpin *intpin, void *arg) { struct pci_dev_lookup *pd; pd = (struct pci_dev_lookup *)arg; if (intpin->link != pd->link || pd->bus != -1) return; pd->bus = entry->pe_bus; pd->device = entry->pe_device; pd->pin = intpin - entry->pe_intpin; } static int pir_resume(device_t dev) { struct pci_dev_lookup pd; struct pci_link *pci_link; int error; /* Ask the BIOS to re-route each link that was already routed. 
*/ TAILQ_FOREACH(pci_link, &pci_links, pl_links) { if (!PCI_INTERRUPT_VALID(pci_link->pl_irq)) { KASSERT(!pci_link->pl_routed, ("link %#x is routed but has invalid PCI IRQ", pci_link->pl_id)); continue; } if (pci_link->pl_routed) { pd.bus = -1; pd.link = pci_link->pl_id; pci_pir_walk_table(pir_resume_find_device, &pd); KASSERT(pd.bus != -1, ("did not find matching entry for link %#x in the $PIR table", pci_link->pl_id)); if (bootverbose) device_printf(dev, "Using %d.%d.INT%c to route link %#x to IRQ %d\n", pd.bus, pd.device, pd.pin + 'A', pci_link->pl_id, pci_link->pl_irq); error = pci_pir_biosroute(pd.bus, pd.device, 0, pd.pin, pci_link->pl_irq); if (error) device_printf(dev, "ROUTE_INTERRUPT on resume for link %#x failed.\n", pci_link->pl_id); } } return (0); } static device_method_t pir_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pir_probe), DEVMETHOD(device_attach, pir_attach), DEVMETHOD(device_resume, pir_resume), { 0, 0 } }; static driver_t pir_driver = { "pir", pir_methods, 1, }; static devclass_t pir_devclass; DRIVER_MODULE(pir, legacy, pir_driver, pir_devclass, 0, 0); Index: head/sys/kern/syscalls.master =================================================================== --- head/sys/kern/syscalls.master (revision 329872) +++ head/sys/kern/syscalls.master (revision 329873) @@ -1,1033 +1,1033 @@ $FreeBSD$ ; from: @(#)syscalls.master 8.2 (Berkeley) 1/13/94 ; ; System call name/number master file. ; Processed to create init_sysent.c, syscalls.c and syscall.h. ; Columns: number audit type name alt{name,tag,rtyp}/comments ; number system call number, must be in order ; audit the audit event associated with the system call ; A value of AUE_NULL means no auditing, but it also means that ; there is no audit event for the call at this time. For the ; case where the event exists, but we don't want auditing, the ; event should be #defined to AUE_NULL in audit_kevents.h. ; type one of STD, OBSOL, UNIMPL, COMPAT, COMPAT4, COMPAT6, ; COMPAT7, COMPAT11, NODEF, NOARGS, NOPROTO, NOSTD ; The COMPAT* options may be combined with one or more NO* ; options separated by '|' with no spaces (e.g. COMPAT|NOARGS) -; name psuedo-prototype of syscall routine +; name pseudo-prototype of syscall routine ; If one of the following alts is different, then all appear: ; altname name of system call if different ; alttag name of args struct tag if different from [o]`name'"_args" ; altrtyp return type if not int (bogus - syscalls always return int) ; for UNIMPL/OBSOL, name continues with comments ; types: ; STD always included ; COMPAT included on COMPAT #ifdef ; COMPAT4 included on COMPAT_FREEBSD4 #ifdef (FreeBSD 4 compat) ; COMPAT6 included on COMPAT_FREEBSD6 #ifdef (FreeBSD 6 compat) ; COMPAT7 included on COMPAT_FREEBSD7 #ifdef (FreeBSD 7 compat) ; COMPAT10 included on COMPAT_FREEBSD10 #ifdef (FreeBSD 10 compat) ; COMPAT11 included on COMPAT_FREEBSD11 #ifdef (FreeBSD 11 compat) ; OBSOL obsolete, not included in system, only specifies name ; UNIMPL not implemented, placeholder only ; NOSTD implemented but as a lkm that can be statically ; compiled in; sysent entry will be filled with lkmressys ; so the SYSCALL_MODULE macro works ; NOARGS same as STD except do not create structure in sys/sysproto.h ; NODEF same as STD except only have the entry in the syscall table ; added. Meaning - do not create structure or function ; prototype in sys/sysproto.h ; NOPROTO same as STD except do not create structure or ; function prototype in sys/sysproto.h. Does add a ; definition to syscall.h besides adding a sysent.
; NOTSTATIC syscall is loadable ; ; Please copy any additions and changes to the following compatibility tables: ; sys/compat/freebsd32/syscalls.master ; #ifdef's, etc. may be included, and are copied to the output files. #include #include #include ; Reserved/unimplemented system calls in the range 0-150 inclusive ; are reserved for use in future Berkeley releases. ; Additional system calls implemented in vendor and other ; redistributions should be placed in the reserved range at the end ; of the current calls. 0 AUE_NULL STD { int nosys(void); } syscall nosys_args int 1 AUE_EXIT STD { void sys_exit(int rval); } exit \ sys_exit_args void 2 AUE_FORK STD { int fork(void); } 3 AUE_READ STD { ssize_t read(int fd, void *buf, \ size_t nbyte); } 4 AUE_WRITE STD { ssize_t write(int fd, const void *buf, \ size_t nbyte); } 5 AUE_OPEN_RWTC STD { int open(char *path, int flags, int mode); } ; XXX should be { int open(const char *path, int flags, ...); } ; but we're not ready for `const' or varargs. ; XXX man page says `mode_t mode'. 6 AUE_CLOSE STD { int close(int fd); } 7 AUE_WAIT4 STD { int wait4(int pid, int *status, \ int options, struct rusage *rusage); } 8 AUE_CREAT COMPAT { int creat(char *path, int mode); } 9 AUE_LINK STD { int link(char *path, char *link); } 10 AUE_UNLINK STD { int unlink(char *path); } 11 AUE_NULL OBSOL execv 12 AUE_CHDIR STD { int chdir(char *path); } 13 AUE_FCHDIR STD { int fchdir(int fd); } 14 AUE_MKNOD COMPAT11 { int mknod(char *path, int mode, int dev); } 15 AUE_CHMOD STD { int chmod(char *path, int mode); } 16 AUE_CHOWN STD { int chown(char *path, int uid, int gid); } 17 AUE_NULL STD { int obreak(char *nsize); } break \ obreak_args int 18 AUE_GETFSSTAT COMPAT4 { int getfsstat(struct ostatfs *buf, \ long bufsize, int mode); } 19 AUE_LSEEK COMPAT { long lseek(int fd, long offset, \ int whence); } 20 AUE_GETPID STD { pid_t getpid(void); } 21 AUE_MOUNT STD { int mount(char *type, char *path, \ int flags, caddr_t data); } ; XXX `path' should have type `const char *' but we're not ready for that.
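; (Editorial illustration, not part of this patch: the generator that
; consumes this table -- makesyscalls.sh at this vintage -- expands an STD
; entry such as number 3 above into roughly the following pieces of the
; generated sys/sysproto.h and sys/syscall.h. The PADL_()/PADR_() padding
; macros are supplied by the generated header; the sketch is abridged.)
struct read_args {
	char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)];
	char buf_l_[PADL_(void *)]; void * buf; char buf_r_[PADR_(void *)];
	char nbyte_l_[PADL_(size_t)]; size_t nbyte; char nbyte_r_[PADR_(size_t)];
};
int	sys_read(struct thread *, struct read_args *);
#define	SYS_read	3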
22 AUE_UMOUNT STD { int unmount(char *path, int flags); } 23 AUE_SETUID STD { int setuid(uid_t uid); } 24 AUE_GETUID STD { uid_t getuid(void); } 25 AUE_GETEUID STD { uid_t geteuid(void); } 26 AUE_PTRACE STD { int ptrace(int req, pid_t pid, \ caddr_t addr, int data); } 27 AUE_RECVMSG STD { int recvmsg(int s, struct msghdr *msg, \ int flags); } 28 AUE_SENDMSG STD { int sendmsg(int s, struct msghdr *msg, \ int flags); } 29 AUE_RECVFROM STD { int recvfrom(int s, caddr_t buf, \ size_t len, int flags, \ struct sockaddr * __restrict from, \ __socklen_t * __restrict fromlenaddr); } 30 AUE_ACCEPT STD { int accept(int s, \ struct sockaddr * __restrict name, \ __socklen_t * __restrict anamelen); } 31 AUE_GETPEERNAME STD { int getpeername(int fdes, \ struct sockaddr * __restrict asa, \ __socklen_t * __restrict alen); } 32 AUE_GETSOCKNAME STD { int getsockname(int fdes, \ struct sockaddr * __restrict asa, \ __socklen_t * __restrict alen); } 33 AUE_ACCESS STD { int access(char *path, int amode); } 34 AUE_CHFLAGS STD { int chflags(const char *path, u_long flags); } 35 AUE_FCHFLAGS STD { int fchflags(int fd, u_long flags); } 36 AUE_SYNC STD { int sync(void); } 37 AUE_KILL STD { int kill(int pid, int signum); } 38 AUE_STAT COMPAT { int stat(char *path, struct ostat *ub); } 39 AUE_GETPPID STD { pid_t getppid(void); } 40 AUE_LSTAT COMPAT { int lstat(char *path, struct ostat *ub); } 41 AUE_DUP STD { int dup(u_int fd); } 42 AUE_PIPE COMPAT10 { int pipe(void); } 43 AUE_GETEGID STD { gid_t getegid(void); } 44 AUE_PROFILE STD { int profil(caddr_t samples, size_t size, \ size_t offset, u_int scale); } 45 AUE_KTRACE STD { int ktrace(const char *fname, int ops, \ int facs, int pid); } 46 AUE_SIGACTION COMPAT { int sigaction(int signum, \ struct osigaction *nsa, \ struct osigaction *osa); } 47 AUE_GETGID STD { gid_t getgid(void); } 48 AUE_SIGPROCMASK COMPAT { int sigprocmask(int how, osigset_t mask); } ; XXX note nonstandard (bogus) calling convention - the libc stub passes ; us the mask, not a pointer to it, and we return the old mask as the ; (int) return value. 
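; (Editorial sketch, not part of this patch: the modern sigprocmask, entry
; 340 below, passes both masks through pointers rather than the by-value
; convention noted above; minimal userland usage for contrast.)
#include <signal.h>

int
block_sigint(void)
{
	sigset_t set, oset;

	/* sigprocmask(2) takes pointers to the new and old masks. */
	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	return (sigprocmask(SIG_BLOCK, &set, &oset));
}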
49 AUE_GETLOGIN STD { int getlogin(char *namebuf, u_int \ namelen); } 50 AUE_SETLOGIN STD { int setlogin(char *namebuf); } 51 AUE_ACCT STD { int acct(char *path); } 52 AUE_SIGPENDING COMPAT { int sigpending(void); } 53 AUE_SIGALTSTACK STD { int sigaltstack(stack_t *ss, \ stack_t *oss); } 54 AUE_IOCTL STD { int ioctl(int fd, u_long com, \ caddr_t data); } 55 AUE_REBOOT STD { int reboot(int opt); } 56 AUE_REVOKE STD { int revoke(char *path); } 57 AUE_SYMLINK STD { int symlink(char *path, char *link); } 58 AUE_READLINK STD { ssize_t readlink(char *path, char *buf, \ size_t count); } 59 AUE_EXECVE STD { int execve(char *fname, char **argv, \ char **envv); } 60 AUE_UMASK STD { int umask(int newmask); } umask umask_args \ int 61 AUE_CHROOT STD { int chroot(char *path); } 62 AUE_FSTAT COMPAT { int fstat(int fd, struct ostat *sb); } 63 AUE_NULL COMPAT { int getkerninfo(int op, char *where, \ size_t *size, int arg); } getkerninfo \ getkerninfo_args int 64 AUE_NULL COMPAT { int getpagesize(void); } getpagesize \ getpagesize_args int 65 AUE_MSYNC STD { int msync(void *addr, size_t len, \ int flags); } 66 AUE_VFORK STD { int vfork(void); } 67 AUE_NULL OBSOL vread 68 AUE_NULL OBSOL vwrite 69 AUE_SBRK STD { int sbrk(int incr); } 70 AUE_SSTK STD { int sstk(int incr); } 71 AUE_MMAP COMPAT { int mmap(void *addr, int len, int prot, \ int flags, int fd, long pos); } 72 AUE_O_VADVISE STD { int ovadvise(int anom); } vadvise \ ovadvise_args int 73 AUE_MUNMAP STD { int munmap(void *addr, size_t len); } 74 AUE_MPROTECT STD { int mprotect(void *addr, size_t len, \ int prot); } 75 AUE_MADVISE STD { int madvise(void *addr, size_t len, \ int behav); } 76 AUE_NULL OBSOL vhangup 77 AUE_NULL OBSOL vlimit 78 AUE_MINCORE STD { int mincore(const void *addr, size_t len, \ char *vec); } 79 AUE_GETGROUPS STD { int getgroups(u_int gidsetsize, \ gid_t *gidset); } 80 AUE_SETGROUPS STD { int setgroups(u_int gidsetsize, \ gid_t *gidset); } 81 AUE_GETPGRP STD { int getpgrp(void); } 82 AUE_SETPGRP STD { int setpgid(int pid, int pgid); } 83 AUE_SETITIMER STD { int setitimer(u_int which, struct \ itimerval *itv, struct itimerval *oitv); } 84 AUE_WAIT4 COMPAT { int wait(void); } 85 AUE_SWAPON STD { int swapon(char *name); } 86 AUE_GETITIMER STD { int getitimer(u_int which, \ struct itimerval *itv); } 87 AUE_SYSCTL COMPAT { int gethostname(char *hostname, \ u_int len); } gethostname \ gethostname_args int 88 AUE_SYSCTL COMPAT { int sethostname(char *hostname, \ u_int len); } sethostname \ sethostname_args int 89 AUE_GETDTABLESIZE STD { int getdtablesize(void); } 90 AUE_DUP2 STD { int dup2(u_int from, u_int to); } 91 AUE_NULL UNIMPL getdopt 92 AUE_FCNTL STD { int fcntl(int fd, int cmd, long arg); } ; XXX should be { int fcntl(int fd, int cmd, ...); } ; but we're not ready for varargs. 
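; (Editorial sketch, not part of this patch: the numbers in the left-hand
; column become SYS_* constants in the generated syscall.h, so any entry can
; be exercised directly through syscall(2) from a FreeBSD userland.)
#include <sys/syscall.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* SYS_getpid is generated from entry 20 above. */
	long pid = syscall(SYS_getpid);

	printf("pid via SYS_getpid: %ld\n", pid);
	return (0);
}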
93 AUE_SELECT STD { int select(int nd, fd_set *in, fd_set *ou, \ fd_set *ex, struct timeval *tv); } 94 AUE_NULL UNIMPL setdopt 95 AUE_FSYNC STD { int fsync(int fd); } 96 AUE_SETPRIORITY STD { int setpriority(int which, int who, \ int prio); } 97 AUE_SOCKET STD { int socket(int domain, int type, \ int protocol); } 98 AUE_CONNECT STD { int connect(int s, caddr_t name, \ int namelen); } 99 AUE_ACCEPT COMPAT|NOARGS { int accept(int s, caddr_t name, \ int *anamelen); } accept accept_args int 100 AUE_GETPRIORITY STD { int getpriority(int which, int who); } 101 AUE_SEND COMPAT { int send(int s, caddr_t buf, int len, \ int flags); } 102 AUE_RECV COMPAT { int recv(int s, caddr_t buf, int len, \ int flags); } 103 AUE_SIGRETURN COMPAT { int sigreturn( \ struct osigcontext *sigcntxp); } 104 AUE_BIND STD { int bind(int s, caddr_t name, \ int namelen); } 105 AUE_SETSOCKOPT STD { int setsockopt(int s, int level, int name, \ caddr_t val, int valsize); } 106 AUE_LISTEN STD { int listen(int s, int backlog); } 107 AUE_NULL OBSOL vtimes 108 AUE_NULL COMPAT { int sigvec(int signum, struct sigvec *nsv, \ struct sigvec *osv); } 109 AUE_NULL COMPAT { int sigblock(int mask); } 110 AUE_NULL COMPAT { int sigsetmask(int mask); } 111 AUE_NULL COMPAT { int sigsuspend(osigset_t mask); } ; XXX note nonstandard (bogus) calling convention - the libc stub passes ; us the mask, not a pointer to it. 112 AUE_NULL COMPAT { int sigstack(struct sigstack *nss, \ struct sigstack *oss); } 113 AUE_RECVMSG COMPAT { int recvmsg(int s, struct omsghdr *msg, \ int flags); } 114 AUE_SENDMSG COMPAT { int sendmsg(int s, caddr_t msg, \ int flags); } 115 AUE_NULL OBSOL vtrace 116 AUE_GETTIMEOFDAY STD { int gettimeofday(struct timeval *tp, \ struct timezone *tzp); } 117 AUE_GETRUSAGE STD { int getrusage(int who, \ struct rusage *rusage); } 118 AUE_GETSOCKOPT STD { int getsockopt(int s, int level, int name, \ caddr_t val, int *avalsize); } 119 AUE_NULL UNIMPL resuba (BSD/OS 2.x) 120 AUE_READV STD { int readv(int fd, struct iovec *iovp, \ u_int iovcnt); } 121 AUE_WRITEV STD { int writev(int fd, struct iovec *iovp, \ u_int iovcnt); } 122 AUE_SETTIMEOFDAY STD { int settimeofday(struct timeval *tv, \ struct timezone *tzp); } 123 AUE_FCHOWN STD { int fchown(int fd, int uid, int gid); } 124 AUE_FCHMOD STD { int fchmod(int fd, int mode); } 125 AUE_RECVFROM COMPAT|NOARGS { int recvfrom(int s, caddr_t buf, \ size_t len, int flags, caddr_t from, int \ *fromlenaddr); } recvfrom recvfrom_args \ int 126 AUE_SETREUID STD { int setreuid(int ruid, int euid); } 127 AUE_SETREGID STD { int setregid(int rgid, int egid); } 128 AUE_RENAME STD { int rename(char *from, char *to); } 129 AUE_TRUNCATE COMPAT { int truncate(char *path, long length); } 130 AUE_FTRUNCATE COMPAT { int ftruncate(int fd, long length); } 131 AUE_FLOCK STD { int flock(int fd, int how); } 132 AUE_MKFIFO STD { int mkfifo(char *path, int mode); } 133 AUE_SENDTO STD { int sendto(int s, caddr_t buf, size_t len, \ int flags, caddr_t to, int tolen); } 134 AUE_SHUTDOWN STD { int shutdown(int s, int how); } 135 AUE_SOCKETPAIR STD { int socketpair(int domain, int type, \ int protocol, int *rsv); } 136 AUE_MKDIR STD { int mkdir(char *path, int mode); } 137 AUE_RMDIR STD { int rmdir(char *path); } 138 AUE_UTIMES STD { int utimes(char *path, \ struct timeval *tptr); } 139 AUE_NULL OBSOL 4.2 sigreturn 140 AUE_ADJTIME STD { int adjtime(struct timeval *delta, \ struct timeval *olddelta); } 141 AUE_GETPEERNAME COMPAT { int getpeername(int fdes, caddr_t asa, \ int *alen); } 142 AUE_SYSCTL COMPAT { long 
gethostid(void); } 143 AUE_SYSCTL COMPAT { int sethostid(long hostid); } 144 AUE_GETRLIMIT COMPAT { int getrlimit(u_int which, struct \ orlimit *rlp); } 145 AUE_SETRLIMIT COMPAT { int setrlimit(u_int which, \ struct orlimit *rlp); } 146 AUE_KILLPG COMPAT { int killpg(int pgid, int signum); } 147 AUE_SETSID STD { int setsid(void); } 148 AUE_QUOTACTL STD { int quotactl(char *path, int cmd, int uid, \ caddr_t arg); } 149 AUE_O_QUOTA COMPAT { int quota(void); } 150 AUE_GETSOCKNAME COMPAT|NOARGS { int getsockname(int fdec, \ caddr_t asa, int *alen); } getsockname \ getsockname_args int ; Syscalls 151-180 inclusive are reserved for vendor-specific ; system calls. (This includes various calls added for compatibility ; with other Unix variants.) ; Some of these calls are now supported by BSD... 151 AUE_NULL UNIMPL sem_lock (BSD/OS 2.x) 152 AUE_NULL UNIMPL sem_wakeup (BSD/OS 2.x) 153 AUE_NULL UNIMPL asyncdaemon (BSD/OS 2.x) ; 154 is initialised by the NLM code, if present. 154 AUE_NULL NOSTD { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); } ; 155 is initialized by the NFS code, if present. 155 AUE_NFS_SVC NOSTD { int nfssvc(int flag, caddr_t argp); } 156 AUE_GETDIRENTRIES COMPAT { int getdirentries(int fd, char *buf, \ u_int count, long *basep); } 157 AUE_STATFS COMPAT4 { int statfs(char *path, \ struct ostatfs *buf); } 158 AUE_FSTATFS COMPAT4 { int fstatfs(int fd, \ struct ostatfs *buf); } 159 AUE_NULL UNIMPL nosys 160 AUE_LGETFH STD { int lgetfh(char *fname, \ struct fhandle *fhp); } 161 AUE_NFS_GETFH STD { int getfh(char *fname, \ struct fhandle *fhp); } 162 AUE_SYSCTL COMPAT4 { int getdomainname(char *domainname, \ int len); } 163 AUE_SYSCTL COMPAT4 { int setdomainname(char *domainname, \ int len); } 164 AUE_NULL COMPAT4 { int uname(struct utsname *name); } 165 AUE_SYSARCH STD { int sysarch(int op, char *parms); } 166 AUE_RTPRIO STD { int rtprio(int function, pid_t pid, \ struct rtprio *rtp); } 167 AUE_NULL UNIMPL nosys 168 AUE_NULL UNIMPL nosys 169 AUE_SEMSYS NOSTD { int semsys(int which, int a2, int a3, \ int a4, int a5); } ; XXX should be { int semsys(int which, ...); } 170 AUE_MSGSYS NOSTD { int msgsys(int which, int a2, int a3, \ int a4, int a5, int a6); } ; XXX should be { int msgsys(int which, ...); } 171 AUE_SHMSYS NOSTD { int shmsys(int which, int a2, int a3, \ int a4); } ; XXX should be { int shmsys(int which, ...); } 172 AUE_NULL UNIMPL nosys 173 AUE_PREAD COMPAT6 { ssize_t pread(int fd, void *buf, \ size_t nbyte, int pad, off_t offset); } 174 AUE_PWRITE COMPAT6 { ssize_t pwrite(int fd, \ const void *buf, \ size_t nbyte, int pad, off_t offset); } 175 AUE_SETFIB STD { int setfib(int fibnum); } 176 AUE_NTP_ADJTIME STD { int ntp_adjtime(struct timex *tp); } 177 AUE_NULL UNIMPL sfork (BSD/OS 2.x) 178 AUE_NULL UNIMPL getdescriptor (BSD/OS 2.x) 179 AUE_NULL UNIMPL setdescriptor (BSD/OS 2.x) 180 AUE_NULL UNIMPL nosys ; Syscalls 181-199 are used by/reserved for BSD 181 AUE_SETGID STD { int setgid(gid_t gid); } 182 AUE_SETEGID STD { int setegid(gid_t egid); } 183 AUE_SETEUID STD { int seteuid(uid_t euid); } 184 AUE_NULL UNIMPL lfs_bmapv 185 AUE_NULL UNIMPL lfs_markv 186 AUE_NULL UNIMPL lfs_segclean 187 AUE_NULL UNIMPL lfs_segwait 188 AUE_STAT COMPAT11 { int stat(char *path, \ struct freebsd11_stat *ub); } 189 AUE_FSTAT COMPAT11 { int fstat(int fd, \ struct freebsd11_stat *sb); } 190 AUE_LSTAT COMPAT11 { int lstat(char *path, \ struct freebsd11_stat *ub); } 191 AUE_PATHCONF STD { int pathconf(char *path, int name); } 192 AUE_FPATHCONF STD { int fpathconf(int
fd, int name); } 193 AUE_NULL UNIMPL nosys 194 AUE_GETRLIMIT STD { int getrlimit(u_int which, \ struct rlimit *rlp); } getrlimit \ __getrlimit_args int 195 AUE_SETRLIMIT STD { int setrlimit(u_int which, \ struct rlimit *rlp); } setrlimit \ __setrlimit_args int 196 AUE_GETDIRENTRIES COMPAT11 { int getdirentries(int fd, char *buf, \ u_int count, long *basep); } 197 AUE_MMAP COMPAT6 { caddr_t mmap(caddr_t addr, \ size_t len, int prot, int flags, int fd, \ int pad, off_t pos); } 198 AUE_NULL NOPROTO { int nosys(void); } __syscall \ __syscall_args int 199 AUE_LSEEK COMPAT6 { off_t lseek(int fd, int pad, \ off_t offset, int whence); } 200 AUE_TRUNCATE COMPAT6 { int truncate(char *path, int pad, \ off_t length); } 201 AUE_FTRUNCATE COMPAT6 { int ftruncate(int fd, int pad, \ off_t length); } 202 AUE_SYSCTL STD { int __sysctl(int *name, u_int namelen, \ void *old, size_t *oldlenp, void *new, \ size_t newlen); } __sysctl sysctl_args int 203 AUE_MLOCK STD { int mlock(const void *addr, size_t len); } 204 AUE_MUNLOCK STD { int munlock(const void *addr, size_t len); } 205 AUE_UNDELETE STD { int undelete(char *path); } 206 AUE_FUTIMES STD { int futimes(int fd, struct timeval *tptr); } 207 AUE_GETPGID STD { int getpgid(pid_t pid); } 208 AUE_NULL UNIMPL newreboot (NetBSD) 209 AUE_POLL STD { int poll(struct pollfd *fds, u_int nfds, \ int timeout); } ; ; The following are reserved for loadable syscalls ; 210 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 211 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 212 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 213 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 214 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 215 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 216 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 217 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 218 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 219 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int ; ; The following were introduced with NetBSD/4.4Lite-2 220 AUE_SEMCTL COMPAT7|NOSTD { int __semctl(int semid, int semnum, \ int cmd, union semun_old *arg); } 221 AUE_SEMGET NOSTD { int semget(key_t key, int nsems, \ int semflg); } 222 AUE_SEMOP NOSTD { int semop(int semid, struct sembuf *sops, \ size_t nsops); } 223 AUE_NULL UNIMPL semconfig 224 AUE_MSGCTL COMPAT7|NOSTD { int msgctl(int msqid, int cmd, \ struct msqid_ds_old *buf); } 225 AUE_MSGGET NOSTD { int msgget(key_t key, int msgflg); } 226 AUE_MSGSND NOSTD { int msgsnd(int msqid, const void *msgp, \ size_t msgsz, int msgflg); } 227 AUE_MSGRCV NOSTD { ssize_t msgrcv(int msqid, void *msgp, \ size_t msgsz, long msgtyp, int msgflg); } 228 AUE_SHMAT NOSTD { int shmat(int shmid, const void *shmaddr, \ int shmflg); } 229 AUE_SHMCTL COMPAT7|NOSTD { int shmctl(int shmid, int cmd, \ struct shmid_ds_old *buf); } 230 AUE_SHMDT NOSTD { int shmdt(const void *shmaddr); } 231 AUE_SHMGET NOSTD { int shmget(key_t key, size_t size, \ int shmflg); } ; 232 AUE_NULL STD { int clock_gettime(clockid_t clock_id, \ struct timespec *tp); } 233 AUE_CLOCK_SETTIME STD { int clock_settime( \ clockid_t clock_id, \ const struct timespec *tp); } 234 AUE_NULL STD { int clock_getres(clockid_t clock_id, \ struct timespec *tp); } 235 AUE_NULL STD { int ktimer_create(clockid_t clock_id, \ struct sigevent *evp, int *timerid); } 236 AUE_NULL STD { int ktimer_delete(int timerid); } 237 AUE_NULL STD { int ktimer_settime(int timerid, int flags, \ const struct itimerspec *value, \ struct 
itimerspec *ovalue); } 238 AUE_NULL STD { int ktimer_gettime(int timerid, struct \ itimerspec *value); } 239 AUE_NULL STD { int ktimer_getoverrun(int timerid); } 240 AUE_NULL STD { int nanosleep(const struct timespec *rqtp, \ struct timespec *rmtp); } 241 AUE_NULL STD { int ffclock_getcounter(ffcounter *ffcount); } 242 AUE_NULL STD { int ffclock_setestimate( \ struct ffclock_estimate *cest); } 243 AUE_NULL STD { int ffclock_getestimate( \ struct ffclock_estimate *cest); } 244 AUE_NULL STD { int clock_nanosleep(clockid_t clock_id, \ int flags, const struct timespec *rqtp, \ struct timespec *rmtp); } 245 AUE_NULL UNIMPL nosys 246 AUE_NULL UNIMPL nosys 247 AUE_NULL STD { int clock_getcpuclockid2(id_t id,\ int which, clockid_t *clock_id); } 248 AUE_NULL STD { int ntp_gettime(struct ntptimeval *ntvp); } 249 AUE_NULL UNIMPL nosys ; syscall numbers initially used in OpenBSD 250 AUE_MINHERIT STD { int minherit(void *addr, size_t len, \ int inherit); } 251 AUE_RFORK STD { int rfork(int flags); } 252 AUE_POLL OBSOL openbsd_poll 253 AUE_ISSETUGID STD { int issetugid(void); } 254 AUE_LCHOWN STD { int lchown(char *path, int uid, int gid); } 255 AUE_AIO_READ STD { int aio_read(struct aiocb *aiocbp); } 256 AUE_AIO_WRITE STD { int aio_write(struct aiocb *aiocbp); } 257 AUE_LIO_LISTIO STD { int lio_listio(int mode, \ struct aiocb * const *acb_list, \ int nent, struct sigevent *sig); } 258 AUE_NULL UNIMPL nosys 259 AUE_NULL UNIMPL nosys 260 AUE_NULL UNIMPL nosys 261 AUE_NULL UNIMPL nosys 262 AUE_NULL UNIMPL nosys 263 AUE_NULL UNIMPL nosys 264 AUE_NULL UNIMPL nosys 265 AUE_NULL UNIMPL nosys 266 AUE_NULL UNIMPL nosys 267 AUE_NULL UNIMPL nosys 268 AUE_NULL UNIMPL nosys 269 AUE_NULL UNIMPL nosys 270 AUE_NULL UNIMPL nosys 271 AUE_NULL UNIMPL nosys 272 AUE_O_GETDENTS COMPAT11 { int getdents(int fd, char *buf, \ size_t count); } 273 AUE_NULL UNIMPL nosys 274 AUE_LCHMOD STD { int lchmod(char *path, mode_t mode); } 275 AUE_LCHOWN NOPROTO { int lchown(char *path, uid_t uid, \ gid_t gid); } netbsd_lchown lchown_args \ int 276 AUE_LUTIMES STD { int lutimes(char *path, \ struct timeval *tptr); } 277 AUE_MSYNC NOPROTO { int msync(void *addr, size_t len, \ int flags); } netbsd_msync msync_args int 278 AUE_STAT COMPAT11 { int nstat(char *path, struct nstat *ub); } 279 AUE_FSTAT COMPAT11 { int nfstat(int fd, struct nstat *sb); } 280 AUE_LSTAT COMPAT11 { int nlstat(char *path, struct nstat *ub); } 281 AUE_NULL UNIMPL nosys 282 AUE_NULL UNIMPL nosys 283 AUE_NULL UNIMPL nosys 284 AUE_NULL UNIMPL nosys 285 AUE_NULL UNIMPL nosys 286 AUE_NULL UNIMPL nosys 287 AUE_NULL UNIMPL nosys 288 AUE_NULL UNIMPL nosys ; 289 and 290 from NetBSD (OpenBSD: 267 and 268) 289 AUE_PREADV STD { ssize_t preadv(int fd, struct iovec *iovp, \ u_int iovcnt, off_t offset); } 290 AUE_PWRITEV STD { ssize_t pwritev(int fd, struct iovec *iovp, \ u_int iovcnt, off_t offset); } 291 AUE_NULL UNIMPL nosys 292 AUE_NULL UNIMPL nosys 293 AUE_NULL UNIMPL nosys 294 AUE_NULL UNIMPL nosys 295 AUE_NULL UNIMPL nosys 296 AUE_NULL UNIMPL nosys ; XXX 297 is 300 in NetBSD 297 AUE_FHSTATFS COMPAT4 { int fhstatfs( \ const struct fhandle *u_fhp, \ struct ostatfs *buf); } 298 AUE_FHOPEN STD { int fhopen(const struct fhandle *u_fhp, \ int flags); } 299 AUE_FHSTAT COMPAT11 { int fhstat(const struct fhandle *u_fhp, \ struct freebsd11_stat *sb); } ; syscall numbers for FreeBSD 300 AUE_NULL STD { int modnext(int modid); } 301 AUE_NULL STD { int modstat(int modid, \ struct module_stat *stat); } 302 AUE_NULL STD { int modfnext(int modid); } 303 AUE_NULL STD { int modfind(const char 
*name); } 304 AUE_MODLOAD STD { int kldload(const char *file); } 305 AUE_MODUNLOAD STD { int kldunload(int fileid); } 306 AUE_NULL STD { int kldfind(const char *file); } 307 AUE_NULL STD { int kldnext(int fileid); } 308 AUE_NULL STD { int kldstat(int fileid, struct \ kld_file_stat* stat); } 309 AUE_NULL STD { int kldfirstmod(int fileid); } 310 AUE_GETSID STD { int getsid(pid_t pid); } 311 AUE_SETRESUID STD { int setresuid(uid_t ruid, uid_t euid, \ uid_t suid); } 312 AUE_SETRESGID STD { int setresgid(gid_t rgid, gid_t egid, \ gid_t sgid); } 313 AUE_NULL OBSOL signanosleep 314 AUE_AIO_RETURN STD { ssize_t aio_return(struct aiocb *aiocbp); } 315 AUE_AIO_SUSPEND STD { int aio_suspend( \ struct aiocb * const * aiocbp, int nent, \ const struct timespec *timeout); } 316 AUE_AIO_CANCEL STD { int aio_cancel(int fd, \ struct aiocb *aiocbp); } 317 AUE_AIO_ERROR STD { int aio_error(struct aiocb *aiocbp); } 318 AUE_AIO_READ COMPAT6 { int aio_read(struct oaiocb *aiocbp); } 319 AUE_AIO_WRITE COMPAT6 { int aio_write(struct oaiocb *aiocbp); } 320 AUE_LIO_LISTIO COMPAT6 { int lio_listio(int mode, \ struct oaiocb * const *acb_list, \ int nent, struct osigevent *sig); } 321 AUE_NULL STD { int yield(void); } 322 AUE_NULL OBSOL thr_sleep 323 AUE_NULL OBSOL thr_wakeup 324 AUE_MLOCKALL STD { int mlockall(int how); } 325 AUE_MUNLOCKALL STD { int munlockall(void); } 326 AUE_GETCWD STD { int __getcwd(char *buf, size_t buflen); } 327 AUE_NULL STD { int sched_setparam (pid_t pid, \ const struct sched_param *param); } 328 AUE_NULL STD { int sched_getparam (pid_t pid, struct \ sched_param *param); } 329 AUE_NULL STD { int sched_setscheduler (pid_t pid, int \ policy, const struct sched_param \ *param); } 330 AUE_NULL STD { int sched_getscheduler (pid_t pid); } 331 AUE_NULL STD { int sched_yield (void); } 332 AUE_NULL STD { int sched_get_priority_max (int policy); } 333 AUE_NULL STD { int sched_get_priority_min (int policy); } 334 AUE_NULL STD { int sched_rr_get_interval (pid_t pid, \ struct timespec *interval); } 335 AUE_NULL STD { int utrace(const void *addr, size_t len); } 336 AUE_SENDFILE COMPAT4 { int sendfile(int fd, int s, \ off_t offset, size_t nbytes, \ struct sf_hdtr *hdtr, off_t *sbytes, \ int flags); } 337 AUE_NULL STD { int kldsym(int fileid, int cmd, \ void *data); } 338 AUE_JAIL STD { int jail(struct jail *jail); } 339 AUE_NULL NOSTD|NOTSTATIC { int nnpfs_syscall(int operation, \ char *a_pathP, int a_opcode, \ void *a_paramsP, int a_followSymlinks); } 340 AUE_SIGPROCMASK STD { int sigprocmask(int how, \ const sigset_t *set, sigset_t *oset); } 341 AUE_SIGSUSPEND STD { int sigsuspend(const sigset_t *sigmask); } 342 AUE_SIGACTION COMPAT4 { int sigaction(int sig, const \ struct sigaction *act, \ struct sigaction *oact); } 343 AUE_SIGPENDING STD { int sigpending(sigset_t *set); } 344 AUE_SIGRETURN COMPAT4 { int sigreturn( \ const struct ucontext4 *sigcntxp); } 345 AUE_SIGWAIT STD { int sigtimedwait(const sigset_t *set, \ siginfo_t *info, \ const struct timespec *timeout); } 346 AUE_NULL STD { int sigwaitinfo(const sigset_t *set, \ siginfo_t *info); } 347 AUE_ACL_GET_FILE STD { int __acl_get_file(const char *path, \ acl_type_t type, struct acl *aclp); } 348 AUE_ACL_SET_FILE STD { int __acl_set_file(const char *path, \ acl_type_t type, struct acl *aclp); } 349 AUE_ACL_GET_FD STD { int __acl_get_fd(int filedes, \ acl_type_t type, struct acl *aclp); } 350 AUE_ACL_SET_FD STD { int __acl_set_fd(int filedes, \ acl_type_t type, struct acl *aclp); } 351 AUE_ACL_DELETE_FILE STD { int __acl_delete_file(const char *path, \ 
acl_type_t type); } 352 AUE_ACL_DELETE_FD STD { int __acl_delete_fd(int filedes, \ acl_type_t type); } 353 AUE_ACL_CHECK_FILE STD { int __acl_aclcheck_file(const char *path, \ acl_type_t type, struct acl *aclp); } 354 AUE_ACL_CHECK_FD STD { int __acl_aclcheck_fd(int filedes, \ acl_type_t type, struct acl *aclp); } 355 AUE_EXTATTRCTL STD { int extattrctl(const char *path, int cmd, \ const char *filename, int attrnamespace, \ const char *attrname); } 356 AUE_EXTATTR_SET_FILE STD { ssize_t extattr_set_file( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 357 AUE_EXTATTR_GET_FILE STD { ssize_t extattr_get_file( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 358 AUE_EXTATTR_DELETE_FILE STD { int extattr_delete_file(const char *path, \ int attrnamespace, \ const char *attrname); } 359 AUE_AIO_WAITCOMPLETE STD { ssize_t aio_waitcomplete( \ struct aiocb **aiocbp, \ struct timespec *timeout); } 360 AUE_GETRESUID STD { int getresuid(uid_t *ruid, uid_t *euid, \ uid_t *suid); } 361 AUE_GETRESGID STD { int getresgid(gid_t *rgid, gid_t *egid, \ gid_t *sgid); } 362 AUE_KQUEUE STD { int kqueue(void); } 363 AUE_KEVENT COMPAT11 { int kevent(int fd, \ struct kevent_freebsd11 *changelist, \ int nchanges, \ struct kevent_freebsd11 *eventlist, \ int nevents, \ const struct timespec *timeout); } 364 AUE_NULL UNIMPL __cap_get_proc 365 AUE_NULL UNIMPL __cap_set_proc 366 AUE_NULL UNIMPL __cap_get_fd 367 AUE_NULL UNIMPL __cap_get_file 368 AUE_NULL UNIMPL __cap_set_fd 369 AUE_NULL UNIMPL __cap_set_file 370 AUE_NULL UNIMPL nosys 371 AUE_EXTATTR_SET_FD STD { ssize_t extattr_set_fd(int fd, \ int attrnamespace, const char *attrname, \ void *data, size_t nbytes); } 372 AUE_EXTATTR_GET_FD STD { ssize_t extattr_get_fd(int fd, \ int attrnamespace, const char *attrname, \ void *data, size_t nbytes); } 373 AUE_EXTATTR_DELETE_FD STD { int extattr_delete_fd(int fd, \ int attrnamespace, \ const char *attrname); } 374 AUE_SETUGID STD { int __setugid(int flag); } 375 AUE_NULL UNIMPL nfsclnt 376 AUE_EACCESS STD { int eaccess(char *path, int amode); } 377 AUE_NULL NOSTD|NOTSTATIC { int afs3_syscall(long syscall, \ long parm1, long parm2, long parm3, \ long parm4, long parm5, long parm6); } 378 AUE_NMOUNT STD { int nmount(struct iovec *iovp, \ unsigned int iovcnt, int flags); } 379 AUE_NULL UNIMPL kse_exit 380 AUE_NULL UNIMPL kse_wakeup 381 AUE_NULL UNIMPL kse_create 382 AUE_NULL UNIMPL kse_thr_interrupt 383 AUE_NULL UNIMPL kse_release 384 AUE_NULL STD { int __mac_get_proc(struct mac *mac_p); } 385 AUE_NULL STD { int __mac_set_proc(struct mac *mac_p); } 386 AUE_NULL STD { int __mac_get_fd(int fd, \ struct mac *mac_p); } 387 AUE_NULL STD { int __mac_get_file(const char *path_p, \ struct mac *mac_p); } 388 AUE_NULL STD { int __mac_set_fd(int fd, \ struct mac *mac_p); } 389 AUE_NULL STD { int __mac_set_file(const char *path_p, \ struct mac *mac_p); } 390 AUE_NULL STD { int kenv(int what, const char *name, \ char *value, int len); } 391 AUE_LCHFLAGS STD { int lchflags(const char *path, \ u_long flags); } 392 AUE_NULL STD { int uuidgen(struct uuid *store, \ int count); } 393 AUE_SENDFILE STD { int sendfile(int fd, int s, off_t offset, \ size_t nbytes, struct sf_hdtr *hdtr, \ off_t *sbytes, int flags); } 394 AUE_NULL STD { int mac_syscall(const char *policy, \ int call, void *arg); } 395 AUE_GETFSSTAT COMPAT11 { int getfsstat(struct freebsd11_statfs *buf, \ long bufsize, int mode); } 396 AUE_STATFS COMPAT11 { int statfs(char *path, \ struct 
freebsd11_statfs *buf); } 397 AUE_FSTATFS COMPAT11 { int fstatfs(int fd, \ struct freebsd11_statfs *buf); } 398 AUE_FHSTATFS COMPAT11 { int fhstatfs(const struct fhandle *u_fhp, \ struct freebsd11_statfs *buf); } 399 AUE_NULL UNIMPL nosys 400 AUE_SEMCLOSE NOSTD { int ksem_close(semid_t id); } 401 AUE_SEMPOST NOSTD { int ksem_post(semid_t id); } 402 AUE_SEMWAIT NOSTD { int ksem_wait(semid_t id); } 403 AUE_SEMTRYWAIT NOSTD { int ksem_trywait(semid_t id); } 404 AUE_SEMINIT NOSTD { int ksem_init(semid_t *idp, \ unsigned int value); } 405 AUE_SEMOPEN NOSTD { int ksem_open(semid_t *idp, \ const char *name, int oflag, \ mode_t mode, unsigned int value); } 406 AUE_SEMUNLINK NOSTD { int ksem_unlink(const char *name); } 407 AUE_SEMGETVALUE NOSTD { int ksem_getvalue(semid_t id, int *val); } 408 AUE_SEMDESTROY NOSTD { int ksem_destroy(semid_t id); } 409 AUE_NULL STD { int __mac_get_pid(pid_t pid, \ struct mac *mac_p); } 410 AUE_NULL STD { int __mac_get_link(const char *path_p, \ struct mac *mac_p); } 411 AUE_NULL STD { int __mac_set_link(const char *path_p, \ struct mac *mac_p); } 412 AUE_EXTATTR_SET_LINK STD { ssize_t extattr_set_link( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 413 AUE_EXTATTR_GET_LINK STD { ssize_t extattr_get_link( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 414 AUE_EXTATTR_DELETE_LINK STD { int extattr_delete_link( \ const char *path, int attrnamespace, \ const char *attrname); } 415 AUE_NULL STD { int __mac_execve(char *fname, char **argv, \ char **envv, struct mac *mac_p); } 416 AUE_SIGACTION STD { int sigaction(int sig, \ const struct sigaction *act, \ struct sigaction *oact); } 417 AUE_SIGRETURN STD { int sigreturn( \ const struct __ucontext *sigcntxp); } 418 AUE_NULL UNIMPL __xstat 419 AUE_NULL UNIMPL __xfstat 420 AUE_NULL UNIMPL __xlstat 421 AUE_NULL STD { int getcontext(struct __ucontext *ucp); } 422 AUE_NULL STD { int setcontext( \ const struct __ucontext *ucp); } 423 AUE_NULL STD { int swapcontext(struct __ucontext *oucp, \ const struct __ucontext *ucp); } 424 AUE_SWAPOFF STD { int swapoff(const char *name); } 425 AUE_ACL_GET_LINK STD { int __acl_get_link(const char *path, \ acl_type_t type, struct acl *aclp); } 426 AUE_ACL_SET_LINK STD { int __acl_set_link(const char *path, \ acl_type_t type, struct acl *aclp); } 427 AUE_ACL_DELETE_LINK STD { int __acl_delete_link(const char *path, \ acl_type_t type); } 428 AUE_ACL_CHECK_LINK STD { int __acl_aclcheck_link(const char *path, \ acl_type_t type, struct acl *aclp); } 429 AUE_SIGWAIT STD { int sigwait(const sigset_t *set, \ int *sig); } 430 AUE_THR_CREATE STD { int thr_create(ucontext_t *ctx, long *id, \ int flags); } 431 AUE_THR_EXIT STD { void thr_exit(long *state); } 432 AUE_NULL STD { int thr_self(long *id); } 433 AUE_THR_KILL STD { int thr_kill(long id, int sig); } 434 AUE_NULL UNIMPL nosys 435 AUE_NULL UNIMPL nosys 436 AUE_JAIL_ATTACH STD { int jail_attach(int jid); } 437 AUE_EXTATTR_LIST_FD STD { ssize_t extattr_list_fd(int fd, \ int attrnamespace, void *data, \ size_t nbytes); } 438 AUE_EXTATTR_LIST_FILE STD { ssize_t extattr_list_file( \ const char *path, int attrnamespace, \ void *data, size_t nbytes); } 439 AUE_EXTATTR_LIST_LINK STD { ssize_t extattr_list_link( \ const char *path, int attrnamespace, \ void *data, size_t nbytes); } 440 AUE_NULL UNIMPL kse_switchin 441 AUE_SEMWAIT NOSTD { int ksem_timedwait(semid_t id, \ const struct timespec *abstime); } 442 AUE_NULL STD { int thr_suspend( \ const struct timespec 
*timeout); } 443 AUE_NULL STD { int thr_wake(long id); } 444 AUE_MODUNLOAD STD { int kldunloadf(int fileid, int flags); } 445 AUE_AUDIT STD { int audit(const void *record, \ u_int length); } 446 AUE_AUDITON STD { int auditon(int cmd, void *data, \ u_int length); } 447 AUE_GETAUID STD { int getauid(uid_t *auid); } 448 AUE_SETAUID STD { int setauid(uid_t *auid); } 449 AUE_GETAUDIT STD { int getaudit(struct auditinfo *auditinfo); } 450 AUE_SETAUDIT STD { int setaudit(struct auditinfo *auditinfo); } 451 AUE_GETAUDIT_ADDR STD { int getaudit_addr( \ struct auditinfo_addr *auditinfo_addr, \ u_int length); } 452 AUE_SETAUDIT_ADDR STD { int setaudit_addr( \ struct auditinfo_addr *auditinfo_addr, \ u_int length); } 453 AUE_AUDITCTL STD { int auditctl(char *path); } 454 AUE_NULL STD { int _umtx_op(void *obj, int op, \ u_long val, void *uaddr1, void *uaddr2); } 455 AUE_THR_NEW STD { int thr_new(struct thr_param *param, \ int param_size); } 456 AUE_NULL STD { int sigqueue(pid_t pid, int signum, void *value); } 457 AUE_MQ_OPEN NOSTD { int kmq_open(const char *path, int flags, \ mode_t mode, const struct mq_attr *attr); } 458 AUE_MQ_SETATTR NOSTD { int kmq_setattr(int mqd, \ const struct mq_attr *attr, \ struct mq_attr *oattr); } 459 AUE_MQ_TIMEDRECEIVE NOSTD { int kmq_timedreceive(int mqd, \ char *msg_ptr, size_t msg_len, \ unsigned *msg_prio, \ const struct timespec *abs_timeout); } 460 AUE_MQ_TIMEDSEND NOSTD { int kmq_timedsend(int mqd, \ const char *msg_ptr, size_t msg_len,\ unsigned msg_prio, \ const struct timespec *abs_timeout);} 461 AUE_MQ_NOTIFY NOSTD { int kmq_notify(int mqd, \ const struct sigevent *sigev); } 462 AUE_MQ_UNLINK NOSTD { int kmq_unlink(const char *path); } 463 AUE_NULL STD { int abort2(const char *why, int nargs, void **args); } 464 AUE_NULL STD { int thr_set_name(long id, const char *name); } 465 AUE_AIO_FSYNC STD { int aio_fsync(int op, struct aiocb *aiocbp); } 466 AUE_RTPRIO STD { int rtprio_thread(int function, \ lwpid_t lwpid, struct rtprio *rtp); } 467 AUE_NULL UNIMPL nosys 468 AUE_NULL UNIMPL nosys 469 AUE_NULL UNIMPL __getpath_fromfd 470 AUE_NULL UNIMPL __getpath_fromaddr 471 AUE_SCTP_PEELOFF NOSTD { int sctp_peeloff(int sd, uint32_t name); } 472 AUE_SCTP_GENERIC_SENDMSG NOSTD { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ caddr_t to, __socklen_t tolen, \ struct sctp_sndrcvinfo *sinfo, int flags); } 473 AUE_SCTP_GENERIC_SENDMSG_IOV NOSTD { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ caddr_t to, __socklen_t tolen, \ struct sctp_sndrcvinfo *sinfo, int flags); } 474 AUE_SCTP_GENERIC_RECVMSG NOSTD { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ struct sockaddr * from, __socklen_t *fromlenaddr, \ struct sctp_sndrcvinfo *sinfo, int *msg_flags); } 475 AUE_PREAD STD { ssize_t pread(int fd, void *buf, \ size_t nbyte, off_t offset); } 476 AUE_PWRITE STD { ssize_t pwrite(int fd, const void *buf, \ size_t nbyte, off_t offset); } 477 AUE_MMAP STD { caddr_t mmap(caddr_t addr, size_t len, \ int prot, int flags, int fd, off_t pos); } 478 AUE_LSEEK STD { off_t lseek(int fd, off_t offset, \ int whence); } 479 AUE_TRUNCATE STD { int truncate(char *path, off_t length); } 480 AUE_FTRUNCATE STD { int ftruncate(int fd, off_t length); } 481 AUE_THR_KILL2 STD { int thr_kill2(pid_t pid, long id, int sig); } 482 AUE_SHMOPEN STD { int shm_open(const char *path, int flags, \ mode_t mode); } 483 AUE_SHMUNLINK STD { int shm_unlink(const char *path); } 484 AUE_NULL STD { int cpuset(cpusetid_t *setid); } 485 AUE_NULL STD { int 
cpuset_setid(cpuwhich_t which, id_t id, \ cpusetid_t setid); } 486 AUE_NULL STD { int cpuset_getid(cpulevel_t level, \ cpuwhich_t which, id_t id, \ cpusetid_t *setid); } 487 AUE_NULL STD { int cpuset_getaffinity(cpulevel_t level, \ cpuwhich_t which, id_t id, size_t cpusetsize, \ cpuset_t *mask); } 488 AUE_NULL STD { int cpuset_setaffinity(cpulevel_t level, \ cpuwhich_t which, id_t id, size_t cpusetsize, \ const cpuset_t *mask); } 489 AUE_FACCESSAT STD { int faccessat(int fd, char *path, int amode, \ int flag); } 490 AUE_FCHMODAT STD { int fchmodat(int fd, char *path, mode_t mode, \ int flag); } 491 AUE_FCHOWNAT STD { int fchownat(int fd, char *path, uid_t uid, \ gid_t gid, int flag); } 492 AUE_FEXECVE STD { int fexecve(int fd, char **argv, \ char **envv); } 493 AUE_FSTATAT COMPAT11 { int fstatat(int fd, char *path, \ struct freebsd11_stat *buf, int flag); } 494 AUE_FUTIMESAT STD { int futimesat(int fd, char *path, \ struct timeval *times); } 495 AUE_LINKAT STD { int linkat(int fd1, char *path1, int fd2, \ char *path2, int flag); } 496 AUE_MKDIRAT STD { int mkdirat(int fd, char *path, mode_t mode); } 497 AUE_MKFIFOAT STD { int mkfifoat(int fd, char *path, mode_t mode); } 498 AUE_MKNODAT COMPAT11 { int mknodat(int fd, char *path, mode_t mode, \ uint32_t dev); } ; XXX: see the comment for open 499 AUE_OPENAT_RWTC STD { int openat(int fd, char *path, int flag, \ mode_t mode); } 500 AUE_READLINKAT STD { int readlinkat(int fd, char *path, char *buf, \ size_t bufsize); } 501 AUE_RENAMEAT STD { int renameat(int oldfd, char *old, int newfd, \ char *new); } 502 AUE_SYMLINKAT STD { int symlinkat(char *path1, int fd, \ char *path2); } 503 AUE_UNLINKAT STD { int unlinkat(int fd, char *path, int flag); } 504 AUE_POSIX_OPENPT STD { int posix_openpt(int flags); } ; 505 is initialised by the kgssapi code, if present. 
505 AUE_NULL NOSTD { int gssd_syscall(char *path); } 506 AUE_JAIL_GET STD { int jail_get(struct iovec *iovp, \ unsigned int iovcnt, int flags); } 507 AUE_JAIL_SET STD { int jail_set(struct iovec *iovp, \ unsigned int iovcnt, int flags); } 508 AUE_JAIL_REMOVE STD { int jail_remove(int jid); } 509 AUE_CLOSEFROM STD { int closefrom(int lowfd); } 510 AUE_SEMCTL NOSTD { int __semctl(int semid, int semnum, \ int cmd, union semun *arg); } 511 AUE_MSGCTL NOSTD { int msgctl(int msqid, int cmd, \ struct msqid_ds *buf); } 512 AUE_SHMCTL NOSTD { int shmctl(int shmid, int cmd, \ struct shmid_ds *buf); } 513 AUE_LPATHCONF STD { int lpathconf(char *path, int name); } 514 AUE_NULL OBSOL cap_new 515 AUE_CAP_RIGHTS_GET STD { int __cap_rights_get(int version, \ int fd, cap_rights_t *rightsp); } 516 AUE_CAP_ENTER STD { int cap_enter(void); } 517 AUE_CAP_GETMODE STD { int cap_getmode(u_int *modep); } 518 AUE_PDFORK STD { int pdfork(int *fdp, int flags); } 519 AUE_PDKILL STD { int pdkill(int fd, int signum); } 520 AUE_PDGETPID STD { int pdgetpid(int fd, pid_t *pidp); } 521 AUE_PDWAIT UNIMPL pdwait4 522 AUE_SELECT STD { int pselect(int nd, fd_set *in, \ fd_set *ou, fd_set *ex, \ const struct timespec *ts, \ const sigset_t *sm); } 523 AUE_GETLOGINCLASS STD { int getloginclass(char *namebuf, \ size_t namelen); } 524 AUE_SETLOGINCLASS STD { int setloginclass(const char *namebuf); } 525 AUE_NULL STD { int rctl_get_racct(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 526 AUE_NULL STD { int rctl_get_rules(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 527 AUE_NULL STD { int rctl_get_limits(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 528 AUE_NULL STD { int rctl_add_rule(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 529 AUE_NULL STD { int rctl_remove_rule(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 530 AUE_POSIX_FALLOCATE STD { int posix_fallocate(int fd, \ off_t offset, off_t len); } 531 AUE_POSIX_FADVISE STD { int posix_fadvise(int fd, off_t offset, \ off_t len, int advice); } 532 AUE_WAIT6 STD { int wait6(idtype_t idtype, id_t id, \ int *status, int options, \ struct __wrusage *wrusage, \ siginfo_t *info); } 533 AUE_CAP_RIGHTS_LIMIT STD { int cap_rights_limit(int fd, \ cap_rights_t *rightsp); } 534 AUE_CAP_IOCTLS_LIMIT STD { int cap_ioctls_limit(int fd, \ const u_long *cmds, size_t ncmds); } 535 AUE_CAP_IOCTLS_GET STD { ssize_t cap_ioctls_get(int fd, \ u_long *cmds, size_t maxcmds); } 536 AUE_CAP_FCNTLS_LIMIT STD { int cap_fcntls_limit(int fd, \ uint32_t fcntlrights); } 537 AUE_CAP_FCNTLS_GET STD { int cap_fcntls_get(int fd, \ uint32_t *fcntlrightsp); } 538 AUE_BINDAT STD { int bindat(int fd, int s, caddr_t name, \ int namelen); } 539 AUE_CONNECTAT STD { int connectat(int fd, int s, caddr_t name, \ int namelen); } 540 AUE_CHFLAGSAT STD { int chflagsat(int fd, const char *path, \ u_long flags, int atflag); } 541 AUE_ACCEPT STD { int accept4(int s, \ struct sockaddr * __restrict name, \ __socklen_t * __restrict anamelen, \ int flags); } 542 AUE_PIPE STD { int pipe2(int *fildes, int flags); } 543 AUE_AIO_MLOCK STD { int aio_mlock(struct aiocb *aiocbp); } 544 AUE_PROCCTL STD { int procctl(idtype_t idtype, id_t id, \ int com, void *data); } 545 AUE_POLL STD { int ppoll(struct pollfd *fds, u_int nfds, \ const struct timespec *ts, \ const sigset_t *set); } 546 AUE_FUTIMES STD { int futimens(int fd, \ struct timespec *times); } 547 AUE_FUTIMESAT STD { int utimensat(int 
fd, \ char *path, \ struct timespec *times, int flag); } 548 AUE_NULL UNIMPL numa_getaffinity 549 AUE_NULL UNIMPL numa_setaffinity 550 AUE_FSYNC STD { int fdatasync(int fd); } 551 AUE_FSTAT STD { int fstat(int fd, struct stat *sb); } 552 AUE_FSTATAT STD { int fstatat(int fd, char *path, \ struct stat *buf, int flag); } 553 AUE_FHSTAT STD { int fhstat(const struct fhandle *u_fhp, \ struct stat *sb); } 554 AUE_GETDIRENTRIES STD { ssize_t getdirentries(int fd, char *buf, \ size_t count, off_t *basep); } 555 AUE_STATFS STD { int statfs(char *path, struct statfs *buf); } 556 AUE_FSTATFS STD { int fstatfs(int fd, struct statfs *buf); } 557 AUE_GETFSSTAT STD { int getfsstat(struct statfs *buf, \ long bufsize, int mode); } 558 AUE_FHSTATFS STD { int fhstatfs(const struct fhandle *u_fhp, \ struct statfs *buf); } 559 AUE_MKNODAT STD { int mknodat(int fd, char *path, mode_t mode, \ dev_t dev); } 560 AUE_KEVENT STD { int kevent(int fd, \ struct kevent *changelist, int nchanges, \ struct kevent *eventlist, int nevents, \ const struct timespec *timeout); } 561 AUE_NULL STD { int cpuset_getdomain(cpulevel_t level, \ cpuwhich_t which, id_t id, \ size_t domainsetsize, domainset_t *mask, \ int *policy); } 562 AUE_NULL STD { int cpuset_setdomain(cpulevel_t level, \ cpuwhich_t which, id_t id, \ size_t domainsetsize, domainset_t *mask, \ int policy); } ; Please copy any additions and changes to the following compatibility tables: ; sys/compat/freebsd32/syscalls.master ; vim: syntax=off Index: head/sys/netgraph/ng_atmllc.c =================================================================== --- head/sys/netgraph/ng_atmllc.c (revision 329872) +++ head/sys/netgraph/ng_atmllc.c (revision 329873) @@ -1,284 +1,284 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2004 Benno Rice * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
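Before the ng_atmllc.c listing continues, a note on entry 560 above: kevent() applies filter changes and collects pending events in a single call. A minimal sketch, assuming a FreeBSD host, that waits for stdin to become readable; the printed format is illustrative only.

	#include <sys/types.h>
	#include <sys/event.h>
	#include <sys/time.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		struct kevent change, event;
		int kq, n;

		if ((kq = kqueue()) == -1) {
			perror("kqueue");
			return (1);
		}
		/* Register interest in "stdin became readable". */
		EV_SET(&change, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
		/* NULL timeout: apply the change, then block for one event. */
		n = kevent(kq, &change, 1, &event, 1, NULL);
		if (n > 0)
			printf("fd %d readable, %jd bytes pending\n",
			    (int)event.ident, (intmax_t)event.data);
		close(kq);
		return (0);
	}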
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include /* for M_HASFCS and ETHER_HDR_LEN */ #define NG_ATMLLC_HEADER "\252\252\3\0\200\302" #define NG_ATMLLC_HEADER_LEN (sizeof(struct atmllc)) #define NG_ATMLLC_TYPE_ETHERNET_FCS 0x0001 #define NG_ATMLLC_TYPE_FDDI_FCS 0x0004 #define NG_ATMLLC_TYPE_ETHERNET_NOFCS 0x0007 #define NG_ATMLLC_TYPE_FDDI_NOFCS 0x000A struct ng_atmllc_priv { hook_p atm; hook_p ether; hook_p fddi; }; struct atmllc { uint8_t llchdr[6]; /* aa.aa.03.00.00.00 */ uint8_t type[2]; /* "ethernet" type */ }; /* ATM_LLC macros: note type code in host byte order */ #define ATM_LLC_TYPE(X) (((X)->type[0] << 8) | ((X)->type[1])) #define ATM_LLC_SETTYPE(X, V) do { \ (X)->type[0] = ((V) >> 8) & 0xff; \ (X)->type[1] = ((V) & 0xff); \ } while (0) /* Netgraph methods. */ static ng_constructor_t ng_atmllc_constructor; static ng_shutdown_t ng_atmllc_shutdown; static ng_rcvmsg_t ng_atmllc_rcvmsg; static ng_newhook_t ng_atmllc_newhook; static ng_rcvdata_t ng_atmllc_rcvdata; static ng_disconnect_t ng_atmllc_disconnect; static struct ng_type ng_atmllc_typestruct = { .version = NG_ABI_VERSION, .name = NG_ATMLLC_NODE_TYPE, .constructor = ng_atmllc_constructor, .rcvmsg = ng_atmllc_rcvmsg, .shutdown = ng_atmllc_shutdown, .newhook = ng_atmllc_newhook, .rcvdata = ng_atmllc_rcvdata, .disconnect = ng_atmllc_disconnect, }; NETGRAPH_INIT(atmllc, &ng_atmllc_typestruct); static int ng_atmllc_constructor(node_p node) { struct ng_atmllc_priv *priv; priv = malloc(sizeof(*priv), M_NETGRAPH, M_WAITOK | M_ZERO); NG_NODE_SET_PRIVATE(node, priv); return (0); } static int ng_atmllc_rcvmsg(node_p node, item_p item, hook_p lasthook) { struct ng_mesg *msg; int error; error = 0; NGI_GET_MSG(item, msg); msg->header.flags |= NGF_RESP; NG_RESPOND_MSG(error, node, item, msg); return (error); } static int ng_atmllc_shutdown(node_p node) { struct ng_atmllc_priv *priv; priv = NG_NODE_PRIVATE(node); free(priv, M_NETGRAPH); NG_NODE_UNREF(node); return (0); } static int ng_atmllc_newhook(node_p node, hook_p hook, const char *name) { struct ng_atmllc_priv *priv; priv = NG_NODE_PRIVATE(node); if (strcmp(name, NG_ATMLLC_HOOK_ATM) == 0) { if (priv->atm != NULL) { return (EISCONN); } priv->atm = hook; } else if (strcmp(name, NG_ATMLLC_HOOK_ETHER) == 0) { if (priv->ether != NULL) { return (EISCONN); } priv->ether = hook; } else if (strcmp(name, NG_ATMLLC_HOOK_FDDI) == 0) { if (priv->fddi != NULL) { return (EISCONN); } priv->fddi = hook; } else { return (EINVAL); } return (0); } static int ng_atmllc_rcvdata(hook_p hook, item_p item) { struct ng_atmllc_priv *priv; struct mbuf *m; struct atmllc *hdr; hook_p outhook; u_int padding; int error; priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); NGI_GET_M(item, m); outhook = NULL; padding = 0; if (hook == priv->atm) { - /* Ditch the psuedoheader. */ + /* Ditch the pseudoheader. */ hdr = mtod(m, struct atmllc *); /* m_adj(m, sizeof(struct atm_pseudohdr)); */ /* * Make sure we have the LLC and ethernet headers. * The ethernet header size is slightly larger than the FDDI * header, which is convenient. */ if (m->m_len < sizeof(struct atmllc) + ETHER_HDR_LEN) { m = m_pullup(m, sizeof(struct atmllc) + ETHER_HDR_LEN); if (m == NULL) { NG_FREE_ITEM(item); return (ENOMEM); } } /* Decode the LLC header. 
*/ hdr = mtod(m, struct atmllc *); if (ATM_LLC_TYPE(hdr) == NG_ATMLLC_TYPE_ETHERNET_NOFCS) { m->m_flags &= ~M_HASFCS; outhook = priv->ether; padding = 2; } else if (ATM_LLC_TYPE(hdr) == NG_ATMLLC_TYPE_ETHERNET_FCS) { m->m_flags |= M_HASFCS; outhook = priv->ether; padding = 2; } else if (ATM_LLC_TYPE(hdr) == NG_ATMLLC_TYPE_FDDI_NOFCS) { m->m_flags &= ~M_HASFCS; outhook = priv->fddi; padding = 3; } else if (ATM_LLC_TYPE(hdr) == NG_ATMLLC_TYPE_FDDI_FCS) { m->m_flags |= M_HASFCS; outhook = priv->fddi; padding = 3; } else { printf("ng_atmllc: unknown type: %x\n", ATM_LLC_TYPE(hdr)); } /* Remove the LLC header and any padding*/ m_adj(m, sizeof(struct atmllc) + padding); } else if (hook == priv->ether) { /* Add the LLC header */ M_PREPEND(m, NG_ATMLLC_HEADER_LEN + 2, M_NOWAIT); if (m == NULL) { printf("ng_atmllc: M_PREPEND failed\n"); NG_FREE_ITEM(item); return (ENOMEM); } hdr = mtod(m, struct atmllc *); bzero((void *)hdr, sizeof(struct atmllc) + 2); bcopy(NG_ATMLLC_HEADER, hdr->llchdr, 6); if ((m->m_flags & M_HASFCS) != 0) { ATM_LLC_SETTYPE(hdr, NG_ATMLLC_TYPE_ETHERNET_FCS); } else { ATM_LLC_SETTYPE(hdr, NG_ATMLLC_TYPE_ETHERNET_NOFCS); } outhook = priv->atm; } else if (hook == priv->fddi) { /* Add the LLC header */ M_PREPEND(m, NG_ATMLLC_HEADER_LEN + 3, M_NOWAIT); if (m == NULL) { printf("ng_atmllc: M_PREPEND failed\n"); NG_FREE_ITEM(item); return (ENOMEM); } hdr = mtod(m, struct atmllc *); bzero((void *)hdr, sizeof(struct atmllc) + 3); bcopy(NG_ATMLLC_HEADER, hdr->llchdr, 6); if ((m->m_flags & M_HASFCS) != 0) { ATM_LLC_SETTYPE(hdr, NG_ATMLLC_TYPE_FDDI_FCS); } else { ATM_LLC_SETTYPE(hdr, NG_ATMLLC_TYPE_FDDI_NOFCS); } outhook = priv->atm; } if (outhook == NULL) { NG_FREE_M(m); NG_FREE_ITEM(item); return (0); } NG_FWD_NEW_DATA(error, item, outhook, m); return (error); } static int ng_atmllc_disconnect(hook_p hook) { node_p node; struct ng_atmllc_priv *priv; node = NG_HOOK_NODE(hook); priv = NG_NODE_PRIVATE(node); if (hook == priv->atm) { priv->atm = NULL; } else if (hook == priv->ether) { priv->ether = NULL; } else if (hook == priv->fddi) { priv->fddi = NULL; } if (NG_NODE_NUMHOOKS(node) == 0 && NG_NODE_IS_VALID(node)) { ng_rmnode_self(node); } return (0); } Index: head/sys/x86/include/apicvar.h =================================================================== --- head/sys/x86/include/apicvar.h (revision 329872) +++ head/sys/x86/include/apicvar.h (revision 329873) @@ -1,497 +1,497 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
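The ATM_LLC_TYPE() and ATM_LLC_SETTYPE() macros in ng_atmllc.c above store the 16-bit type field as two explicit bytes, most significant first, so the wire encoding does not depend on host byte order. A standalone round-trip sketch: the struct and macros are copied from the node; main() is illustrative only.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct atmllc {
		uint8_t llchdr[6];	/* aa.aa.03.00.00.00 */
		uint8_t type[2];	/* "ethernet" type */
	};
	#define ATM_LLC_TYPE(X) (((X)->type[0] << 8) | ((X)->type[1]))
	#define ATM_LLC_SETTYPE(X, V) do {		\
		(X)->type[0] = ((V) >> 8) & 0xff;	\
		(X)->type[1] = ((V) & 0xff);		\
	} while (0)

	int
	main(void)
	{
		struct atmllc hdr;

		memset(&hdr, 0, sizeof(hdr));
		ATM_LLC_SETTYPE(&hdr, 0x0007);	/* ETHERNET_NOFCS */
		/* Prints "type = 0x0007" on any host, big or little endian. */
		printf("type = 0x%04x\n", ATM_LLC_TYPE(&hdr));
		return (0);
	}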
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _X86_APICVAR_H_ #define _X86_APICVAR_H_ /* * Local && I/O APIC variable definitions. */ /* * Layout of local APIC interrupt vectors: * * 0xff (255) +-------------+ * | | 15 (Spurious / IPIs / Local Interrupts) * 0xf0 (240) +-------------+ * | | 14 (I/O Interrupts / Timer) * 0xe0 (224) +-------------+ * | | 13 (I/O Interrupts) * 0xd0 (208) +-------------+ * | | 12 (I/O Interrupts) * 0xc0 (192) +-------------+ * | | 11 (I/O Interrupts) * 0xb0 (176) +-------------+ * | | 10 (I/O Interrupts) * 0xa0 (160) +-------------+ * | | 9 (I/O Interrupts) * 0x90 (144) +-------------+ * | | 8 (I/O Interrupts / System Calls) * 0x80 (128) +-------------+ * | | 7 (I/O Interrupts) * 0x70 (112) +-------------+ * | | 6 (I/O Interrupts) * 0x60 (96) +-------------+ * | | 5 (I/O Interrupts) * 0x50 (80) +-------------+ * | | 4 (I/O Interrupts) * 0x40 (64) +-------------+ * | | 3 (I/O Interrupts) * 0x30 (48) +-------------+ * | | 2 (ATPIC Interrupts) * 0x20 (32) +-------------+ * | | 1 (Exceptions, traps, faults, etc.) * 0x10 (16) +-------------+ * | | 0 (Exceptions, traps, faults, etc.) * 0x00 (0) +-------------+ * * Note: 0x80 needs to be handled specially and not allocated to an * I/O device! */ #define xAPIC_MAX_APIC_ID 0xfe #define xAPIC_ID_ALL 0xff #define MAX_APIC_ID 0x200 #define APIC_ID_ALL 0xffffffff #define IOAPIC_MAX_ID xAPIC_MAX_APIC_ID /* I/O Interrupts are used for external devices such as ISA, PCI, etc. */ #define APIC_IO_INTS (IDT_IO_INTS + 16) #define APIC_NUM_IOINTS 191 /* The timer interrupt is used for clock handling and drives hardclock, etc. */ #define APIC_TIMER_INT (APIC_IO_INTS + APIC_NUM_IOINTS) /* ********************* !!! WARNING !!! ****************************** * Each local apic has an interrupt receive fifo that is two entries deep * for each interrupt priority class (higher 4 bits of interrupt vector). * Once the fifo is full the APIC can no longer receive interrupts for this * class and sending IPIs from other CPUs will be blocked. * To avoid deadlocks there should be no more than two IPI interrupts * pending at the same time. * Currently this is guaranteed by dividing the IPIs in two groups that have * each at most one IPI interrupt pending. The first group is protected by the * smp_ipi_mtx and waits for the completion of the IPI (Only one IPI user * at a time) The second group uses a single interrupt and a bitmap to avoid * redundant IPI interrupts. */ /* Interrupts for local APIC LVT entries other than the timer. */ #define APIC_LOCAL_INTS 240 #define APIC_ERROR_INT APIC_LOCAL_INTS #define APIC_THERMAL_INT (APIC_LOCAL_INTS + 1) #define APIC_CMC_INT (APIC_LOCAL_INTS + 2) #define APIC_IPI_INTS (APIC_LOCAL_INTS + 3) #define IPI_RENDEZVOUS (APIC_IPI_INTS) /* Inter-CPU rendezvous. 
*/ #define IPI_INVLTLB (APIC_IPI_INTS + 1) /* TLB Shootdown IPIs */ #define IPI_INVLPG (APIC_IPI_INTS + 2) #define IPI_INVLRNG (APIC_IPI_INTS + 3) #define IPI_INVLCACHE (APIC_IPI_INTS + 4) /* Vector to handle bitmap based IPIs */ #define IPI_BITMAP_VECTOR (APIC_IPI_INTS + 5) /* IPIs handled by IPI_BITMAP_VECTOR */ #define IPI_AST 0 /* Generate software trap. */ #define IPI_PREEMPT 1 #define IPI_HARDCLOCK 2 #define IPI_BITMAP_LAST IPI_HARDCLOCK #define IPI_IS_BITMAPED(x) ((x) <= IPI_BITMAP_LAST) #define IPI_STOP (APIC_IPI_INTS + 6) /* Stop CPU until restarted. */ #define IPI_SUSPEND (APIC_IPI_INTS + 7) /* Suspend CPU until restarted. */ #ifdef __i386__ #define IPI_LAZYPMAP (APIC_IPI_INTS + 8) /* Lazy pmap release. */ #define IPI_DYN_FIRST (APIC_IPI_INTS + 9) #else #define IPI_DYN_FIRST (APIC_IPI_INTS + 8) #endif #define IPI_DYN_LAST (253) /* IPIs allocated at runtime */ /* * IPI_STOP_HARD does not need to occupy a slot in the IPI vector space since * it is delivered using an NMI anyways. */ #define IPI_NMI_FIRST 254 #define IPI_TRACE 254 /* Interrupt for tracing. */ #define IPI_STOP_HARD 255 /* Stop CPU with a NMI. */ /* * The spurious interrupt can share the priority class with the IPIs since * it is not a normal interrupt. (Does not use the APIC's interrupt fifo) */ #define APIC_SPURIOUS_INT 255 #ifndef LOCORE #define APIC_IPI_DEST_SELF -1 #define APIC_IPI_DEST_ALL -2 #define APIC_IPI_DEST_OTHERS -3 #define APIC_BUS_UNKNOWN -1 #define APIC_BUS_ISA 0 #define APIC_BUS_EISA 1 #define APIC_BUS_PCI 2 #define APIC_BUS_MAX APIC_BUS_PCI #define IRQ_EXTINT (NUM_IO_INTS + 1) #define IRQ_NMI (NUM_IO_INTS + 2) #define IRQ_SMI (NUM_IO_INTS + 3) #define IRQ_DISABLED (NUM_IO_INTS + 4) /* - * An APIC enumerator is a psuedo bus driver that enumerates APIC's including + * An APIC enumerator is a pseudo bus driver that enumerates APIC's including * CPU's and I/O APIC's. */ struct apic_enumerator { const char *apic_name; int (*apic_probe)(void); int (*apic_probe_cpus)(void); int (*apic_setup_local)(void); int (*apic_setup_io)(void); SLIST_ENTRY(apic_enumerator) apic_next; }; inthand_t IDTVEC(apic_isr1), IDTVEC(apic_isr2), IDTVEC(apic_isr3), IDTVEC(apic_isr4), IDTVEC(apic_isr5), IDTVEC(apic_isr6), IDTVEC(apic_isr7), IDTVEC(cmcint), IDTVEC(errorint), IDTVEC(spuriousint), IDTVEC(timerint), IDTVEC(apic_isr1_pti), IDTVEC(apic_isr2_pti), IDTVEC(apic_isr3_pti), IDTVEC(apic_isr4_pti), IDTVEC(apic_isr5_pti), IDTVEC(apic_isr6_pti), IDTVEC(apic_isr7_pti), IDTVEC(cmcint_pti), IDTVEC(errorint_pti), IDTVEC(spuriousint_pti), IDTVEC(timerint_pti); extern vm_paddr_t lapic_paddr; extern int *apic_cpuids; void apic_register_enumerator(struct apic_enumerator *enumerator); void *ioapic_create(vm_paddr_t addr, int32_t apic_id, int intbase); int ioapic_disable_pin(void *cookie, u_int pin); int ioapic_get_vector(void *cookie, u_int pin); void ioapic_register(void *cookie); int ioapic_remap_vector(void *cookie, u_int pin, int vector); int ioapic_set_bus(void *cookie, u_int pin, int bus_type); int ioapic_set_extint(void *cookie, u_int pin); int ioapic_set_nmi(void *cookie, u_int pin); int ioapic_set_polarity(void *cookie, u_int pin, enum intr_polarity pol); int ioapic_set_triggermode(void *cookie, u_int pin, enum intr_trigger trigger); int ioapic_set_smi(void *cookie, u_int pin); /* * Struct containing pointers to APIC functions whose * implementation is run time selectable. 
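The struct apic_ops that follows is the kernel's usual run-time dispatch pattern: a table of function pointers filled in once at startup (here, native xAPIC versus x2APIC backends) behind thin static inline wrappers, so callers pay one indirect call and no branches. A minimal userland sketch of the pattern; the names ops, dummy_id and lapic_id_demo are hypothetical, not from this header.

	#include <stdio.h>

	struct ops {
		int	(*id)(void);
		void	(*eoi)(void);
	};

	/* One backend; a real kernel would pick between several at boot. */
	static int	dummy_id(void) { return (0); }
	static void	dummy_eoi(void) { }

	static struct ops ops = { .id = dummy_id, .eoi = dummy_eoi };

	/* Thin wrapper, mirroring lapic_id() below. */
	static inline int
	lapic_id_demo(void)
	{
		return (ops.id());
	}

	int
	main(void)
	{
		printf("id = %d\n", lapic_id_demo());
		return (0);
	}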
*/ struct apic_ops { void (*create)(u_int, int); void (*init)(vm_paddr_t); void (*xapic_mode)(void); bool (*is_x2apic)(void); void (*setup)(int); void (*dump)(const char *); void (*disable)(void); void (*eoi)(void); int (*id)(void); int (*intr_pending)(u_int); void (*set_logical_id)(u_int, u_int, u_int); u_int (*cpuid)(u_int); /* Vectors */ u_int (*alloc_vector)(u_int, u_int); u_int (*alloc_vectors)(u_int, u_int *, u_int, u_int); void (*enable_vector)(u_int, u_int); void (*disable_vector)(u_int, u_int); void (*free_vector)(u_int, u_int, u_int); /* PMC */ int (*enable_pmc)(void); void (*disable_pmc)(void); void (*reenable_pmc)(void); /* CMC */ void (*enable_cmc)(void); /* AMD ELVT */ int (*enable_mca_elvt)(void); /* IPI */ void (*ipi_raw)(register_t, u_int); void (*ipi_vectored)(u_int, int); int (*ipi_wait)(int); int (*ipi_alloc)(inthand_t *ipifunc); void (*ipi_free)(int vector); /* LVT */ int (*set_lvt_mask)(u_int, u_int, u_char); int (*set_lvt_mode)(u_int, u_int, u_int32_t); int (*set_lvt_polarity)(u_int, u_int, enum intr_polarity); int (*set_lvt_triggermode)(u_int, u_int, enum intr_trigger); }; extern struct apic_ops apic_ops; static inline void lapic_create(u_int apic_id, int boot_cpu) { apic_ops.create(apic_id, boot_cpu); } static inline void lapic_init(vm_paddr_t addr) { apic_ops.init(addr); } static inline void lapic_xapic_mode(void) { apic_ops.xapic_mode(); } static inline bool lapic_is_x2apic(void) { return (apic_ops.is_x2apic()); } static inline void lapic_setup(int boot) { apic_ops.setup(boot); } static inline void lapic_dump(const char *str) { apic_ops.dump(str); } static inline void lapic_disable(void) { apic_ops.disable(); } static inline void lapic_eoi(void) { apic_ops.eoi(); } static inline int lapic_id(void) { return (apic_ops.id()); } static inline int lapic_intr_pending(u_int vector) { return (apic_ops.intr_pending(vector)); } /* XXX: UNUSED */ static inline void lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id) { apic_ops.set_logical_id(apic_id, cluster, cluster_id); } static inline u_int apic_cpuid(u_int apic_id) { return (apic_ops.cpuid(apic_id)); } static inline u_int apic_alloc_vector(u_int apic_id, u_int irq) { return (apic_ops.alloc_vector(apic_id, irq)); } static inline u_int apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align) { return (apic_ops.alloc_vectors(apic_id, irqs, count, align)); } static inline void apic_enable_vector(u_int apic_id, u_int vector) { apic_ops.enable_vector(apic_id, vector); } static inline void apic_disable_vector(u_int apic_id, u_int vector) { apic_ops.disable_vector(apic_id, vector); } static inline void apic_free_vector(u_int apic_id, u_int vector, u_int irq) { apic_ops.free_vector(apic_id, vector, irq); } static inline int lapic_enable_pmc(void) { return (apic_ops.enable_pmc()); } static inline void lapic_disable_pmc(void) { apic_ops.disable_pmc(); } static inline void lapic_reenable_pmc(void) { apic_ops.reenable_pmc(); } static inline void lapic_enable_cmc(void) { apic_ops.enable_cmc(); } static inline int lapic_enable_mca_elvt(void) { return (apic_ops.enable_mca_elvt()); } static inline void lapic_ipi_raw(register_t icrlo, u_int dest) { apic_ops.ipi_raw(icrlo, dest); } static inline void lapic_ipi_vectored(u_int vector, int dest) { apic_ops.ipi_vectored(vector, dest); } static inline int lapic_ipi_wait(int delay) { return (apic_ops.ipi_wait(delay)); } static inline int lapic_ipi_alloc(inthand_t *ipifunc) { return (apic_ops.ipi_alloc(ipifunc)); } static inline void lapic_ipi_free(int vector) { 
return (apic_ops.ipi_free(vector)); } static inline int lapic_set_lvt_mask(u_int apic_id, u_int lvt, u_char masked) { return (apic_ops.set_lvt_mask(apic_id, lvt, masked)); } static inline int lapic_set_lvt_mode(u_int apic_id, u_int lvt, u_int32_t mode) { return (apic_ops.set_lvt_mode(apic_id, lvt, mode)); } static inline int lapic_set_lvt_polarity(u_int apic_id, u_int lvt, enum intr_polarity pol) { return (apic_ops.set_lvt_polarity(apic_id, lvt, pol)); } static inline int lapic_set_lvt_triggermode(u_int apic_id, u_int lvt, enum intr_trigger trigger) { return (apic_ops.set_lvt_triggermode(apic_id, lvt, trigger)); } void lapic_handle_cmc(void); void lapic_handle_error(void); void lapic_handle_intr(int vector, struct trapframe *frame); void lapic_handle_timer(struct trapframe *frame); int ioapic_get_rid(u_int apic_id, uint16_t *ridp); extern int x2apic_mode; extern int lapic_eoi_suppression; #ifdef _SYS_SYSCTL_H_ SYSCTL_DECL(_hw_apic); #endif #endif /* !LOCORE */ #endif /* _X86_APICVAR_H_ */
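A closing sanity check on the vector-layout diagram in apicvar.h above. Assuming IDT_IO_INTS is 32, its usual value in the x86 headers, the timer vector works out to 48 + 191 = 239 = 0xef, the top slot of priority class 14 (vector >> 4), which matches the diagram's "14 (I/O Interrupts / Timer)" band.

	#include <stdio.h>

	#define IDT_IO_INTS	32	/* assumption: value from machine/segments.h */
	#define APIC_IO_INTS	(IDT_IO_INTS + 16)
	#define APIC_NUM_IOINTS	191
	#define APIC_TIMER_INT	(APIC_IO_INTS + APIC_NUM_IOINTS)

	int
	main(void)
	{
		/* Expect: timer vector = 0xef, priority class 14. */
		printf("timer vector = 0x%x, priority class %d\n",
		    APIC_TIMER_INT, APIC_TIMER_INT >> 4);
		return (0);
	}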