Index: projects/clang390-import/cddl/usr.sbin/zfsd/Makefile =================================================================== --- projects/clang390-import/cddl/usr.sbin/zfsd/Makefile (revision 305028) +++ projects/clang390-import/cddl/usr.sbin/zfsd/Makefile (revision 305029) @@ -1,13 +1,12 @@ # $FreeBSD$ -SRCDIR=${.CURDIR}/../../.. .include "Makefile.common" PROG_CXX= zfsd MAN= zfsd.8 .include # The unittests require devel/googletest and devel/googlemock from ports. # Don't automatically build them. SUBDIR= Index: projects/clang390-import/cddl/usr.sbin/zfsd/Makefile.common =================================================================== --- projects/clang390-import/cddl/usr.sbin/zfsd/Makefile.common (revision 305028) +++ projects/clang390-import/cddl/usr.sbin/zfsd/Makefile.common (revision 305029) @@ -1,42 +1,42 @@ # $FreeBSD$ SRCS= callout.cc \ case_file.cc \ zfsd_event.cc \ vdev.cc \ vdev_iterator.cc \ zfsd.cc \ zfsd_exception.cc \ zpool_list.cc \ zfsd_main.cc WARNS?= 3 # Ignore warnings about Solaris specific pragmas. IGNORE_PRAGMA= YES -INCFLAGS+= -I${SRCDIR}/cddl/contrib/opensolaris/lib/libzpool/common -INCFLAGS+= -I${SRCDIR}/cddl/compat/opensolaris/include -INCFLAGS+= -I${SRCDIR}/cddl/compat/opensolaris/lib/libumem -INCFLAGS+= -I${SRCDIR}/sys/cddl/compat/opensolaris -INCFLAGS+= -I${SRCDIR}/cddl/contrib/opensolaris/head -INCFLAGS+= -I${SRCDIR}/cddl/contrib/opensolaris/lib/libuutil/common -INCFLAGS+= -I${SRCDIR}/cddl/contrib/opensolaris/lib/libumem/common -INCFLAGS+= -I${SRCDIR}/cddl/contrib/opensolaris/lib/libzfs_core/common -INCFLAGS+= -I${SRCDIR}/cddl/contrib/opensolaris/lib/libzfs/common -INCFLAGS+= -I${SRCDIR}/cddl/contrib/opensolaris/lib/libnvpair -INCFLAGS+= -I${SRCDIR}/sys/cddl/contrib/opensolaris/common/zfs -INCFLAGS+= -I${SRCDIR}/sys/cddl/contrib/opensolaris/uts/common -INCFLAGS+= -I${SRCDIR}/sys/cddl/contrib/opensolaris/uts/common/fs/zfs -INCFLAGS+= -I${SRCDIR}/sys/cddl/contrib/opensolaris/uts/common/sys +INCFLAGS+= -I${SRCTOP}/cddl/contrib/opensolaris/lib/libzpool/common +INCFLAGS+= -I${SRCTOP}/cddl/compat/opensolaris/include +INCFLAGS+= -I${SRCTOP}/cddl/compat/opensolaris/lib/libumem +INCFLAGS+= -I${SRCTOP}/sys/cddl/compat/opensolaris +INCFLAGS+= -I${SRCTOP}/cddl/contrib/opensolaris/head +INCFLAGS+= -I${SRCTOP}/cddl/contrib/opensolaris/lib/libuutil/common +INCFLAGS+= -I${SRCTOP}/cddl/contrib/opensolaris/lib/libumem/common +INCFLAGS+= -I${SRCTOP}/cddl/contrib/opensolaris/lib/libzfs_core/common +INCFLAGS+= -I${SRCTOP}/cddl/contrib/opensolaris/lib/libzfs/common +INCFLAGS+= -I${SRCTOP}/cddl/contrib/opensolaris/lib/libnvpair +INCFLAGS+= -I${SRCTOP}/sys/cddl/contrib/opensolaris/common/zfs +INCFLAGS+= -I${SRCTOP}/sys/cddl/contrib/opensolaris/uts/common +INCFLAGS+= -I${SRCTOP}/sys/cddl/contrib/opensolaris/uts/common/fs/zfs +INCFLAGS+= -I${SRCTOP}/sys/cddl/contrib/opensolaris/uts/common/sys CFLAGS= -g -DNEED_SOLARIS_BOOLEAN ${INCFLAGS} DPADD= ${LIBDEVDCTL} ${LIBZFS} ${LIBZFS_CORE} ${LIBUTIL} ${LIBGEOM} \ ${LIBBSDXML} ${LIBSBUF} ${LIBNVPAIR} ${LIBUUTIL} LIBADD= devdctl zfs zfs_core util geom bsdxml sbuf nvpair uutil cscope: find ${.CURDIR} -type f -a \( -name "*.[ch]" -o -name "*.cc" \) \ > ${.CURDIR}/cscope.files cd ${.CURDIR} && cscope -buq ${INCFLAGS} Index: projects/clang390-import/cddl/usr.sbin/zfsd/tests/Makefile =================================================================== --- projects/clang390-import/cddl/usr.sbin/zfsd/tests/Makefile (revision 305028) +++ projects/clang390-import/cddl/usr.sbin/zfsd/tests/Makefile (revision 305029) @@ -1,45 +1,38 @@ # $FreeBSD$ 
-SRCDIR=${.CURDIR}/../../../.. .include "${.CURDIR}/../Makefile.common" -.PATH: ${.CURDIR}/.. +.PATH: ${.CURDIR:H} -TESTSDIR?= ${TESTSBASE}/cddl/usr.sbin/zfsd - PLAIN_TESTS_CXX= zfsd_unittest SRCS.zfsd_unittest:= ${SRCS:Nzfsd_main.cc} SRCS.zfsd_unittest+= libmocks.c zfsd_unittest.cc SRCS= # Use #include in test programs. -INCFLAGS+= -I${.CURDIR}/../.. +INCFLAGS+= -I${.CURDIR:H:H} .if defined(DESTDIR) INCFLAGS+= -I${DESTDIR}/usr/include LIBRARY_PATH= ${DESTDIR}/lib:${DESTDIR}/usr/lib LDFLAGS.zfsd_unittest+= -L${DESTDIR}/lib -L${DESTDIR}/usr/lib .elif defined(WORLDTMP) INCFLAGS+= -I${WORLDTMP}/usr/include LIBRARY_PATH= ${WORLDTMP}/lib:${WORLDTMP}/usr/lib LDFLAGS.zfsd_unittest+= -L${WORLDTMP}/lib -L${WORLDTMP}/usr/lib .else LIBRARY_PATH= .endif # Googletest options -LOCALBASE?= /usr/local INCFLAGS+= -I${LOCALBASE}/include -D_THREAD_SAFE -pthread LDFLAGS.zfsd_unittest+= -L${LOCALBASE}/lib -D_THREAD_SAFE -pthread LDADD.zfsd_unittest+= ${LOCALBASE}/lib/libgtest.a # GoogleMock options LDADD.zfsd_unittest+= ${LOCALBASE}/lib/libgmock.a ${LOCALBASE}/lib/libgmock_main.a # Googlemock fails if we don't have this line # https://groups.google.com/forum/#!msg/googletestframework/h8ixEPCFm0o/amwfu4xGJb0J CFLAGS.zfsd_unittest+= -DGTEST_HAS_PTHREAD - -# Install the tests -TESTSBASE?= /usr/tests .include Index: projects/clang390-import/cddl =================================================================== --- projects/clang390-import/cddl (revision 305028) +++ projects/clang390-import/cddl (revision 305029) Property changes on: projects/clang390-import/cddl ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/cddl:r305017-305028 Index: projects/clang390-import/lib/libc/sys/ptrace.2 =================================================================== --- projects/clang390-import/lib/libc/sys/ptrace.2 (revision 305028) +++ projects/clang390-import/lib/libc/sys/ptrace.2 (revision 305029) @@ -1,1056 +1,1066 @@ .\" $FreeBSD$ .\" $NetBSD: ptrace.2,v 1.2 1995/02/27 12:35:37 cgd Exp $ .\" .\" This file is in the public domain. -.Dd August 28, 2016 +.Dd August 29, 2016 .Dt PTRACE 2 .Os .Sh NAME .Nm ptrace .Nd process tracing and debugging .Sh LIBRARY .Lb libc .Sh SYNOPSIS .In sys/types.h .In sys/ptrace.h .Ft int .Fn ptrace "int request" "pid_t pid" "caddr_t addr" "int data" .Sh DESCRIPTION The .Fn ptrace system call provides tracing and debugging facilities. It allows one process (the .Em tracing process) to control another (the .Em traced process). The tracing process must first attach to the traced process, and then issue a series of .Fn ptrace system calls to control the execution of the process, as well as access process memory and register state. For the duration of the tracing session, the traced process will be .Dq re-parented , with its parent process ID (and resulting behavior) changed to the tracing process. It is permissible for a tracing process to attach to more than one other process at a time. When the tracing process has completed its work, it must detach the traced process; if a tracing process exits without first detaching all processes it has attached, those processes will be killed. .Pp Most of the time, the traced process runs normally, but when it receives a signal (see .Xr sigaction 2 ) , it stops. The tracing process is expected to notice this via .Xr wait 2 or the delivery of a .Dv SIGCHLD signal, examine the state of the stopped process, and cause it to terminate or continue as appropriate. 
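For example, a minimal tracer built on this attach/wait/resume cycle might look like the following sketch (trace_once is a hypothetical helper; most error handling is omitted and the target is assumed to be a same-UID process):

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

#include <stdio.h>

/*
 * Sketch: attach to an existing process, observe one stop, resume it,
 * and detach, forwarding the last stopping signal if there was one.
 */
static void
trace_once(pid_t pid)
{
	int status;

	if (ptrace(PT_ATTACH, pid, NULL, 0) == -1) {
		perror("PT_ATTACH");
		return;
	}
	waitpid(pid, &status, 0);		/* attach stop (SIGSTOP) */

	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0); /* resume, no signal */
	waitpid(pid, &status, 0);		/* next signal stop */

	ptrace(PT_DETACH, pid, (caddr_t)1,
	    WIFSTOPPED(status) ? WSTOPSIG(status) : 0);
}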
The signal may be a normal process signal, generated as a result of traced process behavior, or use of the .Xr kill 2 system call; alternatively, it may be generated by the tracing facility as a result of attaching, stepping by the tracing process, or an event in the traced process. The tracing process may choose to intercept the signal, using it to observe process behavior (such as .Dv SIGTRAP ) , or forward the signal to the process if appropriate. The .Fn ptrace system call is the mechanism by which all this happens. .Pp A traced process may report additional signal stops corresponding to events in the traced process. These additional signal stops are reported as .Dv SIGTRAP or .Dv SIGSTOP signals. The tracing process can use the .Dv PT_LWPINFO request to determine which events are associated with a .Dv SIGTRAP or .Dv SIGSTOP signal. Note that multiple events may be associated with a single signal. For example, events indicated by the .Dv PL_FLAG_BORN , .Dv PL_FLAG_FORKED , and .Dv PL_FLAG_EXEC flags are also reported as a system call exit event .Pq Dv PL_FLAG_SCX . The signal stop for a new child process enabled via .Dv PTRACE_FORK will report a .Dv SIGSTOP signal. All other additional signal stops use .Dv SIGTRAP . .Pp Each traced process has a tracing event mask. An event in the traced process only reports a signal stop if the corresponding flag is set in the tracing event mask. The current set of tracing event flags include: -.Bl -tag -width ".Dv PTRACE_SYSCALL" +.Bl -tag -width "Dv PTRACE_SYSCALL" .It Dv PTRACE_EXEC Report a stop for a successful invocation of .Xr execve 2 . This event is indicated by the .Dv PL_FLAG_EXEC flag in the .Va pl_flags member of .Vt "struct ptrace_lwpinfo" . .It Dv PTRACE_SCE Report a stop on each system call entry. This event is indicated by the .Dv PL_FLAG_SCE flag in the .Va pl_flags member of .Vt "struct ptrace_lwpinfo" . .It Dv PTRACE_SCX Report a stop on each system call exit. This event is indicated by the .Dv PL_FLAG_SCX flag in the .Va pl_flags member of .Vt "struct ptrace_lwpinfo" . .It Dv PTRACE_SYSCALL Report stops for both system call entry and exit. .It Dv PTRACE_FORK This event flag controls tracing for new child processes of a traced process. .Pp When this event flag is enabled, new child processes will enable tracing and stop before executing their first instruction. The new child process will include the .Dv PL_FLAG_CHILD flag in the .Va pl_flags member of .Vt "struct ptrace_lwpinfo" . The traced process will report a stop that includes the .Dv PL_FLAG_FORKED flag. The process ID of the new child process will also be present in the .Va pl_child_pid member of .Vt "struct ptrace_lwpinfo" . If the new child process was created via .Xr vfork 2 , the traced process's stop will also include the .Dv PL_FLAG_VFORKED flag. Note that new child processes will be attached with the default tracing event mask; they do not inherit the event mask of the traced process. .Pp When this event flag is not enabled, new child processes will execute without tracing enabled. .It Dv PTRACE_LWP This event flag controls tracing of LWP .Pq kernel thread creation and destruction. When this event is enabled, new LWPs will stop and report an event with .Dv PL_FLAG_BORN set before executing their first instruction, and exiting LWPs will stop and report an event with .Dv PL_FLAG_EXITED set before completing their termination. 
.Pp Note that new processes do not report an event for the creation of their initial thread, and exiting processes do not report an event for the termination of the last thread. .It Dv PTRACE_VFORK Report a stop event when a parent process resumes after a .Xr vfork 2 . .Pp When a thread in the traced process creates a new child process via .Xr vfork 2 , the stop that reports .Dv PL_FLAG_FORKED and .Dv PL_FLAG_SCX occurs just after the child process is created, but before the thread waits for the child process to stop sharing process memory. If a debugger is not tracing the new child process, it must ensure that no breakpoints are enabled in the shared process memory before detaching from the new child process. This implies that no breakpoints can be enabled in the parent process either. .Pp The .Dv PTRACE_VFORK flag enables a new stop that indicates when the new child process stops sharing the process memory of the parent process. A debugger can reinsert breakpoints in the parent process and resume it in response to this event. This event is indicated by setting the .Dv PL_FLAG_VFORK_DONE flag. .El .Pp The default tracing event mask when attaching to a process via .Dv PT_ATTACH , .Dv PT_TRACE_ME , or .Dv PTRACE_FORK includes only .Dv PTRACE_EXEC events. All other event flags are disabled. .Pp The .Fa request argument specifies what operation is being performed; the meaning of the rest of the arguments depends on the operation, but except for one special case noted below, all .Fn ptrace calls are made by the tracing process, and the .Fa pid argument specifies the process ID of the traced process or a corresponding thread ID. The .Fa request argument can be: -.Bl -tag -width 12n +.Bl -tag -width "Dv PT_GET_EVENT_MASK" .It Dv PT_TRACE_ME This request is the only one used by the traced process; it declares that the process expects to be traced by its parent. All the other arguments are ignored. (If the parent process does not expect to trace the child, it will probably be rather confused by the results; once the traced process stops, it cannot be made to continue except via .Fn ptrace . ) When a process has used this request and calls .Xr execve 2 or any of the routines built on it (such as .Xr execv 3 ) , it will stop before executing the first instruction of the new image. Also, any setuid or setgid bits on the executable being executed will be ignored. If the child was created by the .Xr vfork 2 system call or an .Xr rfork 2 call with the .Dv RFMEM flag specified, the debugging events are reported to the parent only after .Xr execve 2 is executed. .It Dv PT_READ_I , Dv PT_READ_D These requests read a single .Vt int of data from the traced process's address space. Traditionally, .Fn ptrace has allowed for machines with distinct address spaces for instruction and data, which is why there are two requests: conceptually, .Dv PT_READ_I reads from the instruction space and .Dv PT_READ_D reads from the data space. In the current .Fx implementation, these two requests are completely identical. The .Fa addr argument specifies the address (in the traced process's virtual address space) at which the read is to be done. This address does not have to meet any alignment constraints. The value read is returned as the return value from .Fn ptrace . .It Dv PT_WRITE_I , Dv PT_WRITE_D These requests parallel .Dv PT_READ_I and .Dv PT_READ_D , except that they write rather than read. The .Fa data argument supplies the value to be written.
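As an illustration, here is a hypothetical helper (bump_word) combining these two requests; it uses the errno convention discussed under RETURN VALUES below, since PT_READ_D returns the value read and \-1 is therefore ambiguous:

#include <sys/types.h>
#include <sys/ptrace.h>

#include <errno.h>

/*
 * Sketch: read one int from the traced process and write it back
 * incremented.  The address is assumed to be valid in the target.
 */
static int
bump_word(pid_t pid, caddr_t addr)
{
	int val;

	errno = 0;
	val = ptrace(PT_READ_D, pid, addr, 0);
	if (val == -1 && errno != 0)
		return (-1);		/* a real error, not a read -1 */
	return (ptrace(PT_WRITE_D, pid, addr, val + 1));
}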
.It Dv PT_IO This request allows reading and writing arbitrary amounts of data in the traced process's address space. The .Fa addr argument specifies a pointer to a .Vt "struct ptrace_io_desc" , which is defined as follows: .Bd -literal struct ptrace_io_desc { int piod_op; /* I/O operation */ void *piod_offs; /* child offset */ void *piod_addr; /* parent offset */ size_t piod_len; /* request length */ }; /* * Operations in piod_op. */ #define PIOD_READ_D 1 /* Read from D space */ #define PIOD_WRITE_D 2 /* Write to D space */ #define PIOD_READ_I 3 /* Read from I space */ #define PIOD_WRITE_I 4 /* Write to I space */ .Ed .Pp The .Fa data argument is ignored. The actual number of bytes read or written is stored in .Va piod_len upon return. .It Dv PT_CONTINUE The traced process continues execution. The .Fa addr argument is an address specifying the place where execution is to be resumed (a new value for the program counter), or .Po Vt caddr_t Pc Ns 1 to indicate that execution is to pick up where it left off. The .Fa data argument provides a signal number to be delivered to the traced process as it resumes execution, or 0 if no signal is to be sent. .It Dv PT_STEP The traced process is single stepped one instruction. The .Fa addr argument should be passed .Po Vt caddr_t Pc Ns 1 . The .Fa data argument provides a signal number to be delivered to the traced process as it resumes execution, or 0 if no signal is to be sent. .It Dv PT_KILL The traced process terminates, as if .Dv PT_CONTINUE had been used with .Dv SIGKILL given as the signal to be delivered. .It Dv PT_ATTACH This request allows a process to gain control of an otherwise unrelated process and begin tracing it. It does not need any cooperation from the to-be-traced process. In this case, .Fa pid specifies the process ID of the to-be-traced process, and the other two arguments are ignored. This request requires that the target process must have the same real UID as the tracing process, and that it must not be executing a setuid or setgid executable. (If the tracing process is running as root, these restrictions do not apply.) The tracing process will see the newly-traced process stop and may then control it as if it had been traced all along. .It Dv PT_DETACH This request is like PT_CONTINUE, except that it does not allow specifying an alternate place to continue execution, and after it succeeds, the traced process is no longer traced and continues execution normally. .It Dv PT_GETREGS This request reads the traced process's machine registers into the .Do .Vt "struct reg" .Dc (defined in .In machine/reg.h ) pointed to by .Fa addr . .It Dv PT_SETREGS This request is the converse of .Dv PT_GETREGS ; it loads the traced process's machine registers from the .Do .Vt "struct reg" .Dc (defined in .In machine/reg.h ) pointed to by .Fa addr . .It Dv PT_GETFPREGS This request reads the traced process's floating-point registers into the .Do .Vt "struct fpreg" .Dc (defined in .In machine/reg.h ) pointed to by .Fa addr . .It Dv PT_SETFPREGS This request is the converse of .Dv PT_GETFPREGS ; it loads the traced process's floating-point registers from the .Do .Vt "struct fpreg" .Dc (defined in .In machine/reg.h ) pointed to by .Fa addr . .It Dv PT_GETDBREGS This request reads the traced process's debug registers into the .Do .Vt "struct dbreg" .Dc (defined in .In machine/reg.h ) pointed to by .Fa addr . 
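To make the PT_IO request above concrete, a hypothetical helper (read_mem) that copies a block of the traced process's memory in a single call might look like this:

#include <sys/types.h>
#include <sys/ptrace.h>

#include <string.h>

/*
 * Sketch: bulk read from the traced process's data space, avoiding
 * a word-by-word PT_READ_D loop.
 */
static ssize_t
read_mem(pid_t pid, void *remote, void *local, size_t len)
{
	struct ptrace_io_desc piod;

	memset(&piod, 0, sizeof(piod));
	piod.piod_op = PIOD_READ_D;	/* read from the data space */
	piod.piod_offs = remote;	/* offset in the traced process */
	piod.piod_addr = local;		/* buffer in the tracing process */
	piod.piod_len = len;
	if (ptrace(PT_IO, pid, (caddr_t)&piod, 0) == -1)
		return (-1);
	return ((ssize_t)piod.piod_len); /* bytes actually transferred */
}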
.It Dv PT_SETDBREGS This request is the converse of .Dv PT_GETDBREGS ; it loads the traced process's debug registers from the .Do .Vt "struct dbreg" .Dc (defined in .In machine/reg.h ) pointed to by .Fa addr . .It Dv PT_LWPINFO This request can be used to obtain information about the kernel thread, also known as a light-weight process, that caused the traced process to stop. The .Fa addr argument specifies a pointer to a .Vt "struct ptrace_lwpinfo" , which is defined as follows: .Bd -literal struct ptrace_lwpinfo { lwpid_t pl_lwpid; int pl_event; int pl_flags; sigset_t pl_sigmask; sigset_t pl_siglist; siginfo_t pl_siginfo; char pl_tdname[MAXCOMLEN + 1]; pid_t pl_child_pid; u_int pl_syscall_code; u_int pl_syscall_narg; }; .Ed .Pp The .Fa data argument is to be set to the size of the structure known to the caller. This allows the structure to grow without affecting older programs. .Pp The fields in the .Vt "struct ptrace_lwpinfo" have the following meaning: .Bl -tag -width indent -compact -.It pl_lwpid +.It Va pl_lwpid LWP id of the thread -.It pl_event +.It Va pl_event Event that caused the stop. -Currently defined events are -.Bl -tag -width indent -compact -.It PL_EVENT_NONE +Currently defined events are: +.Bl -tag -width "Dv PL_EVENT_SIGNAL" -compact +.It Dv PL_EVENT_NONE No reason given -.It PL_EVENT_SIGNAL +.It Dv PL_EVENT_SIGNAL Thread stopped due to the pending signal .El -.It pl_flags +.It Va pl_flags Flags that specify additional details about the observed stop. Currently defined flags are: .Bl -tag -width indent -compact -.It PL_FLAG_SCE +.It Dv PL_FLAG_SCE The thread stopped due to system call entry, right after the kernel is entered. The debugger may examine syscall arguments that are stored in memory and registers according to the ABI of the current process, and modify them, if needed. -.It PL_FLAG_SCX +.It Dv PL_FLAG_SCX The thread stopped immediately before the system call returns to usermode. The debugger may examine system call return values in the ABI-defined registers and/or memory. -.It PL_FLAG_EXEC +.It Dv PL_FLAG_EXEC When .Dv PL_FLAG_SCX is set, this flag may be additionally specified to indicate that the program being executed by the debuggee process has been changed by the successful execution of a system call from the .Xr execve 2 family. -.It PL_FLAG_SI +.It Dv PL_FLAG_SI Indicates that the .Va pl_siginfo member of .Vt "struct ptrace_lwpinfo" contains valid information. -.It PL_FLAG_FORKED +.It Dv PL_FLAG_FORKED Indicates that the process is returning from a call to .Xr fork 2 that created a new child process. The process identifier of the new process is available in the .Va pl_child_pid member of .Vt "struct ptrace_lwpinfo" . -.It PL_FLAG_CHILD +.It Dv PL_FLAG_CHILD This flag is set for the first event reported from a new child which is automatically attached when .Dv PTRACE_FORK is enabled. -.It PL_FLAG_BORN +.It Dv PL_FLAG_BORN This flag is set for the first event reported from a new LWP when .Dv PTRACE_LWP is enabled. It is reported along with .Dv PL_FLAG_SCX . -.It PL_FLAG_EXITED +.It Dv PL_FLAG_EXITED This flag is set for the last event reported by an exiting LWP when .Dv PTRACE_LWP is enabled. Note that this event is not reported when the last LWP in a process exits. The termination of the last thread is reported via a normal process exit event. -.It PL_FLAG_VFORKED +.It Dv PL_FLAG_VFORKED Indicates that the thread is returning from a call to .Xr vfork 2 that created a new child process. This flag is set in addition to .Dv PL_FLAG_FORKED .
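A hypothetical sketch of consuming these flags: the tracer (follow_forks, a made-up helper with minimal error handling) enables PTRACE_FORK via the PT_FOLLOW_FORK request, described later in this list, and then inspects each stop with PT_LWPINFO:

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

#include <stdio.h>

/*
 * Sketch: follow forks in an already-attached, stopped target and
 * report each new child.
 */
static void
follow_forks(pid_t pid)
{
	struct ptrace_lwpinfo pl;
	int status;

	ptrace(PT_FOLLOW_FORK, pid, NULL, 1);	/* sets PTRACE_FORK */
	for (;;) {
		ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);
		if (waitpid(pid, &status, 0) == -1 || !WIFSTOPPED(status))
			break;
		ptrace(PT_LWPINFO, pid, (caddr_t)&pl, sizeof(pl));
		if (pl.pl_flags & PL_FLAG_FORKED)
			printf("forked child %d\n", (int)pl.pl_child_pid);
	}
}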
-.It PL_FLAG_VFORK_DONE +.It Dv PL_FLAG_VFORK_DONE Indicates that the thread has resumed after a child process created via .Xr vfork 2 has stopped sharing its address space with the traced process. .El -.It pl_sigmask +.It Va pl_sigmask The current signal mask of the LWP -.It pl_siglist +.It Va pl_siglist The current pending set of signals for the LWP. Note that signals delivered to the process do not appear on an LWP's siglist until the thread is selected for delivery. -.It pl_siginfo +.It Va pl_siginfo The siginfo that accompanies the pending signal. Only valid for a .Dv PL_EVENT_SIGNAL stop when .Dv PL_FLAG_SI is set in .Va pl_flags . -.It pl_tdname +.It Va pl_tdname The name of the thread. -.It pl_child_pid +.It Va pl_child_pid The process identifier of the new child process. Only valid for a .Dv PL_EVENT_SIGNAL stop when .Dv PL_FLAG_FORKED is set in .Va pl_flags . -.It pl_syscall_code +.It Va pl_syscall_code The ABI-specific identifier of the current system call. Note that for indirect system calls this field reports the indirected system call. Only valid when .Dv PL_FLAG_SCE or .Dv PL_FLAG_SCX is set in .Va pl_flags . -.It pl_syscall_narg +.It Va pl_syscall_narg The number of arguments passed to the current system call, not counting the system call identifier. Note that for indirect system calls this field reports the arguments passed to the indirected system call. Only valid when .Dv PL_FLAG_SCE or .Dv PL_FLAG_SCX is set in .Va pl_flags . .El -.It PT_GETNUMLWPS +.It Dv PT_GETNUMLWPS This request returns the number of kernel threads associated with the traced process. -.It PT_GETLWPLIST +.It Dv PT_GETLWPLIST This request can be used to get the current thread list. A pointer to an array of type .Vt lwpid_t should be passed in .Fa addr , with the array size specified by .Fa data . The return value from .Fn ptrace is the count of array entries filled in. -.It PT_SETSTEP +.It Dv PT_SETSTEP This request will turn on single stepping of the specified process. -.It PT_CLEARSTEP +.It Dv PT_CLEARSTEP This request will turn off single stepping of the specified process. -.It PT_SUSPEND +.It Dv PT_SUSPEND This request will suspend the specified thread. -.It PT_RESUME +.It Dv PT_RESUME This request will resume the specified thread. -.It PT_TO_SCE +.It Dv PT_TO_SCE This request will set the .Dv PTRACE_SCE event flag to trace all future system call entries and continue the process. The .Fa addr and .Fa data arguments are used the same as for .Dv PT_CONTINUE . -.It PT_TO_SCX +.It Dv PT_TO_SCX This request will set the .Dv PTRACE_SCX event flag to trace all future system call exits and continue the process. The .Fa addr and .Fa data arguments are used the same as for .Dv PT_CONTINUE . -.It PT_SYSCALL +.It Dv PT_SYSCALL This request will set the .Dv PTRACE_SYSCALL event flag to trace all future system call entries and exits and continue the process. The .Fa addr and .Fa data arguments are used the same as for .Dv PT_CONTINUE . -.It PT_FOLLOW_FORK +.It Dv PT_FOLLOW_FORK This request controls tracing for new child processes of a traced process. If .Fa data is non-zero, .Dv PTRACE_FORK is set in the traced process's event tracing mask. If .Fa data is zero, .Dv PTRACE_FORK is cleared from the traced process's event tracing mask. -.It PT_LWP_EVENTS +.It Dv PT_LWP_EVENTS This request controls tracing of LWP creation and destruction. If .Fa data is non-zero, .Dv PTRACE_LWP is set in the traced process's event tracing mask.
If .Fa data is zero, .Dv PTRACE_LWP is cleared from the traced process's event tracing mask. -.It PT_GET_EVENT_MASK +.It Dv PT_GET_EVENT_MASK This request reads the traced process's event tracing mask into the integer pointed to by .Fa addr . The size of the integer must be passed in .Fa data . -.It PT_SET_EVENT_MASK +.It Dv PT_SET_EVENT_MASK This request sets the traced process's event tracing mask from the integer pointed to by .Fa addr . The size of the integer must be passed in .Fa data . -.It PT_VM_TIMESTAMP +.It Dv PT_VM_TIMESTAMP This request returns the generation number or timestamp of the memory map of the traced process as the return value from .Fn ptrace . This provides a low-cost way for the tracing process to determine if the VM map changed since the last time this request was made. -.It PT_VM_ENTRY +.It Dv PT_VM_ENTRY This request is used to iterate over the entries of the VM map of the traced process. The .Fa addr argument specifies a pointer to a .Vt "struct ptrace_vm_entry" , which is defined as follows: .Bd -literal struct ptrace_vm_entry { int pve_entry; int pve_timestamp; u_long pve_start; u_long pve_end; u_long pve_offset; u_int pve_prot; u_int pve_pathlen; long pve_fileid; uint32_t pve_fsid; char *pve_path; }; .Ed .Pp The first entry is returned by setting .Va pve_entry to zero. Subsequent entries are returned by leaving .Va pve_entry unmodified from the value returned by previous requests. The .Va pve_timestamp field can be used to detect changes to the VM map while iterating over the entries. The tracing process can then take appropriate action, such as restarting. By setting .Va pve_pathlen to a non-zero value on entry, the pathname of the backing object is returned in the buffer pointed to by .Va pve_path , provided the entry is backed by a vnode. The .Va pve_pathlen field is updated with the actual length of the pathname (including the terminating null character). The .Va pve_offset field is the offset within the backing object at which the range starts. The range is located in the VM space at .Va pve_start and extends up to .Va pve_end (inclusive). .Pp The .Fa data argument is ignored. .El .Sh x86 MACHINE-SPECIFIC REQUESTS .Bl -tag -width "Dv PT_GETXSTATE_INFO" .It Dv PT_GETXMMREGS Copy the XMM FPU state into the buffer pointed to by the argument .Fa addr . The buffer has the same layout as the 32-bit save buffer for the machine instruction .Dv FXSAVE . .Pp This request is only valid for i386 programs, both on native 32-bit systems and on amd64 kernels. For 64-bit amd64 programs, the XMM state is reported as part of the FPU state returned by the .Dv PT_GETFPREGS request. .Pp The .Fa data argument is ignored. .It Dv PT_SETXMMREGS Load the XMM FPU state for the thread from the buffer pointed to by the argument .Fa addr . The buffer has the same layout as the 32-bit load buffer for the machine instruction .Dv FXRSTOR . .Pp As with .Dv PT_GETXMMREGS, this request is only valid for i386 programs. .Pp The .Fa data argument is ignored. .It Dv PT_GETXSTATE_INFO Report which XSAVE FPU extensions are supported by the CPU and allowed in userspace programs. The .Fa addr argument must point to a variable of type .Vt struct ptrace_xstate_info , which contains the information on the request return. .Vt struct ptrace_xstate_info is defined as follows: .Bd -literal struct ptrace_xstate_info { uint64_t xsave_mask; uint32_t xsave_len; }; .Ed The .Dv xsave_mask field is a bitmask of the currently enabled extensions. 
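Stepping back to the PT_VM_ENTRY iteration protocol described above, a hypothetical walker over the target's VM map might look like this (dump_vm_map and the path buffer size are assumptions; the target is assumed to be attached and stopped):

#include <sys/types.h>
#include <sys/ptrace.h>

#include <errno.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch: iterate the VM map of a stopped traced process.  pve_entry
 * starts at zero and is left untouched between calls; the kernel
 * reports the end of the map with ENOENT.
 */
static void
dump_vm_map(pid_t pid)
{
	struct ptrace_vm_entry pve;
	char path[1024];

	memset(&pve, 0, sizeof(pve));
	for (;;) {
		path[0] = '\0';
		pve.pve_path = path;
		pve.pve_pathlen = sizeof(path);
		if (ptrace(PT_VM_ENTRY, pid, (caddr_t)&pve, 0) == -1) {
			if (errno != ENOENT)
				perror("PT_VM_ENTRY");
			break;
		}
		printf("%#lx-%#lx %s\n", pve.pve_start, pve.pve_end, path);
	}
}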
The meaning of the bits is defined in the Intel and AMD processor documentation. The .Dv xsave_len field reports the length of the XSAVE area for storing the hardware state for the currently enabled extensions, in the format defined by the x86 .Dv XSAVE machine instruction. .Pp The .Fa data argument value must be equal to the size of the .Vt struct ptrace_xstate_info . .It Dv PT_GETXSTATE Return the content of the XSAVE area for the thread. The .Fa addr argument points to the buffer where the content is copied, and the .Fa data argument specifies the size of the buffer. The kernel copies out as much content as allowed by the buffer size. The buffer layout is specified by the layout of the save area for the .Dv XSAVE machine instruction. .It Dv PT_SETXSTATE Load the XSAVE state for the thread from the buffer specified by the .Fa addr pointer. The buffer size is passed in the .Fa data argument. The buffer must be at least as large as the .Vt struct savefpu (defined in .Pa x86/fpu.h ) to allow the complete x87 FPU and XMM state load. It must not be larger than the XSAVE state length, as reported by the .Dv xsave_len field from the .Vt struct ptrace_xstate_info of the .Dv PT_GETXSTATE_INFO request. The layout of the buffer is identical to the layout of the load area for the .Dv XRSTOR machine instruction. .It Dv PT_GETFSBASE Return the value of the base used when doing segmented memory addressing using the %fs segment register. The .Fa addr argument points to an .Vt unsigned long variable where the base value is stored. .Pp The .Fa data argument is ignored. .It Dv PT_GETGSBASE Like the .Dv PT_GETFSBASE request, but returns the base for the %gs segment register. .It Dv PT_SETFSBASE Set the base for the %fs segment register to the value pointed to by the .Fa addr argument. .Fa addr must point to the .Vt unsigned long variable containing the new base. .Pp The .Fa data argument is ignored. .It Dv PT_SETGSBASE Like the .Dv PT_SETFSBASE request, but sets the base for the %gs segment register. .El .Sh PowerPC MACHINE-SPECIFIC REQUESTS .Bl -tag -width "Dv PT_SETVRREGS" .It Dv PT_GETVRREGS Return the thread's .Dv ALTIVEC machine state in the buffer pointed to by .Fa addr . .Pp The .Fa data argument is ignored. .It Dv PT_SETVRREGS Set the thread's .Dv ALTIVEC machine state from the buffer pointed to by .Fa addr . .Pp The .Fa data argument is ignored. .El .Pp Additionally, other machine-specific requests can exist. .Sh RETURN VALUES +Most requests return 0 on success and \-1 on error. Some requests can cause .Fn ptrace to return \-1 -as a non-error value; to disambiguate, +as a non-error value; among them are +.Dv PT_READ_I +and +.Dv PT_READ_D , +which return the value read from process memory on success. +To disambiguate, .Va errno -is set to 0 in the libc wrapper for the +can be set to 0 before the call and checked afterwards. +.Pp +The current .Fn ptrace -system call and -.Fn ptrace -callers can reliably check +implementation always sets .Va errno -for non-zero value afterwards. +to 0 before calling into the kernel, both for historical reasons and for +consistency with other operating systems. +It is recommended to assign zero to +.Va errno +explicitly for forward compatibility. .Sh ERRORS The .Fn ptrace system call may fail if: .Bl -tag -width Er .It Bq Er ESRCH .Bl -bullet -compact .It No process having the specified process ID exists. .El .It Bq Er EINVAL .Bl -bullet -compact .It A process attempted to use .Dv PT_ATTACH on itself. .It The .Fa request argument was not one of the legal requests.
.It The signal number (in .Fa data ) to .Dv PT_CONTINUE was neither 0 nor a legal signal number. .It .Dv PT_GETREGS , .Dv PT_SETREGS , .Dv PT_GETFPREGS , .Dv PT_SETFPREGS , .Dv PT_GETDBREGS , or .Dv PT_SETDBREGS was attempted on a process with no valid register set. (This is normally true only of system processes.) .It .Dv PT_VM_ENTRY was given an invalid value for .Fa pve_entry . This can also be caused by changes to the VM map of the process. .It The size (in .Fa data ) provided to .Dv PT_LWPINFO was less than or equal to zero, or larger than the .Vt ptrace_lwpinfo structure known to the kernel. .It The size (in .Fa data ) provided to the x86-specific .Dv PT_GETXSTATE_INFO request was not equal to the size of the .Vt struct ptrace_xstate_info . .It The size (in .Fa data ) provided to the x86-specific .Dv PT_SETXSTATE request was less than the size of the x87 plus the XMM save area. .It The size (in .Fa data ) provided to the x86-specific .Dv PT_SETXSTATE request was larger than returned in the .Dv xsave_len member of the .Vt struct ptrace_xstate_info from the .Dv PT_GETXSTATE_INFO request. .It The base value, provided to the amd64-specific requests .Dv PT_SETFSBASE or .Dv PT_SETGSBASE , pointed outside of the valid user address space. This error will not occur in 32-bit programs. .El .It Bq Er EBUSY .Bl -bullet -compact .It .Dv PT_ATTACH was attempted on a process that was already being traced. .It A request attempted to manipulate a process that was being traced by some process other than the one making the request. .It A request (other than .Dv PT_ATTACH ) specified a process that was not stopped. .El .It Bq Er EPERM .Bl -bullet -compact .It A request (other than .Dv PT_ATTACH ) attempted to manipulate a process that was not being traced at all. .It An attempt was made to use .Dv PT_ATTACH on a process in violation of the requirements listed under .Dv PT_ATTACH above. .El .It Bq Er ENOENT .Bl -bullet -compact .It .Dv PT_VM_ENTRY previously returned the last entry of the memory map. No more entries exist. .El .It Bq Er ENAMETOOLONG .Bl -bullet -compact .It .Dv PT_VM_ENTRY cannot return the pathname of the backing object because the buffer is not big enough. .Fa pve_pathlen holds the minimum buffer size required on return. .El .El .Sh SEE ALSO .Xr execve 2 , .Xr sigaction 2 , .Xr wait 2 , .Xr execv 3 , .Xr i386_clr_watch 3 , .Xr i386_set_watch 3 .Sh HISTORY The .Fn ptrace function appeared in .At v7 . Index: projects/clang390-import/sys/boot/efi/libefi/efi_console.c =================================================================== --- projects/clang390-import/sys/boot/efi/libefi/efi_console.c (revision 305028) +++ projects/clang390-import/sys/boot/efi/libefi/efi_console.c (revision 305029) @@ -1,498 +1,518 @@ /*- * Copyright (c) 2000 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include "bootstrap.h" static SIMPLE_TEXT_OUTPUT_INTERFACE *conout; static SIMPLE_INPUT_INTERFACE *conin; #ifdef TERM_EMU #define DEFAULT_FGCOLOR EFI_LIGHTGRAY #define DEFAULT_BGCOLOR EFI_BLACK #define MAXARGS 8 static int args[MAXARGS], argc; static int fg_c, bg_c, curx, cury; static int esc; void get_pos(int *x, int *y); void curs_move(int *_x, int *_y, int x, int y); static void CL(int); void HO(void); void end_term(void); #endif +static EFI_INPUT_KEY key_cur; +static int key_pending; + static void efi_cons_probe(struct console *); static int efi_cons_init(int); void efi_cons_putchar(int); int efi_cons_getchar(void); void efi_cons_efiputchar(int); int efi_cons_poll(void); struct console efi_console = { "efi", "EFI console", C_WIDEOUT, efi_cons_probe, efi_cons_init, efi_cons_putchar, efi_cons_getchar, efi_cons_poll }; #ifdef TERM_EMU /* Get cursor position. */ void get_pos(int *x, int *y) { *x = conout->Mode->CursorColumn; *y = conout->Mode->CursorRow; } /* Move cursor to x rows and y cols (0-based). */ void curs_move(int *_x, int *_y, int x, int y) { conout->SetCursorPosition(conout, x, y); if (_x != NULL) *_x = conout->Mode->CursorColumn; if (_y != NULL) *_y = conout->Mode->CursorRow; } /* Clear internal state of the terminal emulation code. */ void end_term(void) { esc = 0; argc = -1; } #endif static void efi_cons_probe(struct console *cp) { conout = ST->ConOut; conin = ST->ConIn; cp->c_flags |= C_PRESENTIN | C_PRESENTOUT; } static int efi_cons_init(int arg) { #ifdef TERM_EMU conout->SetAttribute(conout, EFI_TEXT_ATTR(DEFAULT_FGCOLOR, DEFAULT_BGCOLOR)); end_term(); get_pos(&curx, &cury); curs_move(&curx, &cury, curx, cury); fg_c = DEFAULT_FGCOLOR; bg_c = DEFAULT_BGCOLOR; #endif conout->EnableCursor(conout, TRUE); return 0; } static void efi_cons_rawputchar(int c) { int i; UINTN x, y; conout->QueryMode(conout, conout->Mode->Mode, &x, &y); if (c == '\t') /* XXX lame tab expansion */ for (i = 0; i < 8; i++) efi_cons_rawputchar(' '); else { #ifndef TERM_EMU if (c == '\n') efi_cons_efiputchar('\r'); efi_cons_efiputchar(c); #else switch (c) { case '\r': curx = 0; curs_move(&curx, &cury, curx, cury); return; case '\n': cury++; if (cury >= y) { efi_cons_efiputchar('\n'); cury--; } else curs_move(&curx, &cury, curx, cury); return; case '\b': if (curx > 0) { curx--; curs_move(&curx, &cury, curx, cury); } return; default: efi_cons_efiputchar(c); curx++; if (curx > x-1) { curx = 0; cury++; } if (cury > y-1) { curx = 0; cury--; } } curs_move(&curx, &cury, curx, cury); #endif } } #ifdef TERM_EMU /* Gracefully exit ESC-sequence processing in case of misunderstanding. 
*/ static void bail_out(int c) { char buf[16], *ch; int i; if (esc) { efi_cons_rawputchar('\033'); if (esc != '\033') efi_cons_rawputchar(esc); for (i = 0; i <= argc; ++i) { sprintf(buf, "%d", args[i]); ch = buf; while (*ch) efi_cons_rawputchar(*ch++); } } efi_cons_rawputchar(c); end_term(); } /* Clear display from current position to end of screen. */ static void CD(void) { int i; UINTN x, y; get_pos(&curx, &cury); if (curx == 0 && cury == 0) { conout->ClearScreen(conout); end_term(); return; } conout->QueryMode(conout, conout->Mode->Mode, &x, &y); CL(0); /* clear current line from cursor to end */ for (i = cury + 1; i < y-1; i++) { curs_move(NULL, NULL, 0, i); CL(0); } curs_move(NULL, NULL, curx, cury); end_term(); } /* * Absolute cursor move to args[0] rows and args[1] columns * (the coordinates are 1-based). */ static void CM(void) { if (args[0] > 0) args[0]--; if (args[1] > 0) args[1]--; curs_move(&curx, &cury, args[1], args[0]); end_term(); } /* Home cursor (left top corner), also called from mode command. */ void HO(void) { argc = 1; args[0] = args[1] = 1; CM(); } /* Clear line from current position to end of line */ static void CL(int direction) { int i, len; UINTN x, y; CHAR16 *line; conout->QueryMode(conout, conout->Mode->Mode, &x, &y); switch (direction) { case 0: /* from cursor to end */ len = x - curx + 1; break; case 1: /* from beginning to cursor */ len = curx; break; case 2: /* entire line */ len = x; break; default: /* NOTREACHED */ __unreachable(); } if (cury == y - 1) len--; line = malloc(len * sizeof (CHAR16)); if (line == NULL) { printf("out of memory\n"); return; } for (i = 0; i < len; i++) line[i] = ' '; line[len-1] = 0; if (direction != 0) curs_move(NULL, NULL, 0, cury); conout->OutputString(conout, line); /* restore cursor position */ curs_move(NULL, NULL, curx, cury); free(line); end_term(); } static void get_arg(int c) { if (argc < 0) argc = 0; args[argc] *= 10; args[argc] += c - '0'; } /* Emulate basic capabilities of cons25 terminal */ static void efi_term_emu(int c) { static int ansi_col[] = { 0, 4, 2, 6, 1, 5, 3, 7 }; int t, i; switch (esc) { case 0: switch (c) { case '\033': esc = c; break; default: efi_cons_rawputchar(c); break; } break; case '\033': switch (c) { case '[': esc = c; args[0] = 0; argc = -1; break; default: bail_out(c); break; } break; case '[': switch (c) { case ';': if (argc < 0) argc = 0; else if (argc + 1 >= MAXARGS) bail_out(c); else args[++argc] = 0; break; case 'H': /* ho = \E[H */ if (argc < 0) HO(); else if (argc == 1) CM(); else bail_out(c); break; case 'J': /* cd = \E[J */ if (argc < 0) CD(); else bail_out(c); break; case 'm': if (argc < 0) { fg_c = DEFAULT_FGCOLOR; bg_c = DEFAULT_BGCOLOR; } for (i = 0; i <= argc; ++i) { switch (args[i]) { case 0: /* back to normal */ fg_c = DEFAULT_FGCOLOR; bg_c = DEFAULT_BGCOLOR; break; case 1: /* bold */ fg_c |= 0x8; break; case 4: /* underline */ case 5: /* blink */ bg_c |= 0x8; break; case 7: /* reverse */ t = fg_c; fg_c = bg_c; bg_c = t; break; case 30: case 31: case 32: case 33: case 34: case 35: case 36: case 37: fg_c = ansi_col[args[i] - 30]; break; case 39: /* normal */ fg_c = DEFAULT_FGCOLOR; break; case 40: case 41: case 42: case 43: case 44: case 45: case 46: case 47: bg_c = ansi_col[args[i] - 40]; break; case 49: /* normal */ bg_c = DEFAULT_BGCOLOR; break; } } conout->SetAttribute(conout, EFI_TEXT_ATTR(fg_c, bg_c)); end_term(); break; default: if (isdigit(c)) get_arg(c); else bail_out(c); break; } break; default: bail_out(c); break; } } #else void HO(void) { } #endif void 
efi_cons_putchar(int c) { #ifdef TERM_EMU efi_term_emu(c); #else efi_cons_rawputchar(c); #endif } int efi_cons_getchar() { EFI_INPUT_KEY key; EFI_STATUS status; UINTN junk; - /* Try to read a key stroke. We wait for one if none is pending. */ - status = conin->ReadKeyStroke(conin, &key); - while (status == EFI_NOT_READY) { - /* Some EFI implementation (u-boot for example) do not support WaitForKey */ - if (conin->WaitForKey != NULL) - BS->WaitForEvent(1, &conin->WaitForKey, &junk); + if (key_pending) { + key = key_cur; + key_pending = 0; + } else { + /* Try to read a key stroke. We wait for one if none is pending. */ status = conin->ReadKeyStroke(conin, &key); + while (status == EFI_NOT_READY) { + /* Some EFI implementation (u-boot for example) do not support WaitForKey */ + if (conin->WaitForKey != NULL) + BS->WaitForEvent(1, &conin->WaitForKey, &junk); + status = conin->ReadKeyStroke(conin, &key); + } } + switch (key.ScanCode) { case 0x17: /* ESC */ return (0x1b); /* esc */ } /* this can return */ return (key.UnicodeChar); } int efi_cons_poll() { + EFI_INPUT_KEY key; + EFI_STATUS status; - if (conin->WaitForKey == NULL) - return (1); + if (conin->WaitForKey == NULL) { + if (key_pending) + return (1); + status = conin->ReadKeyStroke(conin, &key); + if (status == EFI_SUCCESS) { + key_cur = key; + key_pending = 1; + } + return (key_pending); + } + /* This can clear the signaled state. */ return (BS->CheckEvent(conin->WaitForKey) == EFI_SUCCESS); } /* Plain direct access to EFI OutputString(). */ void efi_cons_efiputchar(int c) { CHAR16 buf[2]; /* * translate box chars to unicode */ switch (c) { /* single frame */ case 0xb3: buf[0] = BOXDRAW_VERTICAL; break; case 0xbf: buf[0] = BOXDRAW_DOWN_LEFT; break; case 0xc0: buf[0] = BOXDRAW_UP_RIGHT; break; case 0xc4: buf[0] = BOXDRAW_HORIZONTAL; break; case 0xda: buf[0] = BOXDRAW_DOWN_RIGHT; break; case 0xd9: buf[0] = BOXDRAW_UP_LEFT; break; /* double frame */ case 0xba: buf[0] = BOXDRAW_DOUBLE_VERTICAL; break; case 0xbb: buf[0] = BOXDRAW_DOUBLE_DOWN_LEFT; break; case 0xbc: buf[0] = BOXDRAW_DOUBLE_UP_LEFT; break; case 0xc8: buf[0] = BOXDRAW_DOUBLE_UP_RIGHT; break; case 0xc9: buf[0] = BOXDRAW_DOUBLE_DOWN_RIGHT; break; case 0xcd: buf[0] = BOXDRAW_DOUBLE_HORIZONTAL; break; default: buf[0] = c; } buf[1] = 0; /* terminate string */ conout->OutputString(conout, buf); } Index: projects/clang390-import/sys/dev/ioat/ioat.c =================================================================== --- projects/clang390-import/sys/dev/ioat/ioat.c (revision 305028) +++ projects/clang390-import/sys/dev/ioat/ioat.c (revision 305029) @@ -1,2359 +1,2383 @@ /*- * Copyright (C) 2012 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #include "ioat.h" #include "ioat_hw.h" #include "ioat_internal.h" #ifndef BUS_SPACE_MAXADDR_40BIT #define BUS_SPACE_MAXADDR_40BIT 0xFFFFFFFFFFULL #endif #define IOAT_REFLK (&ioat->submit_lock) #define IOAT_SHRINK_PERIOD (10 * hz) static int ioat_probe(device_t device); static int ioat_attach(device_t device); static int ioat_detach(device_t device); static int ioat_setup_intr(struct ioat_softc *ioat); static int ioat_teardown_intr(struct ioat_softc *ioat); static int ioat3_attach(device_t device); static int ioat_start_channel(struct ioat_softc *ioat); static int ioat_map_pci_bar(struct ioat_softc *ioat); static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void ioat_interrupt_handler(void *arg); static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat); static int chanerr_to_errno(uint32_t); static void ioat_process_events(struct ioat_softc *ioat); static inline uint32_t ioat_get_active(struct ioat_softc *ioat); static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat); static void ioat_free_ring(struct ioat_softc *, uint32_t size, struct ioat_descriptor **); static void ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc); static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *, int mflags); static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags); static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index); static struct ioat_descriptor **ioat_prealloc_ring(struct ioat_softc *, uint32_t size, boolean_t need_dscr, int mflags); static int ring_grow(struct ioat_softc *, uint32_t oldorder, struct ioat_descriptor **); static int ring_shrink(struct ioat_softc *, uint32_t oldorder, struct ioat_descriptor **); static void ioat_halted_debug(struct ioat_softc *, uint32_t); static void ioat_poll_timer_callback(void *arg); static void ioat_shrink_timer_callback(void *arg); static void dump_descriptor(void *hw_desc); static void ioat_submit_single(struct ioat_softc *ioat); static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error); static int ioat_reset_hw(struct ioat_softc *ioat); static void ioat_reset_hw_task(void *, int); static void ioat_setup_sysctl(device_t device); static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS); static inline struct ioat_softc *ioat_get(struct ioat_softc *, enum ioat_ref_kind); static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind); static inline void _ioat_putn(struct ioat_softc *, uint32_t, enum ioat_ref_kind, boolean_t); static inline void ioat_putn(struct ioat_softc *, uint32_t, enum ioat_ref_kind); static inline void ioat_putn_locked(struct ioat_softc *, uint32_t, enum ioat_ref_kind); static void 
ioat_drain_locked(struct ioat_softc *); #define ioat_log_message(v, ...) do { \ if ((v) <= g_ioat_debug_level) { \ device_printf(ioat->device, __VA_ARGS__); \ } \ } while (0) MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations"); SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node"); static int g_force_legacy_interrupts; SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN, &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled"); int g_ioat_debug_level = 0; SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level, 0, "Set log level (0-3) for ioat(4). Higher is more verbose."); /* * OS <-> Driver interface structures */ static device_method_t ioat_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ioat_probe), DEVMETHOD(device_attach, ioat_attach), DEVMETHOD(device_detach, ioat_detach), DEVMETHOD_END }; static driver_t ioat_pci_driver = { "ioat", ioat_pci_methods, sizeof(struct ioat_softc), }; static devclass_t ioat_devclass; DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0); MODULE_VERSION(ioat, 1); /* * Private data structures */ static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS]; static unsigned ioat_channel_index = 0; SYSCTL_UINT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0, "Number of IOAT channels attached"); static struct _pcsid { u_int32_t type; const char *desc; } pci_ids[] = { { 0x34308086, "TBG IOAT Ch0" }, { 0x34318086, "TBG IOAT Ch1" }, { 0x34328086, "TBG IOAT Ch2" }, { 0x34338086, "TBG IOAT Ch3" }, { 0x34298086, "TBG IOAT Ch4" }, { 0x342a8086, "TBG IOAT Ch5" }, { 0x342b8086, "TBG IOAT Ch6" }, { 0x342c8086, "TBG IOAT Ch7" }, { 0x37108086, "JSF IOAT Ch0" }, { 0x37118086, "JSF IOAT Ch1" }, { 0x37128086, "JSF IOAT Ch2" }, { 0x37138086, "JSF IOAT Ch3" }, { 0x37148086, "JSF IOAT Ch4" }, { 0x37158086, "JSF IOAT Ch5" }, { 0x37168086, "JSF IOAT Ch6" }, { 0x37178086, "JSF IOAT Ch7" }, { 0x37188086, "JSF IOAT Ch0 (RAID)" }, { 0x37198086, "JSF IOAT Ch1 (RAID)" }, { 0x3c208086, "SNB IOAT Ch0" }, { 0x3c218086, "SNB IOAT Ch1" }, { 0x3c228086, "SNB IOAT Ch2" }, { 0x3c238086, "SNB IOAT Ch3" }, { 0x3c248086, "SNB IOAT Ch4" }, { 0x3c258086, "SNB IOAT Ch5" }, { 0x3c268086, "SNB IOAT Ch6" }, { 0x3c278086, "SNB IOAT Ch7" }, { 0x3c2e8086, "SNB IOAT Ch0 (RAID)" }, { 0x3c2f8086, "SNB IOAT Ch1 (RAID)" }, { 0x0e208086, "IVB IOAT Ch0" }, { 0x0e218086, "IVB IOAT Ch1" }, { 0x0e228086, "IVB IOAT Ch2" }, { 0x0e238086, "IVB IOAT Ch3" }, { 0x0e248086, "IVB IOAT Ch4" }, { 0x0e258086, "IVB IOAT Ch5" }, { 0x0e268086, "IVB IOAT Ch6" }, { 0x0e278086, "IVB IOAT Ch7" }, { 0x0e2e8086, "IVB IOAT Ch0 (RAID)" }, { 0x0e2f8086, "IVB IOAT Ch1 (RAID)" }, { 0x2f208086, "HSW IOAT Ch0" }, { 0x2f218086, "HSW IOAT Ch1" }, { 0x2f228086, "HSW IOAT Ch2" }, { 0x2f238086, "HSW IOAT Ch3" }, { 0x2f248086, "HSW IOAT Ch4" }, { 0x2f258086, "HSW IOAT Ch5" }, { 0x2f268086, "HSW IOAT Ch6" }, { 0x2f278086, "HSW IOAT Ch7" }, { 0x2f2e8086, "HSW IOAT Ch0 (RAID)" }, { 0x2f2f8086, "HSW IOAT Ch1 (RAID)" }, { 0x0c508086, "BWD IOAT Ch0" }, { 0x0c518086, "BWD IOAT Ch1" }, { 0x0c528086, "BWD IOAT Ch2" }, { 0x0c538086, "BWD IOAT Ch3" }, { 0x6f508086, "BDXDE IOAT Ch0" }, { 0x6f518086, "BDXDE IOAT Ch1" }, { 0x6f528086, "BDXDE IOAT Ch2" }, { 0x6f538086, "BDXDE IOAT Ch3" }, { 0x6f208086, "BDX IOAT Ch0" }, { 0x6f218086, "BDX IOAT Ch1" }, { 0x6f228086, "BDX IOAT Ch2" }, { 0x6f238086, "BDX IOAT Ch3" }, { 0x6f248086, "BDX IOAT Ch4" }, { 0x6f258086, "BDX IOAT Ch5" }, { 0x6f268086, "BDX IOAT Ch6" }, { 0x6f278086, "BDX IOAT Ch7" }, { 
0x6f2e8086, "BDX IOAT Ch0 (RAID)" }, { 0x6f2f8086, "BDX IOAT Ch1 (RAID)" }, { 0x00000000, NULL } }; /* * OS <-> Driver linkage functions */ static int ioat_probe(device_t device) { struct _pcsid *ep; u_int32_t type; type = pci_get_devid(device); for (ep = pci_ids; ep->type; ep++) { if (ep->type == type) { device_set_desc(device, ep->desc); return (0); } } return (ENXIO); } static int ioat_attach(device_t device) { struct ioat_softc *ioat; int error; ioat = DEVICE2SOFTC(device); ioat->device = device; error = ioat_map_pci_bar(ioat); if (error != 0) goto err; ioat->version = ioat_read_cbver(ioat); if (ioat->version < IOAT_VER_3_0) { error = ENODEV; goto err; } error = ioat3_attach(device); if (error != 0) goto err; error = pci_enable_busmaster(device); if (error != 0) goto err; error = ioat_setup_intr(ioat); if (error != 0) goto err; error = ioat_reset_hw(ioat); if (error != 0) goto err; ioat_process_events(ioat); ioat_setup_sysctl(device); ioat->chan_idx = ioat_channel_index; ioat_channel[ioat_channel_index++] = ioat; ioat_test_attach(); err: if (error != 0) ioat_detach(device); return (error); } static int ioat_detach(device_t device) { struct ioat_softc *ioat; ioat = DEVICE2SOFTC(device); ioat_test_detach(); taskqueue_drain(taskqueue_thread, &ioat->reset_task); mtx_lock(IOAT_REFLK); ioat->quiescing = TRUE; ioat->destroying = TRUE; wakeup(&ioat->quiescing); wakeup(&ioat->resetting); ioat_channel[ioat->chan_idx] = NULL; ioat_drain_locked(ioat); mtx_unlock(IOAT_REFLK); ioat_teardown_intr(ioat); callout_drain(&ioat->poll_timer); callout_drain(&ioat->shrink_timer); pci_disable_busmaster(device); if (ioat->pci_resource != NULL) bus_release_resource(device, SYS_RES_MEMORY, ioat->pci_resource_id, ioat->pci_resource); if (ioat->ring != NULL) ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring); if (ioat->comp_update != NULL) { bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map); bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update, ioat->comp_update_map); bus_dma_tag_destroy(ioat->comp_update_tag); } bus_dma_tag_destroy(ioat->hw_desc_tag); return (0); } static int ioat_teardown_intr(struct ioat_softc *ioat) { if (ioat->tag != NULL) bus_teardown_intr(ioat->device, ioat->res, ioat->tag); if (ioat->res != NULL) bus_release_resource(ioat->device, SYS_RES_IRQ, rman_get_rid(ioat->res), ioat->res); pci_release_msi(ioat->device); return (0); } static int ioat_start_channel(struct ioat_softc *ioat) { struct ioat_dma_hw_descriptor *hw_desc; struct ioat_descriptor *desc; struct bus_dmadesc *dmadesc; uint64_t status; uint32_t chanerr; int i; ioat_acquire(&ioat->dmaengine); /* Submit 'NULL' operation manually to avoid quiescing flag */ desc = ioat_get_ring_entry(ioat, ioat->head); dmadesc = &desc->bus_dmadesc; hw_desc = desc->u.dma; dmadesc->callback_fn = NULL; dmadesc->callback_arg = NULL; hw_desc->u.control_raw = 0; hw_desc->u.control_generic.op = IOAT_OP_COPY; hw_desc->u.control_generic.completion_update = 1; hw_desc->size = 8; hw_desc->src_addr = 0; hw_desc->dest_addr = 0; hw_desc->u.control.null = 1; ioat_submit_single(ioat); ioat_release(&ioat->dmaengine); for (i = 0; i < 100; i++) { DELAY(1); status = ioat_get_chansts(ioat); if (is_ioat_idle(status)) return (0); } chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); ioat_log_message(0, "could not start channel: " "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr, IOAT_CHANERR_STR); return (ENXIO); } /* * Initialize Hardware */ static int ioat3_attach(device_t device) { struct ioat_softc *ioat; struct ioat_descriptor 
**ring; struct ioat_descriptor *next; struct ioat_dma_hw_descriptor *dma_hw_desc; int i, num_descriptors; int error; uint8_t xfercap; error = 0; ioat = DEVICE2SOFTC(device); ioat->capabilities = ioat_read_dmacapability(ioat); ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities, IOAT_DMACAP_STR); xfercap = ioat_read_xfercap(ioat); ioat->max_xfer_size = 1 << xfercap; ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_SUPPORTED) != 0; if (ioat->intrdelay_supported) ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK; /* TODO: need to check DCA here if we ever do XOR/PQ */ mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF); mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF); callout_init(&ioat->poll_timer, 1); callout_init(&ioat->shrink_timer, 1); TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat); /* Establish lock order for Witness */ mtx_lock(&ioat->submit_lock); mtx_lock(&ioat->cleanup_lock); mtx_unlock(&ioat->cleanup_lock); mtx_unlock(&ioat->submit_lock); ioat->is_resize_pending = FALSE; ioat->is_submitter_processing = FALSE; ioat->is_completion_pending = FALSE; ioat->is_reset_pending = FALSE; ioat->is_channel_running = FALSE; bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL, &ioat->comp_update_tag); error = bus_dmamem_alloc(ioat->comp_update_tag, (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map); if (ioat->comp_update == NULL) return (ENOMEM); error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map, ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat, 0); if (error != 0) return (error); ioat->ring_size_order = IOAT_MIN_ORDER; num_descriptors = 1 << ioat->ring_size_order; bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0, BUS_SPACE_MAXADDR_40BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct ioat_dma_hw_descriptor), 1, sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL, &ioat->hw_desc_tag); ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT, M_ZERO | M_WAITOK); ring = ioat->ring; for (i = 0; i < num_descriptors; i++) { ring[i] = ioat_alloc_ring_entry(ioat, M_WAITOK); if (ring[i] == NULL) return (ENOMEM); ring[i]->id = i; } for (i = 0; i < num_descriptors - 1; i++) { next = ring[i + 1]; dma_hw_desc = ring[i]->u.dma; dma_hw_desc->next = next->hw_desc_bus_addr; } ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr; ioat->head = ioat->hw_head = 0; ioat->tail = 0; ioat->last_seen = 0; *ioat->comp_update = 0; return (0); } static int ioat_map_pci_bar(struct ioat_softc *ioat) { ioat->pci_resource_id = PCIR_BAR(0); ioat->pci_resource = bus_alloc_resource_any(ioat->device, SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE); if (ioat->pci_resource == NULL) { ioat_log_message(0, "unable to allocate pci resource\n"); return (ENODEV); } ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource); ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource); return (0); } static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) { struct ioat_softc *ioat = arg; KASSERT(error == 0, ("%s: error:%d", __func__, error)); ioat->comp_update_bus_addr = seg[0].ds_addr; } static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *baddr; KASSERT(error == 0, ("%s: error:%d", __func__, error)); baddr = arg; *baddr = segs->ds_addr; } /* * Interrupt setup and handlers */ static int 
ioat_setup_intr(struct ioat_softc *ioat) { uint32_t num_vectors; int error; boolean_t use_msix; boolean_t force_legacy_interrupts; use_msix = FALSE; force_legacy_interrupts = FALSE; if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) { num_vectors = 1; pci_alloc_msix(ioat->device, &num_vectors); if (num_vectors == 1) use_msix = TRUE; } if (use_msix) { ioat->rid = 1; ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, &ioat->rid, RF_ACTIVE); } else { ioat->rid = 0; ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, &ioat->rid, RF_SHAREABLE | RF_ACTIVE); } if (ioat->res == NULL) { ioat_log_message(0, "bus_alloc_resource failed\n"); return (ENOMEM); } ioat->tag = NULL; error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag); if (error != 0) { ioat_log_message(0, "bus_setup_intr failed\n"); return (error); } ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN); return (0); } static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat) { u_int32_t pciid; pciid = pci_get_devid(ioat->device); switch (pciid) { /* BWD: */ case 0x0c508086: case 0x0c518086: case 0x0c528086: case 0x0c538086: /* BDXDE: */ case 0x6f508086: case 0x6f518086: case 0x6f528086: case 0x6f538086: return (TRUE); } return (FALSE); } static void ioat_interrupt_handler(void *arg) { struct ioat_softc *ioat = arg; ioat->stats.interrupts++; ioat_process_events(ioat); } static int chanerr_to_errno(uint32_t chanerr) { if (chanerr == 0) return (0); if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0) return (EFAULT); if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0) return (EIO); /* This one is probably our fault: */ if ((chanerr & IOAT_CHANERR_NDADDERR) != 0) return (EIO); return (EIO); } static void ioat_process_events(struct ioat_softc *ioat) { struct ioat_descriptor *desc; struct bus_dmadesc *dmadesc; uint64_t comp_update, status; uint32_t completed, chanerr; boolean_t pending; int error; CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); mtx_lock(&ioat->cleanup_lock); /* * Don't run while the hardware is being reset. Reset is responsible * for blocking new work and draining & completing existing work, so * there is nothing to do until new work is queued after reset anyway. */ if (ioat->resetting_cleanup) { mtx_unlock(&ioat->cleanup_lock); return; } completed = 0; comp_update = ioat_get_chansts(ioat); CTR4(KTR_IOAT, "%s channel=%u hw_status=0x%lx last_seen=0x%lx", __func__, ioat->chan_idx, comp_update, ioat->last_seen); status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK; - while (ioat_get_active(ioat) > 0) { + if (status == ioat->last_seen) { + /* + * If we landed in process_events and nothing has been + * completed, check for a timeout due to channel halt. 
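+	 * (status carries the bus address of the last descriptor the
+	 * hardware retired; while it still equals last_seen, no new
+	 * completion has occurred.)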
+ */ + goto out; + } + + desc = ioat_get_ring_entry(ioat, ioat->tail - 1); + while (desc->hw_desc_bus_addr != status && ioat_get_active(ioat) > 0) { desc = ioat_get_ring_entry(ioat, ioat->tail); dmadesc = &desc->bus_dmadesc; CTR4(KTR_IOAT, "channel=%u completing desc %u ok cb %p(%p)", ioat->chan_idx, ioat->tail, dmadesc->callback_fn, dmadesc->callback_arg); if (dmadesc->callback_fn != NULL) dmadesc->callback_fn(dmadesc->callback_arg, 0); completed++; ioat->tail++; - if (desc->hw_desc_bus_addr == status) - break; } if (completed != 0) { ioat->last_seen = desc->hw_desc_bus_addr; ioat->stats.descriptors_processed += completed; } +out: ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); /* Perform a racy check first; only take the locks if it passes. */ pending = (ioat_get_active(ioat) != 0); if (!pending && ioat->is_completion_pending) { mtx_unlock(&ioat->cleanup_lock); mtx_lock(&ioat->submit_lock); mtx_lock(&ioat->cleanup_lock); pending = (ioat_get_active(ioat) != 0); if (!pending && ioat->is_completion_pending) { ioat->is_completion_pending = FALSE; callout_reset(&ioat->shrink_timer, IOAT_SHRINK_PERIOD, ioat_shrink_timer_callback, ioat); callout_stop(&ioat->poll_timer); } mtx_unlock(&ioat->submit_lock); } mtx_unlock(&ioat->cleanup_lock); if (pending) callout_reset(&ioat->poll_timer, 1, ioat_poll_timer_callback, ioat); if (completed != 0) { ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF); wakeup(&ioat->tail); } if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update)) return; ioat->stats.channel_halts++; /* * Fatal programming error on this DMA channel. Flush any outstanding * work with error status and restart the engine. */ ioat_log_message(0, "Channel halted due to fatal programming error\n"); mtx_lock(&ioat->submit_lock); mtx_lock(&ioat->cleanup_lock); ioat->quiescing = TRUE; chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); ioat_halted_debug(ioat, chanerr); ioat->stats.last_halt_chanerr = chanerr; while (ioat_get_active(ioat) > 0) { desc = ioat_get_ring_entry(ioat, ioat->tail); dmadesc = &desc->bus_dmadesc; CTR4(KTR_IOAT, "channel=%u completing desc %u err cb %p(%p)", ioat->chan_idx, ioat->tail, dmadesc->callback_fn, dmadesc->callback_arg); if (dmadesc->callback_fn != NULL) dmadesc->callback_fn(dmadesc->callback_arg, chanerr_to_errno(chanerr)); ioat_putn_locked(ioat, 1, IOAT_ACTIVE_DESCR_REF); ioat->tail++; ioat->stats.descriptors_processed++; ioat->stats.descriptors_error++; } if (ioat->is_completion_pending) { ioat->is_completion_pending = FALSE; callout_reset(&ioat->shrink_timer, IOAT_SHRINK_PERIOD, ioat_shrink_timer_callback, ioat); callout_stop(&ioat->poll_timer); } /* Clear error status */ ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr); mtx_unlock(&ioat->cleanup_lock); mtx_unlock(&ioat->submit_lock); ioat_log_message(0, "Resetting channel to recover from error\n"); error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task); KASSERT(error == 0, ("%s: taskqueue_enqueue failed: %d", __func__, error)); } static void ioat_reset_hw_task(void *ctx, int pending __unused) { struct ioat_softc *ioat; int error; ioat = ctx; ioat_log_message(1, "%s: Resetting channel\n", __func__); error = ioat_reset_hw(ioat); KASSERT(error == 0, ("%s: reset failed: %d", __func__, error)); (void)error; } /* * User API functions */ unsigned ioat_get_nchannels(void) { return (ioat_channel_index); } bus_dmaengine_t ioat_get_dmaengine(uint32_t index, int flags) { struct ioat_softc *ioat; KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0, ("invalid flags: 0x%08x", flags)); KASSERT((flags & (M_NOWAIT | 
M_WAITOK)) != (M_NOWAIT | M_WAITOK), ("invalid wait | nowait")); if (index >= ioat_channel_index) return (NULL); ioat = ioat_channel[index]; if (ioat == NULL || ioat->destroying) return (NULL); if (ioat->quiescing) { if ((flags & M_NOWAIT) != 0) return (NULL); mtx_lock(IOAT_REFLK); while (ioat->quiescing && !ioat->destroying) msleep(&ioat->quiescing, IOAT_REFLK, 0, "getdma", 0); mtx_unlock(IOAT_REFLK); if (ioat->destroying) return (NULL); } /* * There's a race here between the quiescing check and HW reset or * module destroy. */ return (&ioat_get(ioat, IOAT_DMAENGINE_REF)->dmaengine); } void ioat_put_dmaengine(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); ioat_put(ioat, IOAT_DMAENGINE_REF); } int ioat_get_hwversion(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); return (ioat->version); } size_t ioat_get_max_io_size(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); return (ioat->max_xfer_size); } uint32_t ioat_get_capabilities(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); return (ioat->capabilities); } int ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); if (!ioat->intrdelay_supported) return (ENODEV); if (delay > ioat->intrdelay_max) return (ERANGE); ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay); ioat->cached_intrdelay = ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK; return (0); } uint16_t ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); return (ioat->intrdelay_max); } void ioat_acquire(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); mtx_lock(&ioat->submit_lock); CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); } int ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags) { struct ioat_softc *ioat; int error; ioat = to_ioat_softc(dmaengine); ioat_acquire(dmaengine); error = ioat_reserve_space(ioat, n, mflags); if (error != 0) ioat_release(dmaengine); return (error); } void ioat_release(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->hw_head); mtx_unlock(&ioat->submit_lock); } static struct ioat_descriptor * ioat_op_generic(struct ioat_softc *ioat, uint8_t op, uint32_t size, uint64_t src, uint64_t dst, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_generic_hw_descriptor *hw_desc; struct ioat_descriptor *desc; int mflags; mtx_assert(&ioat->submit_lock, MA_OWNED); KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0, ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS)); if ((flags & DMA_NO_WAIT) != 0) mflags = M_NOWAIT; else mflags = M_WAITOK; if (size > ioat->max_xfer_size) { ioat_log_message(0, "%s: max_xfer_size = %d, requested = %u\n", __func__, ioat->max_xfer_size, (unsigned)size); return (NULL); } if (ioat_reserve_space(ioat, 1, mflags) != 0) return (NULL); desc = ioat_get_ring_entry(ioat, ioat->head); hw_desc = desc->u.generic; hw_desc->u.control_raw = 0; hw_desc->u.control_generic.op = op; hw_desc->u.control_generic.completion_update = 1; if ((flags & DMA_INT_EN) != 0) hw_desc->u.control_generic.int_enable = 1; if ((flags & DMA_FENCE) != 0) hw_desc->u.control_generic.fence = 1; hw_desc->size = size; 
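	/*
	 * src/dst are raw bus addresses; their meaning is op-specific
	 * (e.g. ioat_blockfill() passes the fill pattern through the
	 * src slot).
	 */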
hw_desc->src_addr = src; hw_desc->dest_addr = dst; desc->bus_dmadesc.callback_fn = callback_fn; desc->bus_dmadesc.callback_arg = callback_arg; return (desc); } struct bus_dmadesc * ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_dma_hw_descriptor *hw_desc; struct ioat_descriptor *desc; struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn, callback_arg, flags); if (desc == NULL) return (NULL); hw_desc = desc->u.dma; hw_desc->u.control.null = 1; ioat_submit_single(ioat); return (&desc->bus_dmadesc); } struct bus_dmadesc * ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_dma_hw_descriptor *hw_desc; struct ioat_descriptor *desc; struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); if (((src | dst) & (0xffffull << 48)) != 0) { ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n", __func__); return (NULL); } desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn, callback_arg, flags); if (desc == NULL) return (NULL); hw_desc = desc->u.dma; if (g_ioat_debug_level >= 3) dump_descriptor(hw_desc); ioat_submit_single(ioat); return (&desc->bus_dmadesc); } struct bus_dmadesc * ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1, bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_dma_hw_descriptor *hw_desc; struct ioat_descriptor *desc; struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); if (((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) != 0) { ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n", __func__); return (NULL); } if (((src1 | src2 | dst1 | dst2) & PAGE_MASK) != 0) { ioat_log_message(0, "%s: Addresses must be page-aligned\n", __func__); return (NULL); } desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1, callback_fn, callback_arg, flags); if (desc == NULL) return (NULL); hw_desc = desc->u.dma; if (src2 != src1 + PAGE_SIZE) { hw_desc->u.control.src_page_break = 1; hw_desc->next_src_addr = src2; } if (dst2 != dst1 + PAGE_SIZE) { hw_desc->u.control.dest_page_break = 1; hw_desc->next_dest_addr = dst2; } if (g_ioat_debug_level >= 3) dump_descriptor(hw_desc); ioat_submit_single(ioat); return (&desc->bus_dmadesc); } struct bus_dmadesc * ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src, bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_crc32_hw_descriptor *hw_desc; struct ioat_descriptor *desc; struct ioat_softc *ioat; uint32_t teststore; uint8_t op; ioat = to_ioat_softc(dmaengine); CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); if ((ioat->capabilities & IOAT_DMACAP_MOVECRC) == 0) { ioat_log_message(0, "%s: Device lacks MOVECRC capability\n", __func__); return (NULL); } if (((src | dst) & (0xffffffull << 40)) != 0) { ioat_log_message(0, "%s: High 24 bits of src/dst invalid\n", __func__); return (NULL); } teststore = (flags & _DMA_CRC_TESTSTORE); if (teststore == _DMA_CRC_TESTSTORE) { ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__); return (NULL); } if 
(teststore == 0 && (flags & DMA_CRC_INLINE) != 0) { ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n", __func__); return (NULL); } switch (teststore) { case DMA_CRC_STORE: op = IOAT_OP_MOVECRC_STORE; break; case DMA_CRC_TEST: op = IOAT_OP_MOVECRC_TEST; break; default: KASSERT(teststore == 0, ("bogus")); op = IOAT_OP_MOVECRC; break; } if ((flags & DMA_CRC_INLINE) == 0 && (crcptr & (0xffffffull << 40)) != 0) { ioat_log_message(0, "%s: High 24 bits of crcptr invalid\n", __func__); return (NULL); } desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn, callback_arg, flags & ~_DMA_CRC_FLAGS); if (desc == NULL) return (NULL); hw_desc = desc->u.crc32; if ((flags & DMA_CRC_INLINE) == 0) hw_desc->crc_address = crcptr; else hw_desc->u.control.crc_location = 1; if (initialseed != NULL) { hw_desc->u.control.use_seed = 1; hw_desc->seed = *initialseed; } if (g_ioat_debug_level >= 3) dump_descriptor(hw_desc); ioat_submit_single(ioat); return (&desc->bus_dmadesc); } struct bus_dmadesc * ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_crc32_hw_descriptor *hw_desc; struct ioat_descriptor *desc; struct ioat_softc *ioat; uint32_t teststore; uint8_t op; ioat = to_ioat_softc(dmaengine); CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); if ((ioat->capabilities & IOAT_DMACAP_CRC) == 0) { ioat_log_message(0, "%s: Device lacks CRC capability\n", __func__); return (NULL); } if ((src & (0xffffffull << 40)) != 0) { ioat_log_message(0, "%s: High 24 bits of src invalid\n", __func__); return (NULL); } teststore = (flags & _DMA_CRC_TESTSTORE); if (teststore == _DMA_CRC_TESTSTORE) { ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__); return (NULL); } if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) { ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n", __func__); return (NULL); } switch (teststore) { case DMA_CRC_STORE: op = IOAT_OP_CRC_STORE; break; case DMA_CRC_TEST: op = IOAT_OP_CRC_TEST; break; default: KASSERT(teststore == 0, ("bogus")); op = IOAT_OP_CRC; break; } if ((flags & DMA_CRC_INLINE) == 0 && (crcptr & (0xffffffull << 40)) != 0) { ioat_log_message(0, "%s: High 24 bits of crcptr invalid\n", __func__); return (NULL); } desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn, callback_arg, flags & ~_DMA_CRC_FLAGS); if (desc == NULL) return (NULL); hw_desc = desc->u.crc32; if ((flags & DMA_CRC_INLINE) == 0) hw_desc->crc_address = crcptr; else hw_desc->u.control.crc_location = 1; if (initialseed != NULL) { hw_desc->u.control.use_seed = 1; hw_desc->seed = *initialseed; } if (g_ioat_debug_level >= 3) dump_descriptor(hw_desc); ioat_submit_single(ioat); return (&desc->bus_dmadesc); } struct bus_dmadesc * ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern, bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_fill_hw_descriptor *hw_desc; struct ioat_descriptor *desc; struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) { ioat_log_message(0, "%s: Device lacks BFILL capability\n", __func__); return (NULL); } if ((dst & (0xffffull << 48)) != 0) { ioat_log_message(0, "%s: High 16 bits of dst invalid\n", __func__); return (NULL); } desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst, callback_fn, callback_arg, 
flags); if (desc == NULL) return (NULL); hw_desc = desc->u.fill; if (g_ioat_debug_level >= 3) dump_descriptor(hw_desc); ioat_submit_single(ioat); return (&desc->bus_dmadesc); } /* * Ring Management */ static inline uint32_t ioat_get_active(struct ioat_softc *ioat) { return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1)); } static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat) { return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1); } static struct ioat_descriptor * ioat_alloc_ring_entry(struct ioat_softc *ioat, int mflags) { struct ioat_generic_hw_descriptor *hw_desc; struct ioat_descriptor *desc; int error, busdmaflag; error = ENOMEM; hw_desc = NULL; if ((mflags & M_WAITOK) != 0) busdmaflag = BUS_DMA_WAITOK; else busdmaflag = BUS_DMA_NOWAIT; desc = malloc(sizeof(*desc), M_IOAT, mflags); if (desc == NULL) goto out; bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc, BUS_DMA_ZERO | busdmaflag, &ioat->hw_desc_map); if (hw_desc == NULL) goto out; memset(&desc->bus_dmadesc, 0, sizeof(desc->bus_dmadesc)); desc->u.generic = hw_desc; error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc, sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr, busdmaflag); if (error) goto out; out: if (error) { ioat_free_ring_entry(ioat, desc); return (NULL); } return (desc); } static void ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc) { if (desc == NULL) return; if (desc->u.generic) bus_dmamem_free(ioat->hw_desc_tag, desc->u.generic, ioat->hw_desc_map); free(desc, M_IOAT); } /* * Reserves space in this IOAT descriptor ring by ensuring enough slots remain * for 'num_descs'. * * If mflags contains M_WAITOK, blocks until enough space is available. * * Returns zero on success, or an errno on error. If num_descs is beyond the * maximum ring size, returns EINVAL; if allocation would block and mflags * contains M_NOWAIT, returns EAGAIN. * * Must be called with the submit_lock held; returns with the lock held. The * lock may be dropped to allocate the ring. * * (The submit_lock is needed to add any entries to the ring, so callers are * assured enough room is available.)
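 *
 * Illustrative caller sketch (ioat_acquire_reserve() is the in-tree
 * convenience wrapper for this pattern):
 *
 *	ioat_acquire(&ioat->dmaengine);		  <- takes submit_lock
 *	error = ioat_reserve_space(ioat, 2, M_NOWAIT);
 *	if (error == 0)
 *		(build and submit two descriptors)
 *	ioat_release(&ioat->dmaengine);		  <- writes DMACOUNT, drops lock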
*/ static int ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags) { struct ioat_descriptor **new_ring; uint32_t order; boolean_t dug; int error; mtx_assert(&ioat->submit_lock, MA_OWNED); error = 0; dug = FALSE; if (num_descs < 1 || num_descs >= (1 << IOAT_MAX_ORDER)) { error = EINVAL; goto out; } for (;;) { if (ioat->quiescing) { error = ENXIO; goto out; } if (ioat_get_ring_space(ioat) >= num_descs) goto out; if (!dug && !ioat->is_submitter_processing && (1 << ioat->ring_size_order) > num_descs) { ioat->is_submitter_processing = TRUE; mtx_unlock(&ioat->submit_lock); ioat_process_events(ioat); mtx_lock(&ioat->submit_lock); dug = TRUE; KASSERT(ioat->is_submitter_processing == TRUE, ("is_submitter_processing")); ioat->is_submitter_processing = FALSE; wakeup(&ioat->tail); continue; } order = ioat->ring_size_order; if (ioat->is_resize_pending || order == IOAT_MAX_ORDER) { if ((mflags & M_WAITOK) != 0) { msleep(&ioat->tail, &ioat->submit_lock, 0, "ioat_rsz", 0); continue; } error = EAGAIN; break; } ioat->is_resize_pending = TRUE; for (;;) { mtx_unlock(&ioat->submit_lock); new_ring = ioat_prealloc_ring(ioat, 1 << (order + 1), TRUE, mflags); mtx_lock(&ioat->submit_lock); KASSERT(ioat->ring_size_order == order, ("is_resize_pending should protect order")); if (new_ring == NULL) { KASSERT((mflags & M_WAITOK) == 0, ("allocation failed")); error = EAGAIN; break; } error = ring_grow(ioat, order, new_ring); if (error == 0) break; } ioat->is_resize_pending = FALSE; wakeup(&ioat->tail); if (error) break; } out: mtx_assert(&ioat->submit_lock, MA_OWNED); KASSERT(!ioat->quiescing || error == ENXIO, ("reserved during quiesce")); return (error); } static struct ioat_descriptor ** ioat_prealloc_ring(struct ioat_softc *ioat, uint32_t size, boolean_t need_dscr, int mflags) { struct ioat_descriptor **ring; uint32_t i; int error; KASSERT(size > 0 && powerof2(size), ("bogus size")); ring = malloc(size * sizeof(*ring), M_IOAT, M_ZERO | mflags); if (ring == NULL) return (NULL); if (need_dscr) { error = ENOMEM; for (i = size / 2; i < size; i++) { ring[i] = ioat_alloc_ring_entry(ioat, mflags); if (ring[i] == NULL) goto out; ring[i]->id = i; } } error = 0; out: if (error != 0 && ring != NULL) { ioat_free_ring(ioat, size, ring); ring = NULL; } return (ring); } static void ioat_free_ring(struct ioat_softc *ioat, uint32_t size, struct ioat_descriptor **ring) { uint32_t i; for (i = 0; i < size; i++) { if (ring[i] != NULL) ioat_free_ring_entry(ioat, ring[i]); } free(ring, M_IOAT); } static struct ioat_descriptor * ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index) { return (ioat->ring[index % (1 << ioat->ring_size_order)]); } static int ring_grow(struct ioat_softc *ioat, uint32_t oldorder, struct ioat_descriptor **newring) { struct ioat_descriptor *tmp, *next; struct ioat_dma_hw_descriptor *hw; uint32_t oldsize, newsize, head, tail, i, end; int error; CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); mtx_assert(&ioat->submit_lock, MA_OWNED); if (oldorder != ioat->ring_size_order || oldorder >= IOAT_MAX_ORDER) { error = EINVAL; goto out; } oldsize = (1 << oldorder); newsize = (1 << (oldorder + 1)); mtx_lock(&ioat->cleanup_lock); head = ioat->head & (oldsize - 1); tail = ioat->tail & (oldsize - 1); /* Copy old descriptors to new ring */ for (i = 0; i < oldsize; i++) newring[i] = ioat->ring[i]; /* * If head has wrapped but tail hasn't, we must swap some descriptors * around so that tail can increment directly to head. 
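 * (After the swap, head is re-based past oldsize, so the in-flight
 * region tail..head occupies ascending indices in the doubled ring.)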
*/ if (head < tail) { for (i = 0; i <= head; i++) { tmp = newring[oldsize + i]; newring[oldsize + i] = newring[i]; newring[oldsize + i]->id = oldsize + i; newring[i] = tmp; newring[i]->id = i; } head += oldsize; } KASSERT(head >= tail, ("invariants")); /* Head didn't wrap; we only need to link in oldsize..newsize */ if (head < oldsize) { i = oldsize - 1; end = newsize; } else { /* Head did wrap; link newhead..newsize and 0..oldhead */ i = head; end = newsize + (head - oldsize) + 1; } /* * Fix up hardware ring, being careful not to trample the active * section (tail -> head). */ for (; i < end; i++) { KASSERT((i & (newsize - 1)) < tail || (i & (newsize - 1)) >= head, ("trampling snake")); next = newring[(i + 1) & (newsize - 1)]; hw = newring[i & (newsize - 1)]->u.dma; hw->next = next->hw_desc_bus_addr; } #ifdef INVARIANTS for (i = 0; i < newsize; i++) { next = newring[(i + 1) & (newsize - 1)]; hw = newring[i & (newsize - 1)]->u.dma; KASSERT(hw->next == next->hw_desc_bus_addr, ("mismatch at i:%u (oldsize:%u); next=%p nextaddr=0x%lx" " (tail:%u)", i, oldsize, next, next->hw_desc_bus_addr, tail)); } #endif free(ioat->ring, M_IOAT); ioat->ring = newring; ioat->ring_size_order = oldorder + 1; ioat->tail = tail; ioat->head = head; error = 0; mtx_unlock(&ioat->cleanup_lock); out: if (error) ioat_free_ring(ioat, (1 << (oldorder + 1)), newring); return (error); } static int ring_shrink(struct ioat_softc *ioat, uint32_t oldorder, struct ioat_descriptor **newring) { struct ioat_dma_hw_descriptor *hw; struct ioat_descriptor *ent, *next; uint32_t oldsize, newsize, current_idx, new_idx, i; int error; CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); mtx_assert(&ioat->submit_lock, MA_OWNED); if (oldorder != ioat->ring_size_order || oldorder <= IOAT_MIN_ORDER) { error = EINVAL; goto out_unlocked; } oldsize = (1 << oldorder); newsize = (1 << (oldorder - 1)); mtx_lock(&ioat->cleanup_lock); /* Can't shrink below current active set! */ if (ioat_get_active(ioat) >= newsize) { error = ENOMEM; goto out; } /* * Copy current descriptors to the new ring, dropping the removed * descriptors. */ for (i = 0; i < newsize; i++) { current_idx = (ioat->tail + i) & (oldsize - 1); new_idx = (ioat->tail + i) & (newsize - 1); newring[new_idx] = ioat->ring[current_idx]; newring[new_idx]->id = new_idx; } /* Free deleted descriptors */ for (i = newsize; i < oldsize; i++) { ent = ioat_get_ring_entry(ioat, ioat->tail + i); ioat_free_ring_entry(ioat, ent); } /* Fix up hardware ring. 
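 * Only one link can be stale after the copy: the last descriptor kept
 * must now point at the slot that follows it in the smaller ring.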
*/ hw = newring[(ioat->tail + newsize - 1) & (newsize - 1)]->u.dma; next = newring[(ioat->tail + newsize) & (newsize - 1)]; hw->next = next->hw_desc_bus_addr; #ifdef INVARIANTS for (i = 0; i < newsize; i++) { next = newring[(i + 1) & (newsize - 1)]; hw = newring[i & (newsize - 1)]->u.dma; KASSERT(hw->next == next->hw_desc_bus_addr, ("mismatch at i:%u (newsize:%u); next=%p nextaddr=0x%lx " "(tail:%u)", i, newsize, next, next->hw_desc_bus_addr, ioat->tail)); } #endif free(ioat->ring, M_IOAT); ioat->ring = newring; ioat->ring_size_order = oldorder - 1; error = 0; out: mtx_unlock(&ioat->cleanup_lock); out_unlocked: if (error) ioat_free_ring(ioat, (1 << (oldorder - 1)), newring); return (error); } static void ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr) { struct ioat_descriptor *desc; ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr, IOAT_CHANERR_STR); if (chanerr == 0) return; mtx_assert(&ioat->cleanup_lock, MA_OWNED); desc = ioat_get_ring_entry(ioat, ioat->tail + 0); dump_descriptor(desc->u.raw); desc = ioat_get_ring_entry(ioat, ioat->tail + 1); dump_descriptor(desc->u.raw); } static void ioat_poll_timer_callback(void *arg) { struct ioat_softc *ioat; ioat = arg; ioat_log_message(3, "%s\n", __func__); ioat_process_events(ioat); } static void ioat_shrink_timer_callback(void *arg) { struct ioat_descriptor **newring; struct ioat_softc *ioat; uint32_t order; ioat = arg; ioat_log_message(1, "%s\n", __func__); /* Slowly scale the ring down if idle. */ mtx_lock(&ioat->submit_lock); /* Don't run while the hardware is being reset. */ if (ioat->resetting) { mtx_unlock(&ioat->submit_lock); return; } order = ioat->ring_size_order; if (ioat->is_completion_pending || ioat->is_resize_pending || order == IOAT_MIN_ORDER) { mtx_unlock(&ioat->submit_lock); goto out; } ioat->is_resize_pending = TRUE; mtx_unlock(&ioat->submit_lock); newring = ioat_prealloc_ring(ioat, 1 << (order - 1), FALSE, M_NOWAIT); mtx_lock(&ioat->submit_lock); KASSERT(ioat->ring_size_order == order, ("resize_pending protects order")); if (newring != NULL && !ioat->is_completion_pending) ring_shrink(ioat, order, newring); else if (newring != NULL) ioat_free_ring(ioat, (1 << (order - 1)), newring); ioat->is_resize_pending = FALSE; mtx_unlock(&ioat->submit_lock); out: if (ioat->ring_size_order > IOAT_MIN_ORDER) callout_reset(&ioat->shrink_timer, IOAT_SHRINK_PERIOD, ioat_shrink_timer_callback, ioat); } /* * Support Functions */ static void ioat_submit_single(struct ioat_softc *ioat) { ioat_get(ioat, IOAT_ACTIVE_DESCR_REF); atomic_add_rel_int(&ioat->head, 1); atomic_add_rel_int(&ioat->hw_head, 1); if (!ioat->is_completion_pending) { ioat->is_completion_pending = TRUE; callout_reset(&ioat->poll_timer, 1, ioat_poll_timer_callback, ioat); callout_stop(&ioat->shrink_timer); } ioat->stats.descriptors_submitted++; } static int ioat_reset_hw(struct ioat_softc *ioat) { uint64_t status; uint32_t chanerr; unsigned timeout; int error; CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); mtx_lock(IOAT_REFLK); while (ioat->resetting && !ioat->destroying) msleep(&ioat->resetting, IOAT_REFLK, 0, "IRH_drain", 0); if (ioat->destroying) { mtx_unlock(IOAT_REFLK); return (ENXIO); } ioat->resetting = TRUE; ioat->quiescing = TRUE; ioat_drain_locked(ioat); mtx_unlock(IOAT_REFLK); /* * Suspend ioat_process_events while the hardware and softc are in an * indeterminate state. 
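 * ioat_process_events() checks resetting_cleanup under the
 * cleanup_lock and returns immediately while it is set.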
*/ mtx_lock(&ioat->cleanup_lock); ioat->resetting_cleanup = TRUE; mtx_unlock(&ioat->cleanup_lock); + CTR2(KTR_IOAT, "%s channel=%u quiesced and drained", __func__, + ioat->chan_idx); + status = ioat_get_chansts(ioat); if (is_ioat_active(status) || is_ioat_idle(status)) ioat_suspend(ioat); /* Wait at most 20 ms */ for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) && timeout < 20; timeout++) { DELAY(1000); status = ioat_get_chansts(ioat); } if (timeout == 20) { error = ETIMEDOUT; goto out; } KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce")); chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr); + CTR2(KTR_IOAT, "%s channel=%u hardware suspended", __func__, + ioat->chan_idx); + /* * IOAT v3 workaround - CHANERRMSK_INT with 3E07h to mask out errors * that can cause stability issues for IOAT v3. */ pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07, 4); chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4); pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4); /* * BDXDE and BWD models reset MSI-X registers on device reset. * Save/restore their contents manually. */ if (ioat_model_resets_msix(ioat)) { ioat_log_message(1, "device resets MSI-X registers; saving\n"); pci_save_state(ioat->device); } ioat_reset(ioat); + CTR2(KTR_IOAT, "%s channel=%u hardware reset", __func__, + ioat->chan_idx); /* Wait at most 20 ms */ for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++) DELAY(1000); if (timeout == 20) { error = ETIMEDOUT; goto out; } if (ioat_model_resets_msix(ioat)) { ioat_log_message(1, "device resets registers; restored\n"); pci_restore_state(ioat->device); } /* Reset attempts to return the hardware to "halted." */ status = ioat_get_chansts(ioat); if (is_ioat_active(status) || is_ioat_idle(status)) { /* So this really shouldn't happen... */ ioat_log_message(0, "Device is active after a reset?\n"); ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); error = 0; goto out; } chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); if (chanerr != 0) { mtx_lock(&ioat->cleanup_lock); ioat_halted_debug(ioat, chanerr); mtx_unlock(&ioat->cleanup_lock); error = EIO; goto out; } /* * Bring device back online after reset. Writing CHAINADDR brings the * device back to active. * * The internal ring counter resets to zero, so we have to start over * at zero as well. */ ioat->tail = ioat->head = ioat->hw_head = 0; ioat->last_seen = 0; *ioat->comp_update = 0; KASSERT(!ioat->is_completion_pending, ("bogus completion_pending")); ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); ioat_write_chancmp(ioat, ioat->comp_update_bus_addr); ioat_write_chainaddr(ioat, ioat->ring[0]->hw_desc_bus_addr); error = 0; + CTR2(KTR_IOAT, "%s channel=%u configured channel", __func__, + ioat->chan_idx); out: + /* Enqueues a null operation and ensures it completes. */ + if (error == 0) { + error = ioat_start_channel(ioat); + CTR2(KTR_IOAT, "%s channel=%u started channel", __func__, + ioat->chan_idx); + } + /* * Resume completions now that ring state is consistent. - * ioat_start_channel will add a pending completion and if we are still - * blocking completions, we may livelock. */ mtx_lock(&ioat->cleanup_lock); ioat->resetting_cleanup = FALSE; mtx_unlock(&ioat->cleanup_lock); - /* Enqueues a null operation and ensures it completes.
*/ - if (error == 0) - error = ioat_start_channel(ioat); - /* Unblock submission of new work */ mtx_lock(IOAT_REFLK); ioat->quiescing = FALSE; wakeup(&ioat->quiescing); ioat->resetting = FALSE; wakeup(&ioat->resetting); + + if (ioat->is_completion_pending) + callout_reset(&ioat->poll_timer, 1, ioat_poll_timer_callback, + ioat); + CTR2(KTR_IOAT, "%s channel=%u reset done", __func__, ioat->chan_idx); mtx_unlock(IOAT_REFLK); return (error); } static int sysctl_handle_chansts(SYSCTL_HANDLER_ARGS) { struct ioat_softc *ioat; struct sbuf sb; uint64_t status; int error; ioat = arg1; status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS; sbuf_new_for_sysctl(&sb, NULL, 256, req); switch (status) { case IOAT_CHANSTS_ACTIVE: sbuf_printf(&sb, "ACTIVE"); break; case IOAT_CHANSTS_IDLE: sbuf_printf(&sb, "IDLE"); break; case IOAT_CHANSTS_SUSPENDED: sbuf_printf(&sb, "SUSPENDED"); break; case IOAT_CHANSTS_HALTED: sbuf_printf(&sb, "HALTED"); break; case IOAT_CHANSTS_ARMED: sbuf_printf(&sb, "ARMED"); break; default: sbuf_printf(&sb, "UNKNOWN"); break; } error = sbuf_finish(&sb); sbuf_delete(&sb); if (error != 0 || req->newptr == NULL) return (error); return (EINVAL); } static int sysctl_handle_dpi(SYSCTL_HANDLER_ARGS) { struct ioat_softc *ioat; struct sbuf sb; #define PRECISION "1" const uintmax_t factor = 10; uintmax_t rate; int error; ioat = arg1; sbuf_new_for_sysctl(&sb, NULL, 16, req); if (ioat->stats.interrupts == 0) { sbuf_printf(&sb, "NaN"); goto out; } rate = ioat->stats.descriptors_processed * factor / ioat->stats.interrupts; sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor, rate % factor); #undef PRECISION out: error = sbuf_finish(&sb); sbuf_delete(&sb); if (error != 0 || req->newptr == NULL) return (error); return (EINVAL); } static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS) { struct ioat_softc *ioat; int error, arg; ioat = arg1; arg = 0; error = SYSCTL_OUT(req, &arg, sizeof(arg)); if (error != 0 || req->newptr == NULL) return (error); error = SYSCTL_IN(req, &arg, sizeof(arg)); if (error != 0) return (error); if (arg != 0) error = ioat_reset_hw(ioat); return (error); } static void dump_descriptor(void *hw_desc) { int i, j; for (i = 0; i < 2; i++) { for (j = 0; j < 8; j++) printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]); printf("\n"); } } static void ioat_setup_sysctl(device_t device) { struct sysctl_oid_list *par, *statpar, *state, *hammer; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree, *tmp; struct ioat_softc *ioat; ioat = DEVICE2SOFTC(device); ctx = device_get_sysctl_ctx(device); tree = device_get_sysctl_tree(device); par = SYSCTL_CHILDREN(tree); SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD, &ioat->version, 0, "HW version (0xMM form)"); SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD, &ioat->max_xfer_size, 0, "HW maximum transfer size"); SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD, &ioat->intrdelay_supported, 0, "Is INTRDELAY supported"); SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD, &ioat->intrdelay_max, 0, "Maximum configurable INTRDELAY on this channel (microseconds)"); tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL, "IOAT channel internal state"); state = SYSCTL_CHILDREN(tmp); SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD, &ioat->ring_size_order, 0, "SW descriptor ring size order"); SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head, 0, "SW descriptor head pointer index"); SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail, 0, 
"SW descriptor tail pointer index"); SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "hw_head", CTLFLAG_RD, &ioat->hw_head, 0, "HW DMACOUNT"); SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD, ioat->comp_update, "HW addr of last completion"); SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_resize_pending", CTLFLAG_RD, &ioat->is_resize_pending, 0, "resize pending"); SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_submitter_processing", CTLFLAG_RD, &ioat->is_submitter_processing, 0, "submitter processing"); SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_completion_pending", CTLFLAG_RD, &ioat->is_completion_pending, 0, "completion pending"); SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_reset_pending", CTLFLAG_RD, &ioat->is_reset_pending, 0, "reset pending"); SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_channel_running", CTLFLAG_RD, &ioat->is_channel_running, 0, "channel running"); SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts", CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A", "String of the channel status"); SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD, &ioat->cached_intrdelay, 0, "Current INTRDELAY on this channel (cached, microseconds)"); tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL, "Big hammers (mostly for testing)"); hammer = SYSCTL_CHILDREN(tmp); SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset", CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I", "Set to non-zero to reset the hardware"); tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL, "IOAT channel statistics"); statpar = SYSCTL_CHILDREN(tmp); SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW, &ioat->stats.interrupts, "Number of interrupts processed on this channel"); SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW, &ioat->stats.descriptors_processed, "Number of descriptors processed on this channel"); SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW, &ioat->stats.descriptors_submitted, "Number of descriptors submitted to this channel"); SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW, &ioat->stats.descriptors_error, "Number of descriptors failed by channel errors"); SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW, &ioat->stats.channel_halts, 0, "Number of times the channel has halted"); SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW, &ioat->stats.last_halt_chanerr, 0, "The raw CHANERR when the channel was last halted"); SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt", CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A", "Descriptors per interrupt"); } static inline struct ioat_softc * ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind) { uint32_t old; KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus")); old = atomic_fetchadd_32(&ioat->refcnt, 1); KASSERT(old < UINT32_MAX, ("refcnt overflow")); #ifdef INVARIANTS old = atomic_fetchadd_32(&ioat->refkinds[kind], 1); KASSERT(old < UINT32_MAX, ("refcnt kind overflow")); #endif return (ioat); } static inline void ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind) { _ioat_putn(ioat, n, kind, FALSE); } static inline void ioat_putn_locked(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind) { _ioat_putn(ioat, n, kind, TRUE); } static inline void _ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind, boolean_t locked) { uint32_t old; KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus")); if (n == 0) return; #ifdef INVARIANTS old = 
atomic_fetchadd_32(&ioat->refkinds[kind], -n); KASSERT(old >= n, ("refcnt kind underflow")); #endif /* Skip acquiring the lock if resulting refcnt > 0. */ for (;;) { old = ioat->refcnt; if (old <= n) break; if (atomic_cmpset_32(&ioat->refcnt, old, old - n)) return; } if (locked) mtx_assert(IOAT_REFLK, MA_OWNED); else mtx_lock(IOAT_REFLK); old = atomic_fetchadd_32(&ioat->refcnt, -n); KASSERT(old >= n, ("refcnt error")); if (old == n) wakeup(IOAT_REFLK); if (!locked) mtx_unlock(IOAT_REFLK); } static inline void ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind) { ioat_putn(ioat, 1, kind); } static void ioat_drain_locked(struct ioat_softc *ioat) { mtx_assert(IOAT_REFLK, MA_OWNED); while (ioat->refcnt > 0) msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0); } #ifdef DDB #define _db_show_lock(lo) LOCK_CLASS(lo)->lc_ddb_show(lo) #define db_show_lock(lk) _db_show_lock(&(lk)->lock_object) DB_SHOW_COMMAND(ioat, db_show_ioat) { struct ioat_softc *sc; unsigned idx; if (!have_addr) goto usage; idx = (unsigned)addr; if (idx >= ioat_channel_index) goto usage; sc = ioat_channel[idx]; db_printf("ioat softc at %p\n", sc); if (sc == NULL) return; db_printf(" version: %d\n", sc->version); db_printf(" chan_idx: %u\n", sc->chan_idx); db_printf(" submit_lock: "); db_show_lock(&sc->submit_lock); db_printf(" capabilities: %b\n", (int)sc->capabilities, IOAT_DMACAP_STR); db_printf(" cached_intrdelay: %u\n", sc->cached_intrdelay); db_printf(" *comp_update: 0x%jx\n", (uintmax_t)*sc->comp_update); db_printf(" poll_timer:\n"); db_printf(" c_time: %ju\n", (uintmax_t)sc->poll_timer.c_time); db_printf(" c_arg: %p\n", sc->poll_timer.c_arg); db_printf(" c_func: %p\n", sc->poll_timer.c_func); db_printf(" c_lock: %p\n", sc->poll_timer.c_lock); db_printf(" c_flags: 0x%x\n", (unsigned)sc->poll_timer.c_flags); db_printf(" shrink_timer:\n"); db_printf(" c_time: %ju\n", (uintmax_t)sc->shrink_timer.c_time); db_printf(" c_arg: %p\n", sc->shrink_timer.c_arg); db_printf(" c_func: %p\n", sc->shrink_timer.c_func); db_printf(" c_lock: %p\n", sc->shrink_timer.c_lock); db_printf(" c_flags: 0x%x\n", (unsigned)sc->shrink_timer.c_flags); db_printf(" quiescing: %d\n", (int)sc->quiescing); db_printf(" destroying: %d\n", (int)sc->destroying); db_printf(" is_resize_pending: %d\n", (int)sc->is_resize_pending); db_printf(" is_submitter_processing: %d\n", (int)sc->is_submitter_processing); db_printf(" is_completion_pending: %d\n", (int)sc->is_completion_pending); db_printf(" is_reset_pending: %d\n", (int)sc->is_reset_pending); db_printf(" is_channel_running: %d\n", (int)sc->is_channel_running); db_printf(" intrdelay_supported: %d\n", (int)sc->intrdelay_supported); db_printf(" resetting: %d\n", (int)sc->resetting); db_printf(" head: %u\n", sc->head); db_printf(" tail: %u\n", sc->tail); db_printf(" hw_head: %u\n", sc->hw_head); db_printf(" ring_size_order: %u\n", sc->ring_size_order); db_printf(" last_seen: 0x%lx\n", sc->last_seen); db_printf(" ring: %p\n", sc->ring); db_printf(" ring[%u] (tail):\n", sc->tail % (1 << sc->ring_size_order)); db_printf(" id: %u\n", ioat_get_ring_entry(sc, sc->tail)->id); db_printf(" addr: 0x%lx\n", ioat_get_ring_entry(sc, sc->tail)->hw_desc_bus_addr); db_printf(" next: 0x%lx\n", ioat_get_ring_entry(sc, sc->tail)->u.generic->next); db_printf(" ring[%u] (head - 1):\n", (sc->head - 1) % (1 << sc->ring_size_order)); db_printf(" id: %u\n", ioat_get_ring_entry(sc, sc->head - 1)->id); db_printf(" addr: 0x%lx\n", ioat_get_ring_entry(sc, sc->head - 1)->hw_desc_bus_addr); db_printf(" next: 0x%lx\n", ioat_get_ring_entry(sc, 
sc->head - 1)->u.generic->next); db_printf(" ring[%u] (head):\n", (sc->head) % (1 << sc->ring_size_order)); db_printf(" id: %u\n", ioat_get_ring_entry(sc, sc->head)->id); db_printf(" addr: 0x%lx\n", ioat_get_ring_entry(sc, sc->head)->hw_desc_bus_addr); db_printf(" next: 0x%lx\n", ioat_get_ring_entry(sc, sc->head)->u.generic->next); for (idx = 0; idx < (1 << sc->ring_size_order); idx++) if ((*sc->comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK) == ioat_get_ring_entry(sc, idx)->hw_desc_bus_addr) db_printf(" ring[%u] == hardware tail\n", idx); db_printf(" cleanup_lock: "); db_show_lock(&sc->cleanup_lock); db_printf(" refcnt: %u\n", sc->refcnt); #ifdef INVARIANTS CTASSERT(IOAT_NUM_REF_KINDS == 2); db_printf(" refkinds: [ENG=%u, DESCR=%u]\n", sc->refkinds[0], sc->refkinds[1]); #endif db_printf(" stats:\n"); db_printf(" interrupts: %lu\n", sc->stats.interrupts); db_printf(" descriptors_processed: %lu\n", sc->stats.descriptors_processed); db_printf(" descriptors_error: %lu\n", sc->stats.descriptors_error); db_printf(" descriptors_submitted: %lu\n", sc->stats.descriptors_submitted); db_printf(" channel_halts: %u\n", sc->stats.channel_halts); db_printf(" last_halt_chanerr: %u\n", sc->stats.last_halt_chanerr); if (db_pager_quit) return; db_printf(" hw status:\n"); db_printf(" status: 0x%lx\n", ioat_get_chansts(sc)); db_printf(" chanctrl: 0x%x\n", (unsigned)ioat_read_2(sc, IOAT_CHANCTRL_OFFSET)); db_printf(" chancmd: 0x%x\n", (unsigned)ioat_read_1(sc, IOAT_CHANCMD_OFFSET)); db_printf(" dmacount: 0x%x\n", (unsigned)ioat_read_2(sc, IOAT_DMACOUNT_OFFSET)); db_printf(" chainaddr: 0x%lx\n", ioat_read_double_4(sc, IOAT_CHAINADDR_OFFSET_LOW)); db_printf(" chancmp: 0x%lx\n", ioat_read_double_4(sc, IOAT_CHANCMP_OFFSET_LOW)); db_printf(" chanerr: %b\n", (int)ioat_read_4(sc, IOAT_CHANERR_OFFSET), IOAT_CHANERR_STR); return; usage: db_printf("usage: show ioat <0-%u>\n", ioat_channel_index); return; } #endif /* DDB */ Index: projects/clang390-import/usr.sbin/bluetooth/btpand/bnep.c =================================================================== --- projects/clang390-import/usr.sbin/bluetooth/btpand/bnep.c (revision 305028) +++ projects/clang390-import/usr.sbin/bluetooth/btpand/bnep.c (revision 305029) @@ -1,756 +1,756 @@ /* $NetBSD: bnep.c,v 1.1 2008/08/17 13:20:57 plunky Exp $ */ /*- * Copyright (c) 2008 Iain Hibbert * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* $FreeBSD$ */ #include __RCSID("$NetBSD: bnep.c,v 1.1 2008/08/17 13:20:57 plunky Exp $"); #include #define L2CAP_SOCKET_CHECKED #include #include #include #include #include #include "btpand.h" #include "bnep.h" static bool bnep_recv_extension(packet_t *); static size_t bnep_recv_control(channel_t *, uint8_t *, size_t, bool); static size_t bnep_recv_control_command_not_understood(channel_t *, uint8_t *, size_t); static size_t bnep_recv_setup_connection_req(channel_t *, uint8_t *, size_t); static size_t bnep_recv_setup_connection_rsp(channel_t *, uint8_t *, size_t); static size_t bnep_recv_filter_net_type_set(channel_t *, uint8_t *, size_t); static size_t bnep_recv_filter_net_type_rsp(channel_t *, uint8_t *, size_t); static size_t bnep_recv_filter_multi_addr_set(channel_t *, uint8_t *, size_t); static size_t bnep_recv_filter_multi_addr_rsp(channel_t *, uint8_t *, size_t); static bool bnep_pfilter(channel_t *, packet_t *); static bool bnep_mfilter(channel_t *, packet_t *); static uint8_t NAP_UUID[] = { 0x00, 0x00, 0x11, 0x16, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0x80, 0x5f, 0x9b, 0x34, 0xfb }; static uint8_t GN_UUID[] = { 0x00, 0x00, 0x11, 0x17, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0x80, 0x5f, 0x9b, 0x34, 0xfb, }; static uint8_t PANU_UUID[] = { 0x00, 0x00, 0x11, 0x15, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0x80, 0x5f, 0x9b, 0x34, 0xfb }; /* * receive BNEP packet * return true if packet is to be forwarded */ bool bnep_recv(packet_t *pkt) { size_t len; uint8_t type; if (pkt->len < 1) return false; type = pkt->ptr[0]; packet_adj(pkt, 1); switch (BNEP_TYPE(type)) { case BNEP_GENERAL_ETHERNET: if (pkt->len < (ETHER_ADDR_LEN * 2) + ETHER_TYPE_LEN) { log_debug("dropped short packet (type 0x%2.2x)", type); return false; } pkt->dst = pkt->ptr; packet_adj(pkt, ETHER_ADDR_LEN); pkt->src = pkt->ptr; packet_adj(pkt, ETHER_ADDR_LEN); pkt->type = pkt->ptr; packet_adj(pkt, ETHER_TYPE_LEN); break; case BNEP_CONTROL: len = bnep_recv_control(pkt->chan, pkt->ptr, pkt->len, false); if (len == 0) return false; packet_adj(pkt, len); break; case BNEP_COMPRESSED_ETHERNET: if (pkt->len < ETHER_TYPE_LEN) { log_debug("dropped short packet (type 0x%2.2x)", type); return false; } pkt->dst = pkt->chan->laddr; pkt->src = pkt->chan->raddr; pkt->type = pkt->ptr; packet_adj(pkt, ETHER_TYPE_LEN); break; case BNEP_COMPRESSED_ETHERNET_SRC_ONLY: if (pkt->len < ETHER_ADDR_LEN + ETHER_TYPE_LEN) { log_debug("dropped short packet (type 0x%2.2x)", type); return false; } pkt->dst = pkt->chan->laddr; pkt->src = pkt->ptr; packet_adj(pkt, ETHER_ADDR_LEN); pkt->type = pkt->ptr; packet_adj(pkt, ETHER_TYPE_LEN); break; case BNEP_COMPRESSED_ETHERNET_DST_ONLY: if (pkt->len < ETHER_ADDR_LEN + ETHER_TYPE_LEN) { log_debug("dropped short packet (type 0x%2.2x)", type); return false; } pkt->dst = pkt->ptr; packet_adj(pkt, ETHER_ADDR_LEN); pkt->src = pkt->chan->raddr; pkt->type = pkt->ptr; packet_adj(pkt, ETHER_TYPE_LEN); break; default: /* * Any packet containing a reserved BNEP * header packet type SHALL be dropped. 
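 * (Everything outside the five types handled above is reserved.)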
*/ log_debug("dropped packet with reserved type 0x%2.2x", type); return false; } if (BNEP_TYPE_EXT(type) && !bnep_recv_extension(pkt)) return false; /* invalid extensions */ if (BNEP_TYPE(type) == BNEP_CONTROL || pkt->chan->state != CHANNEL_OPEN) return false; /* no forwarding */ return true; } static bool bnep_recv_extension(packet_t *pkt) { exthdr_t *eh; size_t len, size; uint8_t type; do { if (pkt->len < 2) return false; type = pkt->ptr[0]; size = pkt->ptr[1]; if (pkt->len < size + 2) return false; switch (type) { case BNEP_EXTENSION_CONTROL: len = bnep_recv_control(pkt->chan, pkt->ptr + 2, size, true); if (len != size) log_err("ignored spurious data in exthdr"); break; default: /* Unknown extension headers in data packets */ /* SHALL be forwarded irrespective of any */ /* network protocol or multicast filter settings */ /* and any local filtering policy. */ eh = malloc(sizeof(exthdr_t)); if (eh == NULL) { log_err("exthdr malloc() failed: %m"); break; } eh->ptr = pkt->ptr; eh->len = size; STAILQ_INSERT_TAIL(&pkt->extlist, eh, next); break; } packet_adj(pkt, size + 2); } while (BNEP_TYPE_EXT(type)); return true; } static size_t bnep_recv_control(channel_t *chan, uint8_t *ptr, size_t size, bool isext) { uint8_t type; size_t len; if (size-- < 1) return 0; type = *ptr++; switch (type) { case BNEP_CONTROL_COMMAND_NOT_UNDERSTOOD: len = bnep_recv_control_command_not_understood(chan, ptr, size); break; case BNEP_SETUP_CONNECTION_REQUEST: if (isext) return 0; /* not allowed in extension headers */ len = bnep_recv_setup_connection_req(chan, ptr, size); break; case BNEP_SETUP_CONNECTION_RESPONSE: if (isext) return 0; /* not allowed in extension headers */ len = bnep_recv_setup_connection_rsp(chan, ptr, size); break; case BNEP_FILTER_NET_TYPE_SET: len = bnep_recv_filter_net_type_set(chan, ptr, size); break; case BNEP_FILTER_NET_TYPE_RESPONSE: len = bnep_recv_filter_net_type_rsp(chan, ptr, size); break; case BNEP_FILTER_MULTI_ADDR_SET: len = bnep_recv_filter_multi_addr_set(chan, ptr, size); break; case BNEP_FILTER_MULTI_ADDR_RESPONSE: len = bnep_recv_filter_multi_addr_rsp(chan, ptr, size); break; default: len = 0; break; } if (len == 0) bnep_send_control(chan, BNEP_CONTROL_COMMAND_NOT_UNDERSTOOD, type); return len; } static size_t bnep_recv_control_command_not_understood(channel_t *chan, uint8_t *ptr, size_t size) { uint8_t type; if (size < 1) return 0; type = *ptr++; log_err("received Control Command Not Understood (0x%2.2x)", type); /* we didn't send any reserved commands, just cut them off */ channel_close(chan); return 1; } static size_t bnep_recv_setup_connection_req(channel_t *chan, uint8_t *ptr, size_t size) { uint8_t off; int src, dst, rsp; size_t len; if (size < 1) return 0; len = *ptr++; if (size < (len * 2 + 1)) return 0; if (chan->state != CHANNEL_WAIT_CONNECT_REQ && chan->state != CHANNEL_OPEN) { log_debug("ignored"); return (len * 2 + 1); } if (len == 2) off = 2; else if (len == 4) off = 0; else if (len == 16) off = 0; else { rsp = BNEP_SETUP_INVALID_UUID_SIZE; goto done; } if (memcmp(ptr, NAP_UUID + off, len) == 0) dst = SDP_SERVICE_CLASS_NAP; else if (memcmp(ptr, GN_UUID + off, len) == 0) dst = SDP_SERVICE_CLASS_GN; else if (memcmp(ptr, PANU_UUID + off, len) == 0) dst = SDP_SERVICE_CLASS_PANU; else dst = 0; if (dst != service_class) { rsp = BNEP_SETUP_INVALID_DST_UUID; goto done; } ptr += len; if (memcmp(ptr, NAP_UUID + off, len) == 0) src = SDP_SERVICE_CLASS_NAP; else if (memcmp(ptr, GN_UUID + off, len) == 0) src = SDP_SERVICE_CLASS_GN; else if (memcmp(ptr, PANU_UUID + off, len) == 
0) src = SDP_SERVICE_CLASS_PANU; else src = 0; if ((dst != SDP_SERVICE_CLASS_PANU && src != SDP_SERVICE_CLASS_PANU) || src == 0) { rsp = BNEP_SETUP_INVALID_SRC_UUID; goto done; } rsp = BNEP_SETUP_SUCCESS; chan->state = CHANNEL_OPEN; channel_timeout(chan, 0); done: log_debug("addr %s response 0x%2.2x", ether_ntoa((struct ether_addr *)chan->raddr), rsp); bnep_send_control(chan, BNEP_SETUP_CONNECTION_RESPONSE, rsp); return (len * 2 + 1); } static size_t bnep_recv_setup_connection_rsp(channel_t *chan, uint8_t *ptr, size_t size) { int rsp; if (size < 2) return 0; rsp = be16dec(ptr); if (chan->state != CHANNEL_WAIT_CONNECT_RSP) { log_debug("ignored"); return 2; } log_debug("addr %s response 0x%2.2x", ether_ntoa((struct ether_addr *)chan->raddr), rsp); if (rsp == BNEP_SETUP_SUCCESS) { chan->state = CHANNEL_OPEN; channel_timeout(chan, 0); } else { channel_close(chan); } return 2; } static size_t bnep_recv_filter_net_type_set(channel_t *chan, uint8_t *ptr, size_t size) { pfilter_t *pf; int i, nf, rsp; size_t len; if (size < 2) return 0; len = be16dec(ptr); ptr += 2; if (size < (len + 2)) return 0; if (chan->state != CHANNEL_OPEN) { log_debug("ignored"); return (len + 2); } nf = len / 4; pf = malloc(nf * sizeof(pfilter_t)); if (pf == NULL) { rsp = BNEP_FILTER_TOO_MANY_FILTERS; goto done; } log_debug("nf = %d", nf); for (i = 0; i < nf; i++) { pf[i].start = be16dec(ptr); ptr += 2; pf[i].end = be16dec(ptr); ptr += 2; if (pf[i].start > pf[i].end) { free(pf); rsp = BNEP_FILTER_INVALID_RANGE; goto done; } log_debug("pf[%d] = %#4.4x, %#4.4x", i, pf[i].start, pf[i].end); } if (chan->pfilter) free(chan->pfilter); chan->pfilter = pf; chan->npfilter = nf; rsp = BNEP_FILTER_SUCCESS; done: log_debug("addr %s response 0x%2.2x", ether_ntoa((struct ether_addr *)chan->raddr), rsp); bnep_send_control(chan, BNEP_FILTER_NET_TYPE_RESPONSE, rsp); return (len + 2); } static size_t bnep_recv_filter_net_type_rsp(channel_t *chan, uint8_t *ptr, size_t size) { int rsp; if (size < 2) return 0; if (chan->state != CHANNEL_OPEN) { log_debug("ignored"); return 2; } rsp = be16dec(ptr); log_debug("addr %s response 0x%2.2x", ether_ntoa((struct ether_addr *)chan->raddr), rsp); /* we did not send any filter_net_type_set message */ return 2; } static size_t bnep_recv_filter_multi_addr_set(channel_t *chan, uint8_t *ptr, size_t size) { mfilter_t *mf; int i, nf, rsp; size_t len; if (size < 2) return 0; len = be16dec(ptr); ptr += 2; if (size < (len + 2)) return 0; if (chan->state != CHANNEL_OPEN) { log_debug("ignored"); return (len + 2); } nf = len / (ETHER_ADDR_LEN * 2); mf = malloc(nf * sizeof(mfilter_t)); if (mf == NULL) { rsp = BNEP_FILTER_TOO_MANY_FILTERS; goto done; } log_debug("nf = %d", nf); for (i = 0; i < nf; i++) { memcpy(mf[i].start, ptr, ETHER_ADDR_LEN); ptr += ETHER_ADDR_LEN; memcpy(mf[i].end, ptr, ETHER_ADDR_LEN); ptr += ETHER_ADDR_LEN; if (memcmp(mf[i].start, mf[i].end, ETHER_ADDR_LEN) > 0) { free(mf); rsp = BNEP_FILTER_INVALID_RANGE; goto done; } log_debug("pf[%d] = " "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, " "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", i, mf[i].start[0], mf[i].start[1], mf[i].start[2], mf[i].start[3], mf[i].start[4], mf[i].start[5], mf[i].end[0], mf[i].end[1], mf[i].end[2], mf[i].end[3], mf[i].end[4], mf[i].end[5]); } if (chan->mfilter) free(chan->mfilter); chan->mfilter = mf; chan->nmfilter = nf; rsp = BNEP_FILTER_SUCCESS; done: log_debug("addr %s response 0x%2.2x", ether_ntoa((struct ether_addr *)chan->raddr), rsp); bnep_send_control(chan, BNEP_FILTER_MULTI_ADDR_RESPONSE, rsp); return (len + 2); } static size_t 
bnep_recv_filter_multi_addr_rsp(channel_t *chan, uint8_t *ptr, size_t size) { int rsp; if (size < 2) return false; if (chan->state != CHANNEL_OPEN) { log_debug("ignored"); return 2; } rsp = be16dec(ptr); log_debug("addr %s response 0x%2.2x", ether_ntoa((struct ether_addr *)chan->raddr), rsp); /* we did not send any filter_multi_addr_set message */ return 2; } void -bnep_send_control(channel_t *chan, uint8_t type, ...) +bnep_send_control(channel_t *chan, unsigned type, ...) { packet_t *pkt; uint8_t *p; va_list ap; assert(chan->state != CHANNEL_CLOSED); pkt = packet_alloc(chan); if (pkt == NULL) return; p = pkt->ptr; va_start(ap, type); *p++ = BNEP_CONTROL; - *p++ = type; + *p++ = (uint8_t)type; switch(type) { case BNEP_CONTROL_COMMAND_NOT_UNDERSTOOD: *p++ = va_arg(ap, int); break; case BNEP_SETUP_CONNECTION_REQUEST: *p++ = va_arg(ap, int); be16enc(p, va_arg(ap, int)); p += 2; be16enc(p, va_arg(ap, int)); p += 2; break; case BNEP_SETUP_CONNECTION_RESPONSE: case BNEP_FILTER_NET_TYPE_RESPONSE: case BNEP_FILTER_MULTI_ADDR_RESPONSE: be16enc(p, va_arg(ap, int)); p += 2; break; case BNEP_FILTER_NET_TYPE_SET: /* TODO */ case BNEP_FILTER_MULTI_ADDR_SET: /* TODO */ default: log_err("Can't send control type 0x%2.2x", type); break; } va_end(ap); pkt->len = p - pkt->ptr; channel_put(chan, pkt); packet_free(pkt); } /* * BNEP send packet routine * return true if packet can be removed from queue */ bool bnep_send(channel_t *chan, packet_t *pkt) { struct iovec iov[2]; uint8_t *p, *type, *proto; exthdr_t *eh; bool src, dst; size_t nw; if (pkt->type == NULL) { iov[0].iov_base = pkt->ptr; iov[0].iov_len = pkt->len; iov[1].iov_base = NULL; iov[1].iov_len = 0; } else { p = chan->sendbuf; dst = (memcmp(pkt->dst, chan->raddr, ETHER_ADDR_LEN) != 0); src = (memcmp(pkt->src, chan->laddr, ETHER_ADDR_LEN) != 0); type = p; p += 1; if (dst && src) *type = BNEP_GENERAL_ETHERNET; else if (dst && !src) *type = BNEP_COMPRESSED_ETHERNET_DST_ONLY; else if (!dst && src) *type = BNEP_COMPRESSED_ETHERNET_SRC_ONLY; else /* (!dst && !src) */ *type = BNEP_COMPRESSED_ETHERNET; if (dst) { memcpy(p, pkt->dst, ETHER_ADDR_LEN); p += ETHER_ADDR_LEN; } if (src) { memcpy(p, pkt->src, ETHER_ADDR_LEN); p += ETHER_ADDR_LEN; } proto = p; memcpy(p, pkt->type, ETHER_TYPE_LEN); p += ETHER_TYPE_LEN; STAILQ_FOREACH(eh, &pkt->extlist, next) { if (p + eh->len > chan->sendbuf + chan->mtu) break; *type |= BNEP_EXT; type = p; memcpy(p, eh->ptr, eh->len); p += eh->len; } *type &= ~BNEP_EXT; iov[0].iov_base = chan->sendbuf; iov[0].iov_len = (p - chan->sendbuf); if ((chan->npfilter == 0 || bnep_pfilter(chan, pkt)) && (chan->nmfilter == 0 || bnep_mfilter(chan, pkt))) { iov[1].iov_base = pkt->ptr; iov[1].iov_len = pkt->len; } else if (be16dec(proto) == ETHERTYPE_VLAN && pkt->len >= ETHER_VLAN_ENCAP_LEN) { iov[1].iov_base = pkt->ptr; iov[1].iov_len = ETHER_VLAN_ENCAP_LEN; } else { iov[1].iov_base = NULL; iov[1].iov_len = 0; memset(proto, 0, ETHER_TYPE_LEN); } } if (iov[0].iov_len + iov[1].iov_len > chan->mtu) { log_err("packet exceeded MTU (dropped)"); return false; } nw = writev(chan->fd, iov, __arraycount(iov)); return (nw > 0); } static bool bnep_pfilter(channel_t *chan, packet_t *pkt) { int proto, i; proto = be16dec(pkt->type); if (proto == ETHERTYPE_VLAN) { /* IEEE 802.1Q tag header */ if (pkt->len < 4) return false; proto = be16dec(pkt->ptr + 2); } for (i = 0; i < chan->npfilter; i++) { if (chan->pfilter[i].start <= proto && chan->pfilter[i].end >=proto) return true; } return false; } static bool bnep_mfilter(channel_t *chan, packet_t *pkt) { int i; if 
	if (!ETHER_IS_MULTICAST(pkt->dst))
		return true;

	for (i = 0; i < chan->nmfilter; i++) {
		if (memcmp(pkt->dst, chan->mfilter[i].start, ETHER_ADDR_LEN) >= 0 &&
		    memcmp(pkt->dst, chan->mfilter[i].end, ETHER_ADDR_LEN) <= 0)
			return true;
	}

	return false;
}
Index: projects/clang390-import/usr.sbin/bluetooth/btpand/btpand.h
===================================================================
--- projects/clang390-import/usr.sbin/bluetooth/btpand/btpand.h	(revision 305028)
+++ projects/clang390-import/usr.sbin/bluetooth/btpand/btpand.h	(revision 305029)
@@ -1,212 +1,212 @@
/* $NetBSD: btpand.h,v 1.1 2008/08/17 13:20:57 plunky Exp $ */

/*-
 * Copyright (c) 2008 Iain Hibbert
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $FreeBSD$ */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "event.h"

#ifndef __arraycount
#define __arraycount(__x)	(int)(sizeof((__x)) / sizeof((__x)[0]))
#endif

#ifndef L2CAP_PSM_INVALID
#define L2CAP_PSM_INVALID(psm)	(((psm) & 0x0101) != 0x0001)
#endif

#ifndef L2CAP_PSM_BNEP
#define L2CAP_PSM_BNEP		15
#endif

typedef struct channel	channel_t;
typedef struct pfilter	pfilter_t;
typedef struct mfilter	mfilter_t;
typedef struct packet	packet_t;
typedef struct pkthdr	pkthdr_t;
typedef struct pktlist	pktlist_t;
typedef struct exthdr	exthdr_t;
typedef struct extlist	extlist_t;

LIST_HEAD(chlist, channel);
STAILQ_HEAD(extlist, exthdr);
STAILQ_HEAD(pktlist, pkthdr);

enum channel_state {
	CHANNEL_CLOSED,
	CHANNEL_WAIT_CONNECT_REQ,
	CHANNEL_WAIT_CONNECT_RSP,
	CHANNEL_OPEN,
};

#define CHANNEL_MAXQLEN		128

/* BNEP or tap channel */
struct channel {
	enum channel_state	state;
	bool			oactive;

	uint8_t			laddr[ETHER_ADDR_LEN];
	uint8_t			raddr[ETHER_ADDR_LEN];
	size_t			mru;
	size_t			mtu;

	int			npfilter;
	pfilter_t *		pfilter;

	int			nmfilter;
	mfilter_t *		mfilter;

	pktlist_t		pktlist;
	int			qlen;

	int			fd;
	struct event		rd_ev;
	struct event		wr_ev;
	uint8_t *		sendbuf;

	bool			(*send)(channel_t *, packet_t *);
	bool			(*recv)(packet_t *);

	int			tick;

	struct pidfh		*pfh;

	int			refcnt;
	LIST_ENTRY(channel)	next;
};

/* network protocol type filter */
struct pfilter {
	uint16_t	start;
	uint16_t	end;
};

/* multicast address filter */
struct mfilter {
	uint8_t		start[ETHER_ADDR_LEN];
	uint8_t		end[ETHER_ADDR_LEN];
};

/* packet data buffer */
struct packet {
	channel_t *	chan;		/* source channel */
	uint8_t *	dst;		/* dest address */
	uint8_t *	src;		/* source address */
	uint8_t *	type;		/* protocol type */

	uint8_t *	ptr;		/* data pointer */
	size_t		len;		/* data length */

	int		refcnt;		/* reference count */
	extlist_t	extlist;	/* extension headers */
	uint8_t		buf[0];		/* data starts here */
};

/* extension header */
struct exthdr {
	STAILQ_ENTRY(exthdr)	next;
	uint8_t *	ptr;
	uint8_t		len;
};

/* packet header */
struct pkthdr {
	STAILQ_ENTRY(pkthdr)	next;
	packet_t *	data;
};

/* global variables */
extern const char *	control_path;
extern const char *	service_name;
extern const char *	interface_name;
extern bdaddr_t		local_bdaddr;
extern bdaddr_t		remote_bdaddr;
extern uint16_t		l2cap_psm;
extern int		l2cap_mode;
extern uint16_t		service_class;
extern int		server_limit;

/*
 * Bluetooth addresses are stored the other way around than
 * Ethernet addresses even though they are of the same family
 */
static inline void
b2eaddr(void *dst, bdaddr_t *src)
{
	uint8_t *d = dst;
	int i;

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		d[i] = src->b[ETHER_ADDR_LEN - i - 1];
}

#define log_err(fmt, args...)		syslog(LOG_ERR, fmt , ##args)
#define log_info(fmt, args...)		syslog(LOG_INFO, fmt , ##args)
#define log_notice(fmt, args...)	syslog(LOG_NOTICE, fmt , ##args)
#define log_debug(fmt, args...)		syslog(LOG_DEBUG, "%s: " fmt, __func__ , ##args)

/* bnep.c */
bool	bnep_send(channel_t *, packet_t *);
bool	bnep_recv(packet_t *);
-void	bnep_send_control(channel_t *, uint8_t, ...);
+void	bnep_send_control(channel_t *, unsigned, ...);

/* channel.c */
void		channel_init(void);
channel_t *	channel_alloc(void);
bool		channel_open(channel_t *, int);
void		channel_close(channel_t *);
void		channel_free(channel_t *);
void		channel_timeout(channel_t *, int);
void		channel_put(channel_t *, packet_t *);

/* client.c */
void	client_init(void);

/* packet.c */
packet_t *	packet_alloc(channel_t *);
void		packet_free(packet_t *);
void		packet_adj(packet_t *, size_t);
pkthdr_t *	pkthdr_alloc(packet_t *);
void		pkthdr_free(pkthdr_t *);

/* server.c */
void	server_init(void);
void	server_update(int);

/* tap.c */
void	tap_init(void);
Index: projects/clang390-import
===================================================================
--- projects/clang390-import	(revision 305028)
+++ projects/clang390-import	(revision 305029)

Property changes on: projects/clang390-import
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r305017-305028
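
For readers unfamiliar with BNEP header compression, the frame-type selection
in bnep_send() above can be illustrated with a small standalone sketch. The
packet-type values follow the Bluetooth BNEP 1.0 specification; the function
and variable names are local to this sketch and are not part of this commit.

/*
 * Minimal sketch of BNEP frame-type selection: an address that matches
 * the connection's own endpoint can be elided from the header, so the
 * most compact of the four Ethernet frame types is chosen.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETHER_ADDR_LEN	6

/* BNEP packet types, per the BNEP specification */
#define BNEP_GENERAL_ETHERNET			0x00
#define BNEP_COMPRESSED_ETHERNET		0x02
#define BNEP_COMPRESSED_ETHERNET_SRC_ONLY	0x03
#define BNEP_COMPRESSED_ETHERNET_DST_ONLY	0x04

static int
bnep_frame_type(const uint8_t *pkt_dst, const uint8_t *pkt_src,
    const uint8_t *raddr, const uint8_t *laddr)
{
	/* an address must be carried only if it differs from the endpoint */
	bool dst = (memcmp(pkt_dst, raddr, ETHER_ADDR_LEN) != 0);
	bool src = (memcmp(pkt_src, laddr, ETHER_ADDR_LEN) != 0);

	if (dst && src)
		return BNEP_GENERAL_ETHERNET;
	if (dst)
		return BNEP_COMPRESSED_ETHERNET_DST_ONLY;
	if (src)
		return BNEP_COMPRESSED_ETHERNET_SRC_ONLY;
	return BNEP_COMPRESSED_ETHERNET;
}

int
main(void)
{
	uint8_t laddr[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t raddr[] = { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb };

	/* dst == remote, src == local: both elided -> 0x02 */
	printf("type 0x%2.2x\n", bnep_frame_type(raddr, laddr, raddr, laddr));

	/* neither address matches its endpoint: full header -> 0x00 */
	printf("type 0x%2.2x\n", bnep_frame_type(laddr, raddr, raddr, laddr));
	return 0;
}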