Index: head/share/man/man9/mbuf.9 =================================================================== --- head/share/man/man9/mbuf.9 (revision 276749) +++ head/share/man/man9/mbuf.9 (revision 276750) @@ -1,1192 +1,1195 @@ .\" Copyright (c) 2000 FreeBSD Inc. .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL [your name] OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd October 21, 2014 +.Dd January 5, 2015 .Dt MBUF 9 .Os .\" .Sh NAME .Nm mbuf .Nd "memory management in the kernel IPC subsystem" .\" .Sh SYNOPSIS .In sys/param.h .In sys/systm.h .In sys/mbuf.h .\" .Ss Mbuf allocation macros .Fn MGET "struct mbuf *mbuf" "int how" "short type" .Fn MGETHDR "struct mbuf *mbuf" "int how" "short type" +.Ft int .Fn MCLGET "struct mbuf *mbuf" "int how" .Fo MEXTADD .Fa "struct mbuf *mbuf" .Fa "caddr_t buf" .Fa "u_int size" .Fa "void (*free)(void *opt_arg1, void *opt_arg2)" .Fa "void *opt_arg1" .Fa "void *opt_arg2" .Fa "short flags" .Fa "int type" .Fc .Fn MEXTFREE "struct mbuf *mbuf" .Fn MFREE "struct mbuf *mbuf" "struct mbuf *successor" .\" .Ss Mbuf utility macros .Fn mtod "struct mbuf *mbuf" "type" .Fn M_ALIGN "struct mbuf *mbuf" "u_int len" .Fn MH_ALIGN "struct mbuf *mbuf" "u_int len" .Ft int .Fn M_LEADINGSPACE "struct mbuf *mbuf" .Ft int .Fn M_TRAILINGSPACE "struct mbuf *mbuf" .Fn M_MOVE_PKTHDR "struct mbuf *to" "struct mbuf *from" .Fn M_PREPEND "struct mbuf *mbuf" "int len" "int how" .Fn MCHTYPE "struct mbuf *mbuf" "short type" .Ft int .Fn M_WRITABLE "struct mbuf *mbuf" .\" .Ss Mbuf allocation functions .Ft struct mbuf * .Fn m_get "int how" "short type" .Ft struct mbuf * .Fn m_get2 "int size" "int how" "short type" "int flags" .Ft struct mbuf * .Fn m_getm "struct mbuf *orig" "int len" "int how" "short type" .Ft struct mbuf * .Fn m_getjcl "int how" "short type" "int flags" "int size" .Ft struct mbuf * .Fn m_getcl "int how" "short type" "int flags" .Ft struct mbuf * .Fn m_getclr "int how" "short type" .Ft struct mbuf * .Fn m_gethdr "int how" "short type" .Ft struct mbuf * .Fn m_free "struct mbuf *mbuf" .Ft void .Fn m_freem "struct mbuf *mbuf" .\" .Ss Mbuf utility functions .Ft void .Fn m_adj "struct mbuf *mbuf" "int len" .Ft void .Fn m_align "struct mbuf *mbuf" "int len" .Ft int .Fn m_append "struct mbuf *mbuf" "int len" "c_caddr_t cp" .Ft struct mbuf * .Fn m_prepend "struct mbuf *mbuf" 
"int len" "int how" .Ft struct mbuf * .Fn m_copyup "struct mbuf *mbuf" "int len" "int dstoff" .Ft struct mbuf * .Fn m_pullup "struct mbuf *mbuf" "int len" .Ft struct mbuf * .Fn m_pulldown "struct mbuf *mbuf" "int offset" "int len" "int *offsetp" .Ft struct mbuf * .Fn m_copym "struct mbuf *mbuf" "int offset" "int len" "int how" .Ft struct mbuf * .Fn m_copypacket "struct mbuf *mbuf" "int how" .Ft struct mbuf * .Fn m_dup "struct mbuf *mbuf" "int how" .Ft void .Fn m_copydata "const struct mbuf *mbuf" "int offset" "int len" "caddr_t buf" .Ft void .Fn m_copyback "struct mbuf *mbuf" "int offset" "int len" "caddr_t buf" .Ft struct mbuf * .Fo m_devget .Fa "char *buf" .Fa "int len" .Fa "int offset" .Fa "struct ifnet *ifp" .Fa "void (*copy)(char *from, caddr_t to, u_int len)" .Fc .Ft void .Fn m_cat "struct mbuf *m" "struct mbuf *n" .Ft u_int .Fn m_fixhdr "struct mbuf *mbuf" .Ft void .Fn m_dup_pkthdr "struct mbuf *to" "struct mbuf *from" .Ft void .Fn m_move_pkthdr "struct mbuf *to" "struct mbuf *from" .Ft u_int .Fn m_length "struct mbuf *mbuf" "struct mbuf **last" .Ft struct mbuf * .Fn m_split "struct mbuf *mbuf" "int len" "int how" .Ft int .Fn m_apply "struct mbuf *mbuf" "int off" "int len" "int (*f)(void *arg, void *data, u_int len)" "void *arg" .Ft struct mbuf * .Fn m_getptr "struct mbuf *mbuf" "int loc" "int *off" .Ft struct mbuf * .Fn m_defrag "struct mbuf *m0" "int how" .Ft struct mbuf * .Fn m_unshare "struct mbuf *m0" "int how" .\" .Sh DESCRIPTION An .Vt mbuf is a basic unit of memory management in the kernel IPC subsystem. Network packets and socket buffers are stored in .Vt mbufs . A network packet may span multiple .Vt mbufs arranged into a .Vt mbuf chain (linked list), which allows adding or trimming network headers with little overhead. .Pp While a developer should not bother with .Vt mbuf internals without serious reason in order to avoid incompatibilities with future changes, it is useful to understand the general structure of an .Vt mbuf . .Pp An .Vt mbuf consists of a variable-sized header and a small internal buffer for data. The total size of an .Vt mbuf , .Dv MSIZE , is a constant defined in .In sys/param.h . The .Vt mbuf header includes: .Bl -tag -width "m_nextpkt" -offset indent .It Va m_next .Pq Vt struct mbuf * A pointer to the next .Vt mbuf in the .Vt mbuf chain . .It Va m_nextpkt .Pq Vt struct mbuf * A pointer to the next .Vt mbuf chain in the queue. .It Va m_data .Pq Vt caddr_t A pointer to data attached to this .Vt mbuf . .It Va m_len .Pq Vt int The length of the data. .It Va m_type .Pq Vt short The type of the data. .It Va m_flags .Pq Vt int The .Vt mbuf flags. 
.El .Pp The .Vt mbuf flag bits are defined as follows: .Bd -literal /* mbuf flags */ #define M_EXT 0x00000001 /* has associated external storage */ #define M_PKTHDR 0x00000002 /* start of record */ #define M_EOR 0x00000004 /* end of record */ #define M_RDONLY 0x00000008 /* associated data marked read-only */ #define M_PROTO1 0x00001000 /* protocol-specific */ #define M_PROTO2 0x00002000 /* protocol-specific */ #define M_PROTO3 0x00004000 /* protocol-specific */ #define M_PROTO4 0x00008000 /* protocol-specific */ #define M_PROTO5 0x00010000 /* protocol-specific */ #define M_PROTO6 0x00020000 /* protocol-specific */ #define M_PROTO7 0x00040000 /* protocol-specific */ #define M_PROTO8 0x00080000 /* protocol-specific */ #define M_PROTO9 0x00100000 /* protocol-specific */ #define M_PROTO10 0x00200000 /* protocol-specific */ #define M_PROTO11 0x00400000 /* protocol-specific */ #define M_PROTO12 0x00800000 /* protocol-specific */ /* mbuf pkthdr flags (also stored in m_flags) */ #define M_BCAST 0x00000010 /* send/received as link-level broadcast */ #define M_MCAST 0x00000020 /* send/received as link-level multicast */ .Ed .Pp The available .Vt mbuf types are defined as follows: .Bd -literal /* mbuf types */ #define MT_DATA 1 /* dynamic (data) allocation */ #define MT_HEADER MT_DATA /* packet header */ #define MT_SONAME 8 /* socket name */ #define MT_CONTROL 14 /* extra-data protocol message */ #define MT_OOBDATA 15 /* expedited data */ .Ed .Pp The available external buffer types are defined as follows: .Bd -literal /* external buffer types */ #define EXT_CLUSTER 1 /* mbuf cluster */ #define EXT_SFBUF 2 /* sendfile(2)'s sf_bufs */ #define EXT_JUMBOP 3 /* jumbo cluster 4096 bytes */ #define EXT_JUMBO9 4 /* jumbo cluster 9216 bytes */ #define EXT_JUMBO16 5 /* jumbo cluster 16184 bytes */ #define EXT_PACKET 6 /* mbuf+cluster from packet zone */ #define EXT_MBUF 7 /* external mbuf reference (M_IOVEC) */ #define EXT_NET_DRV 252 /* custom ext_buf provided by net driver(s) */ #define EXT_MOD_TYPE 253 /* custom module's ext_buf type */ #define EXT_DISPOSABLE 254 /* can throw this buffer away w/page flipping */ #define EXT_EXTREF 255 /* has externally maintained ref_cnt ptr */ .Ed .Pp If the .Dv M_PKTHDR flag is set, a .Vt struct pkthdr Va m_pkthdr is added to the .Vt mbuf header. It contains a pointer to the interface the packet has been received from .Pq Vt struct ifnet Va *rcvif , and the total packet length .Pq Vt int Va len . Optionally, it may also contain an attached list of packet tags .Pq Vt "struct m_tag" . See .Xr mbuf_tags 9 for details. Fields used in offloading checksum calculation to the hardware are kept in .Va m_pkthdr as well. See .Sx HARDWARE-ASSISTED CHECKSUM CALCULATION for details. .Pp If small enough, data is stored in the internal data buffer of an .Vt mbuf . If the data is sufficiently large, another .Vt mbuf may be added to the .Vt mbuf chain , or external storage may be associated with the .Vt mbuf . .Dv MHLEN bytes of data can fit into an .Vt mbuf with the .Dv M_PKTHDR flag set, and .Dv MLEN bytes otherwise. .Pp If external storage is being associated with an .Vt mbuf , the .Va m_ext header is added at the cost of losing the internal data buffer. It includes a pointer to external storage, the size of the storage, a pointer to a function used for freeing the storage, a pointer to an optional argument that can be passed to the function, and a pointer to a reference counter. An .Vt mbuf using external storage has the .Dv M_EXT flag set.
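.Pp
For illustration, a minimal sketch of attaching a driver-owned buffer as
external storage follows, using the
.Fn MEXTADD
macro described below; the
.Va drv_buf ,
.Va drv_size ,
and
.Fn drv_free
names are hypothetical and not part of this interface:
.Bd -literal -offset indent
/* Free routine invoked when the external storage is released. */
static void
drv_free(void *opt_arg1, void *opt_arg2)
{
	/* return opt_arg1 to the hypothetical driver buffer pool */
}

MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL)
	return (ENOBUFS);
/* Attach drv_buf as external storage; M_EXT will be set. */
MEXTADD(m, drv_buf, drv_size, drv_free, drv_buf, NULL, 0, EXT_EXTREF);
.Ed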
.Pp The system supplies a macro for allocating the desired external storage buffer, .Dv MEXTADD . .Pp The allocation and management of the reference counter is handled by the subsystem. .Pp The system also supplies a default type of external storage buffer called an .Vt mbuf cluster . .Vt Mbuf clusters can be allocated and configured with the use of the .Dv MCLGET macro. Each .Vt mbuf cluster is .Dv MCLBYTES in size, where .Dv MCLBYTES is a machine-dependent constant. The system defines an advisory macro .Dv MINCLSIZE , which is the smallest amount of data to put into an .Vt mbuf cluster . It is equal to .Dv MHLEN plus one. It is typically preferable to store data into the data region of an .Vt mbuf , if size permits, as opposed to allocating a separate .Vt mbuf cluster to hold the same data. .\" .Ss Macros and Functions There are numerous predefined macros and functions that provide the developer with common utilities. .\" .Bl -ohang -offset indent .It Fn mtod mbuf type Convert an .Fa mbuf pointer to a data pointer. The macro expands to the data pointer cast to the specified .Fa type . .Sy Note : It is advisable to ensure that there is enough contiguous data in .Fa mbuf . See .Fn m_pullup for details. .It Fn MGET mbuf how type Allocate an .Vt mbuf and initialize it to contain internal data. .Fa mbuf will point to the allocated .Vt mbuf on success, or be set to .Dv NULL on failure. The .Fa how argument is to be set to .Dv M_WAITOK or .Dv M_NOWAIT . It specifies whether the caller is willing to block if necessary. A number of other functions and macros related to .Vt mbufs have the same argument because they may at some point need to allocate new .Vt mbufs . .It Fn MGETHDR mbuf how type Allocate an .Vt mbuf and initialize it to contain a packet header and internal data. See .Fn MGET for details. .It Fn MEXTADD mbuf buf size free opt_arg1 opt_arg2 flags type Associate externally managed data with .Fa mbuf . Any internal data contained in the mbuf will be discarded, and the .Dv M_EXT flag will be set. The .Fa buf and .Fa size arguments are the address and length, respectively, of the data. The .Fa free argument points to a function which will be called to free the data when the mbuf is freed; it is only used if .Fa type is .Dv EXT_EXTREF . The .Fa opt_arg1 and .Fa opt_arg2 arguments will be passed unmodified to .Fa free . The .Fa flags argument specifies additional .Vt mbuf flags; it is not necessary to specify .Dv M_EXT . Finally, the .Fa type argument specifies the type of external data, which controls how it will be disposed of when the .Vt mbuf is freed. In most cases, the correct value is .Dv EXT_EXTREF . .It Fn MCLGET mbuf how Allocate and attach an .Vt mbuf cluster to .Fa mbuf . -If the macro fails, the +On success, a non-zero value is returned; otherwise, 0. +Historically, consumers would check for success by testing the .Dv M_EXT -flag will not be set in -.Fa mbuf . +flag on the mbuf, but this is now discouraged to avoid unnecessary awareness +of the implementation of external storage in protocol stacks and device +drivers. .It Fn M_ALIGN mbuf len Set the pointer .Fa mbuf->m_data to place an object of the size .Fa len at the end of the internal data area of .Fa mbuf , long word aligned. Applicable only if .Fa mbuf is newly allocated with .Fn MGET or .Fn m_get . .It Fn MH_ALIGN mbuf len Serves the same purpose as .Fn M_ALIGN does, but only for .Fa mbuf newly allocated with .Fn MGETHDR or .Fn m_gethdr , or initialized by .Fn m_dup_pkthdr or .Fn m_move_pkthdr .
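.Pp
As a minimal sketch (assuming a
.Va payload
buffer of
.Va len
bytes, where
.Va len
does not exceed
.Dv MHLEN ) ,
data can be placed at the end of a newly allocated packet header
.Vt mbuf
so that protocol headers may later be prepended without further
allocations:
.Bd -literal -offset indent
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL)
	return (ENOBUFS);
MH_ALIGN(m, len);	/* leave leading space for later headers */
bcopy(payload, mtod(m, caddr_t), len);
m->m_len = m->m_pkthdr.len = len;
.Ed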
.It Fn m_align mbuf len Serves the same purpose as .Fn M_ALIGN but handles any type of mbuf. .It Fn M_LEADINGSPACE mbuf Returns the number of bytes available before the beginning of data in .Fa mbuf . .It Fn M_TRAILINGSPACE mbuf Returns the number of bytes available after the end of data in .Fa mbuf . .It Fn M_PREPEND mbuf len how This macro operates on an .Vt mbuf chain . It is an optimized wrapper for .Fn m_prepend that can make use of possible empty space before data (e.g.\& left after trimming of a link-layer header). After the call, .Fa mbuf contains the new .Vt mbuf chain pointer, or .Dv NULL on failure. .It Fn M_MOVE_PKTHDR to from Using this macro is equivalent to calling .Fn m_move_pkthdr to from . .It Fn M_WRITABLE mbuf This macro evaluates to true if .Fa mbuf is not marked .Dv M_RDONLY and either .Fa mbuf does not contain external storage or, if it does, the reference count of the storage is not greater than 1. The .Dv M_RDONLY flag can be set in .Fa mbuf->m_flags . This can be achieved during setup of the external storage, by passing the .Dv M_RDONLY bit as a .Fa flags argument to the .Fn MEXTADD macro, or can be directly set in individual .Vt mbufs . .It Fn MCHTYPE mbuf type Change the type of .Fa mbuf to .Fa type . This is a relatively expensive operation and should be avoided. .El .Pp The functions are: .Bl -ohang -offset indent .It Fn m_get how type A function version of .Fn MGET for non-critical paths. .It Fn m_get2 size how type flags Allocate an .Vt mbuf with enough space to hold the specified amount of data. .It Fn m_getm orig len how type Allocate .Fa len bytes worth of .Vt mbufs and .Vt mbuf clusters if necessary and append the resulting allocated .Vt mbuf chain to the .Vt mbuf chain .Fa orig , if it is .No non- Ns Dv NULL . If the allocation fails at any point, free whatever was allocated and return .Dv NULL . If .Fa orig is .No non- Ns Dv NULL , it will not be freed. It is possible to use .Fn m_getm to either append .Fa len bytes to an existing .Vt mbuf or .Vt mbuf chain (for example, one which may be sitting in a pre-allocated ring) or to simply perform an all-or-nothing .Vt mbuf and .Vt mbuf cluster allocation. .It Fn m_gethdr how type A function version of .Fn MGETHDR for non-critical paths. .It Fn m_getcl how type flags Fetch an .Vt mbuf with a .Vt mbuf cluster attached to it. If one of the allocations fails, the entire allocation fails. This routine is the preferred way of fetching both the .Vt mbuf and .Vt mbuf cluster together, as it avoids having to unlock/relock between allocations. Returns .Dv NULL on failure. .It Fn m_getjcl how type flags size This is like .Fn m_getcl but the allocated cluster will be large enough to hold .Fa size bytes. .It Fn m_getclr how type Allocate an .Vt mbuf and zero out the data region. .It Fn m_free mbuf Frees .Fa mbuf . Returns .Va m_next of the freed .Vt mbuf . .El .Pp The functions below operate on .Vt mbuf chains . .Bl -ohang -offset indent .It Fn m_freem mbuf Free an entire .Vt mbuf chain , including any external storage. .\" .It Fn m_adj mbuf len Trim .Fa len bytes from the head of an .Vt mbuf chain if .Fa len is positive, from the tail otherwise. .\" .It Fn m_append mbuf len cp Append .Fa len bytes of data from the buffer .Fa cp to the .Vt mbuf chain . Extend the mbuf chain if the new data does not fit in existing space. .\" .It Fn m_prepend mbuf len how Allocate a new .Vt mbuf and prepend it to the .Vt mbuf chain , handling .Dv M_PKTHDR properly.
.Sy Note : It does not allocate any .Vt mbuf clusters , so .Fa len must be less than .Dv MLEN or .Dv MHLEN , depending on the .Dv M_PKTHDR flag setting. .\" .It Fn m_copyup mbuf len dstoff Similar to .Fn m_pullup but copies .Fa len bytes of data into a new mbuf at .Fa dstoff bytes into the mbuf. The .Fa dstoff argument aligns the data and leaves room for a link layer header. Returns the new .Vt mbuf chain on success, and frees the .Vt mbuf chain and returns .Dv NULL on failure. .Sy Note : The function does not allocate .Vt mbuf clusters , so .Fa len + dstoff must be less than .Dv MHLEN . .\" .It Fn m_pullup mbuf len Arrange that the first .Fa len bytes of an .Vt mbuf chain are contiguous and lie in the data area of .Fa mbuf , so they are accessible with .Fn mtod mbuf type . It is important to remember that this may involve reallocating some mbufs and moving data, so all pointers referencing data within the old mbuf chain must be recalculated or made invalid. Return the new .Vt mbuf chain on success, .Dv NULL on failure (the .Vt mbuf chain is freed in this case). .Sy Note : It does not allocate any .Vt mbuf clusters , so .Fa len must be less than or equal to .Dv MHLEN . .\" .It Fn m_pulldown mbuf offset len offsetp Arrange that .Fa len bytes between .Fa offset and .Fa offset + len in the .Vt mbuf chain are contiguous and lie in the data area of .Fa mbuf , so they are accessible with .Fn mtod mbuf type . .Fa len must be smaller than, or equal to, the size of an .Vt mbuf cluster . Return a pointer to an intermediate .Vt mbuf in the chain containing the requested region; the offset in the data region of the .Vt mbuf chain to the data contained in the returned mbuf is stored in .Fa *offsetp . If .Fa offsetp is NULL, the region may be accessed using .Fn mtod mbuf type . If .Fa offsetp is non-NULL, the region may be accessed using .Fn mtod mbuf uint8_t + *offsetp. The region of the mbuf chain between its beginning and .Fa offset is not modified, therefore it is safe to hold pointers to data within this region before calling .Fn m_pulldown . .\" .It Fn m_copym mbuf offset len how Make a copy of an .Vt mbuf chain starting .Fa offset bytes from the beginning, continuing for .Fa len bytes. If .Fa len is .Dv M_COPYALL , copy to the end of the .Vt mbuf chain . .Sy Note : The copy is read-only, because the .Vt mbuf clusters are not copied, only their reference counts are incremented. .\" .It Fn m_copypacket mbuf how Copy an entire packet including header, which must be present. This is an optimized version of the common case .Fn m_copym mbuf 0 M_COPYALL how . .Sy Note : the copy is read-only, because the .Vt mbuf clusters are not copied, only their reference counts are incremented. .\" .It Fn m_dup mbuf how Copy a packet header .Vt mbuf chain into a completely new .Vt mbuf chain , including copying any .Vt mbuf clusters . Use this instead of .Fn m_copypacket when you need a writable copy of an .Vt mbuf chain . .\" .It Fn m_copydata mbuf offset len buf Copy data from an .Vt mbuf chain starting .Fa offset bytes from the beginning, continuing for .Fa len bytes, into the indicated buffer .Fa buf . .\" .It Fn m_copyback mbuf offset len buf Copy .Fa len bytes from the buffer .Fa buf back into the indicated .Vt mbuf chain , starting at .Fa offset bytes from the beginning of the .Vt mbuf chain , extending the .Vt mbuf chain if necessary. .Sy Note : It does not allocate any .Vt mbuf clusters , just adds .Vt mbufs to the .Vt mbuf chain .
It is safe to set .Fa offset beyond the current .Vt mbuf chain end: zeroed .Vt mbufs will be allocated to fill the space. .\" .It Fn m_length mbuf last Return the length of the .Vt mbuf chain , and optionally a pointer to the last .Vt mbuf . .\" .It Fn m_dup_pkthdr to from how Upon the function's completion, the .Vt mbuf .Fa to will contain an identical copy of .Fa from->m_pkthdr and the per-packet attributes found in the .Vt mbuf chain .Fa from . The .Vt mbuf .Fa from must have the flag .Dv M_PKTHDR initially set, and .Fa to must be empty on entry. .\" .It Fn m_move_pkthdr to from Move .Va m_pkthdr and the per-packet attributes from the .Vt mbuf chain .Fa from to the .Vt mbuf .Fa to . The .Vt mbuf .Fa from must have the flag .Dv M_PKTHDR initially set, and .Fa to must be empty on entry. Upon the function's completion, .Fa from will have the flag .Dv M_PKTHDR and the per-packet attributes cleared. .\" .It Fn m_fixhdr mbuf Set the packet-header length to the length of the .Vt mbuf chain . .\" .It Fn m_devget buf len offset ifp copy Copy data from device local memory pointed to by .Fa buf to an .Vt mbuf chain . The copy is done using a specified copy routine .Fa copy , or .Fn bcopy if .Fa copy is .Dv NULL . .\" .It Fn m_cat m n Concatenate .Fa n to .Fa m . Both .Vt mbuf chains must be of the same type. .Fa n is still valid after the function returns. .Sy Note : It does not handle .Dv M_PKTHDR and friends. .\" .It Fn m_split mbuf len how Partition an .Vt mbuf chain in two pieces, returning the tail: all but the first .Fa len bytes. In case of failure, it returns .Dv NULL and attempts to restore the .Vt mbuf chain to its original state. .\" .It Fn m_apply mbuf off len f arg Apply a function to an .Vt mbuf chain , at offset .Fa off , for length .Fa len bytes. Typically used to avoid calls to .Fn m_pullup which would otherwise be unnecessary or undesirable. .Fa arg is a convenience argument which is passed to the callback function .Fa f . .Pp Each time .Fn f is called, it will be passed .Fa arg , a pointer to the .Fa data in the current mbuf, and the length .Fa len of the data in this mbuf to which the function should be applied. .Pp The function should return zero to indicate success; otherwise, if an error is indicated, then .Fn m_apply will return the error and stop iterating through the .Vt mbuf chain . .\" .It Fn m_getptr mbuf loc off Return a pointer to the mbuf containing the data located at .Fa loc bytes from the beginning of the .Vt mbuf chain . The corresponding offset into the mbuf will be stored in .Fa *off . .It Fn m_defrag m0 how Defragment an mbuf chain, returning the shortest possible chain of mbufs and clusters. If allocation fails and this cannot be completed, .Dv NULL will be returned and the original chain will be unchanged. Upon success, the original chain will be freed and the new chain will be returned. .Fa how should be either .Dv M_WAITOK or .Dv M_NOWAIT , depending on the caller's preference. .Pp This function is especially useful in network drivers, where certain long mbuf chains must be shortened before being added to TX descriptor lists. .It Fn m_unshare m0 how Create a version of the specified mbuf chain whose contents can be safely modified without affecting other users. If allocation fails and this operation cannot be completed, .Dv NULL will be returned. The original mbuf chain is always reclaimed and the reference count of any shared mbuf clusters is decremented. .Fa how should be either .Dv M_WAITOK or .Dv M_NOWAIT , depending on the caller's preference.
As a side-effect of this process, the returned mbuf chain may be compacted. .Pp This function is especially useful in the transmit path of network code, when data must be encrypted or otherwise altered prior to transmission. .El .Sh HARDWARE-ASSISTED CHECKSUM CALCULATION This section currently applies to TCP/IP only. To save host CPU resources, checksum computation is offloaded to the network interface hardware if possible. The .Va m_pkthdr member of the leading .Vt mbuf of a packet contains two fields used for that purpose, .Vt int Va csum_flags and .Vt int Va csum_data . The meaning of those fields depends on the direction a packet flows in, and on whether the packet is fragmented. Henceforth, .Va csum_flags or .Va csum_data of a packet will denote the corresponding field of the .Va m_pkthdr member of the leading .Vt mbuf in the .Vt mbuf chain containing the packet. .Pp On output, checksum offloading is attempted after the outgoing interface has been determined for a packet. The interface-specific field .Va ifnet.if_data.ifi_hwassist (see .Xr ifnet 9 ) is consulted for the capabilities of the interface to assist in computing checksums. The .Va csum_flags field of the packet header is set to indicate which actions the interface is supposed to perform on it. Actions unsupported by the network interface are performed in software prior to passing the packet down to the interface driver; such actions will never be requested through .Va csum_flags . .Pp The flags demanding a particular action from an interface are as follows: .Bl -tag -width ".Dv CSUM_TCP" -offset indent .It Dv CSUM_IP The IP header checksum is to be computed and stored in the corresponding field of the packet. The hardware is expected to know the format of an IP header to determine the offset of the IP checksum field. .It Dv CSUM_TCP The TCP checksum is to be computed. (See below.) .It Dv CSUM_UDP The UDP checksum is to be computed. (See below.) .El .Pp Should a TCP or UDP checksum be offloaded to the hardware, the field .Va csum_data will contain the byte offset of the checksum field relative to the end of the IP header. In this case, the checksum field will be initially set by the TCP/IP module to the checksum of the pseudo header defined by the TCP and UDP specifications. .Pp On input, an interface indicates the actions it has performed on a packet by setting one or more of the following flags in .Va csum_flags associated with the packet: .Bl -tag -width ".Dv CSUM_IP_CHECKED" -offset indent .It Dv CSUM_IP_CHECKED The IP header checksum has been computed. .It Dv CSUM_IP_VALID The IP header has a valid checksum. This flag can appear only in combination with .Dv CSUM_IP_CHECKED . .It Dv CSUM_DATA_VALID The checksum of the data portion of the IP packet has been computed and stored in the field .Va csum_data in network byte order. .It Dv CSUM_PSEUDO_HDR Can be set only along with .Dv CSUM_DATA_VALID to indicate that the IP data checksum found in .Va csum_data allows for the pseudo header defined by the TCP and UDP specifications. Otherwise, the checksum of the pseudo header must be calculated by the host CPU and added to .Va csum_data to obtain the final checksum to be used for TCP or UDP validation purposes.
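.Pp
For example, a hypothetical driver whose hardware only reports whether
the TCP or UDP checksum verified correctly (the
.Va hw_l4_csum_ok
status bit is an assumption, not a real interface) might mark a received
packet as follows, relying on the
.Li 0xFFFF
convention described below:
.Bd -literal -offset indent
if (hw_l4_csum_ok) {
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	m->m_pkthdr.csum_data = 0xffff;
}
.Ed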
.El .Pp If a particular network interface just indicates success or failure of TCP or UDP checksum validation without returning the exact value of the checksum to the host CPU, its driver can mark .Dv CSUM_DATA_VALID and .Dv CSUM_PSEUDO_HDR in .Va csum_flags , and set .Va csum_data to .Li 0xFFFF hexadecimal to indicate a valid checksum. It is a peculiarity of the algorithm used that the Internet checksum calculated over any valid packet will be .Li 0xFFFF as long as the original checksum field is included. .Sh STRESS TESTING When running a kernel compiled with the option .Dv MBUF_STRESS_TEST , the following .Xr sysctl 8 Ns -controlled options may be used to create various failure/extreme cases for testing of network drivers and other parts of the kernel that rely on .Vt mbufs . .Bl -tag -width ident .It Va net.inet.ip.mbuf_frag_size Causes .Fn ip_output to fragment outgoing .Vt mbuf chains into fragments of the specified size. Setting this variable to 1 is an excellent way to test the long .Vt mbuf chain handling ability of network drivers. .It Va kern.ipc.m_defragrandomfailures Causes the function .Fn m_defrag to randomly fail, returning .Dv NULL . Any piece of code which uses .Fn m_defrag should be tested with this feature. .El .Sh RETURN VALUES See above. .Sh SEE ALSO .Xr ifnet 9 , .Xr mbuf_tags 9 .Sh HISTORY .\" Please correct me if I'm wrong .Vt Mbufs appeared in an early version of .Bx . Besides being used for network packets, they were used to store various dynamic structures, such as routing table entries, interface addresses, protocol control blocks, etc. In more recent .Fx use of .Vt mbufs is almost entirely limited to packet storage, with .Xr uma 9 zones being used directly to store other network-related memory. .Pp Historically, the .Vt mbuf allocator has been a special-purpose memory allocator able to run in interrupt contexts and allocating from a special kernel address space map. As of .Fx 5.3 , the .Vt mbuf allocator is a wrapper around .Xr uma 9 , allowing caching of .Vt mbufs , clusters, and .Vt mbuf + cluster pairs in per-CPU caches, as well as bringing other benefits of slab allocation. .Sh AUTHORS The original .Nm manual page was written by .An Yar Tikhiy . The .Xr uma 9 .Vt mbuf allocator was written by .An Bosko Milekic . Index: head/sys/contrib/ipfilter/netinet/ip_fil_freebsd.c =================================================================== --- head/sys/contrib/ipfilter/netinet/ip_fil_freebsd.c (revision 276749) +++ head/sys/contrib/ipfilter/netinet/ip_fil_freebsd.c (revision 276750) @@ -1,1478 +1,1476 @@ /* $FreeBSD$ */ /* * Copyright (C) 2012 by Darren Reed. * * See the IPFILTER.LICENCE file for details on licencing. 
*/ #if !defined(lint) static const char sccsid[] = "@(#)ip_fil.c 2.41 6/5/96 (C) 1993-2000 Darren Reed"; static const char rcsid[] = "@(#)$Id$"; #endif #if defined(KERNEL) || defined(_KERNEL) # undef KERNEL # undef _KERNEL # define KERNEL 1 # define _KERNEL 1 #endif #if defined(__FreeBSD_version) && (__FreeBSD_version >= 400000) && \ !defined(KLD_MODULE) && !defined(IPFILTER_LKM) # include "opt_inet6.h" #endif #if defined(__FreeBSD_version) && (__FreeBSD_version >= 440000) && \ !defined(KLD_MODULE) && !defined(IPFILTER_LKM) # include "opt_random_ip_id.h" #endif #include #include #include #include # include # include #include #include # include #if defined(__FreeBSD_version) && (__FreeBSD_version >= 800000) #include #endif # include # include #if !defined(__hpux) # include #endif #include # include # include #include # include # include #include #include #include #include #include #include #include #if defined(__FreeBSD_version) && (__FreeBSD_version >= 800000) #include #else #define CURVNET_SET(arg) #define CURVNET_RESTORE() #endif #if defined(__osf__) # include #endif #include #include #include #include "netinet/ip_compat.h" #ifdef USE_INET6 # include #endif #include "netinet/ip_fil.h" #include "netinet/ip_nat.h" #include "netinet/ip_frag.h" #include "netinet/ip_state.h" #include "netinet/ip_proxy.h" #include "netinet/ip_auth.h" #include "netinet/ip_sync.h" #include "netinet/ip_lookup.h" #include "netinet/ip_dstlist.h" #ifdef IPFILTER_SCAN #include "netinet/ip_scan.h" #endif #include "netinet/ip_pool.h" # include #include #ifdef CSUM_DATA_VALID #include #endif extern int ip_optcopy __P((struct ip *, struct ip *)); # ifdef IPFILTER_M_IPFILTER MALLOC_DEFINE(M_IPFILTER, "ipfilter", "IP Filter packet filter data structures"); # endif static u_short ipid = 0; static int (*ipf_savep) __P((void *, ip_t *, int, void *, int, struct mbuf **)); static int ipf_send_ip __P((fr_info_t *, mb_t *)); static void ipf_timer_func __P((void *arg)); int ipf_locks_done = 0; ipf_main_softc_t ipfmain; # include # if defined(NETBSD_PF) # include # endif /* NETBSD_PF */ /* * We provide the ipf_checkp name just to minimize changes later. 
*/ int (*ipf_checkp) __P((void *, ip_t *ip, int hlen, void *ifp, int out, mb_t **mp)); static eventhandler_tag ipf_arrivetag, ipf_departtag, ipf_clonetag; static void ipf_ifevent(void *arg); static void ipf_ifevent(arg) void *arg; { ipf_sync(arg, NULL); } static int ipf_check_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir) { struct ip *ip = mtod(*mp, struct ip *); int rv; /* * IPFilter expects everything in network byte order */ #if (__FreeBSD_version < 1000019) ip->ip_len = htons(ip->ip_len); ip->ip_off = htons(ip->ip_off); #endif rv = ipf_check(&ipfmain, ip, ip->ip_hl << 2, ifp, (dir == PFIL_OUT), mp); #if (__FreeBSD_version < 1000019) if ((rv == 0) && (*mp != NULL)) { ip = mtod(*mp, struct ip *); ip->ip_len = ntohs(ip->ip_len); ip->ip_off = ntohs(ip->ip_off); } #endif return rv; } # ifdef USE_INET6 # include static int ipf_check_wrapper6(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir) { return (ipf_check(&ipfmain, mtod(*mp, struct ip *), sizeof(struct ip6_hdr), ifp, (dir == PFIL_OUT), mp)); } # endif #if defined(IPFILTER_LKM) int ipf_identify(s) char *s; { if (strcmp(s, "ipl") == 0) return 1; return 0; } #endif /* IPFILTER_LKM */ static void ipf_timer_func(arg) void *arg; { ipf_main_softc_t *softc = arg; SPL_INT(s); SPL_NET(s); READ_ENTER(&softc->ipf_global); if (softc->ipf_running > 0) ipf_slowtimer(softc); if (softc->ipf_running == -1 || softc->ipf_running == 1) { #if 0 softc->ipf_slow_ch = timeout(ipf_timer_func, softc, hz/2); #endif callout_init(&softc->ipf_slow_ch, CALLOUT_MPSAFE); callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT, ipf_timer_func, softc); } RWLOCK_EXIT(&softc->ipf_global); SPL_X(s); } int ipfattach(softc) ipf_main_softc_t *softc; { #ifdef USE_SPL int s; #endif SPL_NET(s); if (softc->ipf_running > 0) { SPL_X(s); return EBUSY; } if (ipf_init_all(softc) < 0) { SPL_X(s); return EIO; } if (ipf_checkp != ipf_check) { ipf_savep = ipf_checkp; ipf_checkp = ipf_check; } bzero((char *)ipfmain.ipf_selwait, sizeof(ipfmain.ipf_selwait)); softc->ipf_running = 1; if (softc->ipf_control_forwarding & 1) V_ipforwarding = 1; ipid = 0; SPL_X(s); #if 0 softc->ipf_slow_ch = timeout(ipf_timer_func, softc, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT); #endif callout_init(&softc->ipf_slow_ch, CALLOUT_MPSAFE); callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT, ipf_timer_func, softc); return 0; } /* * Disable the filter by removing the hooks from the IP input/output * stream. */ int ipfdetach(softc) ipf_main_softc_t *softc; { #ifdef USE_SPL int s; #endif if (softc->ipf_control_forwarding & 2) V_ipforwarding = 0; SPL_NET(s); #if 0 if (softc->ipf_slow_ch.callout != NULL) untimeout(ipf_timer_func, softc, softc->ipf_slow_ch); bzero(&softc->ipf_slow, sizeof(softc->ipf_slow)); #endif callout_drain(&softc->ipf_slow_ch); #ifndef NETBSD_PF if (ipf_checkp != NULL) ipf_checkp = ipf_savep; ipf_savep = NULL; #endif ipf_fini_all(softc); softc->ipf_running = -2; SPL_X(s); return 0; } /* * Filter ioctl interface.
*/ int ipfioctl(dev, cmd, data, mode , p) struct thread *p; # define p_cred td_ucred # define p_uid td_ucred->cr_ruid struct cdev *dev; ioctlcmd_t cmd; caddr_t data; int mode; { int error = 0, unit = 0; SPL_INT(s); #if (BSD >= 199306) if (securelevel_ge(p->p_cred, 3) && (mode & FWRITE)) { ipfmain.ipf_interror = 130001; return EPERM; } #endif unit = GET_MINOR(dev); if ((IPL_LOGMAX < unit) || (unit < 0)) { ipfmain.ipf_interror = 130002; return ENXIO; } if (ipfmain.ipf_running <= 0) { if (unit != IPL_LOGIPF && cmd != SIOCIPFINTERROR) { ipfmain.ipf_interror = 130003; return EIO; } if (cmd != SIOCIPFGETNEXT && cmd != SIOCIPFGET && cmd != SIOCIPFSET && cmd != SIOCFRENB && cmd != SIOCGETFS && cmd != SIOCGETFF && cmd != SIOCIPFINTERROR) { ipfmain.ipf_interror = 130004; return EIO; } } SPL_NET(s); CURVNET_SET(TD_TO_VNET(p)); error = ipf_ioctlswitch(&ipfmain, unit, data, cmd, mode, p->p_uid, p); CURVNET_RESTORE(); if (error != -1) { SPL_X(s); return error; } SPL_X(s); return error; } /* * ipf_send_reset - this could conceivably be a call to tcp_respond(), but that * requires a large amount of setting up and isn't any more efficient. */ int ipf_send_reset(fin) fr_info_t *fin; { struct tcphdr *tcp, *tcp2; int tlen = 0, hlen; struct mbuf *m; #ifdef USE_INET6 ip6_t *ip6; #endif ip_t *ip; tcp = fin->fin_dp; if (tcp->th_flags & TH_RST) return -1; /* feedback loop */ if (ipf_checkl4sum(fin) == -1) return -1; tlen = fin->fin_dlen - (TCP_OFF(tcp) << 2) + ((tcp->th_flags & TH_SYN) ? 1 : 0) + ((tcp->th_flags & TH_FIN) ? 1 : 0); #ifdef USE_INET6 hlen = (fin->fin_v == 6) ? sizeof(ip6_t) : sizeof(ip_t); #else hlen = sizeof(ip_t); #endif #ifdef MGETHDR MGETHDR(m, M_NOWAIT, MT_HEADER); #else MGET(m, M_NOWAIT, MT_HEADER); #endif if (m == NULL) return -1; if (sizeof(*tcp2) + hlen > MLEN) { - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { FREE_MB_T(m); return -1; } } m->m_len = sizeof(*tcp2) + hlen; #if (BSD >= 199103) m->m_data += max_linkhdr; m->m_pkthdr.len = m->m_len; m->m_pkthdr.rcvif = (struct ifnet *)0; #endif ip = mtod(m, struct ip *); bzero((char *)ip, hlen); #ifdef USE_INET6 ip6 = (ip6_t *)ip; #endif tcp2 = (struct tcphdr *)((char *)ip + hlen); tcp2->th_sport = tcp->th_dport; tcp2->th_dport = tcp->th_sport; if (tcp->th_flags & TH_ACK) { tcp2->th_seq = tcp->th_ack; tcp2->th_flags = TH_RST; tcp2->th_ack = 0; } else { tcp2->th_seq = 0; tcp2->th_ack = ntohl(tcp->th_seq); tcp2->th_ack += tlen; tcp2->th_ack = htonl(tcp2->th_ack); tcp2->th_flags = TH_RST|TH_ACK; } TCP_X2_A(tcp2, 0); TCP_OFF_A(tcp2, sizeof(*tcp2) >> 2); tcp2->th_win = tcp->th_win; tcp2->th_sum = 0; tcp2->th_urp = 0; #ifdef USE_INET6 if (fin->fin_v == 6) { ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow; ip6->ip6_plen = htons(sizeof(struct tcphdr)); ip6->ip6_nxt = IPPROTO_TCP; ip6->ip6_hlim = 0; ip6->ip6_src = fin->fin_dst6.in6; ip6->ip6_dst = fin->fin_src6.in6; tcp2->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*ip6), sizeof(*tcp2)); return ipf_send_ip(fin, m); } #endif ip->ip_p = IPPROTO_TCP; ip->ip_len = htons(sizeof(struct tcphdr)); ip->ip_src.s_addr = fin->fin_daddr; ip->ip_dst.s_addr = fin->fin_saddr; tcp2->th_sum = in_cksum(m, hlen + sizeof(*tcp2)); ip->ip_len = htons(hlen + sizeof(*tcp2)); return ipf_send_ip(fin, m); } /* * ip_len must be in network byte order when called. 
*/ static int ipf_send_ip(fin, m) fr_info_t *fin; mb_t *m; { fr_info_t fnew; ip_t *ip, *oip; int hlen; ip = mtod(m, ip_t *); bzero((char *)&fnew, sizeof(fnew)); fnew.fin_main_soft = fin->fin_main_soft; IP_V_A(ip, fin->fin_v); switch (fin->fin_v) { case 4 : oip = fin->fin_ip; hlen = sizeof(*oip); fnew.fin_v = 4; fnew.fin_p = ip->ip_p; fnew.fin_plen = ntohs(ip->ip_len); IP_HL_A(ip, sizeof(*oip) >> 2); ip->ip_tos = oip->ip_tos; ip->ip_id = fin->fin_ip->ip_id; #if defined(FreeBSD) && (__FreeBSD_version > 460000) ip->ip_off = htons(path_mtu_discovery ? IP_DF : 0); #else ip->ip_off = 0; #endif ip->ip_ttl = V_ip_defttl; ip->ip_sum = 0; break; #ifdef USE_INET6 case 6 : { ip6_t *ip6 = (ip6_t *)ip; ip6->ip6_vfc = 0x60; ip6->ip6_hlim = IPDEFTTL; hlen = sizeof(*ip6); fnew.fin_p = ip6->ip6_nxt; fnew.fin_v = 6; fnew.fin_plen = ntohs(ip6->ip6_plen) + hlen; break; } #endif default : return EINVAL; } #ifdef IPSEC m->m_pkthdr.rcvif = NULL; #endif fnew.fin_ifp = fin->fin_ifp; fnew.fin_flx = FI_NOCKSUM; fnew.fin_m = m; fnew.fin_ip = ip; fnew.fin_mp = &m; fnew.fin_hlen = hlen; fnew.fin_dp = (char *)ip + hlen; (void) ipf_makefrip(hlen, ip, &fnew); return ipf_fastroute(m, &m, &fnew, NULL); } int ipf_send_icmp_err(type, fin, dst) int type; fr_info_t *fin; int dst; { int err, hlen, xtra, iclen, ohlen, avail, code; struct in_addr dst4; struct icmp *icmp; struct mbuf *m; i6addr_t dst6; void *ifp; #ifdef USE_INET6 ip6_t *ip6; #endif ip_t *ip, *ip2; if ((type < 0) || (type >= ICMP_MAXTYPE)) return -1; code = fin->fin_icode; #ifdef USE_INET6 #if 0 /* XXX Fix an off by one error: s/>/>=/ was: if ((code < 0) || (code > sizeof(icmptoicmp6unreach)/sizeof(int))) Fix obtained from NetBSD ip_fil_netbsd.c r1.4: */ #endif if ((code < 0) || (code >= sizeof(icmptoicmp6unreach)/sizeof(int))) return -1; #endif if (ipf_checkl4sum(fin) == -1) return -1; #ifdef MGETHDR MGETHDR(m, M_NOWAIT, MT_HEADER); #else MGET(m, M_NOWAIT, MT_HEADER); #endif if (m == NULL) return -1; avail = MHLEN; xtra = 0; hlen = 0; ohlen = 0; dst4.s_addr = 0; ifp = fin->fin_ifp; if (fin->fin_v == 4) { if ((fin->fin_p == IPPROTO_ICMP) && !(fin->fin_flx & FI_SHORT)) switch (ntohs(fin->fin_data[0]) >> 8) { case ICMP_ECHO : case ICMP_TSTAMP : case ICMP_IREQ : case ICMP_MASKREQ : break; default : FREE_MB_T(m); return 0; } if (dst == 0) { if (ipf_ifpaddr(&ipfmain, 4, FRI_NORMAL, ifp, &dst6, NULL) == -1) { FREE_MB_T(m); return -1; } dst4 = dst6.in4; } else dst4.s_addr = fin->fin_daddr; hlen = sizeof(ip_t); ohlen = fin->fin_hlen; iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen; if (fin->fin_hlen < fin->fin_plen) xtra = MIN(fin->fin_dlen, 8); else xtra = 0; } #ifdef USE_INET6 else if (fin->fin_v == 6) { hlen = sizeof(ip6_t); ohlen = sizeof(ip6_t); iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen; type = icmptoicmp6types[type]; if (type == ICMP6_DST_UNREACH) code = icmptoicmp6unreach[code]; if (iclen + max_linkhdr + fin->fin_plen > avail) { - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { FREE_MB_T(m); return -1; } avail = MCLBYTES; } xtra = MIN(fin->fin_plen, avail - iclen - max_linkhdr); xtra = MIN(xtra, IPV6_MMTU - iclen); if (dst == 0) { if (ipf_ifpaddr(&ipfmain, 6, FRI_NORMAL, ifp, &dst6, NULL) == -1) { FREE_MB_T(m); return -1; } } else dst6 = fin->fin_dst6; } #endif else { FREE_MB_T(m); return -1; } avail -= (max_linkhdr + iclen); if (avail < 0) { FREE_MB_T(m); return -1; } if (xtra > avail) xtra = avail; iclen += xtra; m->m_data += max_linkhdr; m->m_pkthdr.rcvif = (struct ifnet *)0; m->m_pkthdr.len = iclen; 
m->m_len = iclen; ip = mtod(m, ip_t *); icmp = (struct icmp *)((char *)ip + hlen); ip2 = (ip_t *)&icmp->icmp_ip; icmp->icmp_type = type; icmp->icmp_code = fin->fin_icode; icmp->icmp_cksum = 0; #ifdef icmp_nextmtu if (type == ICMP_UNREACH && fin->fin_icode == ICMP_UNREACH_NEEDFRAG) { if (fin->fin_mtu != 0) { icmp->icmp_nextmtu = htons(fin->fin_mtu); } else if (ifp != NULL) { icmp->icmp_nextmtu = htons(GETIFMTU_4(ifp)); } else { /* make up a number... */ icmp->icmp_nextmtu = htons(fin->fin_plen - 20); } } #endif bcopy((char *)fin->fin_ip, (char *)ip2, ohlen); #ifdef USE_INET6 ip6 = (ip6_t *)ip; if (fin->fin_v == 6) { ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow; ip6->ip6_plen = htons(iclen - hlen); ip6->ip6_nxt = IPPROTO_ICMPV6; ip6->ip6_hlim = 0; ip6->ip6_src = dst6.in6; ip6->ip6_dst = fin->fin_src6.in6; if (xtra > 0) bcopy((char *)fin->fin_ip + ohlen, (char *)&icmp->icmp_ip + ohlen, xtra); icmp->icmp_cksum = in6_cksum(m, IPPROTO_ICMPV6, sizeof(*ip6), iclen - hlen); } else #endif { ip->ip_p = IPPROTO_ICMP; ip->ip_src.s_addr = dst4.s_addr; ip->ip_dst.s_addr = fin->fin_saddr; if (xtra > 0) bcopy((char *)fin->fin_ip + ohlen, (char *)&icmp->icmp_ip + ohlen, xtra); icmp->icmp_cksum = ipf_cksum((u_short *)icmp, sizeof(*icmp) + 8); ip->ip_len = htons(iclen); ip->ip_p = IPPROTO_ICMP; } err = ipf_send_ip(fin, m); return err; } /* * m0 - pointer to mbuf where the IP packet starts * mpp - pointer to the mbuf pointer that is the start of the mbuf chain */ int ipf_fastroute(m0, mpp, fin, fdp) mb_t *m0, **mpp; fr_info_t *fin; frdest_t *fdp; { register struct ip *ip, *mhip; register struct mbuf *m = *mpp; register struct route *ro; int len, off, error = 0, hlen, code; struct ifnet *ifp, *sifp; struct sockaddr_in *dst; struct route iproute; u_short ip_off; frdest_t node; frentry_t *fr; ro = NULL; #ifdef M_WRITABLE /* * HOT FIX/KLUDGE: * * If the mbuf we're about to send is not writable (because of * a cluster reference, for example) we'll need to make a copy * of it since this routine modifies the contents. * * If you have non-crappy network hardware that can transmit data * from the mbuf, rather than making a copy, this is gonna be a * problem. */ if (M_WRITABLE(m) == 0) { m0 = m_dup(m, M_NOWAIT); if (m0 != 0) { FREE_MB_T(m); m = m0; *mpp = m; } else { error = ENOBUFS; FREE_MB_T(m); goto done; } } #endif #ifdef USE_INET6 if (fin->fin_v == 6) { /* * currently "to " and "to :ip#" are not supported * for IPv6 */ return ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL); } #endif hlen = fin->fin_hlen; ip = mtod(m0, struct ip *); ifp = NULL; /* * Route packet. 
*/ ro = &iproute; bzero(ro, sizeof (*ro)); dst = (struct sockaddr_in *)&ro->ro_dst; dst->sin_family = AF_INET; dst->sin_addr = ip->ip_dst; fr = fin->fin_fr; if ((fr != NULL) && !(fr->fr_flags & FR_KEEPSTATE) && (fdp != NULL) && (fdp->fd_type == FRD_DSTLIST)) { if (ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &node) == 0) fdp = &node; } if (fdp != NULL) ifp = fdp->fd_ptr; else ifp = fin->fin_ifp; if ((ifp == NULL) && ((fr == NULL) || !(fr->fr_flags & FR_FASTROUTE))) { error = -2; goto bad; } if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0)) dst->sin_addr = fdp->fd_ip; dst->sin_len = sizeof(*dst); in_rtalloc(ro, M_GETFIB(m0)); if ((ifp == NULL) && (ro->ro_rt != NULL)) ifp = ro->ro_rt->rt_ifp; if ((ro->ro_rt == NULL) || (ifp == NULL)) { if (in_localaddr(ip->ip_dst)) error = EHOSTUNREACH; else error = ENETUNREACH; goto bad; } if (ro->ro_rt->rt_flags & RTF_GATEWAY) dst = (struct sockaddr_in *)ro->ro_rt->rt_gateway; if (ro->ro_rt) counter_u64_add(ro->ro_rt->rt_pksent, 1); /* * For input packets which are being "fastrouted", they won't * go back through output filtering and miss their chance to get * NAT'd and counted. Duplicated packets aren't considered to be * part of the normal packet stream, so do not NAT them or pass * them through stateful checking, etc. */ if ((fdp != &fr->fr_dif) && (fin->fin_out == 0)) { sifp = fin->fin_ifp; fin->fin_ifp = ifp; fin->fin_out = 1; (void) ipf_acctpkt(fin, NULL); fin->fin_fr = NULL; if (!fr || !(fr->fr_flags & FR_RETMASK)) { u_32_t pass; (void) ipf_state_check(fin, &pass); } switch (ipf_nat_checkout(fin, NULL)) { case 0 : break; case 1 : ip->ip_sum = 0; break; case -1 : error = -1; goto bad; break; } fin->fin_ifp = sifp; fin->fin_out = 0; } else ip->ip_sum = 0; /* * If small enough for interface, can just send directly. */ if (ntohs(ip->ip_len) <= ifp->if_mtu) { if (!ip->ip_sum) ip->ip_sum = in_cksum(m, hlen); error = (*ifp->if_output)(ifp, m, (struct sockaddr *)dst, ro ); goto done; } /* * Too large for interface; fragment if possible. * Must be able to put at least 8 bytes per fragment. */ ip_off = ntohs(ip->ip_off); if (ip_off & IP_DF) { error = EMSGSIZE; goto bad; } len = (ifp->if_mtu - hlen) &~ 7; if (len < 8) { error = EMSGSIZE; goto bad; } { int mhlen, firstlen = len; struct mbuf **mnext = &m->m_act; /* * Loop through length of segment after first fragment, * make new header and copy data of each part and link onto chain. */ m0 = m; mhlen = sizeof (struct ip); for (off = hlen + len; off < ntohs(ip->ip_len); off += len) { #ifdef MGETHDR MGETHDR(m, M_NOWAIT, MT_HEADER); #else MGET(m, M_NOWAIT, MT_HEADER); #endif if (m == 0) { m = m0; error = ENOBUFS; goto bad; } m->m_data += max_linkhdr; mhip = mtod(m, struct ip *); bcopy((char *)ip, (char *)mhip, sizeof(*ip)); if (hlen > sizeof (struct ip)) { mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip); IP_HL_A(mhip, mhlen >> 2); } m->m_len = mhlen; mhip->ip_off = ((off - hlen) >> 3) + ip_off; if (off + len >= ntohs(ip->ip_len)) len = ntohs(ip->ip_len) - off; else mhip->ip_off |= IP_MF; mhip->ip_len = htons((u_short)(len + mhlen)); *mnext = m; m->m_next = m_copy(m0, off, len); if (m->m_next == 0) { error = ENOBUFS; /* ??? */ goto sendorfree; } m->m_pkthdr.len = mhlen + len; m->m_pkthdr.rcvif = NULL; mhip->ip_off = htons((u_short)mhip->ip_off); mhip->ip_sum = 0; mhip->ip_sum = in_cksum(m, mhlen); mnext = &m->m_act; } /* * Update first fragment by trimming what's been copied out * and updating header, then send each fragment (in order). 
m_adj(m0, hlen + firstlen - ip->ip_len); ip->ip_len = htons((u_short)(hlen + firstlen)); ip->ip_off = htons((u_short)IP_MF); ip->ip_sum = 0; ip->ip_sum = in_cksum(m0, hlen); sendorfree: for (m = m0; m; m = m0) { m0 = m->m_act; m->m_act = 0; if (error == 0) error = (*ifp->if_output)(ifp, m, (struct sockaddr *)dst, ro ); else FREE_MB_T(m); } } done: if (!error) ipfmain.ipf_frouteok[0]++; else ipfmain.ipf_frouteok[1]++; if ((ro != NULL) && (ro->ro_rt != NULL)) { RTFREE(ro->ro_rt); } return 0; bad: if (error == EMSGSIZE) { sifp = fin->fin_ifp; code = fin->fin_icode; fin->fin_icode = ICMP_UNREACH_NEEDFRAG; fin->fin_ifp = ifp; (void) ipf_send_icmp_err(ICMP_UNREACH, fin, 1); fin->fin_ifp = sifp; fin->fin_icode = code; } FREE_MB_T(m); goto done; } int ipf_verifysrc(fin) fr_info_t *fin; { struct sockaddr_in *dst; struct route iproute; bzero((char *)&iproute, sizeof(iproute)); dst = (struct sockaddr_in *)&iproute.ro_dst; dst->sin_len = sizeof(*dst); dst->sin_family = AF_INET; dst->sin_addr = fin->fin_src; in_rtalloc(&iproute, 0); if (iproute.ro_rt == NULL) return 0; return (fin->fin_ifp == iproute.ro_rt->rt_ifp); } /* * return the first IP Address associated with an interface */ int ipf_ifpaddr(softc, v, atype, ifptr, inp, inpmask) ipf_main_softc_t *softc; int v, atype; void *ifptr; i6addr_t *inp, *inpmask; { #ifdef USE_INET6 struct in6_addr *inp6 = NULL; #endif struct sockaddr *sock, *mask; struct sockaddr_in *sin; struct ifaddr *ifa; struct ifnet *ifp; if ((ifptr == NULL) || (ifptr == (void *)-1)) return -1; sin = NULL; ifp = ifptr; if (v == 4) inp->in4.s_addr = 0; #ifdef USE_INET6 else if (v == 6) bzero((char *)inp, sizeof(*inp)); #endif ifa = TAILQ_FIRST(&ifp->if_addrhead); sock = ifa->ifa_addr; while (sock != NULL && ifa != NULL) { sin = (struct sockaddr_in *)sock; if ((v == 4) && (sin->sin_family == AF_INET)) break; #ifdef USE_INET6 if ((v == 6) && (sin->sin_family == AF_INET6)) { inp6 = &((struct sockaddr_in6 *)sin)->sin6_addr; if (!IN6_IS_ADDR_LINKLOCAL(inp6) && !IN6_IS_ADDR_LOOPBACK(inp6)) break; } #endif ifa = TAILQ_NEXT(ifa, ifa_link); if (ifa != NULL) sock = ifa->ifa_addr; } if (ifa == NULL || sin == NULL) return -1; mask = ifa->ifa_netmask; if (atype == FRI_BROADCAST) sock = ifa->ifa_broadaddr; else if (atype == FRI_PEERADDR) sock = ifa->ifa_dstaddr; if (sock == NULL) return -1; #ifdef USE_INET6 if (v == 6) { return ipf_ifpfillv6addr(atype, (struct sockaddr_in6 *)sock, (struct sockaddr_in6 *)mask, inp, inpmask); } #endif return ipf_ifpfillv4addr(atype, (struct sockaddr_in *)sock, (struct sockaddr_in *)mask, &inp->in4, &inpmask->in4); } u_32_t ipf_newisn(fin) fr_info_t *fin; { u_32_t newiss; newiss = arc4random(); return newiss; } /* ------------------------------------------------------------------------ */ /* Function: ipf_nextipid */ /* Returns: int - 0 == success, -1 == error (packet should be dropped) */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* Returns the next IPv4 ID to use for this packet. */ /* ------------------------------------------------------------------------ */ u_short ipf_nextipid(fin) fr_info_t *fin; { u_short id; #ifndef RANDOM_IP_ID MUTEX_ENTER(&ipfmain.ipf_rw); id = ipid++; MUTEX_EXIT(&ipfmain.ipf_rw); #else id = ip_randomid(); #endif return id; } INLINE int ipf_checkv4sum(fin) fr_info_t *fin; { #ifdef CSUM_DATA_VALID int manual = 0; u_short sum; ip_t *ip; mb_t *m; if ((fin->fin_flx & FI_NOCKSUM) != 0) return 0; if ((fin->fin_flx & FI_SHORT) != 0) return 1; if (fin->fin_cksum != FI_CK_NEEDED) return (fin->fin_cksum > FI_CK_NEEDED) ?
0 : -1; m = fin->fin_m; if (m == NULL) { manual = 1; goto skipauto; } ip = fin->fin_ip; if ((m->m_pkthdr.csum_flags & (CSUM_IP_CHECKED|CSUM_IP_VALID)) == CSUM_IP_CHECKED) { fin->fin_cksum = FI_CK_BAD; fin->fin_flx |= FI_BAD; return -1; } if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) sum = m->m_pkthdr.csum_data; else sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htonl(m->m_pkthdr.csum_data + fin->fin_dlen + fin->fin_p)); sum ^= 0xffff; if (sum != 0) { fin->fin_cksum = FI_CK_BAD; fin->fin_flx |= FI_BAD; } else { fin->fin_cksum = FI_CK_SUMOK; return 0; } } else { if (m->m_pkthdr.csum_flags == CSUM_DELAY_DATA) { fin->fin_cksum = FI_CK_L4FULL; return 0; } else if (m->m_pkthdr.csum_flags == CSUM_TCP || m->m_pkthdr.csum_flags == CSUM_UDP) { fin->fin_cksum = FI_CK_L4PART; return 0; } else if (m->m_pkthdr.csum_flags == CSUM_IP) { fin->fin_cksum = FI_CK_L4PART; return 0; } else { manual = 1; } } skipauto: if (manual != 0) { if (ipf_checkl4sum(fin) == -1) { fin->fin_flx |= FI_BAD; return -1; } } #else if (ipf_checkl4sum(fin) == -1) { fin->fin_flx |= FI_BAD; return -1; } #endif return 0; } #ifdef USE_INET6 INLINE int ipf_checkv6sum(fin) fr_info_t *fin; { if ((fin->fin_flx & FI_NOCKSUM) != 0) return 0; if ((fin->fin_flx & FI_SHORT) != 0) return 1; if (fin->fin_cksum != FI_CK_NEEDED) return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1; if (ipf_checkl4sum(fin) == -1) { fin->fin_flx |= FI_BAD; return -1; } return 0; } #endif /* USE_INET6 */ size_t mbufchainlen(m0) struct mbuf *m0; { size_t len; if ((m0->m_flags & M_PKTHDR) != 0) { len = m0->m_pkthdr.len; } else { struct mbuf *m; for (m = m0, len = 0; m != NULL; m = m->m_next) len += m->m_len; } return len; } /* ------------------------------------------------------------------------ */ /* Function: ipf_pullup */ /* Returns: NULL == pullup failed, else pointer to protocol header */ /* Parameters: xmin(I)- pointer to buffer where data packet starts */ /* fin(I) - pointer to packet information */ /* len(I) - number of bytes to pullup */ /* */ /* Attempt to move at least len bytes (from the start of the buffer) into a */ /* single buffer for ease of access. Operating system native functions are */ /* used to manage buffers - if necessary. If the entire packet ends up in */ /* a single buffer, set the FI_COALESCE flag even though ipf_coalesce() has */ /* not been called. Both fin_ip and fin_dp are updated before exiting _IF_ */ /* and ONLY if the pullup succeeds. */ /* */ /* We assume that 'xmin' is a pointer to a buffer that is part of the chain */ /* of buffers that starts at *fin->fin_mp. */ /* ------------------------------------------------------------------------ */ void * ipf_pullup(xmin, fin, len) mb_t *xmin; fr_info_t *fin; int len; { int dpoff, ipoff; mb_t *m = xmin; char *ip; if (m == NULL) return NULL; ip = (char *)fin->fin_ip; if ((fin->fin_flx & FI_COALESCE) != 0) return ip; ipoff = fin->fin_ipoff; if (fin->fin_dp != NULL) dpoff = (char *)fin->fin_dp - (char *)ip; else dpoff = 0; if (M_LEN(m) < len) { mb_t *n = *fin->fin_mp; /* * Assume that M_PKTHDR is set and just work with what is left * rather than check.. * Should not make any real difference, anyway. */ if (m != n) { /* * Record the mbuf that points to the mbuf that we're * about to go to work on so that we can update the * m_next appropriately later. 
*/ for (; n->m_next != m; n = n->m_next) ; } else { n = NULL; } #ifdef MHLEN if (len > MHLEN) #else if (len > MLEN) #endif { #ifdef HAVE_M_PULLDOWN if (m_pulldown(m, 0, len, NULL) == NULL) m = NULL; #else FREE_MB_T(*fin->fin_mp); m = NULL; n = NULL; #endif } else { m = m_pullup(m, len); } if (n != NULL) n->m_next = m; if (m == NULL) { /* * When n is non-NULL, it indicates that m pointed to * a sub-chain (tail) of the mbuf and that the head * of this chain has not yet been free'd. */ if (n != NULL) { FREE_MB_T(*fin->fin_mp); } *fin->fin_mp = NULL; fin->fin_m = NULL; return NULL; } if (n == NULL) *fin->fin_mp = m; while (M_LEN(m) == 0) { m = m->m_next; } fin->fin_m = m; ip = MTOD(m, char *) + ipoff; fin->fin_ip = (ip_t *)ip; if (fin->fin_dp != NULL) fin->fin_dp = (char *)fin->fin_ip + dpoff; if (fin->fin_fraghdr != NULL) fin->fin_fraghdr = (char *)ip + ((char *)fin->fin_fraghdr - (char *)fin->fin_ip); } if (len == fin->fin_plen) fin->fin_flx |= FI_COALESCE; return ip; } int ipf_inject(fin, m) fr_info_t *fin; mb_t *m; { int error = 0; if (fin->fin_out == 0) { netisr_dispatch(NETISR_IP, m); } else { fin->fin_ip->ip_len = ntohs(fin->fin_ip->ip_len); fin->fin_ip->ip_off = ntohs(fin->fin_ip->ip_off); error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL); } return error; } int ipf_pfil_unhook(void) { #if defined(NETBSD_PF) && (__FreeBSD_version >= 500011) struct pfil_head *ph_inet; # ifdef USE_INET6 struct pfil_head *ph_inet6; # endif #endif #ifdef NETBSD_PF ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); if (ph_inet != NULL) pfil_remove_hook((void *)ipf_check_wrapper, NULL, PFIL_IN|PFIL_OUT|PFIL_WAITOK, ph_inet); # ifdef USE_INET6 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); if (ph_inet6 != NULL) pfil_remove_hook((void *)ipf_check_wrapper6, NULL, PFIL_IN|PFIL_OUT|PFIL_WAITOK, ph_inet6); # endif #endif return (0); } int ipf_pfil_hook(void) { #if defined(NETBSD_PF) && (__FreeBSD_version >= 500011) struct pfil_head *ph_inet; # ifdef USE_INET6 struct pfil_head *ph_inet6; # endif #endif # ifdef NETBSD_PF ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); # ifdef USE_INET6 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); # endif if (ph_inet == NULL # ifdef USE_INET6 && ph_inet6 == NULL # endif ) { return ENODEV; } if (ph_inet != NULL) pfil_add_hook((void *)ipf_check_wrapper, NULL, PFIL_IN|PFIL_OUT|PFIL_WAITOK, ph_inet); # ifdef USE_INET6 if (ph_inet6 != NULL) pfil_add_hook((void *)ipf_check_wrapper6, NULL, PFIL_IN|PFIL_OUT|PFIL_WAITOK, ph_inet6); # endif # endif return (0); } void ipf_event_reg(void) { ipf_arrivetag = EVENTHANDLER_REGISTER(ifnet_arrival_event, \ ipf_ifevent, &ipfmain, \ EVENTHANDLER_PRI_ANY); ipf_departtag = EVENTHANDLER_REGISTER(ifnet_departure_event, \ ipf_ifevent, &ipfmain, \ EVENTHANDLER_PRI_ANY); ipf_clonetag = EVENTHANDLER_REGISTER(if_clone_event, ipf_ifevent, \ &ipfmain, EVENTHANDLER_PRI_ANY); } void ipf_event_dereg(void) { if (ipf_arrivetag != NULL) { EVENTHANDLER_DEREGISTER(ifnet_arrival_event, ipf_arrivetag); } if (ipf_departtag != NULL) { EVENTHANDLER_DEREGISTER(ifnet_departure_event, ipf_departtag); } if (ipf_clonetag != NULL) { EVENTHANDLER_DEREGISTER(if_clone_event, ipf_clonetag); } } u_32_t ipf_random() { return arc4random(); } u_int ipf_pcksum(fin, hlen, sum) fr_info_t *fin; int hlen; u_int sum; { struct mbuf *m; u_int sum2; int off; m = fin->fin_m; off = (char *)fin->fin_dp - (char *)fin->fin_ip; m->m_data += hlen; m->m_len -= hlen; sum2 = in_cksum(fin->fin_m, fin->fin_plen - off); m->m_len += hlen; m->m_data -= hlen; /* * Both sum and sum2 are partial sums, 
so combine them together. */ sum += ~sum2 & 0xffff; while (sum > 0xffff) sum = (sum & 0xffff) + (sum >> 16); sum2 = ~sum & 0xffff; return sum2; } Index: head/sys/dev/an/if_an.c =================================================================== --- head/sys/dev/an/if_an.c (revision 276749) +++ head/sys/dev/an/if_an.c (revision 276750) @@ -1,3823 +1,3821 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* * Aironet 4500/4800 802.11 PCMCIA/ISA/PCI driver for FreeBSD. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ #include __FBSDID("$FreeBSD$"); /* * The Aironet 4500/4800 series cards come in PCMCIA, ISA and PCI form. * This driver supports all three device types (PCI devices are supported * through an extra PCI shim: /sys/dev/an/if_an_pci.c). ISA devices can be * supported either using hard-coded IO port/IRQ settings or via Plug * and Play. The 4500 series devices support 1Mbps and 2Mbps data rates. * The 4800 devices support 1, 2, 5.5 and 11Mbps rates. * * Like the WaveLAN/IEEE cards, the Aironet NICs are all essentially * PCMCIA devices. The ISA and PCI cards are a combination of a PCMCIA * device and a PCMCIA to ISA or PCMCIA to PCI adapter card. There are * a couple of important differences though: * * - Lucent ISA card looks to the host like a PCMCIA controller with * a PCMCIA WaveLAN card inserted. This means that even desktop * machines need to be configured with PCMCIA support in order to * use WaveLAN/IEEE ISA cards. The Aironet cards on the other hand * actually look like normal ISA and PCI devices to the host, so * no PCMCIA controller support is needed * * The latter point results in a small gotcha. The Aironet PCMCIA * cards can be configured for one of two operating modes depending * on how the Vpp1 and Vpp2 programming voltages are set when the * card is activated. 
In order to put the card in proper PCMCIA * operation (where the CIS table is visible and the interface is * programmed for PCMCIA operation), both Vpp1 and Vpp2 have to be * set to 5 volts. FreeBSD by default doesn't set the Vpp voltages, * which leaves the card in ISA/PCI mode, which prevents it from * being activated as a PCMCIA device. * * Note that some PCMCIA controller software packages for Windows NT * fail to set the voltages as well. * * The Aironet devices can operate in both station mode and access point * mode. Typically, when programmed for station mode, the card can be set * to automatically perform encapsulation/decapsulation of Ethernet II * and 802.3 frames within 802.11 frames so that the host doesn't have * to do it itself. This driver doesn't program the card that way: the * driver handles all of the encapsulation/decapsulation itself. */ #include "opt_inet.h" #ifdef INET #define ANCACHE /* enable signal strength cache */ #endif #include #include #include #include #include #include #include #include #include #ifdef ANCACHE #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include /* These are global because we need them in sys/pci/if_an_p.c. */ static void an_reset(struct an_softc *); static int an_init_mpi350_desc(struct an_softc *); static int an_ioctl(struct ifnet *, u_long, caddr_t); static void an_init(void *); static void an_init_locked(struct an_softc *); static int an_init_tx_ring(struct an_softc *); static void an_start(struct ifnet *); static void an_start_locked(struct ifnet *); static void an_watchdog(struct an_softc *); static void an_rxeof(struct an_softc *); static void an_txeof(struct an_softc *, int); static void an_promisc(struct an_softc *, int); static int an_cmd(struct an_softc *, int, int); static int an_cmd_struct(struct an_softc *, struct an_command *, struct an_reply *); static int an_read_record(struct an_softc *, struct an_ltv_gen *); static int an_write_record(struct an_softc *, struct an_ltv_gen *); static int an_read_data(struct an_softc *, int, int, caddr_t, int); static int an_write_data(struct an_softc *, int, int, caddr_t, int); static int an_seek(struct an_softc *, int, int, int); static int an_alloc_nicmem(struct an_softc *, int, int *); static int an_dma_malloc(struct an_softc *, bus_size_t, struct an_dma_alloc *, int); static void an_dma_free(struct an_softc *, struct an_dma_alloc *); static void an_dma_malloc_cb(void *, bus_dma_segment_t *, int, int); static void an_stats_update(void *); static void an_setdef(struct an_softc *, struct an_req *); #ifdef ANCACHE static void an_cache_store(struct an_softc *, struct ether_header *, struct mbuf *, u_int8_t, u_int8_t); #endif /* function definitions for use with Cisco's Linux configuration utilities */ static int readrids(struct ifnet*, struct aironet_ioctl*); static int writerids(struct ifnet*, struct aironet_ioctl*); static int flashcard(struct ifnet*, struct aironet_ioctl*); static int cmdreset(struct ifnet *); static int setflashmode(struct ifnet *); static int flashgchar(struct ifnet *,int,int); static int flashpchar(struct ifnet *,int,int); static int flashputbuf(struct ifnet *); static int flashrestart(struct ifnet *); static int WaitBusy(struct ifnet *, int); static int unstickbusy(struct ifnet *); static void an_dump_record (struct an_softc *,struct an_ltv_gen *,
char *); static int an_media_change (struct ifnet *); static void an_media_status (struct ifnet *, struct ifmediareq *); static int an_dump = 0; static int an_cache_mode = 0; #define DBM 0 #define PERCENT 1 #define RAW 2 static char an_conf[256]; static char an_conf_cache[256]; /* sysctl vars */ static SYSCTL_NODE(_hw, OID_AUTO, an, CTLFLAG_RD, 0, "Wireless driver parameters"); /* XXX violate ethernet/netgraph callback hooks */ extern void (*ng_ether_attach_p)(struct ifnet *ifp); extern void (*ng_ether_detach_p)(struct ifnet *ifp); static int sysctl_an_dump(SYSCTL_HANDLER_ARGS) { int error, r, last; char *s = an_conf; last = an_dump; switch (an_dump) { case 0: strcpy(an_conf, "off"); break; case 1: strcpy(an_conf, "type"); break; case 2: strcpy(an_conf, "dump"); break; default: snprintf(an_conf, 5, "%x", an_dump); break; } error = sysctl_handle_string(oidp, an_conf, sizeof(an_conf), req); if (strncmp(an_conf,"off", 3) == 0) { an_dump = 0; } if (strncmp(an_conf,"dump", 4) == 0) { an_dump = 1; } if (strncmp(an_conf,"type", 4) == 0) { an_dump = 2; } if (*s == 'f') { r = 0; for (;;s++) { if ((*s >= '0') && (*s <= '9')) { r = r * 16 + (*s - '0'); } else if ((*s >= 'a') && (*s <= 'f')) { r = r * 16 + (*s - 'a' + 10); } else { break; } } an_dump = r; } if (an_dump != last) printf("Sysctl changed for Aironet driver\n"); return error; } SYSCTL_PROC(_hw_an, OID_AUTO, an_dump, CTLTYPE_STRING | CTLFLAG_RW, 0, sizeof(an_conf), sysctl_an_dump, "A", ""); static int sysctl_an_cache_mode(SYSCTL_HANDLER_ARGS) { int error; switch (an_cache_mode) { case 1: strcpy(an_conf_cache, "per"); break; case 2: strcpy(an_conf_cache, "raw"); break; default: strcpy(an_conf_cache, "dbm"); break; } error = sysctl_handle_string(oidp, an_conf_cache, sizeof(an_conf_cache), req); if (strncmp(an_conf_cache,"dbm", 3) == 0) { an_cache_mode = 0; } if (strncmp(an_conf_cache,"per", 3) == 0) { an_cache_mode = 1; } if (strncmp(an_conf_cache,"raw", 3) == 0) { an_cache_mode = 2; } return error; } SYSCTL_PROC(_hw_an, OID_AUTO, an_cache_mode, CTLTYPE_STRING | CTLFLAG_RW, 0, sizeof(an_conf_cache), sysctl_an_cache_mode, "A", ""); /* * Set up the lock for PCI attachment since it skips the an_probe * function. We need to set up the lock in an_probe since some * operations need the lock. So we might as well create the * lock in the probe. */ int an_pci_probe(device_t dev) { struct an_softc *sc = device_get_softc(dev); mtx_init(&sc->an_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); return(0); } /* * We probe for an Aironet 4500/4800 card by attempting to * read the default SSID list. On reset, the first entry in * the SSID list will contain the name "tsunami." If we don't * find this, then there's no card present. */ int an_probe(device_t dev) { struct an_softc *sc = device_get_softc(dev); struct an_ltv_ssidlist_new ssid; int error; bzero((char *)&ssid, sizeof(ssid)); error = an_alloc_port(dev, 0, AN_IOSIZ); if (error != 0) return (0); /* can't do autoprobing */ if (rman_get_start(sc->port_res) == -1) return(0); /* * We need to fake up a softc structure long enough * to be able to issue commands and call some of the * other routines. */ ssid.an_len = sizeof(ssid); ssid.an_type = AN_RID_SSIDLIST; /* Make sure interrupts are disabled.
*/ sc->mpi350 = 0; CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), 0); CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), 0xFFFF); sc->an_dev = dev; mtx_init(&sc->an_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); AN_LOCK(sc); an_reset(sc); if (an_cmd(sc, AN_CMD_READCFG, 0)) { AN_UNLOCK(sc); goto fail; } if (an_read_record(sc, (struct an_ltv_gen *)&ssid)) { AN_UNLOCK(sc); goto fail; } /* See if the ssid matches what we expect ... but doesn't have to */ if (strcmp(ssid.an_entry[0].an_ssid, AN_DEF_SSID)) { AN_UNLOCK(sc); goto fail; } AN_UNLOCK(sc); return(AN_IOSIZ); fail: mtx_destroy(&sc->an_mtx); return(0); } /* * Allocate a port resource with the given resource id. */ int an_alloc_port(device_t dev, int rid, int size) { struct an_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0ul, ~0ul, size, RF_ACTIVE); if (res) { sc->port_rid = rid; sc->port_res = res; return (0); } else { return (ENOENT); } } /* * Allocate a memory resource with the given resource id. */ int an_alloc_memory(device_t dev, int rid, int size) { struct an_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0ul, ~0ul, size, RF_ACTIVE); if (res) { sc->mem_rid = rid; sc->mem_res = res; sc->mem_used = size; return (0); } else { return (ENOENT); } } /* * Allocate an auxiliary memory resource with the given resource id. */ int an_alloc_aux_memory(device_t dev, int rid, int size) { struct an_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0ul, ~0ul, size, RF_ACTIVE); if (res) { sc->mem_aux_rid = rid; sc->mem_aux_res = res; sc->mem_aux_used = size; return (0); } else { return (ENOENT); } } /* * Allocate an irq resource with the given resource id.
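 *
 * The helpers above all follow the same rid-based resource idiom;
 * a minimal sketch of it (illustrative only, not driver code),
 * where every successful allocation is later paired with a
 * bus_release_resource() call using the same rid:
 *
 *	int rid = 0;
 *	struct resource *r;
 *
 *	r = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
 *	if (r == NULL)
 *		return (ENOENT);
 *	// ... use the resource ...
 *	bus_release_resource(dev, SYS_RES_IRQ, rid, r);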
*/ int an_alloc_irq(device_t dev, int rid, int flags) { struct an_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, (RF_ACTIVE | flags)); if (res) { sc->irq_rid = rid; sc->irq_res = res; return (0); } else { return (ENOENT); } } static void an_dma_malloc_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } /* * Alloc DMA memory and set the pointer to it */ static int an_dma_malloc(struct an_softc *sc, bus_size_t size, struct an_dma_alloc *dma, int mapflags) { int r; r = bus_dmamem_alloc(sc->an_dtag, (void**) &dma->an_dma_vaddr, BUS_DMA_NOWAIT, &dma->an_dma_map); if (r != 0) goto fail_1; r = bus_dmamap_load(sc->an_dtag, dma->an_dma_map, dma->an_dma_vaddr, size, an_dma_malloc_cb, &dma->an_dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) goto fail_2; dma->an_dma_size = size; return (0); fail_2: bus_dmamap_unload(sc->an_dtag, dma->an_dma_map); fail_1: bus_dmamem_free(sc->an_dtag, dma->an_dma_vaddr, dma->an_dma_map); return (r); } static void an_dma_free(struct an_softc *sc, struct an_dma_alloc *dma) { bus_dmamap_unload(sc->an_dtag, dma->an_dma_map); bus_dmamem_free(sc->an_dtag, dma->an_dma_vaddr, dma->an_dma_map); dma->an_dma_vaddr = 0; } /* * Release all resources */ void an_release_resources(device_t dev) { struct an_softc *sc = device_get_softc(dev); int i; if (sc->port_res) { bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); sc->port_res = 0; } if (sc->mem_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); sc->mem_res = 0; } if (sc->mem_aux_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_aux_rid, sc->mem_aux_res); sc->mem_aux_res = 0; } if (sc->irq_res) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = 0; } if (sc->an_rid_buffer.an_dma_paddr) { an_dma_free(sc, &sc->an_rid_buffer); } for (i = 0; i < AN_MAX_RX_DESC; i++) if (sc->an_rx_buffer[i].an_dma_paddr) { an_dma_free(sc, &sc->an_rx_buffer[i]); } for (i = 0; i < AN_MAX_TX_DESC; i++) if (sc->an_tx_buffer[i].an_dma_paddr) { an_dma_free(sc, &sc->an_tx_buffer[i]); } if (sc->an_dtag) { bus_dma_tag_destroy(sc->an_dtag); } } int an_init_mpi350_desc(struct an_softc *sc) { struct an_command cmd_struct; struct an_reply reply; struct an_card_rid_desc an_rid_desc; struct an_card_rx_desc an_rx_desc; struct an_card_tx_desc an_tx_desc; int i, desc; AN_LOCK_ASSERT(sc); if(!sc->an_rid_buffer.an_dma_paddr) an_dma_malloc(sc, AN_RID_BUFFER_SIZE, &sc->an_rid_buffer, 0); for (i = 0; i < AN_MAX_RX_DESC; i++) if(!sc->an_rx_buffer[i].an_dma_paddr) an_dma_malloc(sc, AN_RX_BUFFER_SIZE, &sc->an_rx_buffer[i], 0); for (i = 0; i < AN_MAX_TX_DESC; i++) if(!sc->an_tx_buffer[i].an_dma_paddr) an_dma_malloc(sc, AN_TX_BUFFER_SIZE, &sc->an_tx_buffer[i], 0); /* * Allocate RX descriptor */ bzero(&reply,sizeof(reply)); cmd_struct.an_cmd = AN_CMD_ALLOC_DESC; cmd_struct.an_parm0 = AN_DESCRIPTOR_RX; cmd_struct.an_parm1 = AN_RX_DESC_OFFSET; cmd_struct.an_parm2 = AN_MAX_RX_DESC; if (an_cmd_struct(sc, &cmd_struct, &reply)) { if_printf(sc->an_ifp, "failed to allocate RX descriptor\n"); return(EIO); } for (desc = 0; desc < AN_MAX_RX_DESC; desc++) { bzero(&an_rx_desc, sizeof(an_rx_desc)); an_rx_desc.an_valid = 1; an_rx_desc.an_len = AN_RX_BUFFER_SIZE; an_rx_desc.an_done = 0; an_rx_desc.an_phys = sc->an_rx_buffer[desc].an_dma_paddr; for (i = 0; i < sizeof(an_rx_desc) / 4; i++) CSR_MEM_AUX_WRITE_4(sc, AN_RX_DESC_OFFSET + (desc * sizeof(an_rx_desc)) + (i * 4), 
((u_int32_t *)(void *)&an_rx_desc)[i]); } /* * Allocate TX descriptor */ bzero(&reply,sizeof(reply)); cmd_struct.an_cmd = AN_CMD_ALLOC_DESC; cmd_struct.an_parm0 = AN_DESCRIPTOR_TX; cmd_struct.an_parm1 = AN_TX_DESC_OFFSET; cmd_struct.an_parm2 = AN_MAX_TX_DESC; if (an_cmd_struct(sc, &cmd_struct, &reply)) { if_printf(sc->an_ifp, "failed to allocate TX descriptor\n"); return(EIO); } for (desc = 0; desc < AN_MAX_TX_DESC; desc++) { bzero(&an_tx_desc, sizeof(an_tx_desc)); an_tx_desc.an_offset = 0; an_tx_desc.an_eoc = 0; an_tx_desc.an_valid = 0; an_tx_desc.an_len = 0; an_tx_desc.an_phys = sc->an_tx_buffer[desc].an_dma_paddr; for (i = 0; i < sizeof(an_tx_desc) / 4; i++) CSR_MEM_AUX_WRITE_4(sc, AN_TX_DESC_OFFSET + (desc * sizeof(an_tx_desc)) + (i * 4), ((u_int32_t *)(void *)&an_tx_desc)[i]); } /* * Allocate RID descriptor */ bzero(&reply,sizeof(reply)); cmd_struct.an_cmd = AN_CMD_ALLOC_DESC; cmd_struct.an_parm0 = AN_DESCRIPTOR_HOSTRW; cmd_struct.an_parm1 = AN_HOST_DESC_OFFSET; cmd_struct.an_parm2 = 1; if (an_cmd_struct(sc, &cmd_struct, &reply)) { if_printf(sc->an_ifp, "failed to allocate host descriptor\n"); return(EIO); } bzero(&an_rid_desc, sizeof(an_rid_desc)); an_rid_desc.an_valid = 1; an_rid_desc.an_len = AN_RID_BUFFER_SIZE; an_rid_desc.an_rid = 0; an_rid_desc.an_phys = sc->an_rid_buffer.an_dma_paddr; for (i = 0; i < sizeof(an_rid_desc) / 4; i++) CSR_MEM_AUX_WRITE_4(sc, AN_HOST_DESC_OFFSET + i * 4, ((u_int32_t *)(void *)&an_rid_desc)[i]); return(0); } int an_attach(struct an_softc *sc, int flags) { struct ifnet *ifp; int error = EIO; int i, nrate, mword; u_int8_t r; ifp = sc->an_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc->an_dev, "can not if_alloc()\n"); goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(sc->an_dev), device_get_unit(sc->an_dev)); sc->an_gone = 0; sc->an_associated = 0; sc->an_monitor = 0; sc->an_was_monitor = 0; sc->an_flash_buffer = NULL; /* Reset the NIC. 
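 *
 * A side note on an_dma_malloc() above: it uses the stock busdma
 * pattern in which the bus address only becomes known inside the
 * load callback, so a pointer to the destination is passed through
 * the callback argument. A minimal sketch, assuming a single
 * segment and no deferred loading (tag/map/vaddr/paddr are
 * placeholder names):
 *
 *	static void
 *	cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	bus_dmamap_load(tag, map, vaddr, size, cb, &paddr,
 *	    BUS_DMA_NOWAIT);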
*/ AN_LOCK(sc); an_reset(sc); if (sc->mpi350) { error = an_init_mpi350_desc(sc); if (error) goto fail; } /* Load factory config */ if (an_cmd(sc, AN_CMD_READCFG, 0)) { device_printf(sc->an_dev, "failed to load config data\n"); goto fail; } /* Read the current configuration */ sc->an_config.an_type = AN_RID_GENCONFIG; sc->an_config.an_len = sizeof(struct an_ltv_genconfig); if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_config)) { device_printf(sc->an_dev, "read record failed\n"); goto fail; } /* Read the card capabilities */ sc->an_caps.an_type = AN_RID_CAPABILITIES; sc->an_caps.an_len = sizeof(struct an_ltv_caps); if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_caps)) { device_printf(sc->an_dev, "read record failed\n"); goto fail; } /* Read ssid list */ sc->an_ssidlist.an_type = AN_RID_SSIDLIST; sc->an_ssidlist.an_len = sizeof(struct an_ltv_ssidlist_new); if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_ssidlist)) { device_printf(sc->an_dev, "read record failed\n"); goto fail; } /* Read AP list */ sc->an_aplist.an_type = AN_RID_APLIST; sc->an_aplist.an_len = sizeof(struct an_ltv_aplist); if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_aplist)) { device_printf(sc->an_dev, "read record failed\n"); goto fail; } #ifdef ANCACHE /* Read the RSSI <-> dBm map */ sc->an_have_rssimap = 0; if (sc->an_caps.an_softcaps & 8) { sc->an_rssimap.an_type = AN_RID_RSSI_MAP; sc->an_rssimap.an_len = sizeof(struct an_ltv_rssi_map); if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_rssimap)) { device_printf(sc->an_dev, "unable to get RSSI <-> dBM map\n"); } else { device_printf(sc->an_dev, "got RSSI <-> dBM map\n"); sc->an_have_rssimap = 1; } } else { device_printf(sc->an_dev, "no RSSI <-> dBM map\n"); } #endif AN_UNLOCK(sc); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = an_ioctl; ifp->if_start = an_start; ifp->if_init = an_init; ifp->if_baudrate = 10000000; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); bzero(sc->an_config.an_nodename, sizeof(sc->an_config.an_nodename)); bcopy(AN_DEFAULT_NODENAME, sc->an_config.an_nodename, sizeof(AN_DEFAULT_NODENAME) - 1); bzero(sc->an_ssidlist.an_entry[0].an_ssid, sizeof(sc->an_ssidlist.an_entry[0].an_ssid)); bcopy(AN_DEFAULT_NETNAME, sc->an_ssidlist.an_entry[0].an_ssid, sizeof(AN_DEFAULT_NETNAME) - 1); sc->an_ssidlist.an_entry[0].an_len = strlen(AN_DEFAULT_NETNAME); sc->an_config.an_opmode = AN_OPMODE_INFRASTRUCTURE_STATION; sc->an_tx_rate = 0; bzero((char *)&sc->an_stats, sizeof(sc->an_stats)); nrate = 8; ifmedia_init(&sc->an_ifmedia, 0, an_media_change, an_media_status); if_printf(ifp, "supported rates: "); #define ADD(s, o) ifmedia_add(&sc->an_ifmedia, \ IFM_MAKEWORD(IFM_IEEE80211, (s), (o), 0), 0, NULL) ADD(IFM_AUTO, 0); ADD(IFM_AUTO, IFM_IEEE80211_ADHOC); for (i = 0; i < nrate; i++) { r = sc->an_caps.an_rates[i]; mword = ieee80211_rate2media(NULL, r, IEEE80211_MODE_AUTO); if (mword == 0) continue; printf("%s%d%sMbps", (i != 0 ? " " : ""), (r & IEEE80211_RATE_VAL) / 2, ((r & 0x1) != 0 ? ".5" : "")); ADD(mword, 0); ADD(mword, IFM_IEEE80211_ADHOC); } printf("\n"); ifmedia_set(&sc->an_ifmedia, IFM_MAKEWORD(IFM_IEEE80211, IFM_AUTO, 0, 0)); #undef ADD /* * Call MI attach routine. 
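 *
 * The ADD() loop above is the usual ifmedia population sequence,
 * reduced here to its skeleton (illustrative sketch; change_cb and
 * status_cb stand in for an_media_change/an_media_status):
 *
 *	ifmedia_init(&sc->an_ifmedia, 0, change_cb, status_cb);
 *	ifmedia_add(&sc->an_ifmedia,
 *	    IFM_MAKEWORD(IFM_IEEE80211, IFM_AUTO, 0, 0), 0, NULL);
 *	ifmedia_set(&sc->an_ifmedia,
 *	    IFM_MAKEWORD(IFM_IEEE80211, IFM_AUTO, 0, 0));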
*/ ether_ifattach(ifp, sc->an_caps.an_oemaddr); callout_init_mtx(&sc->an_stat_ch, &sc->an_mtx, 0); return(0); fail: AN_UNLOCK(sc); mtx_destroy(&sc->an_mtx); if (ifp != NULL) if_free(ifp); return(error); } int an_detach(device_t dev) { struct an_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->an_ifp; if (sc->an_gone) { device_printf(dev,"already unloaded\n"); return(0); } AN_LOCK(sc); an_stop(sc); sc->an_gone = 1; ifmedia_removeall(&sc->an_ifmedia); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; AN_UNLOCK(sc); ether_ifdetach(ifp); bus_teardown_intr(dev, sc->irq_res, sc->irq_handle); callout_drain(&sc->an_stat_ch); if_free(ifp); an_release_resources(dev); mtx_destroy(&sc->an_mtx); return (0); } static void an_rxeof(struct an_softc *sc) { struct ifnet *ifp; struct ether_header *eh; struct ieee80211_frame *ih; struct an_rxframe rx_frame; struct an_rxframe_802_3 rx_frame_802_3; struct mbuf *m; int len, id, error = 0, i, count = 0; int ieee80211_header_len; u_char *bpf_buf; u_short fc1; struct an_card_rx_desc an_rx_desc; u_int8_t *buf; AN_LOCK_ASSERT(sc); ifp = sc->an_ifp; if (!sc->mpi350) { id = CSR_READ_2(sc, AN_RX_FID); if (sc->an_monitor && (ifp->if_flags & IFF_PROMISC)) { /* read raw 802.11 packet */ bpf_buf = sc->buf_802_11; /* read header */ if (an_read_data(sc, id, 0x0, (caddr_t)&rx_frame, sizeof(rx_frame))) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } /* * skip beacon by default since this increases the * system load a lot */ if (!(sc->an_monitor & AN_MONITOR_INCLUDE_BEACON) && (rx_frame.an_frame_ctl & IEEE80211_FC0_SUBTYPE_BEACON)) { return; } if (sc->an_monitor & AN_MONITOR_AIRONET_HEADER) { len = rx_frame.an_rx_payload_len + sizeof(rx_frame); /* Check for insane frame length */ if (len > sizeof(sc->buf_802_11)) { if_printf(ifp, "oversized packet " "received (%d, %d)\n", len, MCLBYTES); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } bcopy((char *)&rx_frame, bpf_buf, sizeof(rx_frame)); error = an_read_data(sc, id, sizeof(rx_frame), (caddr_t)bpf_buf+sizeof(rx_frame), rx_frame.an_rx_payload_len); } else { fc1=rx_frame.an_frame_ctl >> 8; ieee80211_header_len = sizeof(struct ieee80211_frame); if ((fc1 & IEEE80211_FC1_DIR_TODS) && (fc1 & IEEE80211_FC1_DIR_FROMDS)) { ieee80211_header_len += ETHER_ADDR_LEN; } len = rx_frame.an_rx_payload_len + ieee80211_header_len; /* Check for insane frame length */ if (len > sizeof(sc->buf_802_11)) { if_printf(ifp, "oversized packet " "received (%d, %d)\n", len, MCLBYTES); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } ih = (struct ieee80211_frame *)bpf_buf; bcopy((char *)&rx_frame.an_frame_ctl, (char *)ih, ieee80211_header_len); error = an_read_data(sc, id, sizeof(rx_frame) + rx_frame.an_gaplen, (caddr_t)ih +ieee80211_header_len, rx_frame.an_rx_payload_len); } /* dump raw 802.11 packet to bpf and skip ip stack */ BPF_TAP(ifp, bpf_buf, len); } else { MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } - MCLGET(m, M_NOWAIT); - if (!(m->m_flags & M_EXT)) { + if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } m->m_pkthdr.rcvif = ifp; /* Read Ethernet encapsulated packet */ #ifdef ANCACHE /* Read NIC frame header */ if (an_read_data(sc, id, 0, (caddr_t)&rx_frame, sizeof(rx_frame))) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } #endif /* Read in the 802_3 frame header */ if (an_read_data(sc, id, 0x34, (caddr_t)&rx_frame_802_3, sizeof(rx_frame_802_3))) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } if 
(rx_frame_802_3.an_rx_802_3_status != 0) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } /* Check for insane frame length */ len = rx_frame_802_3.an_rx_802_3_payload_len; if (len > sizeof(sc->buf_802_11)) { m_freem(m); if_printf(ifp, "oversized packet " "received (%d, %d)\n", len, MCLBYTES); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } m->m_pkthdr.len = m->m_len = rx_frame_802_3.an_rx_802_3_payload_len + 12; eh = mtod(m, struct ether_header *); bcopy((char *)&rx_frame_802_3.an_rx_dst_addr, (char *)&eh->ether_dhost, ETHER_ADDR_LEN); bcopy((char *)&rx_frame_802_3.an_rx_src_addr, (char *)&eh->ether_shost, ETHER_ADDR_LEN); /* in mbuf header type is just before payload */ error = an_read_data(sc, id, 0x44, (caddr_t)&(eh->ether_type), rx_frame_802_3.an_rx_802_3_payload_len); if (error) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); /* Receive packet. */ #ifdef ANCACHE an_cache_store(sc, eh, m, rx_frame.an_rx_signal_strength, rx_frame.an_rsvd0); #endif AN_UNLOCK(sc); (*ifp->if_input)(ifp, m); AN_LOCK(sc); } } else { /* MPI-350 */ for (count = 0; count < AN_MAX_RX_DESC; count++){ for (i = 0; i < sizeof(an_rx_desc) / 4; i++) ((u_int32_t *)(void *)&an_rx_desc)[i] = CSR_MEM_AUX_READ_4(sc, AN_RX_DESC_OFFSET + (count * sizeof(an_rx_desc)) + (i * 4)); if (an_rx_desc.an_done && !an_rx_desc.an_valid) { buf = sc->an_rx_buffer[count].an_dma_vaddr; MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } - MCLGET(m, M_NOWAIT); - if (!(m->m_flags & M_EXT)) { + if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } m->m_pkthdr.rcvif = ifp; /* Read Ethernet encapsulated packet */ /* * No ANCACHE support since we just get back * an Ethernet packet no 802.11 info */ #if 0 #ifdef ANCACHE /* Read NIC frame header */ bcopy(buf, (caddr_t)&rx_frame, sizeof(rx_frame)); #endif #endif /* Check for insane frame length */ len = an_rx_desc.an_len + 12; if (len > MCLBYTES) { m_freem(m); if_printf(ifp, "oversized packet " "received (%d, %d)\n", len, MCLBYTES); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } m->m_pkthdr.len = m->m_len = an_rx_desc.an_len + 12; eh = mtod(m, struct ether_header *); bcopy(buf, (char *)eh, m->m_pkthdr.len); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); /* Receive packet. 
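 *
 * The hunks above are the point of this commit: MCLGET() now
 * reports success or failure directly, so the separate M_EXT flag
 * test is folded into the call itself. Old versus new caller
 * idiom (sketch):
 *
 *	MCLGET(m, M_NOWAIT);			// old: then test the flag
 *	if (!(m->m_flags & M_EXT))
 *		m_freem(m);
 *
 *	if (!(MCLGET(m, M_NOWAIT)))		// new: test the return
 *		m_freem(m);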
*/ #if 0 #ifdef ANCACHE an_cache_store(sc, eh, m, rx_frame.an_rx_signal_strength, rx_frame.an_rsvd0); #endif #endif AN_UNLOCK(sc); (*ifp->if_input)(ifp, m); AN_LOCK(sc); an_rx_desc.an_valid = 1; an_rx_desc.an_len = AN_RX_BUFFER_SIZE; an_rx_desc.an_done = 0; an_rx_desc.an_phys = sc->an_rx_buffer[count].an_dma_paddr; for (i = 0; i < sizeof(an_rx_desc) / 4; i++) CSR_MEM_AUX_WRITE_4(sc, AN_RX_DESC_OFFSET + (count * sizeof(an_rx_desc)) + (i * 4), ((u_int32_t *)(void *)&an_rx_desc)[i]); } else { if_printf(ifp, "Didn't get valid RX packet " "%x %x %d\n", an_rx_desc.an_done, an_rx_desc.an_valid, an_rx_desc.an_len); } } } } static void an_txeof(struct an_softc *sc, int status) { struct ifnet *ifp; int id, i; AN_LOCK_ASSERT(sc); ifp = sc->an_ifp; sc->an_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (!sc->mpi350) { id = CSR_READ_2(sc, AN_TX_CMP_FID(sc->mpi350)); if (status & AN_EV_TX_EXC) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); for (i = 0; i < AN_TX_RING_CNT; i++) { if (id == sc->an_rdata.an_tx_ring[i]) { sc->an_rdata.an_tx_ring[i] = 0; break; } } AN_INC(sc->an_rdata.an_tx_cons, AN_TX_RING_CNT); } else { /* MPI 350 */ id = CSR_READ_2(sc, AN_TX_CMP_FID(sc->mpi350)); if (!sc->an_rdata.an_tx_empty){ if (status & AN_EV_TX_EXC) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); AN_INC(sc->an_rdata.an_tx_cons, AN_MAX_TX_DESC); if (sc->an_rdata.an_tx_prod == sc->an_rdata.an_tx_cons) sc->an_rdata.an_tx_empty = 1; } } return; } /* * We abuse the stats updater to check the current NIC status. This * is important because we don't want to allow transmissions until * the NIC has synchronized to the current cell (either as the master * in an ad-hoc group, or as a station connected to an access point). * * Note that this function will be called via callout(9) with a lock held. */ static void an_stats_update(void *xsc) { struct an_softc *sc; struct ifnet *ifp; sc = xsc; AN_LOCK_ASSERT(sc); ifp = sc->an_ifp; if (sc->an_timer > 0 && --sc->an_timer == 0) an_watchdog(sc); sc->an_status.an_type = AN_RID_STATUS; sc->an_status.an_len = sizeof(struct an_ltv_status); if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_status)) return; if (sc->an_status.an_opmode & AN_STATUS_OPMODE_IN_SYNC) sc->an_associated = 1; else sc->an_associated = 0; /* Don't do this while we're transmitting */ if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { callout_reset(&sc->an_stat_ch, hz, an_stats_update, sc); return; } sc->an_stats.an_len = sizeof(struct an_ltv_stats); sc->an_stats.an_type = AN_RID_32BITS_CUM; if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_stats.an_len)) return; callout_reset(&sc->an_stat_ch, hz, an_stats_update, sc); return; } void an_intr(void *xsc) { struct an_softc *sc; struct ifnet *ifp; u_int16_t status; sc = (struct an_softc*)xsc; AN_LOCK(sc); if (sc->an_gone) { AN_UNLOCK(sc); return; } ifp = sc->an_ifp; /* Disable interrupts. 
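 *
 * an_intr() follows the usual mask/latch/ack/unmask shape;
 * condensed from the body below to a single event for clarity
 * (sketch, not the complete handler):
 *
 *	CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), 0);	// mask
 *	status = CSR_READ_2(sc, AN_EVENT_STAT(sc->mpi350));
 *	if (status & AN_EV_RX) {
 *		an_rxeof(sc);
 *		CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_RX);
 *	}
 *	CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), AN_INTRS(sc->mpi350));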
*/ CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), 0); status = CSR_READ_2(sc, AN_EVENT_STAT(sc->mpi350)); CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), ~AN_INTRS(sc->mpi350)); if (status & AN_EV_MIC) { CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_MIC); } if (status & AN_EV_LINKSTAT) { if (CSR_READ_2(sc, AN_LINKSTAT(sc->mpi350)) == AN_LINKSTAT_ASSOCIATED) sc->an_associated = 1; else sc->an_associated = 0; CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_LINKSTAT); } if (status & AN_EV_RX) { an_rxeof(sc); CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_RX); } if (sc->mpi350 && status & AN_EV_TX_CPY) { an_txeof(sc, status); CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_TX_CPY); } if (status & AN_EV_TX) { an_txeof(sc, status); CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_TX); } if (status & AN_EV_TX_EXC) { an_txeof(sc, status); CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_TX_EXC); } if (status & AN_EV_ALLOC) CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_ALLOC); /* Re-enable interrupts. */ CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), AN_INTRS(sc->mpi350)); if ((ifp->if_flags & IFF_UP) && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) an_start_locked(ifp); AN_UNLOCK(sc); return; } static int an_cmd_struct(struct an_softc *sc, struct an_command *cmd, struct an_reply *reply) { int i; AN_LOCK_ASSERT(sc); for (i = 0; i != AN_TIMEOUT; i++) { if (CSR_READ_2(sc, AN_COMMAND(sc->mpi350)) & AN_CMD_BUSY) { DELAY(1000); } else break; } if( i == AN_TIMEOUT) { printf("BUSY\n"); return(ETIMEDOUT); } CSR_WRITE_2(sc, AN_PARAM0(sc->mpi350), cmd->an_parm0); CSR_WRITE_2(sc, AN_PARAM1(sc->mpi350), cmd->an_parm1); CSR_WRITE_2(sc, AN_PARAM2(sc->mpi350), cmd->an_parm2); CSR_WRITE_2(sc, AN_COMMAND(sc->mpi350), cmd->an_cmd); for (i = 0; i < AN_TIMEOUT; i++) { if (CSR_READ_2(sc, AN_EVENT_STAT(sc->mpi350)) & AN_EV_CMD) break; DELAY(1000); } reply->an_resp0 = CSR_READ_2(sc, AN_RESP0(sc->mpi350)); reply->an_resp1 = CSR_READ_2(sc, AN_RESP1(sc->mpi350)); reply->an_resp2 = CSR_READ_2(sc, AN_RESP2(sc->mpi350)); reply->an_status = CSR_READ_2(sc, AN_STATUS(sc->mpi350)); if (CSR_READ_2(sc, AN_COMMAND(sc->mpi350)) & AN_CMD_BUSY) CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_CLR_STUCK_BUSY); /* Ack the command */ CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_CMD); if (i == AN_TIMEOUT) return(ETIMEDOUT); return(0); } static int an_cmd(struct an_softc *sc, int cmd, int val) { int i, s = 0; AN_LOCK_ASSERT(sc); CSR_WRITE_2(sc, AN_PARAM0(sc->mpi350), val); CSR_WRITE_2(sc, AN_PARAM1(sc->mpi350), 0); CSR_WRITE_2(sc, AN_PARAM2(sc->mpi350), 0); CSR_WRITE_2(sc, AN_COMMAND(sc->mpi350), cmd); for (i = 0; i < AN_TIMEOUT; i++) { if (CSR_READ_2(sc, AN_EVENT_STAT(sc->mpi350)) & AN_EV_CMD) break; else { if (CSR_READ_2(sc, AN_COMMAND(sc->mpi350)) == cmd) CSR_WRITE_2(sc, AN_COMMAND(sc->mpi350), cmd); } } for (i = 0; i < AN_TIMEOUT; i++) { CSR_READ_2(sc, AN_RESP0(sc->mpi350)); CSR_READ_2(sc, AN_RESP1(sc->mpi350)); CSR_READ_2(sc, AN_RESP2(sc->mpi350)); s = CSR_READ_2(sc, AN_STATUS(sc->mpi350)); if ((s & AN_STAT_CMD_CODE) == (cmd & AN_STAT_CMD_CODE)) break; } /* Ack the command */ CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_CMD); if (CSR_READ_2(sc, AN_COMMAND(sc->mpi350)) & AN_CMD_BUSY) CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_CLR_STUCK_BUSY); if (i == AN_TIMEOUT) return(ETIMEDOUT); return(0); } /* * This reset sequence may look a little strange, but this is the * most reliable method I've found to really kick the NIC in the * head and force it to reboot correctly. 
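 *
 * The command paths above, an_cmd() and an_cmd_struct(), never
 * sleep; they share this bounded busy-wait shape, sketched here:
 *
 *	for (i = 0; i < AN_TIMEOUT; i++) {
 *		if (CSR_READ_2(sc, AN_EVENT_STAT(sc->mpi350)) & AN_EV_CMD)
 *			break;
 *		DELAY(1000);	// spin; the softc lock stays held
 *	}
 *	if (i == AN_TIMEOUT)
 *		return (ETIMEDOUT);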
*/ static void an_reset(struct an_softc *sc) { if (sc->an_gone) return; AN_LOCK_ASSERT(sc); an_cmd(sc, AN_CMD_ENABLE, 0); an_cmd(sc, AN_CMD_FW_RESTART, 0); an_cmd(sc, AN_CMD_NOOP2, 0); if (an_cmd(sc, AN_CMD_FORCE_SYNCLOSS, 0) == ETIMEDOUT) device_printf(sc->an_dev, "reset failed\n"); an_cmd(sc, AN_CMD_DISABLE, 0); return; } /* * Read an LTV record from the NIC. */ static int an_read_record(struct an_softc *sc, struct an_ltv_gen *ltv) { struct an_ltv_gen *an_ltv; struct an_card_rid_desc an_rid_desc; struct an_command cmd; struct an_reply reply; struct ifnet *ifp; u_int16_t *ptr; u_int8_t *ptr2; int i, len; AN_LOCK_ASSERT(sc); if (ltv->an_len < 4 || ltv->an_type == 0) return(EINVAL); ifp = sc->an_ifp; if (!sc->mpi350){ /* Tell the NIC to enter record read mode. */ if (an_cmd(sc, AN_CMD_ACCESS|AN_ACCESS_READ, ltv->an_type)) { if_printf(ifp, "RID access failed\n"); return(EIO); } /* Seek to the record. */ if (an_seek(sc, ltv->an_type, 0, AN_BAP1)) { if_printf(ifp, "seek to record failed\n"); return(EIO); } /* * Read the length and record type and make sure they * match what we expect (this verifies that we have enough * room to hold all of the returned data). * Length includes type but not length. */ len = CSR_READ_2(sc, AN_DATA1); if (len > (ltv->an_len - 2)) { if_printf(ifp, "record length mismatch -- expected %d, " "got %d for Rid %x\n", ltv->an_len - 2, len, ltv->an_type); len = ltv->an_len - 2; } else { ltv->an_len = len + 2; } /* Now read the data. */ len -= 2; /* skip the type */ ptr = <v->an_val; for (i = len; i > 1; i -= 2) *ptr++ = CSR_READ_2(sc, AN_DATA1); if (i) { ptr2 = (u_int8_t *)ptr; *ptr2 = CSR_READ_1(sc, AN_DATA1); } } else { /* MPI-350 */ if (!sc->an_rid_buffer.an_dma_vaddr) return(EIO); an_rid_desc.an_valid = 1; an_rid_desc.an_len = AN_RID_BUFFER_SIZE; an_rid_desc.an_rid = 0; an_rid_desc.an_phys = sc->an_rid_buffer.an_dma_paddr; bzero(sc->an_rid_buffer.an_dma_vaddr, AN_RID_BUFFER_SIZE); bzero(&cmd, sizeof(cmd)); bzero(&reply, sizeof(reply)); cmd.an_cmd = AN_CMD_ACCESS|AN_ACCESS_READ; cmd.an_parm0 = ltv->an_type; for (i = 0; i < sizeof(an_rid_desc) / 4; i++) CSR_MEM_AUX_WRITE_4(sc, AN_HOST_DESC_OFFSET + i * 4, ((u_int32_t *)(void *)&an_rid_desc)[i]); if (an_cmd_struct(sc, &cmd, &reply) || reply.an_status & AN_CMD_QUAL_MASK) { if_printf(ifp, "failed to read RID %x %x %x %x %x, %d\n", ltv->an_type, reply.an_status, reply.an_resp0, reply.an_resp1, reply.an_resp2, i); return(EIO); } an_ltv = (struct an_ltv_gen *)sc->an_rid_buffer.an_dma_vaddr; if (an_ltv->an_len + 2 < an_rid_desc.an_len) { an_rid_desc.an_len = an_ltv->an_len; } len = an_rid_desc.an_len; if (len > (ltv->an_len - 2)) { if_printf(ifp, "record length mismatch -- expected %d, " "got %d for Rid %x\n", ltv->an_len - 2, len, ltv->an_type); len = ltv->an_len - 2; } else { ltv->an_len = len + 2; } bcopy(&an_ltv->an_type, <v->an_val, len); } if (an_dump) an_dump_record(sc, ltv, "Read"); return(0); } /* * Same as read, except we inject data instead of reading it. */ static int an_write_record(struct an_softc *sc, struct an_ltv_gen *ltv) { struct an_card_rid_desc an_rid_desc; struct an_command cmd; struct an_reply reply; u_int16_t *ptr; u_int8_t *ptr2; int i, len; AN_LOCK_ASSERT(sc); if (an_dump) an_dump_record(sc, ltv, "Write"); if (!sc->mpi350){ if (an_cmd(sc, AN_CMD_ACCESS|AN_ACCESS_READ, ltv->an_type)) return(EIO); if (an_seek(sc, ltv->an_type, 0, AN_BAP1)) return(EIO); /* * Length includes type but not length. 
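 *
 * A worked example of this convention (the same one the read side
 * above uses, if the bookkeeping is read correctly): with
 * ltv->an_len = 8, the length word sent to the card is 8 - 2 = 6,
 * covering the 2-byte type plus 4 bytes of value, and the loop
 * below then writes len - 2 = 4 bytes of value data.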
*/ len = ltv->an_len - 2; CSR_WRITE_2(sc, AN_DATA1, len); len -= 2; /* skip the type */ ptr = <v->an_val; for (i = len; i > 1; i -= 2) CSR_WRITE_2(sc, AN_DATA1, *ptr++); if (i) { ptr2 = (u_int8_t *)ptr; CSR_WRITE_1(sc, AN_DATA0, *ptr2); } if (an_cmd(sc, AN_CMD_ACCESS|AN_ACCESS_WRITE, ltv->an_type)) return(EIO); } else { /* MPI-350 */ for (i = 0; i != AN_TIMEOUT; i++) { if (CSR_READ_2(sc, AN_COMMAND(sc->mpi350)) & AN_CMD_BUSY) { DELAY(10); } else break; } if (i == AN_TIMEOUT) { printf("BUSY\n"); } an_rid_desc.an_valid = 1; an_rid_desc.an_len = ltv->an_len - 2; an_rid_desc.an_rid = ltv->an_type; an_rid_desc.an_phys = sc->an_rid_buffer.an_dma_paddr; bcopy(<v->an_type, sc->an_rid_buffer.an_dma_vaddr, an_rid_desc.an_len); bzero(&cmd,sizeof(cmd)); bzero(&reply,sizeof(reply)); cmd.an_cmd = AN_CMD_ACCESS|AN_ACCESS_WRITE; cmd.an_parm0 = ltv->an_type; for (i = 0; i < sizeof(an_rid_desc) / 4; i++) CSR_MEM_AUX_WRITE_4(sc, AN_HOST_DESC_OFFSET + i * 4, ((u_int32_t *)(void *)&an_rid_desc)[i]); DELAY(100000); if ((i = an_cmd_struct(sc, &cmd, &reply))) { if_printf(sc->an_ifp, "failed to write RID 1 %x %x %x %x %x, %d\n", ltv->an_type, reply.an_status, reply.an_resp0, reply.an_resp1, reply.an_resp2, i); return(EIO); } if (reply.an_status & AN_CMD_QUAL_MASK) { if_printf(sc->an_ifp, "failed to write RID 2 %x %x %x %x %x, %d\n", ltv->an_type, reply.an_status, reply.an_resp0, reply.an_resp1, reply.an_resp2, i); return(EIO); } DELAY(100000); } return(0); } static void an_dump_record(struct an_softc *sc, struct an_ltv_gen *ltv, char *string) { u_int8_t *ptr2; int len; int i; int count = 0; char buf[17], temp; len = ltv->an_len - 4; if_printf(sc->an_ifp, "RID %4x, Length %4d, Mode %s\n", ltv->an_type, ltv->an_len - 4, string); if (an_dump == 1 || (an_dump == ltv->an_type)) { if_printf(sc->an_ifp, "\t"); bzero(buf,sizeof(buf)); ptr2 = (u_int8_t *)<v->an_val; for (i = len; i > 0; i--) { printf("%02x ", *ptr2); temp = *ptr2++; if (isprint(temp)) buf[count] = temp; else buf[count] = '.'; if (++count == 16) { count = 0; printf("%s\n",buf); if_printf(sc->an_ifp, "\t"); bzero(buf,sizeof(buf)); } } for (; count != 16; count++) { printf(" "); } printf(" %s\n",buf); } } static int an_seek(struct an_softc *sc, int id, int off, int chan) { int i; int selreg, offreg; switch (chan) { case AN_BAP0: selreg = AN_SEL0; offreg = AN_OFF0; break; case AN_BAP1: selreg = AN_SEL1; offreg = AN_OFF1; break; default: if_printf(sc->an_ifp, "invalid data path: %x\n", chan); return(EIO); } CSR_WRITE_2(sc, selreg, id); CSR_WRITE_2(sc, offreg, off); for (i = 0; i < AN_TIMEOUT; i++) { if (!(CSR_READ_2(sc, offreg) & (AN_OFF_BUSY|AN_OFF_ERR))) break; } if (i == AN_TIMEOUT) return(ETIMEDOUT); return(0); } static int an_read_data(struct an_softc *sc, int id, int off, caddr_t buf, int len) { int i; u_int16_t *ptr; u_int8_t *ptr2; if (off != -1) { if (an_seek(sc, id, off, AN_BAP1)) return(EIO); } ptr = (u_int16_t *)buf; for (i = len; i > 1; i -= 2) *ptr++ = CSR_READ_2(sc, AN_DATA1); if (i) { ptr2 = (u_int8_t *)ptr; *ptr2 = CSR_READ_1(sc, AN_DATA1); } return(0); } static int an_write_data(struct an_softc *sc, int id, int off, caddr_t buf, int len) { int i; u_int16_t *ptr; u_int8_t *ptr2; if (off != -1) { if (an_seek(sc, id, off, AN_BAP0)) return(EIO); } ptr = (u_int16_t *)buf; for (i = len; i > 1; i -= 2) CSR_WRITE_2(sc, AN_DATA0, *ptr++); if (i) { ptr2 = (u_int8_t *)ptr; CSR_WRITE_1(sc, AN_DATA0, *ptr2); } return(0); } /* * Allocate a region of memory inside the NIC and zero * it out. 
*/ static int an_alloc_nicmem(struct an_softc *sc, int len, int *id) { int i; if (an_cmd(sc, AN_CMD_ALLOC_MEM, len)) { if_printf(sc->an_ifp, "failed to allocate %d bytes on NIC\n", len); return(ENOMEM); } for (i = 0; i < AN_TIMEOUT; i++) { if (CSR_READ_2(sc, AN_EVENT_STAT(sc->mpi350)) & AN_EV_ALLOC) break; } if (i == AN_TIMEOUT) return(ETIMEDOUT); CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_ALLOC); *id = CSR_READ_2(sc, AN_ALLOC_FID); if (an_seek(sc, *id, 0, AN_BAP0)) return(EIO); for (i = 0; i < len / 2; i++) CSR_WRITE_2(sc, AN_DATA0, 0); return(0); } static void an_setdef(struct an_softc *sc, struct an_req *areq) { struct ifnet *ifp; struct an_ltv_genconfig *cfg; struct an_ltv_ssidlist_new *ssid; struct an_ltv_aplist *ap; struct an_ltv_gen *sp; ifp = sc->an_ifp; AN_LOCK_ASSERT(sc); switch (areq->an_type) { case AN_RID_GENCONFIG: cfg = (struct an_ltv_genconfig *)areq; bcopy((char *)&cfg->an_macaddr, IF_LLADDR(sc->an_ifp), ETHER_ADDR_LEN); bcopy((char *)cfg, (char *)&sc->an_config, sizeof(struct an_ltv_genconfig)); break; case AN_RID_SSIDLIST: ssid = (struct an_ltv_ssidlist_new *)areq; bcopy((char *)ssid, (char *)&sc->an_ssidlist, sizeof(struct an_ltv_ssidlist_new)); break; case AN_RID_APLIST: ap = (struct an_ltv_aplist *)areq; bcopy((char *)ap, (char *)&sc->an_aplist, sizeof(struct an_ltv_aplist)); break; case AN_RID_TX_SPEED: sp = (struct an_ltv_gen *)areq; sc->an_tx_rate = sp->an_val; /* Read the current configuration */ sc->an_config.an_type = AN_RID_GENCONFIG; sc->an_config.an_len = sizeof(struct an_ltv_genconfig); an_read_record(sc, (struct an_ltv_gen *)&sc->an_config); cfg = &sc->an_config; /* clear other rates and set the only one we want */ bzero(cfg->an_rates, sizeof(cfg->an_rates)); cfg->an_rates[0] = sc->an_tx_rate; /* Save the new rate */ sc->an_config.an_type = AN_RID_GENCONFIG; sc->an_config.an_len = sizeof(struct an_ltv_genconfig); break; case AN_RID_WEP_TEMP: /* Cache the temp keys */ bcopy(areq, &sc->an_temp_keys[((struct an_ltv_key *)areq)->kindex], sizeof(struct an_ltv_key)); case AN_RID_WEP_PERM: case AN_RID_LEAPUSERNAME: case AN_RID_LEAPPASSWORD: an_init_locked(sc); /* Disable the MAC. */ an_cmd(sc, AN_CMD_DISABLE, 0); /* Write the key */ an_write_record(sc, (struct an_ltv_gen *)areq); /* Turn the MAC back on. */ an_cmd(sc, AN_CMD_ENABLE, 0); break; case AN_RID_MONITOR_MODE: cfg = (struct an_ltv_genconfig *)areq; bpfdetach(ifp); if (ng_ether_detach_p != NULL) (*ng_ether_detach_p) (ifp); sc->an_monitor = cfg->an_len; if (sc->an_monitor & AN_MONITOR) { if (sc->an_monitor & AN_MONITOR_AIRONET_HEADER) { bpfattach(ifp, DLT_AIRONET_HEADER, sizeof(struct ether_header)); } else { bpfattach(ifp, DLT_IEEE802_11, sizeof(struct ether_header)); } } else { bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header)); if (ng_ether_attach_p != NULL) (*ng_ether_attach_p) (ifp); } break; default: if_printf(ifp, "unknown RID: %x\n", areq->an_type); return; } /* Reinitialize the card. */ if (ifp->if_flags) an_init_locked(sc); return; } /* * Derived from the Linux driver to enable promiscuous mode. */ static void an_promisc(struct an_softc *sc, int promisc) { AN_LOCK_ASSERT(sc); if (sc->an_was_monitor) { an_reset(sc); if (sc->mpi350) an_init_mpi350_desc(sc); } if (sc->an_monitor || sc->an_was_monitor) an_init_locked(sc); sc->an_was_monitor = sc->an_monitor; an_cmd(sc, AN_CMD_SET_MODE, promisc ?
0xffff : 0); return; } static int an_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { int error = 0; int len; int i, max; struct an_softc *sc; struct ifreq *ifr; struct thread *td = curthread; struct ieee80211req *ireq; struct ieee80211_channel ch; u_int8_t tmpstr[IEEE80211_NWID_LEN*2]; u_int8_t *tmpptr; struct an_ltv_genconfig *config; struct an_ltv_key *key; struct an_ltv_status *status; struct an_ltv_ssidlist_new *ssids; int mode; struct aironet_ioctl l_ioctl; sc = ifp->if_softc; ifr = (struct ifreq *)data; ireq = (struct ieee80211req *)data; config = (struct an_ltv_genconfig *)&sc->areq; key = (struct an_ltv_key *)&sc->areq; status = (struct an_ltv_status *)&sc->areq; ssids = (struct an_ltv_ssidlist_new *)&sc->areq; if (sc->an_gone) { error = ENODEV; goto out; } switch (command) { case SIOCSIFFLAGS: AN_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->an_if_flags & IFF_PROMISC)) { an_promisc(sc, 1); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->an_if_flags & IFF_PROMISC) { an_promisc(sc, 0); } else an_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) an_stop(sc); } sc->an_if_flags = ifp->if_flags; AN_UNLOCK(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->an_ifmedia, command); break; case SIOCADDMULTI: case SIOCDELMULTI: /* The Aironet has no multicast filter. */ error = 0; break; case SIOCGAIRONET: error = copyin(ifr->ifr_data, &sc->areq, sizeof(sc->areq)); if (error != 0) break; AN_LOCK(sc); #ifdef ANCACHE if (sc->areq.an_type == AN_RID_ZERO_CACHE) { error = priv_check(td, PRIV_DRIVER); if (error) break; sc->an_sigitems = sc->an_nextitem = 0; break; } else if (sc->areq.an_type == AN_RID_READ_CACHE) { char *pt = (char *)&sc->areq.an_val; bcopy((char *)&sc->an_sigitems, (char *)pt, sizeof(int)); pt += sizeof(int); sc->areq.an_len = sizeof(int) / 2; bcopy((char *)&sc->an_sigcache, (char *)pt, sizeof(struct an_sigcache) * sc->an_sigitems); sc->areq.an_len += ((sizeof(struct an_sigcache) * sc->an_sigitems) / 2) + 1; } else #endif if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { AN_UNLOCK(sc); error = EINVAL; break; } AN_UNLOCK(sc); error = copyout(&sc->areq, ifr->ifr_data, sizeof(sc->areq)); break; case SIOCSAIRONET: if ((error = priv_check(td, PRIV_DRIVER))) goto out; AN_LOCK(sc); error = copyin(ifr->ifr_data, &sc->areq, sizeof(sc->areq)); if (error != 0) break; an_setdef(sc, &sc->areq); AN_UNLOCK(sc); break; case SIOCGPRIVATE_0: /* used by Cisco client utility */ if ((error = priv_check(td, PRIV_DRIVER))) goto out; error = copyin(ifr->ifr_data, &l_ioctl, sizeof(l_ioctl)); if (error) goto out; mode = l_ioctl.command; AN_LOCK(sc); if (mode >= AIROGCAP && mode <= AIROGSTATSD32) { error = readrids(ifp, &l_ioctl); } else if (mode >= AIROPCAP && mode <= AIROPLEAPUSR) { error = writerids(ifp, &l_ioctl); } else if (mode >= AIROFLSHRST && mode <= AIRORESTART) { error = flashcard(ifp, &l_ioctl); } else { error =-1; } AN_UNLOCK(sc); if (!error) { /* copy out the updated command info */ error = copyout(&l_ioctl, ifr->ifr_data, sizeof(l_ioctl)); } break; case SIOCGPRIVATE_1: /* used by Cisco client utility */ if ((error = priv_check(td, PRIV_DRIVER))) goto out; error = copyin(ifr->ifr_data, &l_ioctl, sizeof(l_ioctl)); if (error) goto out; l_ioctl.command = 0; error = AIROMAGIC; (void) copyout(&error, l_ioctl.data, sizeof(error)); error = 0; break; case SIOCG80211: sc->areq.an_len = sizeof(sc->areq); /* was 
that a good idea DJA we are doing a short-cut */ switch (ireq->i_type) { case IEEE80211_IOC_SSID: AN_LOCK(sc); if (ireq->i_val == -1) { sc->areq.an_type = AN_RID_STATUS; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } len = status->an_ssidlen; tmpptr = status->an_ssid; } else if (ireq->i_val >= 0) { sc->areq.an_type = AN_RID_SSIDLIST; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } max = (sc->areq.an_len - 4) / sizeof(struct an_ltv_ssid_entry); if ( max > MAX_SSIDS ) { printf("Too many SSIDs, only using " "%d of %d\n", MAX_SSIDS, max); max = MAX_SSIDS; } if (ireq->i_val > max) { error = EINVAL; AN_UNLOCK(sc); break; } else { len = ssids->an_entry[ireq->i_val].an_len; tmpptr = ssids->an_entry[ireq->i_val].an_ssid; } } else { error = EINVAL; AN_UNLOCK(sc); break; } if (len > IEEE80211_NWID_LEN) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); ireq->i_len = len; bzero(tmpstr, IEEE80211_NWID_LEN); bcopy(tmpptr, tmpstr, len); error = copyout(tmpstr, ireq->i_data, IEEE80211_NWID_LEN); break; case IEEE80211_IOC_NUMSSIDS: AN_LOCK(sc); sc->areq.an_len = sizeof(sc->areq); sc->areq.an_type = AN_RID_SSIDLIST; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { AN_UNLOCK(sc); error = EINVAL; break; } max = (sc->areq.an_len - 4) / sizeof(struct an_ltv_ssid_entry); AN_UNLOCK(sc); if ( max > MAX_SSIDS ) { printf("Too many SSIDs, only using " "%d of %d\n", MAX_SSIDS, max); max = MAX_SSIDS; } ireq->i_val = max; break; case IEEE80211_IOC_WEP: AN_LOCK(sc); sc->areq.an_type = AN_RID_ACTUALCFG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); if (config->an_authtype & AN_AUTHTYPE_PRIVACY_IN_USE) { if (config->an_authtype & AN_AUTHTYPE_ALLOW_UNENCRYPTED) ireq->i_val = IEEE80211_WEP_MIXED; else ireq->i_val = IEEE80211_WEP_ON; } else { ireq->i_val = IEEE80211_WEP_OFF; } break; case IEEE80211_IOC_WEPKEY: /* * XXX: I'm not entirely convinced this is * correct, but it's what is implemented in * ancontrol, so it will have to do until we get * access to actual Cisco code. */ if (ireq->i_val < 0 || ireq->i_val > 8) { error = EINVAL; break; } len = 0; if (ireq->i_val < 5) { AN_LOCK(sc); sc->areq.an_type = AN_RID_WEP_TEMP; for (i = 0; i < 5; i++) { if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; break; } if (key->kindex == 0xffff) break; if (key->kindex == ireq->i_val) len = key->klen; /* Required to get next entry */ sc->areq.an_type = AN_RID_WEP_PERM; } AN_UNLOCK(sc); if (error != 0) { break; } } /* We aren't allowed to read the value of the * key from the card, so we just output zeros * like we would if we could read the card, but * denied the user access. */ bzero(tmpstr, len); ireq->i_len = len; error = copyout(tmpstr, ireq->i_data, len); break; case IEEE80211_IOC_NUMWEPKEYS: ireq->i_val = 9; /* include home key */ break; case IEEE80211_IOC_WEPTXKEY: /* * For some strange reason, you have to read all * keys before you can read the txkey.
*/ AN_LOCK(sc); sc->areq.an_type = AN_RID_WEP_TEMP; for (i = 0; i < 5; i++) { if (an_read_record(sc, (struct an_ltv_gen *) &sc->areq)) { error = EINVAL; break; } if (key->kindex == 0xffff) { break; } /* Required to get next entry */ sc->areq.an_type = AN_RID_WEP_PERM; } if (error != 0) { AN_UNLOCK(sc); break; } sc->areq.an_type = AN_RID_WEP_PERM; key->kindex = 0xffff; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } ireq->i_val = key->mac[0]; /* * Check for home mode. Map home mode into * 5th key since that is how it is stored on * the card */ sc->areq.an_len = sizeof(struct an_ltv_genconfig); sc->areq.an_type = AN_RID_GENCONFIG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } if (config->an_home_product & AN_HOME_NETWORK) ireq->i_val = 4; AN_UNLOCK(sc); break; case IEEE80211_IOC_AUTHMODE: AN_LOCK(sc); sc->areq.an_type = AN_RID_ACTUALCFG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); if ((config->an_authtype & AN_AUTHTYPE_MASK) == AN_AUTHTYPE_NONE) { ireq->i_val = IEEE80211_AUTH_NONE; } else if ((config->an_authtype & AN_AUTHTYPE_MASK) == AN_AUTHTYPE_OPEN) { ireq->i_val = IEEE80211_AUTH_OPEN; } else if ((config->an_authtype & AN_AUTHTYPE_MASK) == AN_AUTHTYPE_SHAREDKEY) { ireq->i_val = IEEE80211_AUTH_SHARED; } else error = EINVAL; break; case IEEE80211_IOC_STATIONNAME: AN_LOCK(sc); sc->areq.an_type = AN_RID_ACTUALCFG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); ireq->i_len = sizeof(config->an_nodename); tmpptr = config->an_nodename; bzero(tmpstr, IEEE80211_NWID_LEN); bcopy(tmpptr, tmpstr, ireq->i_len); error = copyout(tmpstr, ireq->i_data, IEEE80211_NWID_LEN); break; case IEEE80211_IOC_CHANNEL: AN_LOCK(sc); sc->areq.an_type = AN_RID_STATUS; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); ireq->i_val = status->an_cur_channel; break; case IEEE80211_IOC_CURCHAN: AN_LOCK(sc); sc->areq.an_type = AN_RID_STATUS; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); bzero(&ch, sizeof(ch)); ch.ic_freq = ieee80211_ieee2mhz(status->an_cur_channel, IEEE80211_CHAN_B); ch.ic_flags = IEEE80211_CHAN_B; ch.ic_ieee = status->an_cur_channel; error = copyout(&ch, ireq->i_data, sizeof(ch)); break; case IEEE80211_IOC_POWERSAVE: AN_LOCK(sc); sc->areq.an_type = AN_RID_ACTUALCFG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); if (config->an_psave_mode == AN_PSAVE_NONE) { ireq->i_val = IEEE80211_POWERSAVE_OFF; } else if (config->an_psave_mode == AN_PSAVE_CAM) { ireq->i_val = IEEE80211_POWERSAVE_CAM; } else if (config->an_psave_mode == AN_PSAVE_PSP) { ireq->i_val = IEEE80211_POWERSAVE_PSP; } else if (config->an_psave_mode == AN_PSAVE_PSP_CAM) { ireq->i_val = IEEE80211_POWERSAVE_PSP_CAM; } else error = EINVAL; break; case IEEE80211_IOC_POWERSAVESLEEP: AN_LOCK(sc); sc->areq.an_type = AN_RID_ACTUALCFG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); ireq->i_val = config->an_listen_interval; break; } break; case SIOCS80211: if ((error = priv_check(td, PRIV_NET80211_MANAGE))) goto out; AN_LOCK(sc); sc->areq.an_len = sizeof(sc->areq); /* * We need a config structure for everything but the WEP * key management and SSIDs so we get it now so 
as to avoid * duplicating this code every time. */ if (ireq->i_type != IEEE80211_IOC_SSID && ireq->i_type != IEEE80211_IOC_WEPKEY && ireq->i_type != IEEE80211_IOC_WEPTXKEY) { sc->areq.an_type = AN_RID_GENCONFIG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } } switch (ireq->i_type) { case IEEE80211_IOC_SSID: sc->areq.an_len = sizeof(sc->areq); sc->areq.an_type = AN_RID_SSIDLIST; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } if (ireq->i_len > IEEE80211_NWID_LEN) { error = EINVAL; AN_UNLOCK(sc); break; } max = (sc->areq.an_len - 4) / sizeof(struct an_ltv_ssid_entry); if ( max > MAX_SSIDS ) { printf("Too many SSIDs, only using " "%d of %d\n", MAX_SSIDS, max); max = MAX_SSIDS; } if (ireq->i_val > max) { error = EINVAL; AN_UNLOCK(sc); break; } else { error = copyin(ireq->i_data, ssids->an_entry[ireq->i_val].an_ssid, ireq->i_len); ssids->an_entry[ireq->i_val].an_len = ireq->i_len; sc->areq.an_len = sizeof(sc->areq); sc->areq.an_type = AN_RID_SSIDLIST; an_setdef(sc, &sc->areq); AN_UNLOCK(sc); break; } break; case IEEE80211_IOC_WEP: switch (ireq->i_val) { case IEEE80211_WEP_OFF: config->an_authtype &= ~(AN_AUTHTYPE_PRIVACY_IN_USE | AN_AUTHTYPE_ALLOW_UNENCRYPTED); break; case IEEE80211_WEP_ON: config->an_authtype |= AN_AUTHTYPE_PRIVACY_IN_USE; config->an_authtype &= ~AN_AUTHTYPE_ALLOW_UNENCRYPTED; break; case IEEE80211_WEP_MIXED: config->an_authtype |= AN_AUTHTYPE_PRIVACY_IN_USE | AN_AUTHTYPE_ALLOW_UNENCRYPTED; break; default: error = EINVAL; break; } if (error != EINVAL) an_setdef(sc, &sc->areq); AN_UNLOCK(sc); break; case IEEE80211_IOC_WEPKEY: if (ireq->i_val < 0 || ireq->i_val > 8 || ireq->i_len > 13) { error = EINVAL; AN_UNLOCK(sc); break; } error = copyin(ireq->i_data, tmpstr, 13); if (error != 0) { AN_UNLOCK(sc); break; } /* * Map the 9th key into the home mode * since that is how it is stored on * the card */ bzero(&sc->areq, sizeof(struct an_ltv_key)); sc->areq.an_len = sizeof(struct an_ltv_key); key->mac[0] = 1; /* The others are 0.
*/ if (ireq->i_val < 4) { sc->areq.an_type = AN_RID_WEP_TEMP; key->kindex = ireq->i_val; } else { sc->areq.an_type = AN_RID_WEP_PERM; key->kindex = ireq->i_val - 4; } key->klen = ireq->i_len; bcopy(tmpstr, key->key, key->klen); an_setdef(sc, &sc->areq); AN_UNLOCK(sc); break; case IEEE80211_IOC_WEPTXKEY: if (ireq->i_val < 0 || ireq->i_val > 4) { error = EINVAL; AN_UNLOCK(sc); break; } /* * Map the 5th key into the home mode * since that is how it is stored on * the card */ sc->areq.an_len = sizeof(struct an_ltv_genconfig); sc->areq.an_type = AN_RID_ACTUALCFG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } if (ireq->i_val == 4) { config->an_home_product |= AN_HOME_NETWORK; ireq->i_val = 0; } else { config->an_home_product &= ~AN_HOME_NETWORK; } sc->an_config.an_home_product = config->an_home_product; /* update configuration */ an_init_locked(sc); bzero(&sc->areq, sizeof(struct an_ltv_key)); sc->areq.an_len = sizeof(struct an_ltv_key); sc->areq.an_type = AN_RID_WEP_PERM; key->kindex = 0xffff; key->mac[0] = ireq->i_val; an_setdef(sc, &sc->areq); AN_UNLOCK(sc); break; case IEEE80211_IOC_AUTHMODE: switch (ireq->i_val) { case IEEE80211_AUTH_NONE: config->an_authtype = AN_AUTHTYPE_NONE | (config->an_authtype & ~AN_AUTHTYPE_MASK); break; case IEEE80211_AUTH_OPEN: config->an_authtype = AN_AUTHTYPE_OPEN | (config->an_authtype & ~AN_AUTHTYPE_MASK); break; case IEEE80211_AUTH_SHARED: config->an_authtype = AN_AUTHTYPE_SHAREDKEY | (config->an_authtype & ~AN_AUTHTYPE_MASK); break; default: error = EINVAL; } if (error != EINVAL) { an_setdef(sc, &sc->areq); } AN_UNLOCK(sc); break; case IEEE80211_IOC_STATIONNAME: if (ireq->i_len > 16) { error = EINVAL; AN_UNLOCK(sc); break; } bzero(config->an_nodename, 16); error = copyin(ireq->i_data, config->an_nodename, ireq->i_len); an_setdef(sc, &sc->areq); AN_UNLOCK(sc); break; case IEEE80211_IOC_CHANNEL: /* * The actual range is 1-14, but if you set it * to 0 you get the default so we let that work * too. 
			 */
			if (ireq->i_val < 0 || ireq->i_val > 14) {
				error = EINVAL;
				AN_UNLOCK(sc);
				break;
			}
			config->an_ds_channel = ireq->i_val;
			an_setdef(sc, &sc->areq);
			AN_UNLOCK(sc);
			break;
		case IEEE80211_IOC_POWERSAVE:
			switch (ireq->i_val) {
			case IEEE80211_POWERSAVE_OFF:
				config->an_psave_mode = AN_PSAVE_NONE;
				break;
			case IEEE80211_POWERSAVE_CAM:
				config->an_psave_mode = AN_PSAVE_CAM;
				break;
			case IEEE80211_POWERSAVE_PSP:
				config->an_psave_mode = AN_PSAVE_PSP;
				break;
			case IEEE80211_POWERSAVE_PSP_CAM:
				config->an_psave_mode = AN_PSAVE_PSP_CAM;
				break;
			default:
				error = EINVAL;
				break;
			}
			an_setdef(sc, &sc->areq);
			AN_UNLOCK(sc);
			break;
		case IEEE80211_IOC_POWERSAVESLEEP:
			config->an_listen_interval = ireq->i_val;
			an_setdef(sc, &sc->areq);
			AN_UNLOCK(sc);
			break;
		default:
			AN_UNLOCK(sc);
			break;
		}
		/*
		if (!error) {
			AN_LOCK(sc);
			an_setdef(sc, &sc->areq);
			AN_UNLOCK(sc);
		}
		*/
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
out:
	return (error);
}

static int
an_init_tx_ring(struct an_softc *sc)
{
	int i;
	int id;

	if (sc->an_gone)
		return (0);

	if (!sc->mpi350) {
		for (i = 0; i < AN_TX_RING_CNT; i++) {
			if (an_alloc_nicmem(sc, 1518 + 0x44, &id))
				return (ENOMEM);
			sc->an_rdata.an_tx_fids[i] = id;
			sc->an_rdata.an_tx_ring[i] = 0;
		}
	}

	sc->an_rdata.an_tx_prod = 0;
	sc->an_rdata.an_tx_cons = 0;
	sc->an_rdata.an_tx_empty = 1;

	return (0);
}

static void
an_init(void *xsc)
{
	struct an_softc *sc = xsc;

	AN_LOCK(sc);
	an_init_locked(sc);
	AN_UNLOCK(sc);
}

static void
an_init_locked(struct an_softc *sc)
{
	struct ifnet *ifp;

	AN_LOCK_ASSERT(sc);
	ifp = sc->an_ifp;
	if (sc->an_gone)
		return;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		an_stop(sc);

	sc->an_associated = 0;

	/* Allocate the TX buffers */
	if (an_init_tx_ring(sc)) {
		an_reset(sc);
		if (sc->mpi350)
			an_init_mpi350_desc(sc);
		if (an_init_tx_ring(sc)) {
			if_printf(ifp, "tx buffer allocation failed\n");
			return;
		}
	}

	/* Set our MAC address.
*/ bcopy((char *)IF_LLADDR(sc->an_ifp), (char *)&sc->an_config.an_macaddr, ETHER_ADDR_LEN); if (ifp->if_flags & IFF_BROADCAST) sc->an_config.an_rxmode = AN_RXMODE_BC_ADDR; else sc->an_config.an_rxmode = AN_RXMODE_ADDR; if (ifp->if_flags & IFF_MULTICAST) sc->an_config.an_rxmode = AN_RXMODE_BC_MC_ADDR; if (ifp->if_flags & IFF_PROMISC) { if (sc->an_monitor & AN_MONITOR) { if (sc->an_monitor & AN_MONITOR_ANY_BSS) { sc->an_config.an_rxmode |= AN_RXMODE_80211_MONITOR_ANYBSS | AN_RXMODE_NO_8023_HEADER; } else { sc->an_config.an_rxmode |= AN_RXMODE_80211_MONITOR_CURBSS | AN_RXMODE_NO_8023_HEADER; } } } #ifdef ANCACHE if (sc->an_have_rssimap) sc->an_config.an_rxmode |= AN_RXMODE_NORMALIZED_RSSI; #endif /* Set the ssid list */ sc->an_ssidlist.an_type = AN_RID_SSIDLIST; sc->an_ssidlist.an_len = sizeof(struct an_ltv_ssidlist_new); if (an_write_record(sc, (struct an_ltv_gen *)&sc->an_ssidlist)) { if_printf(ifp, "failed to set ssid list\n"); return; } /* Set the AP list */ sc->an_aplist.an_type = AN_RID_APLIST; sc->an_aplist.an_len = sizeof(struct an_ltv_aplist); if (an_write_record(sc, (struct an_ltv_gen *)&sc->an_aplist)) { if_printf(ifp, "failed to set AP list\n"); return; } /* Set the configuration in the NIC */ sc->an_config.an_len = sizeof(struct an_ltv_genconfig); sc->an_config.an_type = AN_RID_GENCONFIG; if (an_write_record(sc, (struct an_ltv_gen *)&sc->an_config)) { if_printf(ifp, "failed to set configuration\n"); return; } /* Enable the MAC */ if (an_cmd(sc, AN_CMD_ENABLE, 0)) { if_printf(ifp, "failed to enable MAC\n"); return; } if (ifp->if_flags & IFF_PROMISC) an_cmd(sc, AN_CMD_SET_MODE, 0xffff); /* enable interrupts */ CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), AN_INTRS(sc->mpi350)); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->an_stat_ch, hz, an_stats_update, sc); return; } static void an_start(struct ifnet *ifp) { struct an_softc *sc; sc = ifp->if_softc; AN_LOCK(sc); an_start_locked(ifp); AN_UNLOCK(sc); } static void an_start_locked(struct ifnet *ifp) { struct an_softc *sc; struct mbuf *m0 = NULL; struct an_txframe_802_3 tx_frame_802_3; struct ether_header *eh; int id, idx, i; unsigned char txcontrol; struct an_card_tx_desc an_tx_desc; u_int8_t *buf; sc = ifp->if_softc; AN_LOCK_ASSERT(sc); if (sc->an_gone) return; if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; if (!sc->an_associated) return; /* We can't send in monitor mode so toss any attempts. 
	 */
	if (sc->an_monitor && (ifp->if_flags & IFF_PROMISC)) {
		for (;;) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
			if (m0 == NULL)
				break;
			m_freem(m0);
		}
		return;
	}

	idx = sc->an_rdata.an_tx_prod;

	if (!sc->mpi350) {
		bzero((char *)&tx_frame_802_3, sizeof(tx_frame_802_3));

		while (sc->an_rdata.an_tx_ring[idx] == 0) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
			if (m0 == NULL)
				break;

			id = sc->an_rdata.an_tx_fids[idx];
			eh = mtod(m0, struct ether_header *);

			bcopy((char *)&eh->ether_dhost,
			    (char *)&tx_frame_802_3.an_tx_dst_addr,
			    ETHER_ADDR_LEN);
			bcopy((char *)&eh->ether_shost,
			    (char *)&tx_frame_802_3.an_tx_src_addr,
			    ETHER_ADDR_LEN);

			/* minus src/dest mac & type */
			tx_frame_802_3.an_tx_802_3_payload_len =
			    m0->m_pkthdr.len - 12;

			m_copydata(m0, sizeof(struct ether_header) - 2,
			    tx_frame_802_3.an_tx_802_3_payload_len,
			    (caddr_t)&sc->an_txbuf);

			txcontrol = AN_TXCTL_8023 | AN_TXCTL_HW(sc->mpi350);
			/* write the txcontrol only */
			an_write_data(sc, id, 0x08, (caddr_t)&txcontrol,
			    sizeof(txcontrol));

			/* 802_3 header */
			an_write_data(sc, id, 0x34, (caddr_t)&tx_frame_802_3,
			    sizeof(struct an_txframe_802_3));

			/* in mbuf header type is just before payload */
			an_write_data(sc, id, 0x44, (caddr_t)&sc->an_txbuf,
			    tx_frame_802_3.an_tx_802_3_payload_len);

			/*
			 * If there's a BPF listener, bounce a copy of
			 * this frame to him.
			 */
			BPF_MTAP(ifp, m0);

			m_freem(m0);
			m0 = NULL;

			sc->an_rdata.an_tx_ring[idx] = id;
			if (an_cmd(sc, AN_CMD_TX, id))
				if_printf(ifp, "xmit failed\n");

			AN_INC(idx, AN_TX_RING_CNT);

			/*
			 * Set a timeout in case the chip goes out to lunch.
			 */
			sc->an_timer = 5;
		}
	} else { /* MPI-350 */
		/* Disable interrupts. */
		CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), 0);

		while (sc->an_rdata.an_tx_empty ||
		    idx != sc->an_rdata.an_tx_cons) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
			if (m0 == NULL) {
				break;
			}
			buf = sc->an_tx_buffer[idx].an_dma_vaddr;

			eh = mtod(m0, struct ether_header *);

			/* DJA optimize this to limit bcopy */
			bcopy((char *)&eh->ether_dhost,
			    (char *)&tx_frame_802_3.an_tx_dst_addr,
			    ETHER_ADDR_LEN);
			bcopy((char *)&eh->ether_shost,
			    (char *)&tx_frame_802_3.an_tx_src_addr,
			    ETHER_ADDR_LEN);

			/* minus src/dest mac & type */
			tx_frame_802_3.an_tx_802_3_payload_len =
			    m0->m_pkthdr.len - 12;

			m_copydata(m0, sizeof(struct ether_header) - 2,
			    tx_frame_802_3.an_tx_802_3_payload_len,
			    (caddr_t)&sc->an_txbuf);

			txcontrol = AN_TXCTL_8023 | AN_TXCTL_HW(sc->mpi350);
			/* write the txcontrol only */
			bcopy((caddr_t)&txcontrol, &buf[0x08],
			    sizeof(txcontrol));

			/* 802_3 header */
			bcopy((caddr_t)&tx_frame_802_3, &buf[0x34],
			    sizeof(struct an_txframe_802_3));

			/* in mbuf header type is just before payload */
			bcopy((caddr_t)&sc->an_txbuf, &buf[0x44],
			    tx_frame_802_3.an_tx_802_3_payload_len);

			bzero(&an_tx_desc, sizeof(an_tx_desc));
			an_tx_desc.an_offset = 0;
			an_tx_desc.an_eoc = 1;
			an_tx_desc.an_valid = 1;
			an_tx_desc.an_len = 0x44 +
			    tx_frame_802_3.an_tx_802_3_payload_len;
			an_tx_desc.an_phys =
			    sc->an_tx_buffer[idx].an_dma_paddr;
			for (i = sizeof(an_tx_desc) / 4 - 1; i >= 0; i--) {
				CSR_MEM_AUX_WRITE_4(sc,
				    AN_TX_DESC_OFFSET /* zero for now */
				    + (0 * sizeof(an_tx_desc))
				    + (i * 4),
				    ((u_int32_t *)(void *)&an_tx_desc)[i]);
			}

			/*
			 * If there's a BPF listener, bounce a copy of
			 * this frame to him.
			 */
			BPF_MTAP(ifp, m0);

			m_freem(m0);
			m0 = NULL;

			AN_INC(idx, AN_MAX_TX_DESC);
			sc->an_rdata.an_tx_empty = 0;
			CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_ALLOC);

			/*
			 * Set a timeout in case the chip goes out to lunch.
			 */
			sc->an_timer = 5;
		}

		/* Re-enable interrupts.
		 */
		CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), AN_INTRS(sc->mpi350));
	}

	if (m0 != NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	sc->an_rdata.an_tx_prod = idx;

	return;
}

void
an_stop(struct an_softc *sc)
{
	struct ifnet *ifp;
	int i;

	AN_LOCK_ASSERT(sc);

	if (sc->an_gone)
		return;

	ifp = sc->an_ifp;

	an_cmd(sc, AN_CMD_FORCE_SYNCLOSS, 0);
	CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), 0);
	an_cmd(sc, AN_CMD_DISABLE, 0);

	for (i = 0; i < AN_TX_RING_CNT; i++)
		an_cmd(sc, AN_CMD_DEALLOC_MEM, sc->an_rdata.an_tx_fids[i]);

	callout_stop(&sc->an_stat_ch);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);

	if (sc->an_flash_buffer) {
		free(sc->an_flash_buffer, M_DEVBUF);
		sc->an_flash_buffer = NULL;
	}
}

static void
an_watchdog(struct an_softc *sc)
{
	struct ifnet *ifp;

	AN_LOCK_ASSERT(sc);

	if (sc->an_gone)
		return;

	ifp = sc->an_ifp;
	if_printf(ifp, "device timeout\n");

	an_reset(sc);
	if (sc->mpi350)
		an_init_mpi350_desc(sc);
	an_init_locked(sc);

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}

int
an_shutdown(device_t dev)
{
	struct an_softc *sc;

	sc = device_get_softc(dev);
	AN_LOCK(sc);
	an_stop(sc);
	sc->an_gone = 1;
	AN_UNLOCK(sc);

	return (0);
}

void
an_resume(device_t dev)
{
	struct an_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);
	AN_LOCK(sc);
	ifp = sc->an_ifp;

	sc->an_gone = 0;
	an_reset(sc);
	if (sc->mpi350)
		an_init_mpi350_desc(sc);
	an_init_locked(sc);

	/* Recover the temporary keys */
	for (i = 0; i < 4; i++) {
		sc->areq.an_type = AN_RID_WEP_TEMP;
		sc->areq.an_len = sizeof(struct an_ltv_key);
		bcopy(&sc->an_temp_keys[i],
		    &sc->areq, sizeof(struct an_ltv_key));
		an_setdef(sc, &sc->areq);
	}

	if (ifp->if_flags & IFF_UP)
		an_start_locked(ifp);
	AN_UNLOCK(sc);

	return;
}

#ifdef ANCACHE
/* Aironet signal strength cache code.
 * store signal/noise/quality on per MAC src basis in
 * a small fixed cache.  The cache wraps if > MAX slots
 * used.  The cache may be zeroed out to start over.
 * Two simple filters exist to reduce computation:
 * 1. ip only (literally 0x800, ETHERTYPE_IP) which may be used
 * to ignore some packets.  It defaults to ip only.
 * It could be used to focus on broadcast, non-IP 802.11 beacons.
 * 2. multicast/broadcast only.  This may be used to
 * ignore unicast packets and only cache signal strength
 * for multicast/broadcast packets (beacons); e.g., Mobile-IP
 * beacons and not unicast traffic.
 *
 * The cache stores (MAC src(index), IP src (major clue), signal,
 * quality, noise)
 *
 * No apologies for storing IP src here.  It's easy and saves much
 * trouble elsewhere.  The cache is assumed to be INET dependent,
 * although it need not be.
 *
 * Note: the Aironet only has a single byte of signal strength value
 * in the rx frame header, and it's not scaled to anything sensible.
 * This is kind of lame, but it's all we've got.
 */

#ifdef documentation
int an_sigitems;	/* number of cached entries */
struct an_sigcache an_sigcache[MAXANCACHE];	/* array of cache entries */
int an_nextitem;	/* index/# of entries */
#endif

/* control variables for cache filtering.  Basic idea is
 * to reduce cost (e.g., to only Mobile-IP agent beacons
 * which are broadcast or multicast).  Still you might
 * want to measure signal strength with unicast ping packets
 * on a pt. to pt. antenna setup.
 */
/* set true if you want to limit cache items to broadcast/mcast
 * only packets (not unicast).  Useful for mobile-ip beacons which
 * are broadcast/multicast at network layer.  Default is all packets
 * so ping/unicast will work, say, with a pt. to pt. antenna setup.
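 *
 * Both knobs are exposed read/write via sysctl(8); an illustrative
 * invocation (MIB names follow from the SYSCTL_INT() declarations
 * below):
 *	sysctl hw.an.an_cache_mcastonly=1
 *	sysctl hw.an.an_cache_iponly=0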
 */
static int an_cache_mcastonly = 0;
SYSCTL_INT(_hw_an, OID_AUTO, an_cache_mcastonly, CTLFLAG_RW,
	&an_cache_mcastonly, 0, "");

/* set true if you want to limit cache items to IP packets only */
static int an_cache_iponly = 1;
SYSCTL_INT(_hw_an, OID_AUTO, an_cache_iponly, CTLFLAG_RW,
	&an_cache_iponly, 0, "");

/*
 * an_cache_store, per rx packet store signal
 * strength in MAC (src) indexed cache.
 */
static void
an_cache_store(struct an_softc *sc, struct ether_header *eh, struct mbuf *m,
    u_int8_t rx_rssi, u_int8_t rx_quality)
{
	struct ip *ip = 0;
	int i;
	static int cache_slot = 0;	/* use this cache entry */
	static int wrapindex = 0;	/* next "free" cache entry */
	int type_ipv4 = 0;

	/* filters:
	 * 1. ip only
	 * 2. configurable filter to throw out unicast packets,
	 * keep multicast only.
	 */

	if ((ntohs(eh->ether_type) == ETHERTYPE_IP)) {
		type_ipv4 = 1;
	}

	/* filter for ip packets only */
	if (an_cache_iponly && !type_ipv4) {
		return;
	}

	/* filter for broadcast/multicast only */
	if (an_cache_mcastonly && ((eh->ether_dhost[0] & 1) == 0)) {
		return;
	}

#ifdef SIGDEBUG
	if_printf(sc->an_ifp, "q value %x (MSB=0x%x, LSB=0x%x)\n",
	    rx_rssi & 0xffff, rx_rssi >> 8, rx_rssi & 0xff);
#endif

	/* find the ip header.  we want to store the ip_src
	 * address.
	 */
	if (type_ipv4) {
		ip = mtod(m, struct ip *);
	}

	/* do a linear search for a matching MAC address
	 * in the cache table
	 * . MAC address is 6 bytes,
	 * . var an_nextitem holds total number of entries already cached
	 */
	for (i = 0; i < sc->an_nextitem; i++) {
		if (!bcmp(eh->ether_shost, sc->an_sigcache[i].macsrc, 6)) {
			/* Match!
			 * so we already have this entry,
			 * update the data
			 */
			break;
		}
	}

	/* did we find a matching mac address?
	 * if yes, then overwrite a previously existing cache entry
	 */
	if (i < sc->an_nextitem) {
		cache_slot = i;
	}
	/* else, have a new address entry, so
	 * add this new entry,
	 * if table full, then we need to replace LRU entry
	 */
	else {

		/* check for space in cache table
		 * note: an_nextitem also holds number of entries
		 * added in the cache table
		 */
		if (sc->an_nextitem < MAXANCACHE) {
			cache_slot = sc->an_nextitem;
			sc->an_nextitem++;
			sc->an_sigitems = sc->an_nextitem;
		}
		/* no space found, so simply wrap with the wrap index
		 * and "zap" the next entry
		 */
		else {
			if (wrapindex == MAXANCACHE) {
				wrapindex = 0;
			}
			cache_slot = wrapindex++;
		}
	}

	/* invariant: cache_slot now points at some slot
	 * in cache.
	 */
	if (cache_slot < 0 || cache_slot >= MAXANCACHE) {
		log(LOG_ERR, "an_cache_store, bad index: %d of "
		    "[0..%d], gross cache error\n",
		    cache_slot, MAXANCACHE);
		return;
	}

	/* store items in cache
	 * .ip source address
	 * .mac src
	 * .signal, etc.
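	 *
	 * For reference, the fields touched below give each cache
	 * entry roughly this shape (names as used by the code that
	 * follows):
	 *	macsrc[6]	MAC source address, the cache key
	 *	ipsrc		IPv4 source, when the frame carried IP
	 *	signal, quality	scaled per an_cache_mode (DBM,
	 *			PERCENT or RAW)
	 *	noise		currently always stored as 0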
*/ if (type_ipv4) { sc->an_sigcache[cache_slot].ipsrc = ip->ip_src.s_addr; } bcopy( eh->ether_shost, sc->an_sigcache[cache_slot].macsrc, 6); switch (an_cache_mode) { case DBM: if (sc->an_have_rssimap) { sc->an_sigcache[cache_slot].signal = - sc->an_rssimap.an_entries[rx_rssi].an_rss_dbm; sc->an_sigcache[cache_slot].quality = - sc->an_rssimap.an_entries[rx_quality].an_rss_dbm; } else { sc->an_sigcache[cache_slot].signal = rx_rssi - 100; sc->an_sigcache[cache_slot].quality = rx_quality - 100; } break; case PERCENT: if (sc->an_have_rssimap) { sc->an_sigcache[cache_slot].signal = sc->an_rssimap.an_entries[rx_rssi].an_rss_pct; sc->an_sigcache[cache_slot].quality = sc->an_rssimap.an_entries[rx_quality].an_rss_pct; } else { if (rx_rssi > 100) rx_rssi = 100; if (rx_quality > 100) rx_quality = 100; sc->an_sigcache[cache_slot].signal = rx_rssi; sc->an_sigcache[cache_slot].quality = rx_quality; } break; case RAW: sc->an_sigcache[cache_slot].signal = rx_rssi; sc->an_sigcache[cache_slot].quality = rx_quality; break; } sc->an_sigcache[cache_slot].noise = 0; return; } #endif static int an_media_change(struct ifnet *ifp) { struct an_softc *sc = ifp->if_softc; struct an_ltv_genconfig *cfg; int otype = sc->an_config.an_opmode; int orate = sc->an_tx_rate; AN_LOCK(sc); sc->an_tx_rate = ieee80211_media2rate( IFM_SUBTYPE(sc->an_ifmedia.ifm_cur->ifm_media)); if (sc->an_tx_rate < 0) sc->an_tx_rate = 0; if (orate != sc->an_tx_rate) { /* Read the current configuration */ sc->an_config.an_type = AN_RID_GENCONFIG; sc->an_config.an_len = sizeof(struct an_ltv_genconfig); an_read_record(sc, (struct an_ltv_gen *)&sc->an_config); cfg = &sc->an_config; /* clear other rates and set the only one we want */ bzero(cfg->an_rates, sizeof(cfg->an_rates)); cfg->an_rates[0] = sc->an_tx_rate; /* Save the new rate */ sc->an_config.an_type = AN_RID_GENCONFIG; sc->an_config.an_len = sizeof(struct an_ltv_genconfig); } if ((sc->an_ifmedia.ifm_cur->ifm_media & IFM_IEEE80211_ADHOC) != 0) sc->an_config.an_opmode &= ~AN_OPMODE_INFRASTRUCTURE_STATION; else sc->an_config.an_opmode |= AN_OPMODE_INFRASTRUCTURE_STATION; if (otype != sc->an_config.an_opmode || orate != sc->an_tx_rate) an_init_locked(sc); AN_UNLOCK(sc); return(0); } static void an_media_status(struct ifnet *ifp, struct ifmediareq *imr) { struct an_ltv_status status; struct an_softc *sc = ifp->if_softc; imr->ifm_active = IFM_IEEE80211; AN_LOCK(sc); status.an_len = sizeof(status); status.an_type = AN_RID_STATUS; if (an_read_record(sc, (struct an_ltv_gen *)&status)) { /* If the status read fails, just lie. 
		 */
		imr->ifm_active = sc->an_ifmedia.ifm_cur->ifm_media;
		imr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	}
	if (sc->an_tx_rate == 0) {
		imr->ifm_active = IFM_IEEE80211|IFM_AUTO;
	}
	if (sc->an_config.an_opmode == AN_OPMODE_IBSS_ADHOC)
		imr->ifm_active |= IFM_IEEE80211_ADHOC;

	imr->ifm_active |= ieee80211_rate2media(NULL,
	    status.an_current_tx_rate, IEEE80211_MODE_AUTO);
	imr->ifm_status = IFM_AVALID;
	if (status.an_opmode & AN_STATUS_OPMODE_ASSOCIATED)
		imr->ifm_status |= IFM_ACTIVE;
	AN_UNLOCK(sc);
}

/********************** Cisco utility support routines *************/

/*
 * ReadRids & WriteRids derived from Cisco driver additions to Ben Reed's
 * Linux driver
 */
static int
readrids(struct ifnet *ifp, struct aironet_ioctl *l_ioctl)
{
	unsigned short rid;
	struct an_softc *sc;
	int error;

	switch (l_ioctl->command) {
	case AIROGCAP:
		rid = AN_RID_CAPABILITIES;
		break;
	case AIROGCFG:
		rid = AN_RID_GENCONFIG;
		break;
	case AIROGSLIST:
		rid = AN_RID_SSIDLIST;
		break;
	case AIROGVLIST:
		rid = AN_RID_APLIST;
		break;
	case AIROGDRVNAM:
		rid = AN_RID_DRVNAME;
		break;
	case AIROGEHTENC:
		rid = AN_RID_ENCAPPROTO;
		break;
	case AIROGWEPKTMP:
		rid = AN_RID_WEP_TEMP;
		break;
	case AIROGWEPKNV:
		rid = AN_RID_WEP_PERM;
		break;
	case AIROGSTAT:
		rid = AN_RID_STATUS;
		break;
	case AIROGSTATSD32:
		rid = AN_RID_32BITS_DELTA;
		break;
	case AIROGSTATSC32:
		rid = AN_RID_32BITS_CUM;
		break;
	default:
		rid = 999;
		break;
	}

	if (rid == 999)	/* bad command */
		return -EINVAL;

	sc = ifp->if_softc;
	sc->areq.an_len = AN_MAX_DATALEN;
	sc->areq.an_type = rid;

	an_read_record(sc, (struct an_ltv_gen *)&sc->areq);

	l_ioctl->len = sc->areq.an_len - 4;	/* just data */

	AN_UNLOCK(sc);
	/* the data contains the length at first */
	if (copyout(&(sc->areq.an_len), l_ioctl->data,
		    sizeof(sc->areq.an_len))) {
		error = -EFAULT;
		goto lock_exit;
	}
	/* Just copy the data back */
	if (copyout(&(sc->areq.an_val), l_ioctl->data + 2,
		    l_ioctl->len)) {
		error = -EFAULT;
		goto lock_exit;
	}
	error = 0;
lock_exit:
	AN_LOCK(sc);
	return (error);
}

static int
writerids(struct ifnet *ifp, struct aironet_ioctl *l_ioctl)
{
	struct an_softc *sc;
	int rid, command, error;

	sc = ifp->if_softc;
	AN_LOCK_ASSERT(sc);
	rid = 0;
	command = l_ioctl->command;

	switch (command) {
	case AIROPSIDS:
		rid = AN_RID_SSIDLIST;
		break;
	case AIROPCAP:
		rid = AN_RID_CAPABILITIES;
		break;
	case AIROPAPLIST:
		rid = AN_RID_APLIST;
		break;
	case AIROPCFG:
		rid = AN_RID_GENCONFIG;
		break;
	case AIROPMACON:
		an_cmd(sc, AN_CMD_ENABLE, 0);
		return 0;
		break;
	case AIROPMACOFF:
		an_cmd(sc, AN_CMD_DISABLE, 0);
		return 0;
		break;
	case AIROPSTCLR:
		/*
		 * This command merely clears the counts; it does not
		 * actually store any data, only reads a RID.  But as it
		 * changes the card's state, I put it in the writerids
		 * routines.
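		 *
		 * (As in readrids() above, the record crosses the ioctl
		 * boundary as a raw LTV blob: a 16-bit length that
		 * counts the 4-byte LTV header, a 16-bit RID type, then
		 * the payload, which is why the caller-visible length
		 * is an_len - 4.)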
		 */
		rid = AN_RID_32BITS_DELTACLR;
		sc = ifp->if_softc;
		sc->areq.an_len = AN_MAX_DATALEN;
		sc->areq.an_type = rid;

		an_read_record(sc, (struct an_ltv_gen *)&sc->areq);

		l_ioctl->len = sc->areq.an_len - 4;	/* just data */

		AN_UNLOCK(sc);
		/* the data contains the length at first */
		error = copyout(&(sc->areq.an_len), l_ioctl->data,
			    sizeof(sc->areq.an_len));
		if (error) {
			AN_LOCK(sc);
			return -EFAULT;
		}
		/* Just copy the data */
		error = copyout(&(sc->areq.an_val), l_ioctl->data + 2,
			    l_ioctl->len);
		AN_LOCK(sc);
		if (error)
			return -EFAULT;
		return 0;
		break;
	case AIROPWEPKEY:
		rid = AN_RID_WEP_TEMP;
		break;
	case AIROPWEPKEYNV:
		rid = AN_RID_WEP_PERM;
		break;
	case AIROPLEAPUSR:
		rid = AN_RID_LEAPUSERNAME;
		break;
	case AIROPLEAPPWD:
		rid = AN_RID_LEAPPASSWORD;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (rid) {
		if (l_ioctl->len > sizeof(sc->areq.an_val) + 4)
			return -EINVAL;
		sc->areq.an_len = l_ioctl->len + 4;	/* add type & length */
		sc->areq.an_type = rid;

		/* Just copy the data back */
		AN_UNLOCK(sc);
		error = copyin((l_ioctl->data) + 2, &sc->areq.an_val,
			    l_ioctl->len);
		AN_LOCK(sc);
		if (error)
			return -EFAULT;

		an_cmd(sc, AN_CMD_DISABLE, 0);
		an_write_record(sc, (struct an_ltv_gen *)&sc->areq);
		an_cmd(sc, AN_CMD_ENABLE, 0);
		return 0;
	}
	return -EOPNOTSUPP;
}

/*
 * General Flash utilities derived from Cisco driver additions to Ben Reed's
 * Linux driver
 */

#define FLASH_DELAY(_sc, x)	msleep(ifp, &(_sc)->an_mtx, PZERO, \
	"flash", ((x) / hz) + 1);
#define FLASH_COMMAND	0x7e7e
#define FLASH_SIZE	(32 * 1024)

static int
unstickbusy(struct ifnet *ifp)
{
	struct an_softc *sc = ifp->if_softc;

	if (CSR_READ_2(sc, AN_COMMAND(sc->mpi350)) & AN_CMD_BUSY) {
		CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350),
			    AN_EV_CLR_STUCK_BUSY);
		return 1;
	}
	return 0;
}

/*
 * Wait for busy completion from the card; wait up to uSec microseconds
 * for it.  Return true for success, meaning the command register is
 * clear.
 */
static int
WaitBusy(struct ifnet *ifp, int uSec)
{
	int statword = 0xffff;
	int delay = 0;
	struct an_softc *sc = ifp->if_softc;

	while ((statword & AN_CMD_BUSY) && delay <= (1000 * 100)) {
		FLASH_DELAY(sc, 10);
		delay += 10;
		statword = CSR_READ_2(sc, AN_COMMAND(sc->mpi350));

		if ((AN_CMD_BUSY & statword) && (delay % 200)) {
			unstickbusy(ifp);
		}
	}

	return 0 == (AN_CMD_BUSY & statword);
}

/*
 * STEP 1) Disable MAC and do soft reset on card.
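 *
 * For orientation, the full flash sequence driven by flashcard()
 * below is:
 *	1) cmdreset()		- disable the MAC, soft-reset the card
 *	2) setflashmode()	- the FLASH_COMMAND knock sequence
 *	3) flashgchar()/flashpchar() - byte handshake over SW0/SW1
 *	4) flashputbuf()	- push the 32k image to the card
 * followed by flashrestart() to bring the interface back up.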
 */
static int
cmdreset(struct ifnet *ifp)
{
	int status;
	struct an_softc *sc = ifp->if_softc;

	AN_LOCK(sc);
	an_stop(sc);

	an_cmd(sc, AN_CMD_DISABLE, 0);

	if (!(status = WaitBusy(ifp, AN_TIMEOUT))) {
		if_printf(ifp, "WaitBusy hang before RESET =%d\n", status);
		AN_UNLOCK(sc);
		return -EBUSY;
	}
	CSR_WRITE_2(sc, AN_COMMAND(sc->mpi350), AN_CMD_FW_RESTART);

	FLASH_DELAY(sc, 1000);	/* WAS 600 12/7/00 */

	if (!(status = WaitBusy(ifp, 100))) {
		if_printf(ifp, "WaitBusy hang after RESET =%d\n", status);
		AN_UNLOCK(sc);
		return -EBUSY;
	}
	AN_UNLOCK(sc);
	return 0;
}

/*
 * STEP 2) Put the card in legendary flash mode
 */
static int
setflashmode(struct ifnet *ifp)
{
	int status;
	struct an_softc *sc = ifp->if_softc;

	CSR_WRITE_2(sc, AN_SW0(sc->mpi350), FLASH_COMMAND);
	CSR_WRITE_2(sc, AN_SW1(sc->mpi350), FLASH_COMMAND);
	CSR_WRITE_2(sc, AN_SW0(sc->mpi350), FLASH_COMMAND);
	CSR_WRITE_2(sc, AN_COMMAND(sc->mpi350), FLASH_COMMAND);

	/*
	 * mdelay(500); // 500ms delay
	 */

	FLASH_DELAY(sc, 500);

	if (!(status = WaitBusy(ifp, AN_TIMEOUT))) {
		printf("WaitBusy hang after setflashmode\n");
		return -EIO;
	}
	return 0;
}

/*
 * STEP 3) Get a character from the card matching matchbyte.
 */
static int
flashgchar(struct ifnet *ifp, int matchbyte, int dwelltime)
{
	int rchar;
	unsigned char rbyte = 0;
	int success = -1;
	struct an_softc *sc = ifp->if_softc;

	do {
		rchar = CSR_READ_2(sc, AN_SW1(sc->mpi350));

		if (dwelltime && !(0x8000 & rchar)) {
			dwelltime -= 10;
			FLASH_DELAY(sc, 10);
			continue;
		}
		rbyte = 0xff & rchar;

		if ((rbyte == matchbyte) && (0x8000 & rchar)) {
			CSR_WRITE_2(sc, AN_SW1(sc->mpi350), 0);
			success = 1;
			break;
		}
		if (rbyte == 0x81 || rbyte == 0x82 || rbyte == 0x83 ||
		    rbyte == 0x1a || 0xffff == rchar)
			break;
		CSR_WRITE_2(sc, AN_SW1(sc->mpi350), 0);

	} while (dwelltime > 0);
	return success;
}

/*
 * Put a character to SW0 and wait up to dwelltime x 50us for it to
 * echo back.
 */
static int
flashpchar(struct ifnet *ifp, int byte, int dwelltime)
{
	int echo;
	int pollbusy, waittime;
	struct an_softc *sc = ifp->if_softc;

	byte |= 0x8000;

	if (dwelltime == 0)
		dwelltime = 200;

	waittime = dwelltime;

	/*
	 * Wait for busy bit d15 to go false indicating buffer empty
	 */
	do {
		pollbusy = CSR_READ_2(sc, AN_SW0(sc->mpi350));

		if (pollbusy & 0x8000) {
			FLASH_DELAY(sc, 50);
			waittime -= 50;
			continue;
		} else
			break;
	} while (waittime >= 0);

	/* timeout for busy clear wait */
	if (waittime <= 0) {
		if_printf(ifp, "flash putchar busywait timeout!\n");
		return -1;
	}

	/*
	 * The port is clear; now write the byte and wait for it to
	 * echo back.
	 */
	do {
		CSR_WRITE_2(sc, AN_SW0(sc->mpi350), byte);
		FLASH_DELAY(sc, 50);
		dwelltime -= 50;
		echo = CSR_READ_2(sc, AN_SW1(sc->mpi350));
	} while (dwelltime >= 0 && echo != byte);

	CSR_WRITE_2(sc, AN_SW1(sc->mpi350), 0);

	return echo == byte;
}

/*
 * Transfer 32k of firmware data from user buffer to our buffer and send to
 * the card
 */
static int
flashputbuf(struct ifnet *ifp)
{
	unsigned short *bufp;
	int nwords;
	struct an_softc *sc = ifp->if_softc;

	/* Write stuff */

	bufp = sc->an_flash_buffer;

	if (!sc->mpi350) {
		CSR_WRITE_2(sc, AN_AUX_PAGE, 0x100);
		CSR_WRITE_2(sc, AN_AUX_OFFSET, 0);

		for (nwords = 0; nwords != FLASH_SIZE / 2; nwords++) {
			CSR_WRITE_2(sc, AN_AUX_DATA, bufp[nwords] & 0xffff);
		}
	} else {
		for (nwords = 0; nwords != FLASH_SIZE / 4; nwords++) {
			CSR_MEM_AUX_WRITE_4(sc, 0x8000,
				((u_int32_t *)bufp)[nwords] & 0xffff);
		}
	}

	CSR_WRITE_2(sc, AN_SW0(sc->mpi350), 0x8000);

	return 0;
}

/*
 * After flashing restart the card.
 */
static int
flashrestart(struct ifnet *ifp)
{
	int status = 0;
	struct an_softc *sc = ifp->if_softc;

	FLASH_DELAY(sc, 1024);		/* Added 12/7/00 */

	an_init_locked(sc);

	FLASH_DELAY(sc, 1024);		/* Added 12/7/00 */
	return status;
}

/*
 * Entry point for the flash ioctl.
 */
static int
flashcard(struct ifnet *ifp, struct aironet_ioctl *l_ioctl)
{
	int z = 0, status;
	struct an_softc *sc;

	sc = ifp->if_softc;
	if (sc->mpi350) {
		if_printf(ifp, "flashing not supported on MPI 350 yet\n");
		return(-1);
	}
	status = l_ioctl->command;

	switch (l_ioctl->command) {
	case AIROFLSHRST:
		return cmdreset(ifp);
		break;
	case AIROFLSHSTFL:
		if (sc->an_flash_buffer) {
			free(sc->an_flash_buffer, M_DEVBUF);
			sc->an_flash_buffer = NULL;
		}
		sc->an_flash_buffer = malloc(FLASH_SIZE, M_DEVBUF, M_WAITOK);
		if (sc->an_flash_buffer)
			return setflashmode(ifp);
		else
			return ENOBUFS;
		break;
	case AIROFLSHGCHR:	/* Get char from aux */
		AN_UNLOCK(sc);
		status = copyin(l_ioctl->data, &sc->areq, l_ioctl->len);
		AN_LOCK(sc);
		if (status)
			return status;
		z = *(int *)&sc->areq;
		if ((status = flashgchar(ifp, z, 8000)) == 1)
			return 0;
		else
			return -1;
	case AIROFLSHPCHR:	/* Send char to card. */
		AN_UNLOCK(sc);
		status = copyin(l_ioctl->data, &sc->areq, l_ioctl->len);
		AN_LOCK(sc);
		if (status)
			return status;
		z = *(int *)&sc->areq;
		if ((status = flashpchar(ifp, z, 8000)) == -1)
			return -EIO;
		else
			return 0;
		break;
	case AIROFLPUTBUF:	/* Send 32k to card */
		if (l_ioctl->len > FLASH_SIZE) {
			if_printf(ifp, "Buffer too big, %x %x\n",
			    l_ioctl->len, FLASH_SIZE);
			return -EINVAL;
		}
		AN_UNLOCK(sc);
		status = copyin(l_ioctl->data, sc->an_flash_buffer,
		    l_ioctl->len);
		AN_LOCK(sc);
		if (status)
			return status;

		if ((status = flashputbuf(ifp)) != 0)
			return -EIO;
		else
			return 0;
		break;
	case AIRORESTART:
		if ((status = flashrestart(ifp)) != 0) {
			if_printf(ifp, "FLASHRESTART returned %d\n", status);
			return -EIO;
		} else
			return 0;
		break;
	default:
		return -EINVAL;
	}

	return -EINVAL;
}
Index: head/sys/dev/bge/if_bge.c
===================================================================
--- head/sys/dev/bge/if_bge.c	(revision 276749)
+++ head/sys/dev/bge/if_bge.c	(revision 276750)
@@ -1,6790 +1,6789 @@
/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul .  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM57xx(x)/BCM590x NetXtreme and NetLink family Ethernet driver
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II Gigabit Ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY.  Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701.  As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include
#include "miidevs.h"
#include

#ifdef __sparc64__
#include #include #include #include
#endif

#include #include #include

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP)
#define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names.  Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM.  Just to be safe, we cover all possibilities.
*/ static const struct bge_type { uint16_t bge_vid; uint16_t bge_did; } bge_devs[] = { { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 }, { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 }, { APPLE_VENDORID, APPLE_DEVICE_BCM5701 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5725 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5727 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5762 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57762 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57764 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57766 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57767 
}, { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57782 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57786 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57787 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 }, { SK_VENDORID, SK_DEVICEID_ALTIMA }, { TC_VENDORID, TC_DEVICEID_3C996 }, { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 }, { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 }, { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 }, { 0, 0 } }; static const struct bge_vendor { uint16_t v_id; const char *v_name; } bge_vendors[] = { { ALTEON_VENDORID, "Alteon" }, { ALTIMA_VENDORID, "Altima" }, { APPLE_VENDORID, "Apple" }, { BCOM_VENDORID, "Broadcom" }, { SK_VENDORID, "SysKonnect" }, { TC_VENDORID, "3Com" }, { FJTSU_VENDORID, "Fujitsu" }, { 0, NULL } }; static const struct bge_revision { uint32_t br_chipid; const char *br_name; } bge_revisions[] = { { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" }, { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" }, { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" }, { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" }, { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" }, { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" }, { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" }, { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" }, { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" }, { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" }, { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, { BGE_CHIPID_BCM5762_A0, "BCM5762 A0" }, { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, /* 5754 and 5787 share the same ASIC ID */ { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, { 
BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" }, { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" }, { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, { 0, NULL } }; /* * Some defaults for major revisions, so that newer steppings * that we don't know about have a shot at working. */ static const struct bge_revision bge_majorrevs[] = { { BGE_ASICREV_BCM5700, "unknown BCM5700" }, { BGE_ASICREV_BCM5701, "unknown BCM5701" }, { BGE_ASICREV_BCM5703, "unknown BCM5703" }, { BGE_ASICREV_BCM5704, "unknown BCM5704" }, { BGE_ASICREV_BCM5705, "unknown BCM5705" }, { BGE_ASICREV_BCM5750, "unknown BCM5750" }, { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, { BGE_ASICREV_BCM5752, "unknown BCM5752" }, { BGE_ASICREV_BCM5780, "unknown BCM5780" }, { BGE_ASICREV_BCM5714, "unknown BCM5714" }, { BGE_ASICREV_BCM5755, "unknown BCM5755" }, { BGE_ASICREV_BCM5761, "unknown BCM5761" }, { BGE_ASICREV_BCM5784, "unknown BCM5784" }, { BGE_ASICREV_BCM5785, "unknown BCM5785" }, /* 5754 and 5787 share the same ASIC ID */ { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, { BGE_ASICREV_BCM5906, "unknown BCM5906" }, { BGE_ASICREV_BCM57765, "unknown BCM57765" }, { BGE_ASICREV_BCM57766, "unknown BCM57766" }, { BGE_ASICREV_BCM57780, "unknown BCM57780" }, { BGE_ASICREV_BCM5717, "unknown BCM5717" }, { BGE_ASICREV_BCM5719, "unknown BCM5719" }, { BGE_ASICREV_BCM5720, "unknown BCM5720" }, { BGE_ASICREV_BCM5762, "unknown BCM5762" }, { 0, NULL } }; #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO) #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY) #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS) #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY) #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS) #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS) #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS) #define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_57765_PLUS) static uint32_t bge_chipid(device_t); static const struct bge_vendor * bge_lookup_vendor(uint16_t); static const struct bge_revision * bge_lookup_rev(uint32_t); typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); static int bge_probe(device_t); static int bge_attach(device_t); static int bge_detach(device_t); static int bge_suspend(device_t); static int bge_resume(device_t); static void bge_release_resources(struct bge_softc *); static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int bge_dma_alloc(struct bge_softc *); static void bge_dma_free(struct bge_softc *); static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t, bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *); static void bge_devinfo(struct bge_softc *); static int bge_mbox_reorder(struct bge_softc *); static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]); static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); static int bge_get_eaddr(struct bge_softc *, uint8_t[]); static void bge_txeof(struct bge_softc *, uint16_t); static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *); static int bge_rxeof(struct bge_softc *, uint16_t, int); static void bge_asf_driver_up (struct bge_softc *); static void bge_tick(void *); static void bge_stats_clear_regs(struct 
bge_softc *); static void bge_stats_update(struct bge_softc *); static void bge_stats_update_regs(struct bge_softc *); static struct mbuf *bge_check_short_dma(struct mbuf *); static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *, uint16_t *, uint16_t *); static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *); static void bge_intr(void *); static int bge_msi_intr(void *); static void bge_intr_task(void *, int); static void bge_start_locked(if_t); static void bge_start(if_t); static int bge_ioctl(if_t, u_long, caddr_t); static void bge_init_locked(struct bge_softc *); static void bge_init(void *); static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t); static void bge_stop(struct bge_softc *); static void bge_watchdog(struct bge_softc *); static int bge_shutdown(device_t); static int bge_ifmedia_upd_locked(if_t); static int bge_ifmedia_upd(if_t); static void bge_ifmedia_sts(if_t, struct ifmediareq *); static uint64_t bge_get_counter(if_t, ift_counter); static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); static int bge_read_nvram(struct bge_softc *, caddr_t, int, int); static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); static void bge_setpromisc(struct bge_softc *); static void bge_setmulti(struct bge_softc *); static void bge_setvlan(struct bge_softc *); static __inline void bge_rxreuse_std(struct bge_softc *, int); static __inline void bge_rxreuse_jumbo(struct bge_softc *, int); static int bge_newbuf_std(struct bge_softc *, int); static int bge_newbuf_jumbo(struct bge_softc *, int); static int bge_init_rx_ring_std(struct bge_softc *); static void bge_free_rx_ring_std(struct bge_softc *); static int bge_init_rx_ring_jumbo(struct bge_softc *); static void bge_free_rx_ring_jumbo(struct bge_softc *); static void bge_free_tx_ring(struct bge_softc *); static int bge_init_tx_ring(struct bge_softc *); static int bge_chipinit(struct bge_softc *); static int bge_blockinit(struct bge_softc *); static uint32_t bge_dma_swap_options(struct bge_softc *); static int bge_has_eaddr(struct bge_softc *); static uint32_t bge_readmem_ind(struct bge_softc *, int); static void bge_writemem_ind(struct bge_softc *, int, int); static void bge_writembx(struct bge_softc *, int, int); #ifdef notdef static uint32_t bge_readreg_ind(struct bge_softc *, int); #endif static void bge_writemem_direct(struct bge_softc *, int, int); static void bge_writereg_ind(struct bge_softc *, int, int); static int bge_miibus_readreg(device_t, int, int); static int bge_miibus_writereg(device_t, int, int, int); static void bge_miibus_statchg(device_t); #ifdef DEVICE_POLLING static int bge_poll(if_t ifp, enum poll_cmd cmd, int count); #endif #define BGE_RESET_SHUTDOWN 0 #define BGE_RESET_START 1 #define BGE_RESET_SUSPEND 2 static void bge_sig_post_reset(struct bge_softc *, int); static void bge_sig_legacy(struct bge_softc *, int); static void bge_sig_pre_reset(struct bge_softc *, int); static void bge_stop_fw(struct bge_softc *); static int bge_reset(struct bge_softc *); static void bge_link_upd(struct bge_softc *); static void bge_ape_lock_init(struct bge_softc *); static void bge_ape_read_fw_ver(struct bge_softc *); static int bge_ape_lock(struct bge_softc *, int); static void bge_ape_unlock(struct bge_softc *, int); static void bge_ape_send_event(struct bge_softc *, uint32_t); static void bge_ape_driver_state_change(struct bge_softc *, int); /* * The BGE_REGISTER_DEBUG option is only for 
low-level debugging. It may * leak information to untrusted users. It is also known to cause alignment * traps on certain architectures. */ #ifdef BGE_REGISTER_DEBUG static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS); static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS); static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS); static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS); #endif static void bge_add_sysctls(struct bge_softc *); static void bge_add_sysctl_stats_regs(struct bge_softc *, struct sysctl_ctx_list *, struct sysctl_oid_list *); static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *, struct sysctl_oid_list *); static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS); static device_method_t bge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bge_probe), DEVMETHOD(device_attach, bge_attach), DEVMETHOD(device_detach, bge_detach), DEVMETHOD(device_shutdown, bge_shutdown), DEVMETHOD(device_suspend, bge_suspend), DEVMETHOD(device_resume, bge_resume), /* MII interface */ DEVMETHOD(miibus_readreg, bge_miibus_readreg), DEVMETHOD(miibus_writereg, bge_miibus_writereg), DEVMETHOD(miibus_statchg, bge_miibus_statchg), DEVMETHOD_END }; static driver_t bge_driver = { "bge", bge_methods, sizeof(struct bge_softc) }; static devclass_t bge_devclass; DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); static int bge_allow_asf = 1; static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters"); SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RDTUN, &bge_allow_asf, 0, "Allow ASF mode if available"); #define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500" #define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2" #define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500" #define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3" #define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id" static int bge_has_eaddr(struct bge_softc *sc) { #ifdef __sparc64__ char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)]; device_t dev; uint32_t subvendor; dev = sc->bge_dev; /* * The on-board BGEs found in sun4u machines aren't fitted with * an EEPROM which means that we have to obtain the MAC address * via OFW and that some tests will always fail. We distinguish * such BGEs by the subvendor ID, which also has to be obtained * from OFW instead of the PCI configuration space as the latter * indicates Broadcom as the subvendor of the netboot interface. * For early Blade 1500 and 2500 we even have to check the OFW * device path as the subvendor ID always defaults to Broadcom * there. 
*/ if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR, &subvendor, sizeof(subvendor)) == sizeof(subvendor) && (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID)) return (0); memset(buf, 0, sizeof(buf)); if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) { if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 && strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0) return (0); if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 && strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0) return (0); } #endif return (1); } static uint32_t bge_readmem_ind(struct bge_softc *sc, int off) { device_t dev; uint32_t val; if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) return (0); dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); return (val); } static void bge_writemem_ind(struct bge_softc *sc, int off, int val) { device_t dev; if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) return; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); } #ifdef notdef static uint32_t bge_readreg_ind(struct bge_softc *sc, int off) { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); return (pci_read_config(dev, BGE_PCI_REG_DATA, 4)); } #endif static void bge_writereg_ind(struct bge_softc *sc, int off, int val) { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); } static void bge_writemem_direct(struct bge_softc *sc, int off, int val) { CSR_WRITE_4(sc, off, val); } static void bge_writembx(struct bge_softc *sc, int off, int val) { if (sc->bge_asicrev == BGE_ASICREV_BCM5906) off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; CSR_WRITE_4(sc, off, val); if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0) CSR_READ_4(sc, off); } /* * Clear all stale locks and select the lock for this driver instance. */ static void bge_ape_lock_init(struct bge_softc *sc) { uint32_t bit, regbase; int i; if (sc->bge_asicrev == BGE_ASICREV_BCM5761) regbase = BGE_APE_LOCK_GRANT; else regbase = BGE_APE_PER_LOCK_GRANT; /* Clear any stale locks. */ for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) { switch (i) { case BGE_APE_LOCK_PHY0: case BGE_APE_LOCK_PHY1: case BGE_APE_LOCK_PHY2: case BGE_APE_LOCK_PHY3: bit = BGE_APE_LOCK_GRANT_DRIVER0; break; default: if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); } APE_WRITE_4(sc, regbase + 4 * i, bit); } /* Select the PHY lock based on the device's function number. */ switch (sc->bge_func_addr) { case 0: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0; break; case 1: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1; break; case 2: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2; break; case 3: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3; break; default: device_printf(sc->bge_dev, "PHY lock not supported on this function\n"); } } /* * Check for APE firmware, set flags, and print version info. */ static void bge_ape_read_fw_ver(struct bge_softc *sc) { const char *fwtype; uint32_t apedata, features; /* Check for a valid APE signature in shared memory. 
	 */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		device_printf(sc->bge_dev, "APE signature found "
		    "but FW status not ready! 0x%08x\n", apedata);
		return;
	}

	sc->bge_mfw_flags |= BGE_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else
		fwtype = "UNKN";

	/* Print the APE firmware version. */
	device_printf(sc->bge_dev, "APE FW version: %s v%d.%d.%d.%d\n",
	    fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >>
	    BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >>
	    BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >>
	    BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}

static int
bge_ape_lock(struct bge_softc *sc, int locknum)
{
	uint32_t bit, gnt, req, status;
	int i, off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return (0);

	/* Lock request/grant registers have different bases. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5761) {
		req = BGE_APE_LOCK_REQ;
		gnt = BGE_APE_LOCK_GRANT;
	} else {
		req = BGE_APE_PER_LOCK_REQ;
		gnt = BGE_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
			return (0);
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;
	default:
		return (EINVAL);
	}

	/* Request a lock. */
	APE_WRITE_4(sc, req + off, bit);

	/* Wait up to 1 second to acquire lock. */
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors. */
	if (status != bit) {
		device_printf(sc->bge_dev, "APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request.
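		 * (On this hardware, writing the request bit to the
		 * grant register is how a still-pending request is
		 * withdrawn.)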
*/ APE_WRITE_4(sc, gnt + off, bit); return (EBUSY); } return (0); } static void bge_ape_unlock(struct bge_softc *sc, int locknum) { uint32_t bit, gnt; int off; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return; if (sc->bge_asicrev == BGE_ASICREV_BCM5761) gnt = BGE_APE_LOCK_GRANT; else gnt = BGE_APE_PER_LOCK_GRANT; off = 4 * locknum; switch (locknum) { case BGE_APE_LOCK_GPIO: if (sc->bge_asicrev == BGE_ASICREV_BCM5761) return; if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_GRC: if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_MEM: if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_PHY0: case BGE_APE_LOCK_PHY1: case BGE_APE_LOCK_PHY2: case BGE_APE_LOCK_PHY3: bit = BGE_APE_LOCK_GRANT_DRIVER0; break; default: return; } APE_WRITE_4(sc, gnt + off, bit); } /* * Send an event to the APE firmware. */ static void bge_ape_send_event(struct bge_softc *sc, uint32_t event) { uint32_t apedata; int i; /* NCSI does not support APE events. */ if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return; /* Wait up to 1ms for APE to service previous event. */ for (i = 10; i > 0; i--) { if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0) break; apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS); if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) { APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event | BGE_APE_EVENT_STATUS_EVENT_PENDING); bge_ape_unlock(sc, BGE_APE_LOCK_MEM); APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1); break; } bge_ape_unlock(sc, BGE_APE_LOCK_MEM); DELAY(100); } if (i == 0) device_printf(sc->bge_dev, "APE event 0x%08x send timed out\n", event); } static void bge_ape_driver_state_change(struct bge_softc *sc, int kind) { uint32_t apedata, event; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return; switch (kind) { case BGE_RESET_START: /* If this is the first load, clear the load counter. */ apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG); if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0); else { apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT); APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata); } APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG, BGE_APE_HOST_SEG_SIG_MAGIC); APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN, BGE_APE_HOST_SEG_LEN_MAGIC); /* Add some version info if bge(4) supports it. */ APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID, BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0)); APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR, BGE_APE_HOST_BEHAV_NO_PHYLOCK); APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS, BGE_APE_HOST_HEARTBEAT_INT_DISABLE); APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, BGE_APE_HOST_DRVR_STATE_START); event = BGE_APE_EVENT_STATUS_STATE_START; break; case BGE_RESET_SHUTDOWN: APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, BGE_APE_HOST_DRVR_STATE_UNLOAD); event = BGE_APE_EVENT_STATUS_STATE_UNLOAD; break; case BGE_RESET_SUSPEND: event = BGE_APE_EVENT_STATUS_STATE_SUSPEND; break; default: return; } bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT | BGE_APE_EVENT_STATUS_STATE_CHNGE); } /* * Map a single buffer address. 
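 *
 * This is the callback handed to bus_dmamap_load() when loading a DMA
 * map; it stashes the single bus address in the caller's
 * bge_dmamap_arg.  A typical call site looks roughly like this
 * (illustrative sketch only; 'tag', 'map', 'ring' and 'size' stand for
 * whatever the caller is loading):
 *
 *	struct bge_dmamap_arg ctx;
 *
 *	ctx.bge_busaddr = 0;
 *	error = bus_dmamap_load(tag, map, ring, size,
 *	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		paddr = ctx.bge_busaddr;	(bus address of 'ring')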
*/ static void bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bge_dmamap_arg *ctx; if (error) return; KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg)); ctx = arg; ctx->bge_busaddr = segs->ds_addr; } static uint8_t bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) { uint32_t access, byte = 0; int i; /* Lock. */ CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); for (i = 0; i < 8000; i++) { if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) break; DELAY(20); } if (i == 8000) return (1); /* Enable access. */ access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); for (i = 0; i < BGE_TIMEOUT * 10; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { DELAY(10); break; } } if (i == BGE_TIMEOUT * 10) { if_printf(sc->bge_ifp, "nvram read timed out\n"); return (1); } /* Get result. */ byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; /* Disable access. */ CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); /* Unlock. */ CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); CSR_READ_4(sc, BGE_NVRAM_SWARB); return (0); } /* * Read a sequence of bytes from NVRAM. */ static int bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt) { int err = 0, i; uint8_t byte = 0; if (sc->bge_asicrev != BGE_ASICREV_BCM5906) return (1); for (i = 0; i < cnt; i++) { err = bge_nvram_getbyte(sc, off + i, &byte); if (err) break; *(dest + i) = byte; } return (err ? 1 : 0); } /* * Read a byte of data stored in the EEPROM at address 'addr.' The * BCM570x supports both the traditional bitbang interface and an * auto access interface for reading the EEPROM. We use the auto * access method. */ static uint8_t bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) { int i; uint32_t byte = 0; /* * Enable use of auto EEPROM access so we can avoid * having to use the bitbang method. */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); /* Reset the EEPROM, load the clock period. */ CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); DELAY(20); /* Issue the read EEPROM command. */ CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); /* Wait for completion */ for(i = 0; i < BGE_TIMEOUT * 10; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) break; } if (i == BGE_TIMEOUT * 10) { device_printf(sc->bge_dev, "EEPROM read timed out\n"); return (1); } /* Get result. */ byte = CSR_READ_4(sc, BGE_EE_DATA); *dest = (byte >> ((addr % 4) * 8)) & 0xFF; return (0); } /* * Read a sequence of bytes from the EEPROM. */ static int bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt) { int i, error = 0; uint8_t byte = 0; for (i = 0; i < cnt; i++) { error = bge_eeprom_getbyte(sc, off + i, &byte); if (error) break; *(dest + i) = byte; } return (error ? 1 : 0); } static int bge_miibus_readreg(device_t dev, int phy, int reg) { struct bge_softc *sc; uint32_t val; int i; sc = device_get_softc(dev); if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) return (0); /* Clear the autopoll bit if set, otherwise may trigger PCI errors. 
*/ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); DELAY(80); } CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | BGE_MIPHY(phy) | BGE_MIREG(reg)); /* Poll for the PHY register access to complete. */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); val = CSR_READ_4(sc, BGE_MI_COMM); if ((val & BGE_MICOMM_BUSY) == 0) { DELAY(5); val = CSR_READ_4(sc, BGE_MI_COMM); break; } } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "PHY read timed out (phy %d, reg %d, val 0x%08x)\n", phy, reg, val); val = 0; } /* Restore the autopoll bit if necessary. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); DELAY(80); } bge_ape_unlock(sc, sc->bge_phy_ape_lock); if (val & BGE_MICOMM_READFAIL) return (0); return (val & 0xFFFF); } static int bge_miibus_writereg(device_t dev, int phy, int reg, int val) { struct bge_softc *sc; int i; sc = device_get_softc(dev); if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) return (0); if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) return (0); /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); DELAY(80); } CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | BGE_MIPHY(phy) | BGE_MIREG(reg) | val); for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { DELAY(5); CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */ break; } } /* Restore the autopoll bit if necessary. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); DELAY(80); } bge_ape_unlock(sc, sc->bge_phy_ape_lock); if (i == BGE_TIMEOUT) device_printf(sc->bge_dev, "PHY write timed out (phy %d, reg %d, val 0x%04x)\n", phy, reg, val); return (0); } static void bge_miibus_statchg(device_t dev) { struct bge_softc *sc; struct mii_data *mii; uint32_t mac_mode, rx_mode, tx_mode; sc = device_get_softc(dev); if ((if_getdrvflags(sc->bge_ifp) & IFF_DRV_RUNNING) == 0) return; mii = device_get_softc(sc->bge_miibus); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->bge_link = 1; break; case IFM_1000_T: case IFM_1000_SX: case IFM_2500_SX: if (sc->bge_asicrev != BGE_ASICREV_BCM5906) sc->bge_link = 1; else sc->bge_link = 0; break; default: sc->bge_link = 0; break; } } else sc->bge_link = 0; if (sc->bge_link == 0) return; /* * APE firmware touches these registers to keep the MAC * connected to the outside world. Try to keep the * accesses atomic. */ /* Set the port mode (MII/GMII) to match the link speed. */ mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); tx_mode = CSR_READ_4(sc, BGE_TX_MODE); rx_mode = CSR_READ_4(sc, BGE_RX_MODE); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) mac_mode |= BGE_PORTMODE_GMII; else mac_mode |= BGE_PORTMODE_MII; /* Set MAC flow control behavior to match link flow control settings. 
*/ tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE; rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE; } else mac_mode |= BGE_MACMODE_HALF_DUPLEX; CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode); DELAY(40); CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode); CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode); } /* * Initialize a standard receive ring descriptor. */ static int bge_newbuf_std(struct bge_softc *sc, int i) { struct mbuf *m; struct bge_rx_bd *r; bus_dma_segment_t segs[1]; bus_dmamap_t map; int error, nsegs; if (sc->bge_flags & BGE_FLAG_JUMBO_STD && (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) { m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MJUM9BYTES; } else { m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; } if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) m_adj(m, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0); if (error != 0) { m_freem(m); return (error); } if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); } map = sc->bge_cdata.bge_rx_std_dmamap[i]; sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap; sc->bge_cdata.bge_rx_std_sparemap = map; sc->bge_cdata.bge_rx_std_chain[i] = m; sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len; r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std]; r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); r->bge_flags = BGE_RXBDFLAG_END; r->bge_len = segs[0].ds_len; r->bge_idx = i; bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD); return (0); } /* * Initialize a jumbo receive ring descriptor. This allocates * a jumbo buffer from the pool managed internally by the driver.
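 * The 9k cluster may be loaded as up to four DMA segments, all of
 * which fit in a single extended RX buffer descriptor.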
*/ static int bge_newbuf_jumbo(struct bge_softc *sc, int i) { bus_dma_segment_t segs[BGE_NSEG_JUMBO]; bus_dmamap_t map; struct bge_extrx_bd *r; struct mbuf *m; int error, nsegs; MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return (ENOBUFS); - m_cljget(m, M_NOWAIT, MJUM9BYTES); - if (!(m->m_flags & M_EXT)) { + if (m_cljget(m, M_NOWAIT, MJUM9BYTES) == NULL) { m_freem(m); return (ENOBUFS); } m->m_len = m->m_pkthdr.len = MJUM9BYTES; if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) m_adj(m, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0); if (error != 0) { m_freem(m); return (error); } if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); } map = sc->bge_cdata.bge_rx_jumbo_dmamap[i]; sc->bge_cdata.bge_rx_jumbo_dmamap[i] = sc->bge_cdata.bge_rx_jumbo_sparemap; sc->bge_cdata.bge_rx_jumbo_sparemap = map; sc->bge_cdata.bge_rx_jumbo_chain[i] = m; sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0; sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0; sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0; sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0; /* * Fill in the extended RX buffer descriptor. */ r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo]; r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; r->bge_idx = i; r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; switch (nsegs) { case 4: r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr); r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr); r->bge_len3 = segs[3].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len; case 3: r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr); r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr); r->bge_len2 = segs[2].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len; case 2: r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr); r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr); r->bge_len1 = segs[1].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len; case 1: r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); r->bge_len0 = segs[0].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len; break; default: panic("%s: %d segments\n", __func__, nsegs); } bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD); return (0); } static int bge_init_rx_ring_std(struct bge_softc *sc) { int error, i; bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); sc->bge_std = 0; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if ((error = bge_newbuf_std(sc, i)) != 0) return (error); BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); } bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); sc->bge_std = 0; bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1); return (0); } static void bge_free_rx_ring_std(struct bge_softc *sc) { int i; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); m_freem(sc->bge_cdata.bge_rx_std_chain[i]); sc->bge_cdata.bge_rx_std_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], 
sizeof(struct bge_rx_bd)); } } static int bge_init_rx_ring_jumbo(struct bge_softc *sc) { struct bge_rcb *rcb; int error, i; bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ); sc->bge_jumbo = 0; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if ((error = bge_newbuf_jumbo(sc, i)) != 0) return (error); BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); } bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); sc->bge_jumbo = 0; /* Enable the jumbo receive producer ring. */ rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1); return (0); } static void bge_free_rx_ring_jumbo(struct bge_softc *sc) { int i; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], sizeof(struct bge_extrx_bd)); } } static void bge_free_tx_ring(struct bge_softc *sc) { int i; if (sc->bge_ldata.bge_tx_ring == NULL) return; for (i = 0; i < BGE_TX_RING_CNT; i++) { if (sc->bge_cdata.bge_tx_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[i]); m_freem(sc->bge_cdata.bge_tx_chain[i]); sc->bge_cdata.bge_tx_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_tx_ring[i], sizeof(struct bge_tx_bd)); } } static int bge_init_tx_ring(struct bge_softc *sc) { sc->bge_txcnt = 0; sc->bge_tx_saved_considx = 0; bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); /* Initialize transmit producer index for host-memory send ring. */ sc->bge_tx_prodidx = 0; bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); /* NIC-memory send ring not used; initialize to zero. */ bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); return (0); } static void bge_setpromisc(struct bge_softc *sc) { if_t ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; /* Enable or disable promiscuous mode as needed. */ if (if_getflags(ifp) & IFF_PROMISC) BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); else BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); } static void bge_setmulti(struct bge_softc *sc) { if_t ifp; int mc_count = 0; uint32_t hashes[4] = { 0, 0, 0, 0 }; int h, i, mcnt; unsigned char *mta; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; mc_count = if_multiaddr_count(ifp, -1); mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count, M_DEVBUF, M_NOWAIT); if (mta == NULL) { device_printf(sc->bge_dev, "Failed to allocate temp mcast list\n"); return; } if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) { for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); free(mta, M_DEVBUF); return; } /* First, zot all the existing filters.
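 * Then rebuild the hash filter: each address is hashed to a 7-bit
 * value; bits 6:5 select one of the four BGE_MAR registers and
 * bits 4:0 select a bit within it (e.g. a hash of 0x5b sets bit 27
 * of register 2).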
*/ for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); if_multiaddr_array(ifp, mta, &mcnt, mc_count); for(i = 0; i < mcnt; i++) { h = ether_crc32_le(mta + (i * ETHER_ADDR_LEN), ETHER_ADDR_LEN) & 0x7F; hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); } for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); free(mta, M_DEVBUF); } static void bge_setvlan(struct bge_softc *sc) { if_t ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; /* Enable or disable VLAN tag stripping as needed. */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); else BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); } static void bge_sig_pre_reset(struct bge_softc *sc, int type) { /* * Some chips don't like this so only do this if ASF is enabled */ if (sc->bge_asf_mode) bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_START); break; case BGE_RESET_SHUTDOWN: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_UNLOAD); break; case BGE_RESET_SUSPEND: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_SUSPEND); break; } } if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND) bge_ape_driver_state_change(sc, type); } static void bge_sig_post_reset(struct bge_softc *sc, int type) { if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_START_DONE); /* START DONE */ break; case BGE_RESET_SHUTDOWN: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_UNLOAD_DONE); break; } } if (type == BGE_RESET_SHUTDOWN) bge_ape_driver_state_change(sc, type); } static void bge_sig_legacy(struct bge_softc *sc, int type) { if (sc->bge_asf_mode) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_START); break; case BGE_RESET_SHUTDOWN: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_UNLOAD); break; } } } static void bge_stop_fw(struct bge_softc *sc) { int i; if (sc->bge_asf_mode) { bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE); CSR_WRITE_4(sc, BGE_RX_CPU_EVENT, CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); for (i = 0; i < 100; i++ ) { if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & BGE_RX_CPU_DRV_EVENT)) break; DELAY(10); } } } static uint32_t bge_dma_swap_options(struct bge_softc *sc) { uint32_t dma_options; dma_options = BGE_MODECTL_WORDSWAP_NONFRAME | BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA; #if BYTE_ORDER == BIG_ENDIAN dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME; #endif return (dma_options); } /* * Do endian, PCI and DMA initialization. */ static int bge_chipinit(struct bge_softc *sc) { uint32_t dma_rw_ctl, misc_ctl, mode_ctl; uint16_t val; int i; /* Set endianness before we access any non-PCI registers. */ misc_ctl = BGE_INIT; if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS) misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS; pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4); /* * Clear the MAC statistics block in the NIC's * internal memory. */ for (i = BGE_STATS_BLOCK; i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) BGE_MEMWIN_WRITE(sc, i, 0); for (i = BGE_STATUS_BLOCK; i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) BGE_MEMWIN_WRITE(sc, i, 0); if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) { /* * Fix data corruption caused by non-qword write with WB. 
* Fix master abort in PCI mode. * Fix PCI latency timer. */ val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2); val |= (1 << 10) | (1 << 12) | (1 << 13); pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2); } if (sc->bge_asicrev == BGE_ASICREV_BCM57765 || sc->bge_asicrev == BGE_ASICREV_BCM57766) { /* * For the 57766 and non Ax versions of 57765, bootcode * needs to setup the PCIE Fast Training Sequence (FTS) * value to prevent transmit hangs. */ if (sc->bge_chiprev != BGE_CHIPREV_57765_AX) { CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) | BGE_CPMU_PADRNG_CTL_RDIV2); } } /* * Set up the PCI DMA control register. */ dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) | BGE_PCIDMARWCTL_WR_CMD_SHIFT(7); if (sc->bge_flags & BGE_FLAG_PCIE) { if (sc->bge_mps >= 256) dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); else dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else if (sc->bge_flags & BGE_FLAG_PCIX) { if (BGE_IS_5714_FAMILY(sc)) { /* 256 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ? BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL : BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) { /* * In the BCM5703, the DMA read watermark should * be set to less than or equal to the maximum * memory read byte count of the PCI-X command * register. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { /* 1536 bytes for read, 384 bytes for write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else { /* 384 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) | 0x0F; } if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704) { uint32_t tmp; /* Set ONE_DMA_AT_ONCE for hardware workaround. */ tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F; if (tmp == 6 || tmp == 7) dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; /* Set PCI-X DMA write workaround. */ dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; } } else { /* Conventional PCI bus: 256 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) dma_rw_ctl |= 0x0F; } if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_asicrev == BGE_ASICREV_BCM5701) dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | BGE_PCIDMARWCTL_ASRT_ALL_BE; if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704) dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; if (BGE_IS_5717_PLUS(sc)) { dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; /* * Enable HW workaround for controllers that misinterpret * a status tag update and leave interrupts permanently * disabled. */ if (!BGE_IS_57765_PLUS(sc) && sc->bge_asicrev != BGE_ASICREV_BCM5717 && sc->bge_asicrev != BGE_ASICREV_BCM5762) dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; } pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); /* * Set up general mode register. */ mode_ctl = bge_dma_swap_options(sc); if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { /* Retain Host-2-BMC settings written by APE firmware. 
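 * The APE uses these byte/word-swap and enable bits for its
 * BMC-to-host traffic, so clearing them here would break the
 * management path it keeps alive across driver resets.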
*/ mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) & (BGE_MODECTL_BYTESWAP_B2HRX_DATA | BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE); } mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM; /* * BCM5701 B5 has a bug causing data corruption when using * 64-bit DMA reads, which can be terminated early and then * completed later as 32-bit accesses, in combination with * certain bridges. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && sc->bge_chipid == BGE_CHIPID_BCM5701_B5) mode_ctl |= BGE_MODECTL_FORCE_PCI32; /* * Tell the firmware the driver is running. */ if (sc->bge_asf_mode & ASF_STACKUP) mode_ctl |= BGE_MODECTL_STACKUP; CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); /* * Disable memory write invalidate. Apparently it is not supported * properly by these devices. */ PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4); /* Set the timer prescaler (always 66 MHz). */ CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { DELAY(40); /* XXX */ /* Put PHY into ready state */ BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ DELAY(40); } return (0); } static int bge_blockinit(struct bge_softc *sc) { struct bge_rcb *rcb; bus_size_t vrcb; bge_hostaddr taddr; uint32_t dmactl, rdmareg, val; int i, limit; /* * Initialize the memory window pointer register so that * we can access the first 32K of internal NIC RAM. This will * allow us to set up the TX send ring RCBs and the RX return * ring RCBs, plus other things which live in NIC memory. */ CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); /* Note: the BCM5704 has a smaller mbuf space than other chips. */ if (!(BGE_IS_5705_PLUS(sc))) { /* Configure mbuf memory pool */ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); if (sc->bge_asicrev == BGE_ASICREV_BCM5704) CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); else CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); /* Configure DMA resource pool */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); } /* Configure mbuf pool watermarks */ if (BGE_IS_5717_PLUS(sc)) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); if (if_getmtu(sc->bge_ifp) > ETHERMTU) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); } } else if (!BGE_IS_5705_PLUS(sc)) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); } /* Configure DMA resource watermarks */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); /* Enable buffer manager */ val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN; /* * Change the arbitration algorithm of TXMBUF read request to * round-robin instead of priority-based for BCM5719.
When * TXFIFO is almost empty, RDMA will hold its request until * TXFIFO is not almost empty. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5719) val |= BGE_BMANMODE_NO_TX_UNDERRUN; CSR_WRITE_4(sc, BGE_BMAN_MODE, val); /* Poll for buffer manager start indication */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "buffer manager failed to start\n"); return (ENXIO); } /* Enable flow-through queues */ CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); /* Wait until queue initialization is complete */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "flow-through queue init failed\n"); return (ENXIO); } /* * Summary of rings supported by the controller: * * Standard Receive Producer Ring * - This ring is used to feed receive buffers for "standard" * sized frames (typically 1536 bytes) to the controller. * * Jumbo Receive Producer Ring * - This ring is used to feed receive buffers for jumbo sized * frames (i.e. anything bigger than the "standard" frames) * to the controller. * * Mini Receive Producer Ring * - This ring is used to feed receive buffers for "mini" * sized frames to the controller. * - This feature required external memory for the controller * but was never used in a production system. Should always * be disabled. * * Receive Return Ring * - After the controller has placed an incoming frame into a * receive buffer, that buffer is moved into a receive return * ring. The driver is then responsible for passing the * buffer up to the stack. Many versions of the controller * support multiple RR rings. * * Send Ring * - This ring is used for outgoing frames. Many versions of * the controller support multiple send rings. */ /* Initialize the standard receive producer ring control block. */ rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; rcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); if (BGE_IS_5717_PLUS(sc)) { /* * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) * Bits 15-2 : Maximum RX frame size * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled * Bit 0 : Reserved */ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2); } else if (BGE_IS_5705_PLUS(sc)) { /* * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) * Bits 15-2 : Reserved (should be 0) * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled * Bit 0 : Reserved */ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); } else { /* * Ring size is always XXX entries * Bits 31-16: Maximum RX frame size * Bits 15-2 : Reserved (should be 0) * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled * Bit 0 : Reserved */ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); } if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; else rcb->bge_nicaddr = BGE_STD_RX_RINGS; /* Write the standard receive producer ring control block.
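 * Unlike the send and receive return rings below, this RCB is
 * programmed through dedicated registers rather than through the
 * NIC memory window.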
*/ CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); /* Reset the standard receive producer ring producer index. */ bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); /* * Initialize the jumbo RX producer ring control * block. We set the 'ring disabled' bit in the * flags field until we're actually ready to start * using this ring (i.e. once we set the MTU * high enough to require it). */ if (BGE_IS_JUMBO_CAPABLE(sc)) { rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; /* Get the jumbo receive producer ring RCB parameters. */ rcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREREAD); rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; else rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); /* Program the jumbo receive producer ring RCB parameters. */ CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); /* Reset the jumbo receive producer ring producer index. */ bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); } /* Disable the mini receive producer ring RCB. */ if (BGE_IS_5700_FAMILY(sc)) { rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb; rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); /* Reset the mini receive producer ring producer index. */ bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); } /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 || sc->bge_chipid == BGE_CHIPID_BCM5906_A1 || sc->bge_chipid == BGE_CHIPID_BCM5906_A2) CSR_WRITE_4(sc, BGE_ISO_PKT_TX, (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2); } /* * The BD ring replenish thresholds control how often the * hardware fetches new BDs from the producer rings in host * memory. Setting the value too low on a busy system can * starve the hardware and reduce the throughput. * * Set the BD ring replenish thresholds. The recommended * values are 1/8th the number of descriptors allocated to * each ring. * XXX The 5754 requires a lower threshold, so it might be a * requirement of all 575x family chips. The Linux driver sets * the lower threshold for all 5705 family chips as well, but there * are reports that it might not need to be so strict. * * XXX Linux does some extra fiddling here for the 5906 parts as * well.
*/ if (BGE_IS_5705_PLUS(sc)) val = 8; else val = BGE_STD_RX_RING_CNT / 8; CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); if (BGE_IS_JUMBO_CAPABLE(sc)) CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8); if (BGE_IS_5717_PLUS(sc)) { CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32); CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16); } /* * Disable all send rings by setting the 'ring disabled' bit * in the flags field of all the TX send ring control blocks, * located in NIC memory. */ if (!BGE_IS_5705_PLUS(sc)) /* 5700 to 5704 had 16 send rings. */ limit = BGE_TX_RINGS_EXTSSRAM_MAX; else if (BGE_IS_57765_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5762) limit = 2; else if (BGE_IS_5717_PLUS(sc)) limit = 4; else limit = 1; vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; for (i = 0; i < limit; i++) { RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); vrcb += sizeof(struct bge_rcb); } /* Configure send ring RCB 0 (we use only the first ring) */ vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717); else RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); /* * Disable all receive return rings by setting the * 'ring disabled' bit in the flags field of all the receive * return ring control blocks, located in NIC memory. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) { /* Should be 17, use 16 until we get an SRAM map. */ limit = 16; } else if (!BGE_IS_5705_PLUS(sc)) limit = BGE_RX_RINGS_MAX; else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || sc->bge_asicrev == BGE_ASICREV_BCM5762 || BGE_IS_57765_PLUS(sc)) limit = 4; else limit = 1; /* Disable all receive return rings. */ vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; for (i = 0; i < limit; i++) { RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_FLAG_RING_DISABLED); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); bge_writembx(sc, BGE_MBX_RX_CONS0_LO + (i * (sizeof(uint64_t))), 0); vrcb += sizeof(struct bge_rcb); } /* * Set up receive return ring 0. Note that the NIC address * for RX return rings is 0x0. The return rings live entirely * within the host, so the nicaddr field in the RCB isn't used.
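 * Like the send ring, return ring 0 is programmed through the NIC
 * memory window and is sized by sc->bge_return_ring_cnt.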
*/ vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); /* Set random backoff seed for TX */ CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, (IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] + IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] + IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5]) & BGE_TX_BACKOFF_SEED_MASK); /* Set inter-packet gap */ val = 0x2620; if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); /* * Specify which ring to use for packets that don't match * any RX rules. */ CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); /* * Configure number of RX lists. One interrupt distribution * list, sixteen active lists, one bad frames class. */ CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); /* Initialize RX list placement stats mask. */ CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); /* Disable host coalescing until we get it set up */ CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); /* Poll to make sure it's shut down. */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "host coalescing engine failed to idle\n"); return (ENXIO); } /* Set up host coalescing defaults */ CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); if (!(BGE_IS_5705_PLUS(sc))) { CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); } CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); /* Set up address of statistics block */ if (!(BGE_IS_5705_PLUS(sc))) { CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); } /* Set up address of status block */ CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); /* Set up status block size.
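 * BCM5700 chips other than C0 want the full-sized status block;
 * everything else uses the minimal 32-byte block, which is zeroed
 * before the host coalescing engine is enabled.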
*/ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { val = BGE_STATBLKSZ_FULL; bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); } else { val = BGE_STATBLKSZ_32BYTE; bzero(sc->bge_ldata.bge_status_block, 32); } bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Turn on host coalescing state machine */ CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); /* Turn on RX BD completion state machine and enable attentions */ CSR_WRITE_4(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); /* Turn on RX list placement state machine */ CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); /* Turn on RX list selector state machine. */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); /* Turn on DMA, clear stats. */ val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB; if (sc->bge_flags & BGE_FLAG_TBI) val |= BGE_PORTMODE_TBI; else if (sc->bge_flags & BGE_FLAG_MII_SERDES) val |= BGE_PORTMODE_GMII; else val |= BGE_PORTMODE_MII; /* Allow APE to send/receive frames. */ if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; CSR_WRITE_4(sc, BGE_MAC_MODE, val); DELAY(40); /* Set misc. local control, enable interrupts on attentions */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); #ifdef notdef /* Assert GPIO pins for PHY reset */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 | BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2); BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 | BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2); #endif /* Turn on DMA completion state machine */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; /* Enable host coalescing bug fix. */ if (BGE_IS_5755_PLUS(sc)) val |= BGE_WDMAMODE_STATUS_TAG_FIX; /* Request larger DMA burst size to get better performance. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5785) val |= BGE_WDMAMODE_BURST_ALL_DATA; /* Turn on write DMA state machine */ CSR_WRITE_4(sc, BGE_WDMA_MODE, val); DELAY(40); /* Turn on read DMA state machine */ val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; if (sc->bge_asicrev == BGE_ASICREV_BCM5717) val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; if (sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780) val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; if (sc->bge_flags & BGE_FLAG_PCIE) val |= BGE_RDMAMODE_FIFO_LONG_BURST; if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) { val |= BGE_RDMAMODE_TSO4_ENABLE; if (sc->bge_flags & BGE_FLAG_TSO3 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780) val |= BGE_RDMAMODE_TSO6_ENABLE; } if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { val |= CSR_READ_4(sc, BGE_RDMA_MODE) & BGE_RDMAMODE_H2BNC_VLAN_DET; /* * Allow multiple outstanding read requests from * non-LSO read DMA engine. 
*/ val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; } if (sc->bge_asicrev == BGE_ASICREV_BCM5761 || sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780 || BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) { if (sc->bge_asicrev == BGE_ASICREV_BCM5762) rdmareg = BGE_RDMA_RSRVCTRL_REG2; else rdmareg = BGE_RDMA_RSRVCTRL; dmactl = CSR_READ_4(sc, rdmareg); /* * Adjust TX margin to prevent TX data corruption and * fix internal FIFO overflow. */ if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | BGE_RDMA_RSRVCTRL_TXMRGN_MASK); dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | BGE_RDMA_RSRVCTRL_TXMRGN_320B; } /* * Enable fix for read DMA FIFO overruns. * The fix is to limit the number of RX BDs * the hardware would fetch at a time. */ CSR_WRITE_4(sc, rdmareg, dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); } if (sc->bge_asicrev == BGE_ASICREV_BCM5719) { CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) { /* * Allow 4KB burst length reads for non-LSO frames. * Enable 512B burst length reads for buffer descriptors. */ CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5762) { CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2, CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); } CSR_WRITE_4(sc, BGE_RDMA_MODE, val); DELAY(40); if (sc->bge_flags & BGE_FLAG_RDMA_BUG) { for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) { val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4); if ((val & 0xFFFF) > BGE_FRAMELEN) break; if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN) break; } if (i != BGE_NUM_RDMA_CHANNELS / 2) { val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); if (sc->bge_asicrev == BGE_ASICREV_BCM5719) val |= BGE_RDMA_TX_LENGTH_WA_5719; else val |= BGE_RDMA_TX_LENGTH_WA_5720; CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); } } /* Turn on RX data completion state machine */ CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); /* Turn on RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); /* Turn on RX data and RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); /* Turn on Mbuf cluster free state machine */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); /* Turn on send BD completion state machine */ CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); /* Turn on send data completion state machine */ val = BGE_SDCMODE_ENABLE; if (sc->bge_asicrev == BGE_ASICREV_BCM5761) val |= BGE_SDCMODE_CDELAY; CSR_WRITE_4(sc, BGE_SDC_MODE, val); /* Turn on send data initiator state machine */ if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | BGE_SDIMODE_HW_LSO_PRE_DMA); else CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); /* Turn on send BD initiator state machine */ CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); /* Turn on send BD selector state machine */ CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); /* ack/clear link change events */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); CSR_WRITE_4(sc, BGE_MI_STS, 0); /* * Enable attention when the link has changed state for * devices that use auto polling. */ if (sc->bge_flags & BGE_FLAG_TBI) { CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); } else { if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); DELAY(80); } if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); } /* * Clear any pending link state attention. * Otherwise some link state change events may be lost until attention * is cleared by bge_intr() -> bge_link_upd() sequence. * It's not necessary on newer BCM chips - perhaps enabling link * state change attentions implies clearing pending attention. */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); /* Enable link state change attentions. */ BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); return (0); } static const struct bge_revision * bge_lookup_rev(uint32_t chipid) { const struct bge_revision *br; for (br = bge_revisions; br->br_name != NULL; br++) { if (br->br_chipid == chipid) return (br); } for (br = bge_majorrevs; br->br_name != NULL; br++) { if (br->br_chipid == BGE_ASICREV(chipid)) return (br); } return (NULL); } static const struct bge_vendor * bge_lookup_vendor(uint16_t vid) { const struct bge_vendor *v; for (v = bge_vendors; v->v_name != NULL; v++) if (v->v_id == vid) return (v); return (NULL); } static uint32_t bge_chipid(device_t dev) { uint32_t id; id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> BGE_PCIMISCCTL_ASICREV_SHIFT; if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) { /* * Find the ASIC revision. Different chips use different * registers. */ switch (pci_get_device(dev)) { case BCOM_DEVICEID_BCM5717: case BCOM_DEVICEID_BCM5718: case BCOM_DEVICEID_BCM5719: case BCOM_DEVICEID_BCM5720: case BCOM_DEVICEID_BCM5725: case BCOM_DEVICEID_BCM5727: case BCOM_DEVICEID_BCM5762: case BCOM_DEVICEID_BCM57764: case BCOM_DEVICEID_BCM57767: case BCOM_DEVICEID_BCM57787: id = pci_read_config(dev, BGE_PCI_GEN2_PRODID_ASICREV, 4); break; case BCOM_DEVICEID_BCM57761: case BCOM_DEVICEID_BCM57762: case BCOM_DEVICEID_BCM57765: case BCOM_DEVICEID_BCM57766: case BCOM_DEVICEID_BCM57781: case BCOM_DEVICEID_BCM57782: case BCOM_DEVICEID_BCM57785: case BCOM_DEVICEID_BCM57786: case BCOM_DEVICEID_BCM57791: case BCOM_DEVICEID_BCM57795: id = pci_read_config(dev, BGE_PCI_GEN15_PRODID_ASICREV, 4); break; default: id = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4); } } return (id); } /* * Probe for a Broadcom chip. Check the PCI vendor and device IDs * against our list and return its name if we find a match. * * Note that since the Broadcom controller contains VPD support, we * try to get the device name string from the controller itself instead * of the compiled-in string. It guarantees we'll always announce the * right product name. We fall back to the compiled-in string when * VPD is unavailable or corrupt.
*/ static int bge_probe(device_t dev) { char buf[96]; char model[64]; const struct bge_revision *br; const char *pname; struct bge_softc *sc; const struct bge_type *t = bge_devs; const struct bge_vendor *v; uint32_t id; uint16_t did, vid; sc = device_get_softc(dev); sc->bge_dev = dev; vid = pci_get_vendor(dev); did = pci_get_device(dev); while(t->bge_vid != 0) { if ((vid == t->bge_vid) && (did == t->bge_did)) { id = bge_chipid(dev); br = bge_lookup_rev(id); if (bge_has_eaddr(sc) && pci_get_vpd_ident(dev, &pname) == 0) snprintf(model, sizeof(model), "%s", pname); else { v = bge_lookup_vendor(vid); snprintf(model, sizeof(model), "%s %s", v != NULL ? v->v_name : "Unknown", br != NULL ? br->br_name : "NetXtreme/NetLink Ethernet Controller"); } snprintf(buf, sizeof(buf), "%s, %sASIC rev. %#08x", model, br != NULL ? "" : "unknown ", id); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static void bge_dma_free(struct bge_softc *sc) { int i; /* Destroy DMA maps for RX buffers. */ for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_std_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); } if (sc->bge_cdata.bge_rx_std_sparemap) bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_sparemap); /* Destroy DMA maps for jumbo RX buffers. */ for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); } if (sc->bge_cdata.bge_rx_jumbo_sparemap) bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_sparemap); /* Destroy DMA maps for TX buffers. */ for (i = 0; i < BGE_TX_RING_CNT; i++) { if (sc->bge_cdata.bge_tx_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[i]); } if (sc->bge_cdata.bge_rx_mtag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); if (sc->bge_cdata.bge_mtag_jumbo) bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo); if (sc->bge_cdata.bge_tx_mtag) bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); /* Destroy standard RX ring. */ if (sc->bge_ldata.bge_rx_std_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map); if (sc->bge_ldata.bge_rx_std_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_ldata.bge_rx_std_ring, sc->bge_cdata.bge_rx_std_ring_map); if (sc->bge_cdata.bge_rx_std_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); /* Destroy jumbo RX ring. */ if (sc->bge_ldata.bge_rx_jumbo_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map); if (sc->bge_ldata.bge_rx_jumbo_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_ldata.bge_rx_jumbo_ring, sc->bge_cdata.bge_rx_jumbo_ring_map); if (sc->bge_cdata.bge_rx_jumbo_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); /* Destroy RX return ring. */ if (sc->bge_ldata.bge_rx_return_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map); if (sc->bge_ldata.bge_rx_return_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_ldata.bge_rx_return_ring, sc->bge_cdata.bge_rx_return_ring_map); if (sc->bge_cdata.bge_rx_return_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); /* Destroy TX ring. 
*/ if (sc->bge_ldata.bge_tx_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map); if (sc->bge_ldata.bge_tx_ring) bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, sc->bge_ldata.bge_tx_ring, sc->bge_cdata.bge_tx_ring_map); if (sc->bge_cdata.bge_tx_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); /* Destroy status block. */ if (sc->bge_ldata.bge_status_block_paddr) bus_dmamap_unload(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map); if (sc->bge_ldata.bge_status_block) bus_dmamem_free(sc->bge_cdata.bge_status_tag, sc->bge_ldata.bge_status_block, sc->bge_cdata.bge_status_map); if (sc->bge_cdata.bge_status_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); /* Destroy statistics block. */ if (sc->bge_ldata.bge_stats_paddr) bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, sc->bge_cdata.bge_stats_map); if (sc->bge_ldata.bge_stats) bus_dmamem_free(sc->bge_cdata.bge_stats_tag, sc->bge_ldata.bge_stats, sc->bge_cdata.bge_stats_map); if (sc->bge_cdata.bge_stats_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); if (sc->bge_cdata.bge_buffer_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag); /* Destroy the parent tag. */ if (sc->bge_cdata.bge_parent_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); } static int bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment, bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr, const char *msg) { struct bge_dmamap_arg ctx; int error; error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, alignment, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag); if (error != 0) { device_printf(sc->bge_dev, "could not create %s dma tag\n", msg); return (ENOMEM); } /* Allocate DMA'able memory for ring. */ error = bus_dmamem_alloc(*tag, (void **)ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map); if (error != 0) { device_printf(sc->bge_dev, "could not allocate DMA'able memory for %s\n", msg); return (ENOMEM); } /* Load the address of the ring. */ ctx.bge_busaddr = 0; error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->bge_dev, "could not load DMA'able memory for %s\n", msg); return (ENOMEM); } *paddr = ctx.bge_busaddr; return (0); } static int bge_dma_alloc(struct bge_softc *sc) { bus_addr_t lowaddr; bus_size_t rxmaxsegsz, sbsz, txsegsz, txmaxsegsz; int i, error; lowaddr = BUS_SPACE_MAXADDR; if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0) lowaddr = BGE_DMA_MAXADDR; /* * Allocate the parent bus DMA tag appropriate for PCI. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag); if (error != 0) { device_printf(sc->bge_dev, "could not allocate parent dma tag\n"); return (ENOMEM); } /* Create tag for standard RX ring. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ, &sc->bge_cdata.bge_rx_std_ring_tag, (uint8_t **)&sc->bge_ldata.bge_rx_std_ring, &sc->bge_cdata.bge_rx_std_ring_map, &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring"); if (error) return (error); /* Create tag for RX return ring. 
*/ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc), &sc->bge_cdata.bge_rx_return_ring_tag, (uint8_t **)&sc->bge_ldata.bge_rx_return_ring, &sc->bge_cdata.bge_rx_return_ring_map, &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring"); if (error) return (error); /* Create tag for TX ring. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ, &sc->bge_cdata.bge_tx_ring_tag, (uint8_t **)&sc->bge_ldata.bge_tx_ring, &sc->bge_cdata.bge_tx_ring_map, &sc->bge_ldata.bge_tx_ring_paddr, "TX ring"); if (error) return (error); /* * Create tag for status block. * Because we only use single Tx/Rx/Rx return ring, use * minimum status block size except BCM5700 AX/BX which * seems to want to see full status block size regardless * of configured number of ring. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_C0) sbsz = BGE_STATUS_BLK_SZ; else sbsz = 32; error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz, &sc->bge_cdata.bge_status_tag, (uint8_t **)&sc->bge_ldata.bge_status_block, &sc->bge_cdata.bge_status_map, &sc->bge_ldata.bge_status_block_paddr, "status block"); if (error) return (error); /* Create tag for statistics block. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ, &sc->bge_cdata.bge_stats_tag, (uint8_t **)&sc->bge_ldata.bge_stats, &sc->bge_cdata.bge_stats_map, &sc->bge_ldata.bge_stats_paddr, "statistics block"); if (error) return (error); /* Create tag for jumbo RX ring. */ if (BGE_IS_JUMBO_CAPABLE(sc)) { error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ, &sc->bge_cdata.bge_rx_jumbo_ring_tag, (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring, &sc->bge_cdata.bge_rx_jumbo_ring_map, &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring"); if (error) return (error); } /* Create parent tag for buffers. */ if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) { /* * XXX * watchdog timeout issue was observed on BCM5704 which * lives behind PCI-X bridge(e.g AMD 8131 PCI-X bridge). * Both limiting DMA address space to 32bits and flushing * mailbox write seem to address the issue. */ if (sc->bge_pcixcap != 0) lowaddr = BUS_SPACE_MAXADDR_32BIT; } error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag); if (error != 0) { device_printf(sc->bge_dev, "could not allocate buffer dma tag\n"); return (ENOMEM); } /* Create tag for Tx mbufs. */ if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) { txsegsz = BGE_TSOSEG_SZ; txmaxsegsz = 65535 + sizeof(struct ether_vlan_header); } else { txsegsz = MCLBYTES; txmaxsegsz = MCLBYTES * BGE_NSEG_NEW; } error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_tx_mtag); if (error) { device_printf(sc->bge_dev, "could not allocate TX dma tag\n"); return (ENOMEM); } /* Create tag for Rx mbufs. */ if (sc->bge_flags & BGE_FLAG_JUMBO_STD) rxmaxsegsz = MJUM9BYTES; else rxmaxsegsz = MCLBYTES; error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1, rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag); if (error) { device_printf(sc->bge_dev, "could not allocate RX dma tag\n"); return (ENOMEM); } /* Create DMA maps for RX buffers. 
	error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
	    &sc->bge_cdata.bge_rx_std_sparemap);
	if (error) {
		device_printf(sc->bge_dev,
		    "can't create spare DMA map for RX\n");
		return (ENOMEM);
	}
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
		    &sc->bge_cdata.bge_rx_std_dmamap[i]);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create DMA map for RX\n");
			return (ENOMEM);
		}
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
		    &sc->bge_cdata.bge_tx_dmamap[i]);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create DMA map for TX\n");
			return (ENOMEM);
		}
	}

	/* Create tags for jumbo RX buffers. */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
		if (error) {
			device_printf(sc->bge_dev,
			    "could not allocate jumbo dma tag\n");
			return (ENOMEM);
		}
		/* Create DMA maps for jumbo RX buffers. */
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
		    0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create spare DMA map for jumbo RX\n");
			return (ENOMEM);
		}
		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
			    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			if (error) {
				device_printf(sc->bge_dev,
				    "can't create DMA map for jumbo RX\n");
				return (ENOMEM);
			}
		}
	}

	return (0);
}

/*
 * Return true if this device has more than one port.
 */
static int
bge_has_multiple_ports(struct bge_softc *sc)
{
	device_t dev = sc->bge_dev;
	u_int b, d, f, fscan, s;

	d = pci_get_domain(dev);
	b = pci_get_bus(dev);
	s = pci_get_slot(dev);
	f = pci_get_function(dev);
	for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
		if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
			return (1);
	return (0);
}

/*
 * Return true if MSI can be used with this device.
 */
static int
bge_can_use_msi(struct bge_softc *sc)
{
	int can_use_msi = 0;

	if (sc->bge_msi == 0)
		return (0);

	/* Disable MSI for polling(4). */
#ifdef DEVICE_POLLING
	return (0);
#endif
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5714:
		/*
		 * Apparently, MSI doesn't work when these chips are
		 * configured in single-port mode.
		 */
		if (bge_has_multiple_ports(sc))
			can_use_msi = 1;
		break;
	case BGE_ASICREV_BCM5750:
		if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
		    sc->bge_chiprev != BGE_CHIPREV_5750_BX)
			can_use_msi = 1;
		break;
	default:
		if (BGE_IS_575X_PLUS(sc))
			can_use_msi = 1;
	}
	return (can_use_msi);
}
static int
bge_mbox_reorder(struct bge_softc *sc)
{
	/* Lists of PCI bridges that are known to reorder mailbox writes. */
	static const struct mbox_reorder {
		const uint16_t vendor;
		const uint16_t device;
		const char *desc;
	} mbox_reorder_lists[] = {
		{ 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" },
	};
	devclass_t pci, pcib;
	device_t bus, dev;
	int i;

	pci = devclass_find("pci");
	pcib = devclass_find("pcib");
	dev = sc->bge_dev;
	bus = device_get_parent(dev);
	for (;;) {
		dev = device_get_parent(bus);
		bus = device_get_parent(dev);
		if (device_get_devclass(dev) != pcib)
			break;
		for (i = 0; i < nitems(mbox_reorder_lists); i++) {
			if (pci_get_vendor(dev) ==
			    mbox_reorder_lists[i].vendor &&
			    pci_get_device(dev) ==
			    mbox_reorder_lists[i].device) {
				device_printf(sc->bge_dev,
				    "enabling MBOX workaround for %s\n",
				    mbox_reorder_lists[i].desc);
				return (1);
			}
		}
		if (device_get_devclass(bus) != pci)
			break;
	}
	return (0);
}

static void
bge_devinfo(struct bge_softc *sc)
{
	uint32_t cfg, clk;

	device_printf(sc->bge_dev,
	    "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; ",
	    sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev);
	if (sc->bge_flags & BGE_FLAG_PCIE)
		printf("PCI-E\n");
	else if (sc->bge_flags & BGE_FLAG_PCIX) {
		printf("PCI-X ");
		cfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
		if (cfg == BGE_MISCCFG_BOARD_ID_5704CIOBE)
			clk = 133;
		else {
			clk = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
			switch (clk) {
			case 0:
				clk = 33;
				break;
			case 2:
				clk = 50;
				break;
			case 4:
				clk = 66;
				break;
			case 6:
				clk = 100;
				break;
			case 7:
				clk = 133;
				break;
			}
		}
		printf("%u MHz\n", clk);
	} else {
		if (sc->bge_pcixcap != 0)
			printf("PCI on PCI-X ");
		else
			printf("PCI ");
		cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
		if (cfg & BGE_PCISTATE_PCI_BUSSPEED)
			clk = 66;
		else
			clk = 33;
		if (cfg & BGE_PCISTATE_32BIT_BUS)
			printf("%u MHz; 32bit\n", clk);
		else
			printf("%u MHz; 64bit\n", clk);
	}
}

static int
bge_attach(device_t dev)
{
	if_t ifp;
	struct bge_softc *sc;
	uint32_t hwcfg = 0, misccfg, pcistate;
	u_char eaddr[ETHER_ADDR_LEN];
	int capmask, error, reg, rid, trys;

	sc = device_get_softc(dev);
	sc->bge_dev = dev;

	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
	TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
	callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);

	pci_enable_busmaster(dev);

	/*
	 * Allocate control/status registers.
	 */
	rid = PCIR_BAR(0);
	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->bge_res == NULL) {
		device_printf(sc->bge_dev, "couldn't map BAR0 memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Save various chip information. */
	sc->bge_func_addr = pci_get_function(dev);
	sc->bge_chipid = bge_chipid(dev);
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/* Set default PHY address. */
	sc->bge_phy_addr = 1;

	/*
	 * PHY address mapping for various devices.
	 *
	 *          | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
	 * ---------+-------+-------+-------+-------+
	 * BCM57XX  |   1   |   X   |   X   |   X   |
	 * BCM5704  |   1   |   X   |   1   |   X   |
	 * BCM5717  |   1   |   8   |   2   |   9   |
	 * BCM5719  |   1   |   8   |   2   |   9   |
	 * BCM5720  |   1   |   8   |   2   |   9   |
	 *
	 *          | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
	 * ---------+-------+-------+-------+-------+
	 * BCM57XX  |   X   |   X   |   X   |   X   |
	 * BCM5704  |   X   |   X   |   X   |   X   |
	 * BCM5717  |   X   |   X   |   X   |   X   |
	 * BCM5719  |   3   |   10  |   4   |   11  |
	 * BCM5720  |   X   |   X   |   X   |   X   |
	 *
	 * Other addresses may respond but they are not
	 * IEEE compliant PHYs and should be ignored.
	 */
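	/*
	 * Editor's note -- worked example of the table above: on a BCM5719,
	 * PCI function 1 with a SerDes (Sr) interface takes PHY address
	 * bge_func_addr + 8 = 9, while the copper (Cu) variant takes
	 * bge_func_addr + 1 = 2, which is exactly what the code below
	 * computes from the strap/status registers.
	 */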
	if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5720) {
		if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
			if (CSR_READ_4(sc, BGE_SGDIG_STS) &
			    BGE_SGDIGSTS_IS_SERDES)
				sc->bge_phy_addr = sc->bge_func_addr + 8;
			else
				sc->bge_phy_addr = sc->bge_func_addr + 1;
		} else {
			if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
			    BGE_CPMU_PHY_STRAP_IS_SERDES)
				sc->bge_phy_addr = sc->bge_func_addr + 8;
			else
				sc->bge_phy_addr = sc->bge_func_addr + 1;
		}
	}

	if (bge_has_eaddr(sc))
		sc->bge_flags |= BGE_FLAG_EADDR;

	/* Save chipset family. */
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5762:
	case BGE_ASICREV_BCM57765:
	case BGE_ASICREV_BCM57766:
		sc->bge_flags |= BGE_FLAG_57765_PLUS;
		/* FALLTHROUGH */
	case BGE_ASICREV_BCM5717:
	case BGE_ASICREV_BCM5719:
	case BGE_ASICREV_BCM5720:
		sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
		    BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
		    BGE_FLAG_JUMBO_FRAME;
		if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5720) {
			/*
			 * Enable workaround for the DMA engine's
			 * miscalculation of available TXMBUF space.
			 */
			sc->bge_flags |= BGE_FLAG_RDMA_BUG;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
			    sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
				/* Jumbo frames on BCM5719 A0 do not work. */
				sc->bge_flags &= ~BGE_FLAG_JUMBO;
			}
		}
		break;
	case BGE_ASICREV_BCM5755:
	case BGE_ASICREV_BCM5761:
	case BGE_ASICREV_BCM5784:
	case BGE_ASICREV_BCM5785:
	case BGE_ASICREV_BCM5787:
	case BGE_ASICREV_BCM57780:
		sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
		    BGE_FLAG_5705_PLUS;
		break;
	case BGE_ASICREV_BCM5700:
	case BGE_ASICREV_BCM5701:
	case BGE_ASICREV_BCM5703:
	case BGE_ASICREV_BCM5704:
		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
		break;
	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5780:
	case BGE_ASICREV_BCM5714:
		sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
		/* FALLTHROUGH */
	case BGE_ASICREV_BCM5750:
	case BGE_ASICREV_BCM5752:
	case BGE_ASICREV_BCM5906:
		sc->bge_flags |= BGE_FLAG_575X_PLUS;
		/* FALLTHROUGH */
	case BGE_ASICREV_BCM5705:
		sc->bge_flags |= BGE_FLAG_5705_PLUS;
		break;
	}

	/* Identify chips with APE processor. */
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5717:
	case BGE_ASICREV_BCM5719:
	case BGE_ASICREV_BCM5720:
	case BGE_ASICREV_BCM5761:
	case BGE_ASICREV_BCM5762:
		sc->bge_flags |= BGE_FLAG_APE;
		break;
	}

	/* Chips with APE need BAR2 access for APE registers/memory. */
	if ((sc->bge_flags & BGE_FLAG_APE) != 0) {
		rid = PCIR_BAR(2);
		sc->bge_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
		if (sc->bge_res2 == NULL) {
			device_printf(sc->bge_dev,
			    "couldn't map BAR2 memory\n");
			error = ENXIO;
			goto fail;
		}

		/* Enable APE register/memory access by host driver. */
		pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
		pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4);

		bge_ape_lock_init(sc);
		bge_ape_read_fw_ver(sc);
	}

	/* Add SYSCTLs, requires the chipset family to be set. */
	bge_add_sysctls(sc);

	/* Identify the chips that use a CPMU. */
	if (BGE_IS_5717_PLUS(sc) ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM57780)
		sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
	if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
		sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
	else
		sc->bge_mi_mode = BGE_MIMODE_BASE;
	/* Enable auto polling for BCM570[0-5]. */
	if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
		sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;

	/*
	 * All Broadcom controllers have a 4GB boundary DMA bug.
	 * Whenever an address crosses a multiple of 4GB (including
	 * 4GB, 8GB, 12GB, etc.) and makes the transition from
	 * 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA state
	 * machine locks up and causes the device to hang.
	 */
	sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;

	/* BCM5755 or higher and BCM5906 have short DMA bug. */
	if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
		sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;

	/*
	 * The BCM5719 cannot handle DMA requests for DMA segments that
	 * are larger than 4KB.  However, the maximum DMA segment size
	 * created in the DMA tag is 4KB for TSO, so we should not
	 * encounter the issue here.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
		sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;

	misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
		if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
		    misccfg == BGE_MISCCFG_BOARD_ID_5788M)
			sc->bge_flags |= BGE_FLAG_5788;
	}

	capmask = BMSR_DEFCAPMASK;
	if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
	    (misccfg == 0x4000 || misccfg == 0x8000)) ||
	    (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
	    pci_get_vendor(dev) == BCOM_VENDORID &&
	    (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
	    (pci_get_vendor(dev) == BCOM_VENDORID &&
	    (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM57791 ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM57795 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		/* These chips are 10/100 only. */
		capmask &= ~BMSR_EXTSTAT;
		sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
	}

	/*
	 * Some controllers seem to require special firmware to use TSO.
	 * That firmware is not available to FreeBSD, and Linux claims
	 * that TSO performed by the firmware is slower than hardware
	 * based TSO.  Moreover, the firmware based TSO has one known
	 * bug: it cannot handle TSO if the Ethernet header plus IP/TCP
	 * header is greater than 80 bytes.  A workaround for the TSO
	 * bug exists, but it seems more expensive than not using TSO
	 * at all.  Some hardware also has the TSO bug, so limit TSO to
	 * the controllers that are not affected by TSO issues (e.g.
	 * 5755 or higher).
	 */
	if (BGE_IS_5717_PLUS(sc)) {
		/* BCM5717 requires different TSO configuration. */
		sc->bge_flags |= BGE_FLAG_TSO3;
		if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
		    sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
			/* TSO on BCM5719 A0 does not work. */
			sc->bge_flags &= ~BGE_FLAG_TSO3;
		}
	} else if (BGE_IS_5755_PLUS(sc)) {
		/*
		 * The BCM5754 and BCM5787 share the same ASIC ID, so an
		 * explicit device ID check is required.
		 * For unknown reasons TSO does not work on the BCM5755M.
		 */
		if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
		    pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
		    pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
			sc->bge_flags |= BGE_FLAG_TSO;
	}

	/*
	 * Check if this is a PCI-X or PCI Express device.
	 */
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		/*
		 * Found a PCI Express capabilities register, this
		 * must be a PCI Express device.
		 */
		sc->bge_flags |= BGE_FLAG_PCIE;
		sc->bge_expcap = reg;
		/* Extract supported maximum payload size. */
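		/*
		 * Editor's note -- decode example (per the PCIe spec, not
		 * driver-specific): the Device Capabilities max payload
		 * field is an exponent, so the code below computes
		 * 128 << field; a field value of 2 thus yields a 512 byte
		 * maximum payload.
		 */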
		sc->bge_mps = pci_read_config(dev,
		    sc->bge_expcap + PCIER_DEVICE_CAP, 2);
		sc->bge_mps = 128 << (sc->bge_mps & PCIEM_CAP_MAX_PAYLOAD);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5720)
			sc->bge_expmrq = 2048;
		else
			sc->bge_expmrq = 4096;
		pci_set_max_read_req(dev, sc->bge_expmrq);
	} else {
		/*
		 * Check if the device is in PCI-X Mode.
		 * (This bit is not valid on PCI Express controllers.)
		 */
		if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
			sc->bge_pcixcap = reg;
		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
		    BGE_PCISTATE_PCI_BUSMODE) == 0)
			sc->bge_flags |= BGE_FLAG_PCIX;
	}

	/*
	 * The 40bit DMA bug applies to the 5714/5715 controllers and is
	 * not actually a MAC controller bug but an issue with the embedded
	 * PCIe to PCI-X bridge in the device.  Use the 40bit DMA workaround.
	 */
	if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
		sc->bge_flags |= BGE_FLAG_40BIT_BUG;
	/*
	 * Some PCI-X bridges are known to trigger write reordering to
	 * the mailbox registers.  The typical phenomenon is watchdog
	 * timeouts caused by out-of-order TX completions.  Enable the
	 * workaround for PCI-X devices that live behind these bridges.
	 * Note, PCI-X controllers can run in PCI mode so we can't use
	 * the BGE_FLAG_PCIX flag to detect PCI-X controllers.
	 */
	if (sc->bge_pcixcap != 0 && bge_mbox_reorder(sc) != 0)
		sc->bge_flags |= BGE_FLAG_MBOX_REORDER;
	/*
	 * Allocate the interrupt, using MSI if possible.  These devices
	 * support 8 MSI messages, but only the first one is used in
	 * normal operation.
	 */
	rid = 0;
	if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
		sc->bge_msicap = reg;
		reg = 1;
		if (bge_can_use_msi(sc) && pci_alloc_msi(dev, &reg) == 0) {
			rid = 1;
			sc->bge_flags |= BGE_FLAG_MSI;
		}
	}

	/*
	 * All controllers except the BCM5700 support tagged status, but
	 * we use tagged status only for the MSI case on BCM5717.
	 * Otherwise MSI on BCM5717 does not work.
	 */
#ifndef DEVICE_POLLING
	if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
		sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
#endif

	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->bge_irq == NULL) {
		device_printf(sc->bge_dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	bge_devinfo(sc);

	sc->bge_asf_mode = 0;
	/* No ASF if APE present. */
	if ((sc->bge_flags & BGE_FLAG_APE) == 0) {
		if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
		    BGE_SRAM_DATA_SIG_MAGIC)) {
			if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) &
			    BGE_HWCFG_ASF) {
				sc->bge_asf_mode |= ASF_ENABLE;
				sc->bge_asf_mode |= ASF_STACKUP;
				if (BGE_IS_575X_PLUS(sc))
					sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
			}
		}
	}

	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
	if (bge_reset(sc)) {
		device_printf(sc->bge_dev, "chip reset failed\n");
		error = ENXIO;
		goto fail;
	}

	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);

	if (bge_chipinit(sc)) {
		device_printf(sc->bge_dev, "chip initialization failed\n");
		error = ENXIO;
		goto fail;
	}

	error = bge_get_eaddr(sc, eaddr);
	if (error) {
		device_printf(sc->bge_dev,
		    "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	/* 5705 limits RX return ring to 512 entries. */
	if (BGE_IS_5717_PLUS(sc))
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
	else if (BGE_IS_5705_PLUS(sc))
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	else
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

	if (bge_dma_alloc(sc)) {
		device_printf(sc->bge_dev,
		    "failed to allocate DMA resources\n");
		error = ENXIO;
		goto fail;
	}

	/* Set default tuneable values. */
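	/*
	 * Editor's note (illustrative, based on my reading of the host
	 * coalescing scheme): with the defaults below the controller
	 * raises an interrupt once 10 RX (or TX) descriptors are pending
	 * or the coalescing tick counter of 150 expires, whichever comes
	 * first; larger values trade interrupt rate against latency.
	 */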
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_tx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 10;
	sc->bge_tx_max_coal_bds = 10;

	/* Initialize checksum features to use. */
	sc->bge_csum_features = BGE_CSUM_FEATURES;
	if (sc->bge_forced_udpcsum != 0)
		sc->bge_csum_features |= CSUM_UDP;

	/* Set up ifnet structure */
	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->bge_dev, "failed to if_alloc()\n");
		error = ENXIO;
		goto fail;
	}
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, bge_ioctl);
	if_setstartfn(ifp, bge_start);
	if_setinitfn(ifp, bge_init);
	if_setgetcounterfn(ifp, bge_get_counter);
	if_setsendqlen(ifp, BGE_TX_RING_CNT - 1);
	if_setsendqready(ifp);
	if_sethwassist(ifp, sc->bge_csum_features);
	if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_MTU);
	if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
		if_sethwassistbits(ifp, CSUM_TSO, 0);
		if_setcapabilitiesbit(ifp, IFCAP_TSO4 | IFCAP_VLAN_HWTSO, 0);
	}
#ifdef IFCAP_VLAN_HWCSUM
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
#endif
	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	/*
	 * 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
		if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM);
		if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
		if_sethwassist(ifp, 0);
	}

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to examining the EEPROM if necessary.
	 * Note: on some BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC
	 * by its PCI subsystem ID, as we do below for the SysKonnect
	 * SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
		hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
	else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
	    (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
		    sizeof(hwcfg))) {
			device_printf(sc->bge_dev, "failed to read EEPROM\n");
			error = ENXIO;
			goto fail;
		}
		hwcfg = ntohl(hwcfg);
	}

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
	    SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
		if (BGE_IS_5705_PLUS(sc)) {
			sc->bge_flags |= BGE_FLAG_MII_SERDES;
			sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
		} else
			sc->bge_flags |= BGE_FLAG_TBI;
	}

	/* Set various PHY bug flags. */
	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
		sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
		sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
		sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
	if (pci_get_subvendor(dev) == DELL_VENDORID)
		sc->bge_phy_flags |= BGE_PHY_NO_3LED;
	if ((BGE_IS_5705_PLUS(sc)) &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM57780 &&
	    !BGE_IS_5717_PLUS(sc)) {
		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
			if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
			    pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
				sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
			if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
				sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
		} else
			sc->bge_phy_flags |= BGE_PHY_BER_BUG;
	}

	/*
	 * Don't enable Ethernet@WireSpeed for the 5700 or the
	 * 5705 A0 and A1 chips.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
	    (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5705_A1)))
		sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;

	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX |
		    IFM_FDX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
	} else {
		/*
		 * Do transceiver setup and tell the firmware the
		 * driver is down so that we can try to get access to
		 * the probe if ASF is running.  Retry a couple of times
		 * if we get a conflict with the ASF firmware accessing
		 * the PHY.
		 */
		trys = 0;
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
again:
		bge_asf_driver_up(sc);

		error = mii_attach(dev, &sc->bge_miibus, ifp,
		    (ifm_change_cb_t)bge_ifmedia_upd,
		    (ifm_stat_cb_t)bge_ifmedia_sts, capmask, sc->bge_phy_addr,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
		if (error != 0) {
			if (trys++ < 4) {
				device_printf(sc->bge_dev, "Try again\n");
				bge_miibus_writereg(sc->bge_dev,
				    sc->bge_phy_addr, MII_BMCR, BMCR_RESET);
				goto again;
			}
			device_printf(sc->bge_dev, "attaching PHYs failed\n");
			goto fail;
		}

		/*
		 * Now tell the firmware we are going up after probing
		 * the PHY.
		 */
		if (sc->bge_asf_mode & ASF_STACKUP)
			BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	}

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads.  On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
	    sc->bge_flags & BGE_FLAG_PCIX)
		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Tell upper layer we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/*
	 * Hookup IRQ last.
	 */
	if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
		/* Take advantage of single-shot MSI. */
		CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
		    ~BGE_MSIMODE_ONE_SHOT_DISABLE);
		sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
		    taskqueue_thread_enqueue, &sc->bge_tq);
		if (sc->bge_tq == NULL) {
			device_printf(dev, "could not create taskqueue.\n");
			ether_ifdetach(ifp);
			error = ENOMEM;
			goto fail;
		}
		error = taskqueue_start_threads(&sc->bge_tq, 1, PI_NET,
		    "%s taskq", device_get_nameunit(sc->bge_dev));
		if (error != 0) {
			device_printf(dev, "could not start threads.\n");
			ether_ifdetach(ifp);
			goto fail;
		}
		error = bus_setup_intr(dev, sc->bge_irq,
		    INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
		    &sc->bge_intrhand);
	} else
		error = bus_setup_intr(dev, sc->bge_irq,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
		    &sc->bge_intrhand);

	if (error) {
		ether_ifdetach(ifp);
		device_printf(sc->bge_dev, "couldn't set up irq\n");
	}

fail:
	if (error)
		bge_detach(dev);
	return (error);
}

static int
bge_detach(device_t dev)
{
	struct bge_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		BGE_LOCK(sc);
		bge_stop(sc);
		BGE_UNLOCK(sc);
		callout_drain(&sc->bge_stat_ch);
	}

	if (sc->bge_tq)
		taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);

	if (sc->bge_flags & BGE_FLAG_TBI)
		ifmedia_removeall(&sc->bge_ifmedia);
	else if (sc->bge_miibus != NULL) {
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bge_miibus);
	}

	bge_release_resources(sc);

	return (0);
}

static void
bge_release_resources(struct bge_softc *sc)
{
	device_t dev;

	dev = sc->bge_dev;

	if (sc->bge_tq != NULL)
		taskqueue_free(sc->bge_tq);

	if (sc->bge_intrhand != NULL)
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);

	if (sc->bge_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->bge_irq), sc->bge_irq);
		pci_release_msi(dev);
	}

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->bge_res), sc->bge_res);

	if (sc->bge_res2 != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->bge_res2), sc->bge_res2);

	if (sc->bge_ifp != NULL)
		if_free(sc->bge_ifp);

	bge_dma_free(sc);

	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
		BGE_LOCK_DESTROY(sc);
}

static int
bge_reset(struct bge_softc *sc)
{
	device_t dev;
	uint32_t cachesize, command, mac_mode, mac_mode_mask, reset, val;
	void (*write_op)(struct bge_softc *, int, int);
	uint16_t devctl;
	int i;

	dev = sc->bge_dev;

	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;

	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
	    (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
		if (sc->bge_flags & BGE_FLAG_PCIE)
			write_op = bge_writemem_direct;
		else
			write_op = bge_writemem_ind;
	} else
		write_op = bge_writereg_ind;

	if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5701) {
		CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
		for (i = 0; i < 8000; i++) {
			if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
			    BGE_NVRAMSWARB_GNT1)
				break;
			DELAY(20);
		}
		if (i == 8000) {
			if (bootverbose)
				device_printf(dev, "NVRAM lock timed out!\n");
		}
	}
	/* Take APE lock when performing reset. */
	bge_ape_lock(sc, BGE_APE_LOCK_GRC);

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);

	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);

	/* Disable fastboot on controllers that support it. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
	    BGE_IS_5755_PLUS(sc)) {
		if (bootverbose)
			device_printf(dev, "Disabling fastboot\n");
		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
	}

	/*
	 * Write the magic number to SRAM at offset 0xB50.
	 * When firmware finishes its initialization it will
	 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
	 */
	bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);

	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
		    (sc->bge_flags & BGE_FLAG_5717_PLUS) == 0) {
			if (CSR_READ_4(sc, 0x7E2C) == 0x60)	/* PCIE 1.0 */
				CSR_WRITE_4(sc, 0x7E2C, 0x20);
		}
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/* Prevent PCIE link training during global reset. */
			CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
			reset |= 1 << 29;
		}
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		val = CSR_READ_4(sc, BGE_VCPU_STATUS);
		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
		    val | BGE_VCPU_STATUS_DRV_RESET);
		val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
		    val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
	}

	/*
	 * Set GPHY Power Down Override to leave GPHY
	 * powered up in D0 uninitialized.
	 */
	if (BGE_IS_5705_PLUS(sc) &&
	    (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
		reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;

	/* Issue global reset */
	write_op(sc, BGE_MISC_CFG, reset);

	if (sc->bge_flags & BGE_FLAG_PCIE)
		DELAY(100 * 1000);
	else
		DELAY(1000);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			DELAY(500000); /* wait for link training to complete */
			val = pci_read_config(dev, 0xC4, 4);
			pci_write_config(dev, 0xC4, val | (1 << 15), 4);
		}
		devctl = pci_read_config(dev,
		    sc->bge_expcap + PCIER_DEVICE_CTL, 2);
		/* Clear enable no snoop and disable relaxed ordering. */
		devctl &= ~(PCIEM_CTL_RELAXED_ORD_ENABLE |
		    PCIEM_CTL_NOSNOOP_ENABLE);
		pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL,
		    devctl, 2);
		pci_set_max_read_req(dev, sc->bge_expmrq);
		/* Clear error status. */
		pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA,
		    PCIEM_STA_CORRECTABLE_ERROR |
		    PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
		    PCIEM_STA_UNSUPPORTED_REQ, 2);
	}

	/* Reset some of the PCI state that got zapped by reset. */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
	    (sc->bge_flags & BGE_FLAG_PCIX) != 0)
		val |= BGE_PCISTATE_RETRY_SAME_DMA;
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);

	/*
	 * Disable PCI-X relaxed ordering to ensure the status block
	 * update comes first, then the packet buffer DMA.  Otherwise
	 * the driver may read a stale status block.
	 */
	if (sc->bge_flags & BGE_FLAG_PCIX) {
		devctl = pci_read_config(dev,
		    sc->bge_pcixcap + PCIXR_COMMAND, 2);
		devctl &= ~PCIXM_COMMAND_ERO;
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
			devctl &= ~PCIXM_COMMAND_MAX_READ;
			devctl |= PCIXM_COMMAND_MAX_READ_2048;
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
			    PCIXM_COMMAND_MAX_READ);
			devctl |= PCIXM_COMMAND_MAX_READ_2048;
		}
		pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
		    devctl, 2);
	}
	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
	if (BGE_IS_5714_FAMILY(sc)) {
		/* This chip disables MSI on reset. */
		if (sc->bge_flags & BGE_FLAG_MSI) {
			val = pci_read_config(dev,
			    sc->bge_msicap + PCIR_MSI_CTRL, 2);
			pci_write_config(dev,
			    sc->bge_msicap + PCIR_MSI_CTRL,
			    val | PCIM_MSICTRL_MSI_ENABLE, 2);
			val = CSR_READ_4(sc, BGE_MSI_MODE);
			CSR_WRITE_4(sc, BGE_MSI_MODE,
			    val | BGE_MSIMODE_ENABLE);
		}
		val = CSR_READ_4(sc, BGE_MARB_MODE);
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
	} else
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping. */
	CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));

	val = CSR_READ_4(sc, BGE_MAC_MODE);
	val = (val & ~mac_mode_mask) | mac_mode;
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);

	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
			if (val & BGE_VCPU_STATUS_INIT_DONE)
				break;
			DELAY(100);
		}
		if (i == BGE_TIMEOUT) {
			device_printf(dev, "reset timed out\n");
			return (1);
		}
	} else {
		/*
		 * Poll until we see the 1's complement of the magic number.
		 * This indicates that the firmware initialization is complete.
		 * We expect this to fail if no chip containing the Ethernet
		 * address is fitted, though.
		 */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			DELAY(10);
			val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
			if (val == ~BGE_SRAM_FW_MB_MAGIC)
				break;
		}

		if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
			device_printf(dev,
			    "firmware handshake timed out, found 0x%08x\n",
			    val);

		/* BCM57765 A0 needs additional time before accessing. */
		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
			DELAY(10 * 1000);	/* XXX */
	}

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
	    sc->bge_flags & BGE_FLAG_TBI) {
		val = CSR_READ_4(sc, BGE_SERDES_CFG);
		val = (val & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
	}

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE &&
	    !BGE_IS_5717_PLUS(sc) &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5785) {
		/* Enable Data FIFO protection. */
		val = CSR_READ_4(sc, 0x7C00);
		CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);

	return (0);
}

static __inline void
bge_rxreuse_std(struct bge_softc *sc, int i)
{
	struct bge_rx_bd *r;

	r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
	r->bge_idx = i;
	BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
}

static __inline void
bge_rxreuse_jumbo(struct bge_softc *sc, int i)
{
	struct bge_extrx_bd *r;

	r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
	r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
	r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
	r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
	r->bge_idx = i;
	BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
}

/*
 * Frame reception handling.  This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */
static int
bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
{
	if_t ifp;
	int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
	uint16_t rx_cons;

	rx_cons = sc->bge_rx_saved_considx;

	/* Nothing to do. */
	if (rx_cons == rx_prod)
		return (rx_npkts);

	ifp = sc->bge_ifp;

	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
	if (BGE_IS_JUMBO_CAPABLE(sc) &&
	    if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);

	while (rx_cons != rx_prod) {
		struct bge_rx_bd *cur_rx;
		uint32_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

#ifdef DEVICE_POLLING
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif

		cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];

		rxidx = cur_rx->bge_idx;
		BGE_INC(rx_cons, sc->bge_return_ring_cnt);

		if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING &&
		    cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			jumbocnt++;
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				bge_rxreuse_jumbo(sc, rxidx);
				continue;
			}
			if (bge_newbuf_jumbo(sc, rxidx) != 0) {
				bge_rxreuse_jumbo(sc, rxidx);
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				continue;
			}
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
		} else {
			stdcnt++;
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				bge_rxreuse_std(sc, rxidx);
				continue;
			}
			if (bge_newbuf_std(sc, rxidx) != 0) {
				bge_rxreuse_std(sc, rxidx);
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				continue;
			}
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
		}

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * For architectures with strict alignment we must make sure
		 * the payload is aligned.
		 */
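		/*
		 * Editor's note -- worked example: the Ethernet header is
		 * 14 bytes, so a frame DMA'd to a 4-byte aligned buffer
		 * leaves the IP header only 2-byte aligned.  Copying the
		 * frame forward by ETHER_ALIGN (2) bytes, as done below,
		 * moves the IP header onto a 4-byte boundary for
		 * strict-alignment architectures.
		 */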
		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		if (if_getcapenable(ifp) & IFCAP_RXCSUM)
			bge_rxcsum(sc, cur_rx, m);

		/*
		 * If we received a packet with a vlan tag,
		 * attach that information to the packet.
		 */
		if (have_tag) {
			m->m_pkthdr.ether_vtag = vlan_tag;
			m->m_flags |= M_VLANTAG;
		}

		if (holdlck != 0) {
			BGE_UNLOCK(sc);
			if_input(ifp, m);
			BGE_LOCK(sc);
		} else
			if_input(ifp, m);
		rx_npkts++;

		if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
			return (rx_npkts);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
	if (stdcnt > 0)
		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
	if (jumbocnt > 0)
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);

	sc->bge_rx_saved_considx = rx_cons;
	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
		    BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
	if (jumbocnt)
		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
		    BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
#ifdef notyet
	/*
	 * This register wraps very quickly under heavy packet drops.
	 * If you need correct statistics, you can enable this check.
	 */
	if (BGE_IS_5705_PLUS(sc))
		if_incierrors(ifp,
		    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS));
#endif
	return (rx_npkts);
}

static void
bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
{

	if (BGE_IS_5717_PLUS(sc)) {
		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((cur_rx->bge_error_flag &
				    BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_VALID;
			}
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			}
		}
	} else {
		if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		}
		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
		    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
			m->m_pkthdr.csum_data =
			    cur_rx->bge_tcp_udp_csum;
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		}
	}
}

static void
bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
{
	struct bge_tx_bd *cur_tx;
	if_t ifp;

	BGE_LOCK_ASSERT(sc);

	/* Nothing to do. */
	if (sc->bge_tx_saved_considx == tx_cons)
		return;

	ifp = sc->bge_ifp;

	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx != tx_cons) {
		uint32_t idx;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx]);
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
	}

	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	if (sc->bge_txcnt == 0)
		sc->bge_timer = 0;
}

#ifdef DEVICE_POLLING
static int
bge_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct bge_softc *sc = if_getsoftc(ifp);
	uint16_t rx_prod, tx_cons;
	uint32_t statusword;
	int rx_npkts = 0;

	BGE_LOCK(sc);
	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		BGE_UNLOCK(sc);
		return (rx_npkts);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/* Fetch updates from the status block. */
	rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
	statusword = sc->bge_ldata.bge_status_block->bge_status;
	/* Clear the status so the next pass only sees the changes. */
	sc->bge_ldata.bge_status_block->bge_status = 0;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Note link event.  It will be processed by POLL_AND_CHECK_STATUS. */
	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
		sc->bge_link_evt++;

	if (cmd == POLL_AND_CHECK_STATUS)
		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
		    sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
			bge_link_upd(sc);

	sc->rxcycles = count;
	rx_npkts = bge_rxeof(sc, rx_prod, 1);
	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		BGE_UNLOCK(sc);
		return (rx_npkts);
	}
	bge_txeof(sc, tx_cons);
	if (!if_sendq_empty(ifp))
		bge_start_locked(ifp);

	BGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
bge_msi_intr(void *arg)
{
	struct bge_softc *sc;

	sc = (struct bge_softc *)arg;
	/*
	 * This interrupt is not shared and the controller has already
	 * disabled further interrupts.
	 */
	taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
	return (FILTER_HANDLED);
}

static void
bge_intr_task(void *arg, int pending)
{
	struct bge_softc *sc;
	if_t ifp;
	uint32_t status, status_tag;
	uint16_t rx_prod, tx_cons;

	sc = (struct bge_softc *)arg;
	ifp = sc->bge_ifp;

	BGE_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		BGE_UNLOCK(sc);
		return;
	}

	/* Get updated status block. */
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Save producer/consumer indices. */
	rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
	status = sc->bge_ldata.bge_status_block->bge_status;
	status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
	/* Dirty the status flag. */
	sc->bge_ldata.bge_status_block->bge_status = 0;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
		status_tag = 0;

	if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
		bge_link_upd(sc);

	/* Let controller work. */
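	/*
	 * Editor's note (my understanding of the tagged status scheme):
	 * writing the saved tag back to the mailbox both re-enables the
	 * interrupt and tells the controller which status block update
	 * the driver has consumed; with tagged status disabled the write
	 * below is simply 0, which only re-enables the interrupt.
	 */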
	bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
	    sc->bge_rx_saved_considx != rx_prod) {
		/* Check RX return ring producer/consumer. */
		BGE_UNLOCK(sc);
		bge_rxeof(sc, rx_prod, 0);
		BGE_LOCK(sc);
	}
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		/* Check TX ring producer/consumer. */
		bge_txeof(sc, tx_cons);
		if (!if_sendq_empty(ifp))
			bge_start_locked(ifp);
	}
	BGE_UNLOCK(sc);
}

static void
bge_intr(void *xsc)
{
	struct bge_softc *sc;
	if_t ifp;
	uint32_t statusword;
	uint16_t rx_prod, tx_cons;

	sc = xsc;

	BGE_LOCK(sc);

	ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		BGE_UNLOCK(sc);
		return;
	}
#endif

	/*
	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
	 * disable interrupts by writing nonzero like we used to, since with
	 * our current organization this just gives complications and
	 * pessimizations for re-enabling interrupts.  We used to have races
	 * instead of the necessary complications.  Disabling interrupts
	 * would just reduce the chance of a status update while we are
	 * running (by switching to the interrupt-mode coalescence
	 * parameters), but this chance is already very low so it is more
	 * efficient to get another interrupt than prevent it.
	 *
	 * We do the ack first to ensure another interrupt if there is a
	 * status update after the ack.  We don't check for the status
	 * changing later because it is more efficient to get another
	 * interrupt than prevent it, not quite as above (not checking is
	 * a smaller optimization than not toggling the interrupt enable,
	 * since checking doesn't involve PCI accesses and toggling requires
	 * the status check).  So toggling would probably be a pessimization
	 * even with MSI.  It would only be needed for using a task queue.
	 */
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);

	/*
	 * Do the mandatory PCI flush as well as get the link status.
	 */
	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;

	/* Make sure the descriptor ring indexes are coherent. */
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
	sc->bge_ldata.bge_status_block->bge_status = 0;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
	    statusword || sc->bge_link_evt)
		bge_link_upd(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		/* Check RX return ring producer/consumer. */
		bge_rxeof(sc, rx_prod, 1);
	}

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		/* Check TX ring producer/consumer. */
		bge_txeof(sc, tx_cons);
	}

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
	    !if_sendq_empty(ifp))
		bge_start_locked(ifp);

	BGE_UNLOCK(sc);
}

static void
bge_asf_driver_up(struct bge_softc *sc)
{
	if (sc->bge_asf_mode & ASF_STACKUP) {
		/* Send ASF heartbeat approx. every 2s */
		if (sc->bge_asf_count)
			sc->bge_asf_count--;
		else {
			sc->bge_asf_count = 2;
			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
			    BGE_FW_CMD_DRV_ALIVE);
			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
			    BGE_FW_HB_TIMEOUT_SEC);
			CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
			    CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
			    BGE_RX_CPU_DRV_EVENT);
		}
	}
}

static void
bge_tick(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct mii_data *mii = NULL;

	BGE_LOCK_ASSERT(sc);

	/* Synchronize with possible callout reset/stop. */
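	/*
	 * Editor's note on the idiom below (see callout(9)): a pending
	 * callout means this instance was rescheduled after it was
	 * already queued, and an inactive callout means callout_stop()
	 * won the race; in either case this tick must not run.
	 */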
	if (callout_pending(&sc->bge_stat_ch) ||
	    !callout_active(&sc->bge_stat_ch))
		return;

	if (BGE_IS_5705_PLUS(sc))
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);

	/* XXX Add APE heartbeat check here? */

	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
		mii = device_get_softc(sc->bge_miibus);
		/*
		 * Do not touch PHY if we have link up.  This could break
		 * IPMI/ASF mode or produce extra input errors
		 * (extra errors were reported for bcm5701 & bcm5704).
		 */
		if (!sc->bge_link)
			mii_tick(mii);
	} else {
		/*
		 * Since auto-polling can't be used in TBI mode, we poll
		 * the link status manually.  Here we register a pending
		 * link event and trigger an interrupt.
		 */
#ifdef DEVICE_POLLING
		/* In polling mode we poll link state in bge_poll(). */
		if (!(if_getcapenable(sc->bge_ifp) & IFCAP_POLLING))
#endif
		{
			sc->bge_link_evt++;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
			    sc->bge_flags & BGE_FLAG_5788)
				BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL,
				    BGE_MLC_INTR_SET);
			else
				BGE_SETBIT(sc, BGE_HCC_MODE,
				    BGE_HCCMODE_COAL_NOW);
		}
	}

	bge_asf_driver_up(sc);
	bge_watchdog(sc);

	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
}

static void
bge_stats_update_regs(struct bge_softc *sc)
{
	if_t ifp;
	struct bge_mac_stats *stats;
	uint32_t val;

	ifp = sc->bge_ifp;
	stats = &sc->bge_mac_stats;

	stats->ifHCOutOctets +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
	stats->etherStatsCollisions +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
	stats->outXonSent +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
	stats->outXoffSent +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
	stats->dot3StatsInternalMacTransmitErrors +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
	stats->dot3StatsSingleCollisionFrames +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
	stats->dot3StatsMultipleCollisionFrames +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
	stats->dot3StatsDeferredTransmissions +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
	stats->dot3StatsExcessiveCollisions +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
	stats->dot3StatsLateCollisions +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
	stats->ifHCOutUcastPkts +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
	stats->ifHCOutMulticastPkts +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
	stats->ifHCOutBroadcastPkts +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);

	stats->ifHCInOctets +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
	stats->etherStatsFragments +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
	stats->ifHCInUcastPkts +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
	stats->ifHCInMulticastPkts +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
	stats->ifHCInBroadcastPkts +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
	stats->dot3StatsFCSErrors +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
	stats->dot3StatsAlignmentErrors +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
	stats->xonPauseFramesReceived +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
	stats->xoffPauseFramesReceived +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
	stats->macControlFramesReceived +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
	stats->xoffStateEntered +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
	stats->dot3StatsFramesTooLong +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
	stats->etherStatsJabbers +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
	stats->etherStatsUndersizePkts +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);

	stats->FramesDroppedDueToFilters +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
	stats->DmaWriteQueueFull +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
	stats->DmaWriteHighPriQueueFull +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
	stats->NoMoreRxBDs +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
	/*
	 * XXX
	 * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS
	 * counter of the BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0
	 * includes the number of unwanted multicast frames.  This comes
	 * from a silicon bug, and the known workaround to get a rough
	 * (not exact) counter is to enable an interrupt on MBUF low
	 * water attention.  This can be accomplished by setting the
	 * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, the
	 * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and the
	 * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL.  However,
	 * that change would generate more interrupts and there are
	 * still possibilities of losing multiple frames during
	 * BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling.  Given that
	 * the workaround still would not yield a correct counter, it
	 * does not seem worth implementing.  So ignore reading the
	 * counter on controllers that have the silicon bug.
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
		stats->InputDiscards +=
		    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
	stats->InputErrors +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
	stats->RecvThresholdHit +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);

	if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
		/*
		 * If the controller has transmitted more than
		 * BGE_NUM_RDMA_CHANNELS frames, it is safe to disable
		 * the workaround for the DMA engine's miscalculation
		 * of TXMBUF space.
		 */
		if (stats->ifHCOutUcastPkts + stats->ifHCOutMulticastPkts +
		    stats->ifHCOutBroadcastPkts > BGE_NUM_RDMA_CHANNELS) {
			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
				val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
			else
				val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
			sc->bge_flags &= ~BGE_FLAG_RDMA_BUG;
		}
	}
}

static void
bge_stats_clear_regs(struct bge_softc *sc)
{

	CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);

	CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);

	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
}

static void
bge_stats_update(struct bge_softc *sc)
{
	if_t ifp;
	bus_size_t stats;
	uint32_t cnt;	/* current register value */

	ifp = sc->bge_ifp;

	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
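	/*
	 * Editor's note -- the macro below reads a counter out of the
	 * chip's statistics block through the memory window; e.g.
	 * READ_STAT(sc, stats, ifInDiscards.bge_addr_lo) expands to
	 * CSR_READ_4(sc, stats + offsetof(struct bge_stats,
	 * ifInDiscards.bge_addr_lo)).
	 */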
#define	READ_STAT(sc, stats, stat) \
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, cnt - sc->bge_tx_collisions);
	sc->bge_tx_collisions = cnt;

	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_nobds);
	sc->bge_rx_nobds = cnt;
	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_inerrs);
	sc->bge_rx_inerrs = cnt;
	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_discards);
	sc->bge_rx_discards = cnt;

	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_OERRORS, cnt - sc->bge_tx_discards);
	sc->bge_tx_discards = cnt;

#undef	READ_STAT
}

/*
 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
 * but when such padded frames employ the bge IP/TCP checksum offload,
 * the hardware checksum assist gives incorrect results (possibly
 * from incorporating its own padding into the UDP/TCP checksum; who knows).
 * If we pad such runts with zeros, the onboard checksum comes out correct.
 */
static __inline int
bge_cksum_pad(struct mbuf *m)
{
	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
	struct mbuf *last;

	/* If there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
	    M_TRAILINGSPACE(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf.  We will either
		 * pad there, or append a new mbuf and pad it.
		 */
		for (last = m; last->m_next != NULL; last = last->m_next);
		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
			/* Allocate new empty mbuf, pad it.  Compact later. */
			struct mbuf *n;

			MGET(n, M_NOWAIT, MT_DATA);
			if (n == NULL)
				return (ENOBUFS);
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}

	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;

	return (0);
}

static struct mbuf *
bge_check_short_dma(struct mbuf *m)
{
	struct mbuf *n;
	int found;

	/*
	 * If the device receives two back-to-back send BDs with less than
	 * or equal to 8 total bytes then the device may hang.  The two
	 * back-to-back send BDs must be in the same frame for this failure
	 * to occur.  Scan the mbuf chain and see whether two back-to-back
	 * send BDs are there.  If this is the case, allocate a new mbuf
	 * and copy the frame to work around the silicon bug.
	 */
	for (n = m, found = 0; n != NULL; n = n->m_next) {
		if (n->m_len < 8) {
			found++;
			if (found > 1)
				break;
			continue;
		}
		found = 0;
	}

	if (found > 1) {
		n = m_defrag(m, M_NOWAIT);
		if (n == NULL)
			m_freem(m);
	} else
		n = m;
	return (n);
}
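/*
 * Editor's note -- worked example for bge_check_short_dma() above: a
 * chain with m_len values 4, 3, 60 contains two consecutive mbufs
 * shorter than 8 bytes, so the whole frame is copied into a fresh
 * chain with m_defrag(); a chain with m_len values 4, 60, 3 never has
 * two such mbufs back to back and is passed through untouched.
 */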
static struct mbuf *
bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
    uint16_t *flags)
{
	struct ip *ip;
	struct tcphdr *tcp;
	struct mbuf *n;
	uint16_t hlen;
	uint32_t poff;

	if (M_WRITABLE(m) == 0) {
		/* Get a writable copy. */
		n = m_dup(m, M_NOWAIT);
		m_freem(m);
		if (n == NULL)
			return (NULL);
		m = n;
	}
	m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
	if (m == NULL)
		return (NULL);
	ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
	poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
	m = m_pullup(m, poff + sizeof(struct tcphdr));
	if (m == NULL)
		return (NULL);
	tcp = (struct tcphdr *)(mtod(m, char *) + poff);
	m = m_pullup(m, poff + (tcp->th_off << 2));
	if (m == NULL)
		return (NULL);
	/*
	 * It seems the controller doesn't modify the IP length and TCP
	 * pseudo checksum.  These checksums computed by the upper stack
	 * should be 0.
	 */
	*mss = m->m_pkthdr.tso_segsz;
	ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
	ip->ip_sum = 0;
	ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
	/* Clear pseudo checksum computed by TCP stack. */
	tcp = (struct tcphdr *)(mtod(m, char *) + poff);
	tcp->th_sum = 0;
	/*
	 * Broadcom controllers use a different descriptor format for
	 * TSO depending on the ASIC revision.  Due to the TSO-capable
	 * firmware license issue and the lower performance of firmware
	 * based TSO, we only support hardware based TSO.
	 */
	/* Calculate header length, incl. TCP/IP options, in 32 bit units. */
	hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
	if (sc->bge_flags & BGE_FLAG_TSO3) {
		/*
		 * For BCM5717 and newer controllers, hardware based TSO
		 * uses the 14 lower bits of the bge_mss field to store the
		 * MSS and the upper 2 bits to store the lowest 2 bits of
		 * the IP/TCP header length.  The upper 6 bits of the header
		 * length are stored in the bge_flags[14:10,4] field.  Jumbo
		 * frames are supported.
		 */
		*mss |= ((hlen & 0x3) << 14);
		*flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
	} else {
		/*
		 * For BCM5755 and newer controllers, hardware based TSO uses
		 * the lower 11 bits to store the MSS and the upper 5 bits to
		 * store the IP/TCP header length.  Jumbo frames are not
		 * supported.
		 */
		*mss |= (hlen << 11);
	}
	return (m);
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
{
	bus_dma_segment_t segs[BGE_NSEG_NEW];
	bus_dmamap_t map;
	struct bge_tx_bd *d;
	struct mbuf *m = *m_head;
	uint32_t idx = *txidx;
	uint16_t csum_flags, mss, vlan_tag;
	int nsegs, i, error;

	csum_flags = 0;
	mss = 0;
	vlan_tag = 0;
	if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
	    m->m_next != NULL) {
		*m_head = bge_check_short_dma(m);
		if (*m_head == NULL)
			return (ENOBUFS);
		m = *m_head;
	}
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		*m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
		if (*m_head == NULL)
			return (ENOBUFS);
		csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
		    BGE_TXBDFLAG_CPU_POST_DMA;
	} else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
			    (error = bge_cksum_pad(m)) != 0) {
				m_freem(m);
				*m_head = NULL;
				return (error);
			}
		}
	}

	if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
		if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
		    m->m_pkthdr.len > ETHER_MAX_LEN)
			csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
		if (sc->bge_forced_collapse > 0 &&
		    (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
			/*
			 * Forcibly collapse mbuf chains to overcome a
			 * hardware limitation which only supports a single
			 * outstanding DMA read operation.
			 */
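			/*
			 * Editor's note (see mbuf(9)): m_defrag() copies
			 * the whole chain into as few mbufs/clusters as
			 * possible, while m_collapse() only compacts the
			 * chain until it fits in the given number of
			 * segments, so the tunable chooses between the
			 * thorough and the cheap variant below.
			 */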
*/ if (sc->bge_forced_collapse == 1) m = m_defrag(m, M_NOWAIT); else m = m_collapse(m, M_NOWAIT, sc->bge_forced_collapse); if (m == NULL) m = *m_head; *m_head = m; } } map = sc->bge_cdata.bge_tx_dmamap[idx]; error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(m, M_NOWAIT, BGE_NSEG_NEW); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { m_freem(m); *m_head = NULL; return (error); } } else if (error != 0) return (error); /* Check if we have enough free send BDs. */ if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) { bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map); return (ENOBUFS); } bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE); if (m->m_flags & M_VLANTAG) { csum_flags |= BGE_TXBDFLAG_VLAN_TAG; vlan_tag = m->m_pkthdr.ether_vtag; } if (sc->bge_asicrev == BGE_ASICREV_BCM5762 && (m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { /* * 5725 family of devices corrupts TSO packets when TSO DMA * buffers cross into regions which are within MSS bytes of * a 4GB boundary. If we encounter the condition, drop the * packet. */ for (i = 0; ; i++) { d = &sc->bge_ldata.bge_tx_ring[idx]; d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); d->bge_len = segs[i].ds_len; if (d->bge_addr.bge_addr_lo + segs[i].ds_len + mss < d->bge_addr.bge_addr_lo) break; d->bge_flags = csum_flags; d->bge_vlan_tag = vlan_tag; d->bge_mss = mss; if (i == nsegs - 1) break; BGE_INC(idx, BGE_TX_RING_CNT); } if (i != nsegs - 1) { bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map); m_freem(*m_head); *m_head = NULL; return (EIO); } } else { for (i = 0; ; i++) { d = &sc->bge_ldata.bge_tx_ring[idx]; d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); d->bge_len = segs[i].ds_len; d->bge_flags = csum_flags; d->bge_vlan_tag = vlan_tag; d->bge_mss = mss; if (i == nsegs - 1) break; BGE_INC(idx, BGE_TX_RING_CNT); } } /* Mark the last segment as end of packet... */ d->bge_flags |= BGE_TXBDFLAG_END; /* * Insure that the map for this transmission * is placed at the array index of the last descriptor * in this chain. */ sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; sc->bge_cdata.bge_tx_dmamap[idx] = map; sc->bge_cdata.bge_tx_chain[idx] = m; sc->bge_txcnt += nsegs; BGE_INC(idx, BGE_TX_RING_CNT); *txidx = idx; return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. */ static void bge_start_locked(if_t ifp) { struct bge_softc *sc; struct mbuf *m_head; uint32_t prodidx; int count; sc = if_getsoftc(ifp); BGE_LOCK_ASSERT(sc); if (!sc->bge_link || (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; prodidx = sc->bge_tx_prodidx; for (count = 0; !if_sendq_empty(ifp);) { if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) { if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); break; } m_head = if_dequeue(ifp); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. 
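The 5762 boundary test above exploits unsigned wraparound: if the 32-bit low address plus the segment length plus the MSS comes out smaller than the address it started from, the buffer runs into a 4GB line. The idiom in isolation, as a sketch with arbitrary constants:

#include <assert.h>
#include <stdint.h>

/* True when [lo, lo + len + mss) wraps the 32-bit address space. */
static int
crosses_4g(uint32_t lo, uint32_t len, uint32_t mss)
{
	return ((uint32_t)(lo + len + mss) < lo);
}

int
main(void)
{
	assert(crosses_4g(0xFFFFF000U, 0x2000, 1448));	/* wraps past 4GB */
	assert(!crosses_4g(0x00001000U, 0x2000, 1448));	/* fits comfortably */
	return (0);
}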
*/ if (bge_encap(sc, &m_head, &prodidx)) { if (m_head == NULL) break; if_sendq_prepend(ifp, m_head); if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); break; } ++count; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if_bpfmtap(ifp, m_head); } if (count > 0) { bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); /* Transmit. */ bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); sc->bge_tx_prodidx = prodidx; /* * Set a timeout in case the chip goes out to lunch. */ sc->bge_timer = BGE_TX_TIMEOUT; } } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. */ static void bge_start(if_t ifp) { struct bge_softc *sc; sc = if_getsoftc(ifp); BGE_LOCK(sc); bge_start_locked(ifp); BGE_UNLOCK(sc); } static void bge_init_locked(struct bge_softc *sc) { if_t ifp; uint16_t *m; uint32_t mode; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return; /* Cancel pending I/O and flush buffers. */ bge_stop(sc); bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_START); bge_reset(sc); bge_sig_legacy(sc, BGE_RESET_START); bge_sig_post_reset(sc, BGE_RESET_START); bge_chipinit(sc); /* * Init the various state machines, ring * control blocks and firmware. */ if (bge_blockinit(sc)) { device_printf(sc->bge_dev, "initialization failure\n"); return; } ifp = sc->bge_ifp; /* Specify MTU. */ CSR_WRITE_4(sc, BGE_RX_MTU, if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + (if_getcapenable(ifp) & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0)); /* Load our MAC address. */ m = (uint16_t *)IF_LLADDR(sc->bge_ifp); CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); /* Program promiscuous mode. */ bge_setpromisc(sc); /* Program multicast filter. */ bge_setmulti(sc); /* Program VLAN tag stripping. */ bge_setvlan(sc); /* Override UDP checksum offloading. */ if (sc->bge_forced_udpcsum == 0) sc->bge_csum_features &= ~CSUM_UDP; else sc->bge_csum_features |= CSUM_UDP; if (if_getcapabilities(ifp) & IFCAP_TXCSUM && if_getcapenable(ifp) & IFCAP_TXCSUM) { if_sethwassistbits(ifp, 0, (BGE_CSUM_FEATURES | CSUM_UDP)); if_sethwassistbits(ifp, sc->bge_csum_features, 0); } /* Init RX ring. */ if (bge_init_rx_ring_std(sc) != 0) { device_printf(sc->bge_dev, "no memory for std Rx buffers.\n"); bge_stop(sc); return; } /* * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's * memory to insure that the chip has in fact read the first * entry of the ring. */ if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { uint32_t v, i; for (i = 0; i < 10; i++) { DELAY(20); v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); if (v == (MCLBYTES - ETHER_ALIGN)) break; } if (i == 10) device_printf (sc->bge_dev, "5705 A0 chip failed to load RX ring\n"); } /* Init jumbo RX ring. */ if (BGE_IS_JUMBO_CAPABLE(sc) && if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) { if (bge_init_rx_ring_jumbo(sc) != 0) { device_printf(sc->bge_dev, "no memory for jumbo Rx buffers.\n"); bge_stop(sc); return; } } /* Init our RX return ring index. */ sc->bge_rx_saved_considx = 0; /* Init our RX/TX stat counters. */ sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0; /* Init TX ring. */ bge_init_tx_ring(sc); /* Enable TX MAC state machine lockup fix. 
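The BGE_RX_MTU value programmed in bge_init_locked() above is just the interface MTU plus fixed Ethernet framing overhead. A worked example with the standard sizes (14-byte header, 4-byte CRC, 4-byte VLAN tag; these are the usual Ethernet constants, not values read from the driver):

#include <stdio.h>

#define ETHER_HDR_LEN		14
#define ETHER_CRC_LEN		4
#define ETHER_VLAN_ENCAP_LEN	4

int
main(void)
{
	int mtu = 1500;		/* default interface MTU */
	int vlan_mtu = 1;	/* IFCAP_VLAN_MTU enabled */
	int rx_mtu;

	rx_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    (vlan_mtu ? ETHER_VLAN_ENCAP_LEN : 0);
	printf("BGE_RX_MTU = %d\n", rx_mtu);	/* 1522 for tagged frames */
	return (0);
}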
*/ mode = CSR_READ_4(sc, BGE_TX_MODE); if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906) mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); mode |= CSR_READ_4(sc, BGE_TX_MODE) & (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); } /* Turn on transmitter. */ CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); DELAY(100); /* Turn on receiver. */ mode = CSR_READ_4(sc, BGE_RX_MODE); if (BGE_IS_5755_PLUS(sc)) mode |= BGE_RXMODE_IPV6_ENABLE; if (sc->bge_asicrev == BGE_ASICREV_BCM5762) mode |= BGE_RXMODE_IPV4_FRAG_FIX; CSR_WRITE_4(sc,BGE_RX_MODE, mode | BGE_RXMODE_ENABLE); DELAY(10); /* * Set the number of good frames to receive after RX MBUF * Low Watermark has been reached. After the RX MAC receives * this number of frames, it will drop subsequent incoming * frames until the MBUF High Watermark is reached. */ if (BGE_IS_57765_PLUS(sc)) CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1); else CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); /* Clear MAC statistics. */ if (BGE_IS_5705_PLUS(sc)) bge_stats_clear_regs(sc); /* Tell firmware we're alive. */ BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (if_getcapenable(ifp) & IFCAP_POLLING) { BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); } else #endif /* Enable host interrupts. */ { BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); } if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); bge_ifmedia_upd_locked(ifp); callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); } static void bge_init(void *xsc) { struct bge_softc *sc = xsc; BGE_LOCK(sc); bge_init_locked(sc); BGE_UNLOCK(sc); } /* * Set media options. */ static int bge_ifmedia_upd(if_t ifp) { struct bge_softc *sc = if_getsoftc(ifp); int res; BGE_LOCK(sc); res = bge_ifmedia_upd_locked(ifp); BGE_UNLOCK(sc); return (res); } static int bge_ifmedia_upd_locked(if_t ifp) { struct bge_softc *sc = if_getsoftc(ifp); struct mii_data *mii; struct mii_softc *miisc; struct ifmedia *ifm; BGE_LOCK_ASSERT(sc); ifm = &sc->bge_ifmedia; /* If this is a 1000baseX NIC, enable the TBI port. */ if (sc->bge_flags & BGE_FLAG_TBI) { if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch(IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: /* * The BCM5704 ASIC appears to have a special * mechanism for programming the autoneg * advertisement registers in TBI mode. 
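The BCM5720/BCM5762 TX-mode handling above is a read-modify-write that deliberately keeps two fields at whatever value the hardware currently holds. The idiom reduced to plain C; the mask is a stand-in, not the real register layout:

#include <stdint.h>

#define PRESERVE_MASK	0x30000000U	/* stand-in for JMB_FRM_LEN|CNT_DN_MODE */

/* Merge a desired mode word with fields preserved from the live register. */
static uint32_t
merge_mode(uint32_t desired, uint32_t live)
{
	desired &= ~PRESERVE_MASK;		/* drop our copies of the fields */
	desired |= (live & PRESERVE_MASK);	/* keep the hardware's values */
	return (desired);
}

int
main(void)
{
	return (merge_mode(0xFFFFFFFFU, 0x10000000U) == 0xDFFFFFFFU ? 0 : 1);
}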
*/ if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { uint32_t sgdig; sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); if (sgdig & BGE_SGDIGSTS_DONE) { CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); sgdig |= BGE_SGDIGCFG_AUTO | BGE_SGDIGCFG_PAUSE_CAP | BGE_SGDIGCFG_ASYM_PAUSE; CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig | BGE_SGDIGCFG_SEND); DELAY(5); CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); } } break; case IFM_1000_SX: if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } else { BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } DELAY(40); break; default: return (EINVAL); } return (0); } sc->bge_link_evt++; mii = device_get_softc(sc->bge_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); mii_mediachg(mii); /* * Force an interrupt so that we will call bge_link_upd * if needed and clear any pending link state attention. * Without this we are not getting any further interrupts * for link state changes and thus will not UP the link and * not be able to send in bge_start_locked. The only * way to get things working was to receive a packet and * get an RX intr. * bge_tick should help for fiber cards and we might not * need to do this here if BGE_FLAG_TBI is set but as * we poll for fiber anyway it should not harm. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_flags & BGE_FLAG_5788) BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); else BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); return (0); } /* * Report current media status. */ static void bge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct bge_softc *sc = if_getsoftc(ifp); struct mii_data *mii; BGE_LOCK(sc); if ((if_getflags(ifp) & IFF_UP) == 0) { BGE_UNLOCK(sc); return; } if (sc->bge_flags & BGE_FLAG_TBI) { ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_TBI_PCS_SYNCHED) ifmr->ifm_status |= IFM_ACTIVE; else { ifmr->ifm_active |= IFM_NONE; BGE_UNLOCK(sc); return; } ifmr->ifm_active |= IFM_1000_SX; if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) ifmr->ifm_active |= IFM_HDX; else ifmr->ifm_active |= IFM_FDX; BGE_UNLOCK(sc); return; } mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; BGE_UNLOCK(sc); } static int bge_ioctl(if_t ifp, u_long command, caddr_t data) { struct bge_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int flags, mask, error = 0; switch (command) { case SIOCSIFMTU: if (BGE_IS_JUMBO_CAPABLE(sc) || (sc->bge_flags & BGE_FLAG_JUMBO_STD)) { if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > BGE_JUMBO_MTU) { error = EINVAL; break; } } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) { error = EINVAL; break; } BGE_LOCK(sc); if (if_getmtu(ifp) != ifr->ifr_mtu) { if_setmtu(ifp, ifr->ifr_mtu); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); bge_init_locked(sc); } } BGE_UNLOCK(sc); break; case SIOCSIFFLAGS: BGE_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. Similarly for ALLMULTI. 
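The SIOCSIFFLAGS handler below isolates the changed bits with XOR, so it can react to a PROMISC or ALLMULTI toggle without the expensive full reinit the comment above describes. The idiom by itself; the flag values are illustrative only:

#include <assert.h>

#define IFF_PROMISC	0x100	/* illustrative bit values */
#define IFF_ALLMULTI	0x200

int
main(void)
{
	int oldf = IFF_ALLMULTI;
	int newf = IFF_ALLMULTI | IFF_PROMISC;
	int delta = oldf ^ newf;		/* bits that toggled */

	assert(delta & IFF_PROMISC);		/* PROMISC changed... */
	assert(!(delta & IFF_ALLMULTI));	/* ...ALLMULTI did not */
	return (0);
}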
*/ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { flags = if_getflags(ifp) ^ sc->bge_if_flags; if (flags & IFF_PROMISC) bge_setpromisc(sc); if (flags & IFF_ALLMULTI) bge_setmulti(sc); } else bge_init_locked(sc); } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { bge_stop(sc); } } sc->bge_if_flags = if_getflags(ifp); BGE_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { BGE_LOCK(sc); bge_setmulti(sc); BGE_UNLOCK(sc); error = 0; } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (sc->bge_flags & BGE_FLAG_TBI) { error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, command); } else { mii = device_get_softc(sc->bge_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(bge_poll, ifp); if (error) return (error); BGE_LOCK(sc); BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); if_setcapenablebit(ifp, IFCAP_POLLING, 0); BGE_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ BGE_LOCK(sc); BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); if_setcapenablebit(ifp, 0, IFCAP_POLLING); BGE_UNLOCK(sc); } } #endif if ((mask & IFCAP_TXCSUM) != 0 && (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) { if_togglecapenable(ifp, IFCAP_TXCSUM); if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) if_sethwassistbits(ifp, sc->bge_csum_features, 0); else if_sethwassistbits(ifp, 0, sc->bge_csum_features); } if ((mask & IFCAP_RXCSUM) != 0 && (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) if_togglecapenable(ifp, IFCAP_RXCSUM); if ((mask & IFCAP_TSO4) != 0 && (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) { if_togglecapenable(ifp, IFCAP_TSO4); if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0) if_sethwassistbits(ifp, CSUM_TSO, 0); else if_sethwassistbits(ifp, 0, CSUM_TSO); } if (mask & IFCAP_VLAN_MTU) { if_togglecapenable(ifp, IFCAP_VLAN_MTU); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); bge_init(sc); } if ((mask & IFCAP_VLAN_HWTSO) != 0 && (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0) if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO); BGE_LOCK(sc); bge_setvlan(sc); BGE_UNLOCK(sc); } #ifdef VLAN_CAPABILITIES if_vlancap(ifp); #endif break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void bge_watchdog(struct bge_softc *sc) { if_t ifp; uint32_t status; BGE_LOCK_ASSERT(sc); if (sc->bge_timer == 0 || --sc->bge_timer) return; /* If pause frames are active then don't reset the hardware. */ if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) { status = CSR_READ_4(sc, BGE_RX_STS); if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) { /* * If link partner has us in XOFF state then wait for * the condition to clear. */ CSR_WRITE_4(sc, BGE_RX_STS, status); sc->bge_timer = BGE_TX_TIMEOUT; return; } else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 && (status & BGE_RXSTAT_RCVD_XON) != 0) { /* * If link partner has us in XOFF state then wait for * the condition to clear. 
*/ CSR_WRITE_4(sc, BGE_RX_STS, status); sc->bge_timer = BGE_TX_TIMEOUT; return; } /* * Any other condition is unexpected and the controller * should be reset. */ } ifp = sc->bge_ifp; if_printf(ifp, "watchdog timeout -- resetting\n"); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); bge_init_locked(sc); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } static void bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit) { int i; BGE_CLRBIT(sc, reg, bit); for (i = 0; i < BGE_TIMEOUT; i++) { if ((CSR_READ_4(sc, reg) & bit) == 0) return; DELAY(100); } } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void bge_stop(struct bge_softc *sc) { if_t ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; callout_stop(&sc->bge_stat_ch); /* Disable host interrupts. */ BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); /* * Tell firmware we're shutting down. */ bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); /* * Disable all of the receiver blocks. */ bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); if (BGE_IS_5700_FAMILY(sc)) bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); /* * Disable all of the transmit blocks. */ bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); if (BGE_IS_5700_FAMILY(sc)) bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); /* * Shut down all of the memory managers and related * state machines. */ bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); if (BGE_IS_5700_FAMILY(sc)) bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); if (!(BGE_IS_5705_PLUS(sc))) { BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); } /* Update MAC statistics. */ if (BGE_IS_5705_PLUS(sc)) bge_stats_update_regs(sc); bge_reset(sc); bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); /* * Keep the ASF firmware running if up. */ if (sc->bge_asf_mode & ASF_STACKUP) BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); else BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); /* Free the RX lists. */ bge_free_rx_ring_std(sc); /* Free jumbo RX list. */ if (BGE_IS_JUMBO_CAPABLE(sc)) bge_free_rx_ring_jumbo(sc); /* Free TX buffers. */ bge_free_tx_ring(sc); sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; /* Clear MAC's link state (PHY may still have link UP). */ if (bootverbose && sc->bge_link) if_printf(sc->bge_ifp, "link DOWN\n"); sc->bge_link = 0; if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
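The countdown at the top of bge_watchdog() above compresses three cases into one expression: timer disarmed (already zero), still ticking (the pre-decrement leaves a nonzero value), and just expired (the decrement reaches zero). A standalone check of that idiom:

#include <assert.h>

/* Returns 1 only on the tick where the countdown reaches zero. */
static int
watchdog_tick(int *timer)
{
	if (*timer == 0 || --*timer)
		return (0);	/* disarmed, or still counting down */
	return (1);		/* expired on this tick */
}

int
main(void)
{
	int t = 2;

	assert(watchdog_tick(&t) == 0 && t == 1);
	assert(watchdog_tick(&t) == 1 && t == 0);
	assert(watchdog_tick(&t) == 0);	/* stays quiet once disarmed */
	return (0);
}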
*/ static int bge_shutdown(device_t dev) { struct bge_softc *sc; sc = device_get_softc(dev); BGE_LOCK(sc); bge_stop(sc); BGE_UNLOCK(sc); return (0); } static int bge_suspend(device_t dev) { struct bge_softc *sc; sc = device_get_softc(dev); BGE_LOCK(sc); bge_stop(sc); BGE_UNLOCK(sc); return (0); } static int bge_resume(device_t dev) { struct bge_softc *sc; if_t ifp; sc = device_get_softc(dev); BGE_LOCK(sc); ifp = sc->bge_ifp; if (if_getflags(ifp) & IFF_UP) { bge_init_locked(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) bge_start_locked(ifp); } BGE_UNLOCK(sc); return (0); } static void bge_link_upd(struct bge_softc *sc) { struct mii_data *mii; uint32_t link, status; BGE_LOCK_ASSERT(sc); /* Clear 'pending link event' flag. */ sc->bge_link_evt = 0; /* * Process link state changes. * Grrr. The link status word in the status block does * not work correctly on the BCM5700 rev AX and BX chips, * according to all available information. Hence, we have * to enable MII interrupts in order to properly obtain * async link changes. Unfortunately, this also means that * we have to read the MAC status register to detect link * changes, thereby adding an additional register access to * the interrupt handler. * * XXX: perhaps link state detection procedure used for * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { status = CSR_READ_4(sc, BGE_MAC_STS); if (status & BGE_MACSTAT_MI_INTERRUPT) { mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->bge_link++; if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); } else if (sc->bge_link && (!(mii->mii_media_status & IFM_ACTIVE) || IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); } /* Clear the interrupt. */ CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr, BRGPHY_MII_ISR); bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, BRGPHY_MII_IMR, BRGPHY_INTRS); } return; } if (sc->bge_flags & BGE_FLAG_TBI) { status = CSR_READ_4(sc, BGE_MAC_STS); if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { if (!sc->bge_link) { sc->bge_link++; if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_TBI_SEND_CFGS); DELAY(40); } CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); if_link_state_change(sc->bge_ifp, LINK_STATE_UP); } } else if (sc->bge_link) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN); } } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { /* * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit * in status word always set. Workaround this bug by reading * PHY link status directly. */ link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 
1 : 0; if (link != sc->bge_link || sc->bge_asicrev == BGE_ASICREV_BCM5700) { mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->bge_link++; if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); } else if (sc->bge_link && (!(mii->mii_media_status & IFM_ACTIVE) || IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); } } } else { /* * For controllers that call mii_tick, we have to poll * link status. */ mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); bge_miibus_statchg(sc->bge_dev); } /* Disable MAC attention when link is up. */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); } static void bge_add_sysctls(struct bge_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; int unit; ctx = device_get_sysctl_ctx(sc->bge_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev)); #ifdef BGE_REGISTER_DEBUG SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I", "Debug Information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I", "MAC Register Read"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ape_read", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_ape_read, "I", "APE Register Read"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I", "Memory Read"); #endif unit = device_get_unit(sc->bge_dev); /* * A common design characteristic for many Broadcom client controllers * is that they only support a single outstanding DMA read operation * on the PCIe bus. This means that it will take twice as long to fetch * a TX frame that is split into header and payload buffers as it does * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For * these controllers, coalescing buffers to reduce the number of memory * reads is an effective way to get maximum performance (about 940Mbps). * Without collapsing TX buffers the maximum TCP bulk transfer * performance is about 850Mbps. However, forcibly coalescing mbufs * consumes a lot of CPU cycles, so leave it off by default. */ sc->bge_forced_collapse = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse", CTLFLAG_RWTUN, &sc->bge_forced_collapse, 0, "Number of fragmented TX buffers of a frame allowed before " "forced collapsing"); sc->bge_msi = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi", CTLFLAG_RDTUN, &sc->bge_msi, 0, "Enable MSI"); /* * It seems all Broadcom controllers have a bug that can generate UDP * datagrams with checksum value 0 when TX UDP checksum offloading is * enabled. Generating a UDP checksum value of 0 is an RFC 768 * violation. Even though the probability of generating such UDP * datagrams is low, I don't want FreeBSD boxes injecting such * datagrams into the network, so disable UDP checksum offloading by * default. Users can still override this behavior by setting the * sysctl variable dev.bge.0.forced_udpcsum.
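For reference, the RFC 768 rule the comment above appeals to: a UDP checksum that computes to zero must be transmitted as 0xFFFF, because an all-zero checksum field means "no checksum". Software stacks apply the fixup like this; a sketch of the rule, not driver code:

#include <assert.h>
#include <stdint.h>

/* Finalize a computed UDP checksum per RFC 768. */
static uint16_t
udp_csum_finalize(uint16_t csum)
{
	return (csum == 0 ? 0xFFFF : csum);	/* 0 is reserved for "none" */
}

int
main(void)
{
	assert(udp_csum_finalize(0) == 0xFFFF);
	assert(udp_csum_finalize(0x1234) == 0x1234);
	return (0);
}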
*/ sc->bge_forced_udpcsum = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum", CTLFLAG_RWTUN, &sc->bge_forced_udpcsum, 0, "Enable UDP checksum offloading even if controller can " "generate UDP checksum value 0"); if (BGE_IS_5705_PLUS(sc)) bge_add_sysctl_stats_regs(sc, ctx, children); else bge_add_sysctl_stats(sc, ctx, children); } #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \ SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \ sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \ desc) static void bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent) { struct sysctl_oid *tree; struct sysctl_oid_list *children, *schildren; tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD, NULL, "BGE Statistics"); schildren = children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters", children, COSFramesDroppedDueToFilters, "FramesDroppedDueToFilters"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full", children, nicDmaWriteQueueFull, "DmaWriteQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full", children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors", children, nicNoMoreRxBDs, "NoMoreRxBDs"); BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames", children, ifInDiscards, "InputDiscards"); BGE_SYSCTL_STAT(sc, ctx, "Input Errors", children, ifInErrors, "InputErrors"); BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit", children, nicRecvThresholdHit, "RecvThresholdHit"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full", children, nicDmaReadQueueFull, "DmaReadQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full", children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full", children, nicSendDataCompQueueFull, "SendDataCompQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index", children, nicRingSetSendProdIndex, "RingSetSendProdIndex"); BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update", children, nicRingStatusUpdate, "RingStatusUpdate"); BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts", children, nicInterrupts, "Interrupts"); BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts", children, nicAvoidedInterrupts, "AvoidedInterrupts"); BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit", children, nicSendThresholdHit, "SendThresholdHit"); tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD, NULL, "BGE RX Statistics"); children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets", children, rxstats.ifHCInOctets, "ifHCInOctets"); BGE_SYSCTL_STAT(sc, ctx, "Fragments", children, rxstats.etherStatsFragments, "Fragments"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets", children, rxstats.ifHCInUcastPkts, "UnicastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets", children, rxstats.ifHCInMulticastPkts, "MulticastPkts"); BGE_SYSCTL_STAT(sc, ctx, "FCS Errors", children, rxstats.dot3StatsFCSErrors, "FCSErrors"); BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors", children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors"); BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received", children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received", children, rxstats.xoffPauseFramesReceived, "xoffPauseFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received", children, 
rxstats.macControlFramesReceived, "ControlFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered", children, rxstats.xoffStateEntered, "xoffStateEntered"); BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long", children, rxstats.dot3StatsFramesTooLong, "FramesTooLong"); BGE_SYSCTL_STAT(sc, ctx, "Jabbers", children, rxstats.etherStatsJabbers, "Jabbers"); BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets", children, rxstats.etherStatsUndersizePkts, "UndersizePkts"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors", children, rxstats.inRangeLengthError, "inRangeLengthError"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors", children, rxstats.outRangeLengthError, "outRangeLengthError"); tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD, NULL, "BGE TX Statistics"); children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets", children, txstats.ifHCOutOctets, "ifHCOutOctets"); BGE_SYSCTL_STAT(sc, ctx, "TX Collisions", children, txstats.etherStatsCollisions, "Collisions"); BGE_SYSCTL_STAT(sc, ctx, "XON Sent", children, txstats.outXonSent, "XonSent"); BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent", children, txstats.outXoffSent, "XoffSent"); BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done", children, txstats.flowControlDone, "flowControlDone"); BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors", children, txstats.dot3StatsInternalMacTransmitErrors, "InternalMacTransmitErrors"); BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames", children, txstats.dot3StatsSingleCollisionFrames, "SingleCollisionFrames"); BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames", children, txstats.dot3StatsMultipleCollisionFrames, "MultipleCollisionFrames"); BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions", children, txstats.dot3StatsDeferredTransmissions, "DeferredTransmissions"); BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions", children, txstats.dot3StatsExcessiveCollisions, "ExcessiveCollisions"); BGE_SYSCTL_STAT(sc, ctx, "Late Collisions", children, txstats.dot3StatsLateCollisions, "LateCollisions"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets", children, txstats.ifHCOutUcastPkts, "UnicastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets", children, txstats.ifHCOutMulticastPkts, "MulticastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets", children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors", children, txstats.dot3StatsCarrierSenseErrors, "CarrierSenseErrors"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards", children, txstats.ifOutDiscards, "Discards"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors", children, txstats.ifOutErrors, "Errors"); } #undef BGE_SYSCTL_STAT #define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) static void bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent) { struct sysctl_oid *tree; struct sysctl_oid_list *child, *schild; struct bge_mac_stats *stats; stats = &sc->bge_mac_stats; tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD, NULL, "BGE Statistics"); schild = child = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters", &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters"); BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull", &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full"); BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull", &stats->DmaWriteHighPriQueueFull, "NIC DMA Write High Priority Queue Full"); 
BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs", &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards", &stats->InputDiscards, "Discarded Input Frames"); BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors", &stats->InputErrors, "Input Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit", &stats->RecvThresholdHit, "NIC Recv Threshold Hit"); tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD, NULL, "BGE RX Statistics"); child = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets", &stats->ifHCInOctets, "Inbound Octets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments", &stats->etherStatsFragments, "Fragments"); BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts", &stats->ifHCInUcastPkts, "Inbound Unicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts", &stats->ifHCInMulticastPkts, "Inbound Multicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts", &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors", &stats->dot3StatsFCSErrors, "FCS Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors", &stats->dot3StatsAlignmentErrors, "Alignment Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived", &stats->xonPauseFramesReceived, "XON Pause Frames Received"); BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived", &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received"); BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived", &stats->macControlFramesReceived, "MAC Control Frames Received"); BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered", &stats->xoffStateEntered, "XOFF State Entered"); BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong", &stats->dot3StatsFramesTooLong, "Frames Too Long"); BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers", &stats->etherStatsJabbers, "Jabbers"); BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts", &stats->etherStatsUndersizePkts, "Undersized Packets"); tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD, NULL, "BGE TX Statistics"); child = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets", &stats->ifHCOutOctets, "Outbound Octets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions", &stats->etherStatsCollisions, "TX Collisions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent", &stats->outXonSent, "XON Sent"); BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent", &stats->outXoffSent, "XOFF Sent"); BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors", &stats->dot3StatsInternalMacTransmitErrors, "Internal MAC TX Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames", &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames"); BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames", &stats->dot3StatsMultipleCollisionFrames, "Multiple Collision Frames"); BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions", &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions", &stats->dot3StatsExcessiveCollisions, "Excessive Collisions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions", &stats->dot3StatsLateCollisions, "Late Collisions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts", &stats->ifHCOutUcastPkts, "Outbound Unicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts", &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts", &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets"); } #undef 
BGE_SYSCTL_STAT_ADD64 static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; uint32_t result; int offset; sc = (struct bge_softc *)arg1; offset = arg2; result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset + offsetof(bge_hostaddr, bge_addr_lo)); return (sysctl_handle_int(oidp, &result, 0, req)); } #ifdef BGE_REGISTER_DEBUG static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; uint16_t *sbdata; int error, result, sbsz; int i, j; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result == 1) { sc = (struct bge_softc *)arg1; if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_C0) sbsz = BGE_STATUS_BLK_SZ; else sbsz = 32; sbdata = (uint16_t *)sc->bge_ldata.bge_status_block; printf("Status Block:\n"); BGE_LOCK(sc); bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (i = 0x0; i < sbsz / sizeof(uint16_t); ) { printf("%06x:", i); for (j = 0; j < 8; j++) printf(" %04x", sbdata[i++]); printf("\n"); } printf("Registers:\n"); for (i = 0x800; i < 0xA00; ) { printf("%06x:", i); for (j = 0; j < 8; j++) { printf(" %08x", CSR_READ_4(sc, i)); i += 4; } printf("\n"); } BGE_UNLOCK(sc); printf("Hardware Flags:\n"); if (BGE_IS_5717_PLUS(sc)) printf(" - 5717 Plus\n"); if (BGE_IS_5755_PLUS(sc)) printf(" - 5755 Plus\n"); if (BGE_IS_575X_PLUS(sc)) printf(" - 575X Plus\n"); if (BGE_IS_5705_PLUS(sc)) printf(" - 5705 Plus\n"); if (BGE_IS_5714_FAMILY(sc)) printf(" - 5714 Family\n"); if (BGE_IS_5700_FAMILY(sc)) printf(" - 5700 Family\n"); if (sc->bge_flags & BGE_FLAG_JUMBO) printf(" - Supports Jumbo Frames\n"); if (sc->bge_flags & BGE_FLAG_PCIX) printf(" - PCI-X Bus\n"); if (sc->bge_flags & BGE_FLAG_PCIE) printf(" - PCI Express Bus\n"); if (sc->bge_phy_flags & BGE_PHY_NO_3LED) printf(" - No 3 LEDs\n"); if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) printf(" - RX Alignment Bug\n"); } return (error); } static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = CSR_READ_4(sc, result); printf("reg 0x%06X = 0x%08X\n", result, val); } return (error); } static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = APE_READ_4(sc, result); printf("reg 0x%06X = 0x%08X\n", result, val); } return (error); } static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = bge_readmem_ind(sc, result); printf("mem 0x%06X = 0x%08X\n", result, val); } return (error); } #endif static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) { if (sc->bge_flags & BGE_FLAG_EADDR) return (1); #ifdef __sparc64__ OF_getetheraddr(sc->bge_dev, ether_addr); return (0); #endif return (1); } static int bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) { uint32_t mac_addr; mac_addr = bge_readmem_ind(sc, 
BGE_SRAM_MAC_ADDR_HIGH_MB); if ((mac_addr >> 16) == 0x484b) { ether_addr[0] = (uint8_t)(mac_addr >> 8); ether_addr[1] = (uint8_t)mac_addr; mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB); ether_addr[2] = (uint8_t)(mac_addr >> 24); ether_addr[3] = (uint8_t)(mac_addr >> 16); ether_addr[4] = (uint8_t)(mac_addr >> 8); ether_addr[5] = (uint8_t)mac_addr; return (0); } return (1); } static int bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) { int mac_offset = BGE_EE_MAC_OFFSET; if (sc->bge_asicrev == BGE_ASICREV_BCM5906) mac_offset = BGE_EE_MAC_OFFSET_5906; return (bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN)); } static int bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) { if (sc->bge_asicrev == BGE_ASICREV_BCM5906) return (1); return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)); } static int bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) { static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { /* NOTE: Order is critical */ bge_get_eaddr_fw, bge_get_eaddr_mem, bge_get_eaddr_nvram, bge_get_eaddr_eeprom, NULL }; const bge_eaddr_fcn_t *func; for (func = bge_eaddr_funcs; *func != NULL; ++func) { if ((*func)(sc, eaddr) == 0) break; } return (*func == NULL ? ENXIO : 0); } static uint64_t bge_get_counter(if_t ifp, ift_counter cnt) { struct bge_softc *sc; struct bge_mac_stats *stats; sc = if_getsoftc(ifp); if (!BGE_IS_5705_PLUS(sc)) return (if_get_counter_default(ifp, cnt)); stats = &sc->bge_mac_stats; switch (cnt) { case IFCOUNTER_IERRORS: return (stats->NoMoreRxBDs + stats->InputDiscards + stats->InputErrors); case IFCOUNTER_COLLISIONS: return (stats->etherStatsCollisions); default: return (if_get_counter_default(ifp, cnt)); } } Index: head/sys/dev/ce/if_ce.c =================================================================== --- head/sys/dev/ce/if_ce.c (revision 276749) +++ head/sys/dev/ce/if_ce.c (revision 276750) @@ -1,2649 +1,2648 @@ /* * Cronyx-Tau32-PCI adapter driver for FreeBSD. * * Copyright (C) 2003-2005 Cronyx Engineering. * Copyright (C) 2003-2005 Kurakin Roman, * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Authors grant any other persons or organisations a permission to use, * modify and redistribute this software in source and binary forms, * as long as this message is kept with the software, all derivative * works or modified versions. 
* * $Cronyx: if_ce.c,v 1.9.2.8 2005/11/21 14:17:44 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #if __FreeBSD_version >= 500000 # define NPCI 1 #else # include "pci.h" #endif #if NPCI > 0 #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 504000 #include #endif #include #include #include #include #include #include #if __FreeBSD_version > 501000 # include # include #else # include # include #endif #include #include #include "opt_ng_cronyx.h" #ifdef NETGRAPH_CRONYX # include "opt_netgraph.h" # ifndef NETGRAPH # error #option NETGRAPH missed from configuration # endif # include # include # include #else # include # include # define PP_CISCO IFF_LINK2 # include #endif #include #include #include #include #include /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR #define PP_FR 0 #endif #ifndef IFP2SP #define IFP2SP(ifp) ((struct sppp*)ifp) #endif #ifndef SP2IFP #define SP2IFP(sp) ((struct ifnet*)sp) #endif #ifndef PCIR_BAR #define PCIR_BAR(x) (PCIR_MAPS + (x) * 4) #endif /* define as our previous return value */ #ifndef BUS_PROBE_DEFAULT #define BUS_PROBE_DEFAULT 0 #endif #define CE_DEBUG(d,s) ({if (d->chan->debug) {\ printf ("%s: ", d->name); printf s;}}) #define CE_DEBUG2(d,s) ({if (d->chan->debug>1) {\ printf ("%s: ", d->name); printf s;}}) #ifndef CALLOUT_MPSAFE #define CALLOUT_MPSAFE 0 #endif #ifndef IF_DRAIN #define IF_DRAIN(ifq) do { \ struct mbuf *m; \ for (;;) { \ IF_DEQUEUE(ifq, m); \ if (m == NULL) \ break; \ m_freem(m); \ } \ } while (0) #endif #ifndef _IF_QLEN #define _IF_QLEN(ifq) ((ifq)->ifq_len) #endif #ifndef callout_drain #define callout_drain callout_stop #endif #define CE_LOCK_NAME "ceX" #define CE_LOCK(_bd) mtx_lock (&(_bd)->ce_mtx) #define CE_UNLOCK(_bd) mtx_unlock (&(_bd)->ce_mtx) #define CE_LOCK_ASSERT(_bd) mtx_assert (&(_bd)->ce_mtx, MA_OWNED) #define CDEV_MAJOR 185 static int ce_probe __P((device_t)); static int ce_attach __P((device_t)); static int ce_detach __P((device_t)); static device_method_t ce_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ce_probe), DEVMETHOD(device_attach, ce_attach), DEVMETHOD(device_detach, ce_detach), DEVMETHOD_END }; typedef struct _ce_dma_mem_t { unsigned long phys; void *virt; size_t size; #if __FreeBSD_version >= 500000 bus_dma_tag_t dmat; bus_dmamap_t mapp; #endif } ce_dma_mem_t; typedef struct _drv_t { char name [8]; int running; ce_board_t *board; ce_chan_t *chan; struct ifqueue rqueue; #ifdef NETGRAPH char nodename [NG_NODESIZE]; hook_p hook; hook_p debug_hook; node_p node; struct ifqueue queue; struct ifqueue hi_queue; #else struct ifnet *ifp; #endif short timeout; struct callout timeout_handle; #if __FreeBSD_version >= 500000 struct cdev *devt; #else /* __FreeBSD_version < 500000 */ dev_t devt; #endif ce_dma_mem_t dmamem; } drv_t; typedef struct _bdrv_t { ce_board_t *board; struct resource *ce_res; struct resource *ce_irq; void *ce_intrhand; ce_dma_mem_t dmamem; drv_t channel [NCHAN]; #if __FreeBSD_version >= 504000 struct mtx ce_mtx; #endif } bdrv_t; static driver_t ce_driver = { "ce", ce_methods, sizeof(bdrv_t), }; static devclass_t ce_devclass; static void ce_receive (ce_chan_t *c, unsigned char *data, int len); static void ce_transmit (ce_chan_t *c, void *attachment, int len); static void ce_error (ce_chan_t *c, int data); static void ce_up (drv_t *d); static void ce_start (drv_t *d); static void ce_down (drv_t *d); static void ce_watchdog (drv_t *d); static void ce_watchdog_timer (void 
*arg); #ifdef NETGRAPH extern struct ng_type typestruct; #else static void ce_ifstart (struct ifnet *ifp); static void ce_tlf (struct sppp *sp); static void ce_tls (struct sppp *sp); static int ce_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data); static void ce_initialize (void *softc); #endif static ce_board_t *adapter [NBRD]; static drv_t *channel [NBRD*NCHAN]; static struct callout led_timo [NBRD]; static struct callout timeout_handle; static int ce_destroy = 0; #if __FreeBSD_version < 500000 static int ce_open (dev_t dev, int oflags, int devtype, struct proc *p); static int ce_close (dev_t dev, int fflag, int devtype, struct proc *p); static int ce_ioctl (dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p); #else static int ce_open (struct cdev *dev, int oflags, int devtype, struct thread *td); static int ce_close (struct cdev *dev, int fflag, int devtype, struct thread *td); static int ce_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td); #endif #if __FreeBSD_version < 500000 static struct cdevsw ce_cdevsw = { ce_open, ce_close, noread, nowrite, ce_ioctl, nopoll, nommap, nostrategy, "ce", CDEV_MAJOR, nodump, nopsize, D_NAGGED, -1 }; #elif __FreeBSD_version == 500000 static struct cdevsw ce_cdevsw = { ce_open, ce_close, noread, nowrite, ce_ioctl, nopoll, nommap, nostrategy, "ce", CDEV_MAJOR, nodump, nopsize, D_NAGGED, }; #elif __FreeBSD_version <= 501000 static struct cdevsw ce_cdevsw = { .d_open = ce_open, .d_close = ce_close, .d_read = noread, .d_write = nowrite, .d_ioctl = ce_ioctl, .d_poll = nopoll, .d_mmap = nommap, .d_strategy = nostrategy, .d_name = "ce", .d_maj = CDEV_MAJOR, .d_dump = nodump, .d_flags = D_NAGGED, }; #elif __FreeBSD_version < 502103 static struct cdevsw ce_cdevsw = { .d_open = ce_open, .d_close = ce_close, .d_ioctl = ce_ioctl, .d_name = "ce", .d_maj = CDEV_MAJOR, .d_flags = D_NAGGED, }; #elif __FreeBSD_version < 600000 static struct cdevsw ce_cdevsw = { .d_version = D_VERSION, .d_open = ce_open, .d_close = ce_close, .d_ioctl = ce_ioctl, .d_name = "ce", .d_maj = CDEV_MAJOR, .d_flags = D_NEEDGIANT, }; #else /* __FreeBSD_version >= 600000 */ static struct cdevsw ce_cdevsw = { .d_version = D_VERSION, .d_open = ce_open, .d_close = ce_close, .d_ioctl = ce_ioctl, .d_name = "ce", }; #endif /* * Make an mbuf from data. */ static struct mbuf *makembuf (void *buf, unsigned len) { struct mbuf *m; MGETHDR (m, M_NOWAIT, MT_DATA); if (! m) return 0; - MCLGET (m, M_NOWAIT); - if (! 
(m->m_flags & M_EXT)) { + if (!(MCLGET(m, M_NOWAIT))) { m_freem (m); return 0; } m->m_pkthdr.len = m->m_len = len; bcopy (buf, mtod (m, caddr_t), len); return m; } static int ce_probe (device_t dev) { if ((pci_get_vendor (dev) == TAU32_PCI_VENDOR_ID) && (pci_get_device (dev) == TAU32_PCI_DEVICE_ID)) { device_set_desc (dev, "Cronyx-Tau32-PCI serial adapter"); return BUS_PROBE_DEFAULT; } return ENXIO; } static void ce_timeout (void *arg) { drv_t *d; int s, i, k; for (i = 0; i < NBRD; ++i) { if (adapter[i] == NULL) continue; for (k = 0; k < NCHAN; ++k) { s = splimp (); if (ce_destroy) { splx (s); return; } d = channel[i * NCHAN + k]; if (!d) { splx (s); continue; } CE_LOCK ((bdrv_t *)d->board->sys); switch (d->chan->type) { case T_E1: ce_e1_timer (d->chan); break; default: break; } CE_UNLOCK ((bdrv_t *)d->board->sys); splx (s); } } s = splimp (); if (!ce_destroy) callout_reset (&timeout_handle, hz, ce_timeout, 0); splx (s); } static void ce_led_off (void *arg) { ce_board_t *b = arg; bdrv_t *bd = (bdrv_t *) b->sys; int s; s = splimp (); if (ce_destroy) { splx (s); return; } CE_LOCK (bd); TAU32_LedSet (b->ddk.pControllerObject, 0); CE_UNLOCK (bd); splx (s); } static void ce_intr (void *arg) { bdrv_t *bd = arg; ce_board_t *b = bd->board; int s; int i; #if __FreeBSD_version >= 500000 && defined NETGRAPH int error; #endif s = splimp (); if (ce_destroy) { splx (s); return; } CE_LOCK (bd); /* Turn LED on. */ TAU32_LedSet (b->ddk.pControllerObject, 1); TAU32_HandleInterrupt (b->ddk.pControllerObject); /* Turn LED off 50 msec later. */ callout_reset (&led_timo[b->num], hz/20, ce_led_off, b); CE_UNLOCK (bd); splx (s); /* Pass packets in a lock-free state */ for (i = 0; i < NCHAN && b->chan[i].type; i++) { drv_t *d = b->chan[i].sys; struct mbuf *m; if (!d || !d->running) continue; while (_IF_QLEN(&d->rqueue)) { IF_DEQUEUE (&d->rqueue,m); if (!m) continue; #ifdef NETGRAPH if (d->hook) { #if __FreeBSD_version >= 500000 NG_SEND_DATA_ONLY (error, d->hook, m); #else ng_queue_data (d->hook, m, 0); #endif } else { IF_DRAIN (&d->rqueue); } #else sppp_input (d->ifp, m); #endif } } } #if __FreeBSD_version >= 500000 static void ce_bus_dmamap_addr (void *arg, bus_dma_segment_t *segs, int nseg, int error) { unsigned long *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } #ifndef BUS_DMA_ZERO #define BUS_DMA_ZERO 0 #endif static int ce_bus_dma_mem_alloc (int bnum, int cnum, ce_dma_mem_t *dmem) { int error; error = bus_dma_tag_create (NULL, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, dmem->size, 1, dmem->size, 0, #if __FreeBSD_version >= 502000 NULL, NULL, #endif &dmem->dmat); if (error) { if (cnum >= 0) printf ("ce%d-%d: ", bnum, cnum); else printf ("ce%d: ", bnum); printf ("couldn't allocate tag for dma memory\n"); return 0; } error = bus_dmamem_alloc (dmem->dmat, (void **)&dmem->virt, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dmem->mapp); if (error) { if (cnum >= 0) printf ("ce%d-%d: ", bnum, cnum); else printf ("ce%d: ", bnum); printf ("couldn't allocate mem for dma memory\n"); bus_dma_tag_destroy (dmem->dmat); return 0; } error = bus_dmamap_load (dmem->dmat, dmem->mapp, dmem->virt, dmem->size, ce_bus_dmamap_addr, &dmem->phys, 0); if (error) { if (cnum >= 0) printf ("ce%d-%d: ", bnum, cnum); else printf ("ce%d: ", bnum); printf ("couldn't load mem map for dma memory\n"); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); return 0; } #if __FreeBSD_version >= 502000 bzero (dmem->virt, 
dmem->size); #endif return 1; } static void ce_bus_dma_mem_free (ce_dma_mem_t *dmem) { bus_dmamap_unload (dmem->dmat, dmem->mapp); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); } #else static int ce_bus_dma_mem_alloc (int bnum, int cnum, ce_dma_mem_t *dmem) { dmem->virt = contigmalloc (dmem->size, M_DEVBUF, M_WAITOK, 0x100000, 0xffffffff, 16, 0); if (dmem->virt == NULL) { if (cnum >= 0) printf ("ce%d-%d: ", bnum, cnum); else printf ("ce%d: ", bnum); printf ("couldn't allocate dma memory\n"); return 0; } dmem->phys = vtophys (dmem->virt); bzero (dmem->virt, dmem->size); return 1; } static void ce_bus_dma_mem_free (ce_dma_mem_t *dmem) { contigfree (dmem->virt, dmem->size, M_DEVBUF); } #endif /* * Called if the probe succeeded. */ static int ce_attach (device_t dev) { bdrv_t *bd = device_get_softc (dev); int unit = device_get_unit (dev); #if __FreeBSD_version >= 504000 char *ce_ln = CE_LOCK_NAME; #endif vm_offset_t vbase; int rid, error; ce_board_t *b; ce_chan_t *c; drv_t *d; int s; b = malloc (sizeof(ce_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("ce%d: couldn't allocate memory\n", unit); return (ENXIO); } bzero (b, sizeof(ce_board_t)); b->ddk.sys = &b; #if __FreeBSD_version >= 440000 pci_enable_busmaster (dev); #endif bd->dmamem.size = TAU32_ControllerObjectSize; if (! ce_bus_dma_mem_alloc (unit, -1, &bd->dmamem)) { free (b, M_DEVBUF); return (ENXIO); } b->ddk.pControllerObject = bd->dmamem.virt; bd->board = b; b->sys = bd; rid = PCIR_BAR(0); bd->ce_res = bus_alloc_resource (dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (! bd->ce_res) { printf ("ce%d: cannot map memory\n", unit); ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); return (ENXIO); } vbase = (vm_offset_t) rman_get_virtual (bd->ce_res); b->ddk.PciBar1VirtualAddress = (void *)vbase; b->ddk.ControllerObjectPhysicalAddress = bd->dmamem.phys; b->ddk.pErrorNotifyCallback = ce_error_callback; b->ddk.pStatusNotifyCallback = ce_status_callback; b->num = unit; TAU32_BeforeReset(&b->ddk); pci_write_config (dev, TAU32_PCI_RESET_ADDRESS, TAU32_PCI_RESET_ON, 4); pci_write_config (dev, TAU32_PCI_RESET_ADDRESS, TAU32_PCI_RESET_OFF, 4); if(!TAU32_Initialize(&b->ddk, 0)) { printf ("ce%d: init adapter error 0x%08x, bus dead bits 0x%08lx\n", unit, b->ddk.InitErrors, b->ddk.DeadBits); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->ce_res); ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); return (ENXIO); } s = splimp (); ce_init_board (b); rid = 0; bd->ce_irq = bus_alloc_resource (dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (! 
bd->ce_irq) { printf ("ce%d: cannot map interrupt\n", unit); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->ce_res); ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); splx (s); return (ENXIO); } #if __FreeBSD_version >= 500000 callout_init (&led_timo[unit], CALLOUT_MPSAFE); #else callout_init (&led_timo[unit]); #endif error = bus_setup_intr (dev, bd->ce_irq, #if __FreeBSD_version >= 500013 INTR_TYPE_NET|INTR_MPSAFE, #else INTR_TYPE_NET, #endif NULL, ce_intr, bd, &bd->ce_intrhand); if (error) { printf ("ce%d: cannot set up irq\n", unit); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->ce_irq); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->ce_res); ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); splx (s); return (ENXIO); } switch (b->ddk.Model) { case 1: strcpy (b->name, TAU32_BASE_NAME); break; case 2: strcpy (b->name, TAU32_LITE_NAME); break; case 3: strcpy (b->name, TAU32_ADPCM_NAME); break; default: strcpy (b->name, TAU32_UNKNOWN_NAME); break; } printf ("ce%d: %s\n", unit, b->name); for (c = b->chan; c < b->chan + NCHAN; ++c) { c->num = (c - b->chan); c->board = b; d = &bd->channel[c->num]; d->dmamem.size = sizeof(ce_buf_t); if (! ce_bus_dma_mem_alloc (unit, c->num, &d->dmamem)) continue; channel [b->num * NCHAN + c->num] = d; sprintf (d->name, "ce%d.%d", b->num, c->num); d->board = b; d->chan = c; c->sys = d; } for (c = b->chan; c < b->chan + NCHAN; ++c) { if (c->sys == NULL) continue; d = c->sys; callout_init (&d->timeout_handle, CALLOUT_MPSAFE); #ifdef NETGRAPH if (ng_make_node_common (&typestruct, &d->node) != 0) { printf ("%s: cannot make common node\n", d->name); d->node = NULL; continue; } #if __FreeBSD_version >= 500000 NG_NODE_SET_PRIVATE (d->node, d); #else d->node->private = d; #endif sprintf (d->nodename, "%s%d", NG_CE_NODE_TYPE, c->board->num * NCHAN + c->num); if (ng_name_node (d->node, d->nodename)) { printf ("%s: cannot name node\n", d->nodename); #if __FreeBSD_version >= 500000 NG_NODE_UNREF (d->node); #else ng_rmnode (d->node); ng_unref (d->node); #endif continue; } d->queue.ifq_maxlen = ifqmaxlen; d->hi_queue.ifq_maxlen = ifqmaxlen; d->rqueue.ifq_maxlen = ifqmaxlen; #if __FreeBSD_version >= 500000 mtx_init (&d->queue.ifq_mtx, "ce_queue", NULL, MTX_DEF); mtx_init (&d->hi_queue.ifq_mtx, "ce_queue_hi", NULL, MTX_DEF); mtx_init (&d->rqueue.ifq_mtx, "ce_rqueue", NULL, MTX_DEF); #endif #else /*NETGRAPH*/ #if __FreeBSD_version >= 600031 d->ifp = if_alloc(IFT_PPP); #else d->ifp = malloc (sizeof(struct sppp), M_DEVBUF, M_WAITOK); bzero (d->ifp, sizeof(struct sppp)); #endif if (!d->ifp) { printf ("%s: cannot if_alloc() interface\n", d->name); continue; } d->ifp->if_softc = d; #if __FreeBSD_version > 501000 if_initname (d->ifp, "ce", b->num * NCHAN + c->num); #else d->ifp->if_unit = b->num * NCHAN + c->num; d->ifp->if_name = "ce"; #endif d->ifp->if_mtu = PP_MTU; d->ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; d->ifp->if_ioctl = ce_sioctl; d->ifp->if_start = ce_ifstart; d->ifp->if_init = ce_initialize; d->rqueue.ifq_maxlen = ifqmaxlen; #if __FreeBSD_version >= 500000 mtx_init (&d->rqueue.ifq_mtx, "ce_rqueue", NULL, MTX_DEF); #endif sppp_attach (d->ifp); if_attach (d->ifp); IFP2SP(d->ifp)->pp_tlf = ce_tlf; IFP2SP(d->ifp)->pp_tls = ce_tls; /* If BPF is in the kernel, call the attach for it. * The header size of PPP or Cisco/HDLC is 4 bytes. */ bpfattach (d->ifp, DLT_PPP, 4); #endif /*NETGRAPH*/ ce_start_chan (c, 1, 1, d->dmamem.virt, d->dmamem.phys); /* Register callback functions. 
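The makembuf() hunk above shows the pattern this revision applies: MCLGET() now has an int return (nonzero on success), so callers no longer test m_flags for M_EXT by hand. A minimal sketch of the post-r276750 allocation sequence, assuming kernel context, with the headers from the mbuf(9) synopsis:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

/* Allocate a cluster-backed packet-header mbuf, new-style MCLGET check. */
static struct mbuf *
alloc_cluster_mbuf(void)
{
	struct mbuf *m;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	if (!(MCLGET(m, M_NOWAIT))) {	/* nonzero return means success */
		m_freem(m);
		return (NULL);
	}
	return (m);
}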
*/ ce_register_transmit (c, &ce_transmit); ce_register_receive (c, &ce_receive); ce_register_error (c, &ce_error); d->devt = make_dev (&ce_cdevsw, b->num*NCHAN+c->num, UID_ROOT, GID_WHEEL, 0600, "ce%d", b->num*NCHAN+c->num); } #if __FreeBSD_version >= 504000 ce_ln[2] = '0' + unit; mtx_init (&bd->ce_mtx, ce_ln, MTX_NETWORK_LOCK, MTX_DEF|MTX_RECURSE); #endif CE_LOCK (bd); TAU32_EnableInterrupts(b->ddk.pControllerObject); adapter[unit] = b; CE_UNLOCK (bd); splx (s); return 0; } static int ce_detach (device_t dev) { bdrv_t *bd = device_get_softc (dev); ce_board_t *b = bd->board; ce_chan_t *c; int s; #if __FreeBSD_version >= 504000 KASSERT (mtx_initialized (&bd->ce_mtx), ("ce mutex not initialized")); #endif s = splimp (); CE_LOCK (bd); /* Check if the device is busy (open). */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; /* XXX Non-existent chan! */ if (! d || ! d->chan) continue; if (d->running) { CE_UNLOCK (bd); splx (s); return EBUSY; } } /* Ok, we can unload the driver. */ /* First we should disable the interrupts. */ ce_destroy = 1; TAU32_DisableInterrupts(b->ddk.pControllerObject); callout_stop (&led_timo[b->num]); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan) continue; callout_stop (&d->timeout_handle); #ifndef NETGRAPH /* Detach from the packet filter list of interfaces. */ bpfdetach (d->ifp); /* Detach from the sync PPP list. */ sppp_detach (d->ifp); /* Detach from the system list of interfaces. */ if_detach (d->ifp); #if __FreeBSD_version > 600031 if_free(d->ifp); #else free (d->ifp, M_DEVBUF); #endif IF_DRAIN (&d->rqueue); #if __FreeBSD_version >= 500000 mtx_destroy (&d->rqueue.ifq_mtx); #endif #else #if __FreeBSD_version >= 500000 if (d->node) { ng_rmnode_self (d->node); NG_NODE_UNREF (d->node); d->node = NULL; } IF_DRAIN (&d->rqueue); mtx_destroy (&d->queue.ifq_mtx); mtx_destroy (&d->hi_queue.ifq_mtx); mtx_destroy (&d->rqueue.ifq_mtx); #else ng_rmnode (d->node); d->node = 0; #endif #endif destroy_dev (d->devt); } CE_UNLOCK (bd); splx (s); callout_drain (&led_timo[b->num]); /* Disable the interrupt request. */ bus_teardown_intr (dev, bd->ce_irq, bd->ce_intrhand); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->ce_irq); TAU32_DestructiveHalt (b->ddk.pControllerObject, 0); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->ce_res); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan) continue; callout_drain (&d->timeout_handle); channel [b->num * NCHAN + c->num] = 0; /* Deallocate buffers. */ ce_bus_dma_mem_free (&d->dmamem); } adapter [b->num] = 0; ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); #if __FreeBSD_version >= 504000 mtx_destroy (&bd->ce_mtx); #endif return 0; } #ifndef NETGRAPH static void ce_ifstart (struct ifnet *ifp) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; CE_LOCK (bd); ce_start (d); CE_UNLOCK (bd); } static void ce_tlf (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CE_DEBUG2 (d, ("ce_tlf\n")); sp->pp_down (sp); } static void ce_tls (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CE_DEBUG2 (d, ("ce_tls\n")); sp->pp_up (sp); } /* * Process an ioctl request. 
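*
* Only SIOCSIFFLAGS and SIOCSIFADDR need extra work here: after
* sppp_ioctl() has run, the handler compares the interface's running
* state from before and after the call and brings the channel up
* (ce_up()/ce_start()) or down (ce_down()) to match.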
*/ static int ce_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; int error, s, was_up, should_be_up; #if __FreeBSD_version >= 600034 was_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; #else was_up = (ifp->if_flags & IFF_RUNNING) != 0; #endif error = sppp_ioctl (ifp, cmd, data); if (error) return error; if (! (ifp->if_flags & IFF_DEBUG)) d->chan->debug = 0; else d->chan->debug = d->chan->debug_shadow; switch (cmd) { default: CE_DEBUG2 (d, ("ioctl 0x%lx\n", cmd)); return 0; case SIOCADDMULTI: CE_DEBUG2 (d, ("ioctl SIOCADDMULTI\n")); return 0; case SIOCDELMULTI: CE_DEBUG2 (d, ("ioctl SIOCDELMULTI\n")); return 0; case SIOCSIFFLAGS: CE_DEBUG2 (d, ("ioctl SIOCSIFFLAGS\n")); break; case SIOCSIFADDR: CE_DEBUG2 (d, ("ioctl SIOCSIFADDR\n")); break; } /* We get here only in case of SIFFLAGS or SIFADDR. */ s = splimp (); CE_LOCK (bd); #if __FreeBSD_version >= 600034 should_be_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; #else should_be_up = (ifp->if_flags & IFF_RUNNING) != 0; #endif if (! was_up && should_be_up) { /* Interface goes up -- start it. */ ce_up (d); ce_start (d); } else if (was_up && ! should_be_up) { /* Interface is going down -- stop it. */ /* if ((IFP2SP(ifp)->pp_flags & PP_FR) || (ifp->if_flags & PP_CISCO))*/ ce_down (d); } CE_DEBUG (d, ("ioctl 0x%lx p4\n", cmd)); CE_UNLOCK (bd); splx (s); return 0; } /* * Initialization of interface. * It seems to be never called by upper level? */ static void ce_initialize (void *softc) { drv_t *d = softc; CE_DEBUG (d, ("ce_initialize\n")); } #endif /*NETGRAPH*/ /* * Stop the interface. Called on splimp(). */ static void ce_down (drv_t *d) { CE_DEBUG (d, ("ce_down\n")); /* Interface is going down -- stop it. */ ce_set_dtr (d->chan, 0); ce_set_rts (d->chan, 0); d->running = 0; callout_stop (&d->timeout_handle); } /* * Start the interface. Called on splimp(). */ static void ce_up (drv_t *d) { CE_DEBUG (d, ("ce_up\n")); ce_set_dtr (d->chan, 1); ce_set_rts (d->chan, 1); d->running = 1; } /* * Start output on the interface. Get another datagram to send * off of the interface queue, and copy it to the interface * before starting the output. */ static void ce_send (drv_t *d) { struct mbuf *m; u_short len; CE_DEBUG2 (d, ("ce_send\n")); /* No output if the interface is down. */ if (! d->running) return; while (ce_transmit_space (d->chan)) { /* Get the packet to send. */ #ifdef NETGRAPH IF_DEQUEUE (&d->hi_queue, m); if (! m) IF_DEQUEUE (&d->queue, m); #else m = sppp_dequeue (d->ifp); #endif if (! m) return; #ifndef NETGRAPH #if __FreeBSD_version >= 500000 BPF_MTAP (d->ifp, m); #else if (d->ifp->if_bpf) bpf_mtap (d->ifp, m); #endif #endif #if __FreeBSD_version >= 490000 len = m_length (m, NULL); #else len = m->m_pkthdr.len; #endif if (len >= BUFSZ) printf ("%s: too long packet: %d bytes: ", d->name, len); else if (! m->m_next) ce_send_packet (d->chan, (u_char*) mtod (m, caddr_t), len, 0); else { ce_buf_item_t *item = (ce_buf_item_t*)d->chan->tx_queue; m_copydata (m, 0, len, item->buf); ce_send_packet (d->chan, item->buf, len, 0); } m_freem (m); /* Set up transmit timeout, if the transmit ring is not empty.*/ d->timeout = 10; } #ifndef NETGRAPH #if __FreeBSD_version >= 600034 d->ifp->if_flags |= IFF_DRV_OACTIVE; #else d->ifp->if_flags |= IFF_OACTIVE; #endif #endif } /* * Start output on the interface. * Always called on splimp(). */ static void ce_start (drv_t *d) { if (d->running) { if (! d->chan->dtr) ce_set_dtr (d->chan, 1); if (! 
d->chan->rts) ce_set_rts (d->chan, 1); ce_send (d); callout_reset (&d->timeout_handle, hz, ce_watchdog_timer, d); } } /* * Handle transmit timeouts. * Recover after lost transmit interrupts. * Always called on splimp(). */ static void ce_watchdog (drv_t *d) { CE_DEBUG (d, ("device timeout\n")); if (d->running) { ce_set_dtr (d->chan, 0); ce_set_rts (d->chan, 0); /* ce_stop_chan (d->chan);*/ /* ce_start_chan (d->chan, 1, 1, 0, 0);*/ ce_set_dtr (d->chan, 1); ce_set_rts (d->chan, 1); ce_start (d); } } static void ce_watchdog_timer (void *arg) { drv_t *d = arg; bdrv_t *bd = d->board->sys; CE_LOCK(bd); if (d->timeout == 1) ce_watchdog (d); if (d->timeout) d->timeout--; callout_reset (&d->timeout_handle, hz, ce_watchdog_timer, d); CE_UNLOCK(bd); } static void ce_transmit (ce_chan_t *c, void *attachment, int len) { drv_t *d = c->sys; d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OPACKETS, 1); #if __FreeBSD_version >= 600034 d->ifp->if_flags &= ~IFF_DRV_OACTIVE; #else d->ifp->if_flags &= ~IFF_OACTIVE; #endif #endif ce_start (d); } static void ce_receive (ce_chan_t *c, unsigned char *data, int len) { drv_t *d = c->sys; struct mbuf *m; if (! d->running) return; m = makembuf (data, len); if (! m) { CE_DEBUG (d, ("no memory for packet\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IQDROPS, 1); #endif return; } if (c->debug > 1) m_print (m, 0); #ifdef NETGRAPH m->m_pkthdr.rcvif = 0; IF_ENQUEUE(&d->rqueue, m); #else if_inc_counter(d->ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = d->ifp; /* Check if there's a BPF listener on this interface. * If so, hand off the raw packet to bpf. */ BPF_MTAP(d->ifp, m); IF_ENQUEUE(&d->rqueue, m); #endif } static void ce_error (ce_chan_t *c, int data) { drv_t *d = c->sys; switch (data) { case CE_FRAME: CE_DEBUG (d, ("frame error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CE_CRC: CE_DEBUG (d, ("crc error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CE_OVERRUN: CE_DEBUG (d, ("overrun error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_COLLISIONS, 1); if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CE_OVERFLOW: CE_DEBUG (d, ("overflow error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CE_UNDERRUN: CE_DEBUG (d, ("underrun error\n")); d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OERRORS, 1); #if __FreeBSD_version >= 600034 d->ifp->if_flags &= ~IFF_DRV_OACTIVE; #else d->ifp->if_flags &= ~IFF_OACTIVE; #endif #endif ce_start (d); break; default: CE_DEBUG (d, ("error #%d\n", data)); break; } } /* * You also need read, write, open, close routines. * This should get you started */ #if __FreeBSD_version < 500000 static int ce_open (dev_t dev, int oflags, int devtype, struct proc *p) #else static int ce_open (struct cdev *dev, int oflags, int devtype, struct thread *td) #endif { int unit = dev2unit (dev); drv_t *d; if (unit >= NBRD*NCHAN || ! (d = channel[unit])) return ENXIO; CE_DEBUG2 (d, ("ce_open\n")); return 0; } /* * Only called on the LAST close. */ #if __FreeBSD_version < 500000 static int ce_close (dev_t dev, int fflag, int devtype, struct proc *p) #else static int ce_close (struct cdev *dev, int fflag, int devtype, struct thread *td) #endif { drv_t *d = channel [dev2unit (dev)]; CE_DEBUG2 (d, ("ce_close\n")); return 0; } static int ce_modem_status (ce_chan_t *c) { drv_t *d = c->sys; bdrv_t *bd = d->board->sys; int status, s; status = d->running ? 
TIOCM_LE : 0; s = splimp (); CE_LOCK (bd); if (ce_get_cd (c)) status |= TIOCM_CD; if (ce_get_cts (c)) status |= TIOCM_CTS; if (ce_get_dsr (c)) status |= TIOCM_DSR; if (c->dtr) status |= TIOCM_DTR; if (c->rts) status |= TIOCM_RTS; CE_UNLOCK (bd); splx (s); return status; } #if __FreeBSD_version < 500000 static int ce_ioctl (dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) #else static int ce_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) #endif { drv_t *d = channel [dev2unit (dev)]; bdrv_t *bd = d->board->sys; ce_chan_t *c = d->chan; struct serial_statistics *st; struct e1_statistics *opte1; int error, s; char mask[16]; switch (cmd) { case SERIAL_GETREGISTERED: CE_DEBUG2 (d, ("ioctl: getregistered\n")); bzero (mask, sizeof(mask)); for (s=0; s<NBRD*NCHAN; ++s) if (channel [s]) mask [s/8] |= 1 << (s & 7); bcopy (mask, data, sizeof (mask)); return 0; #ifndef NETGRAPH case SERIAL_GETPROTO: CE_DEBUG2 (d, ("ioctl: getproto\n")); strcpy ((char*)data, (IFP2SP(d->ifp)->pp_flags & PP_FR) ? "fr" : (d->ifp->if_flags & PP_CISCO) ? "cisco" : "ppp"); return 0; case SERIAL_SETPROTO: CE_DEBUG2 (d, ("ioctl: setproto\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; #if __FreeBSD_version >= 600034 if (d->ifp->if_flags & IFF_DRV_RUNNING) #else if (d->ifp->if_flags & IFF_RUNNING) #endif return EBUSY; if (! strcmp ("cisco", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~(PP_FR); IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; d->ifp->if_flags |= PP_CISCO; #if PP_FR != 0 } else if (! strcmp ("fr", (char*)data)) { d->ifp->if_flags &= ~(PP_CISCO); IFP2SP(d->ifp)->pp_flags |= PP_FR | PP_KEEPALIVE; #endif } else if (! strcmp ("ppp", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~PP_FR; IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; d->ifp->if_flags &= ~(PP_CISCO); } else return EINVAL; return 0; case SERIAL_GETKEEPALIVE: CE_DEBUG2 (d, ("ioctl: getkeepalive\n")); if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; *(int*)data = (IFP2SP(d->ifp)->pp_flags & PP_KEEPALIVE) ? 1 : 0; return 0; case SERIAL_SETKEEPALIVE: CE_DEBUG2 (d, ("ioctl: setkeepalive\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; s = splimp (); CE_LOCK (bd); if (*(int*)data) IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; else IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; CE_UNLOCK (bd); splx (s); return 0; #endif /*NETGRAPH*/ case SERIAL_GETMODE: CE_DEBUG2 (d, ("ioctl: getmode\n")); *(int*)data = SERIAL_HDLC; return 0; case SERIAL_SETMODE: /* Only for superuser! 
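* The same version-dependent privilege check recurs in every "set"
* ioctl below: suser() (with a proc or thread argument) on kernels
* older than 7.0, priv_check(td, PRIV_DRIVER) from 7.0 onwards.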
*/ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (*(int*)data != SERIAL_HDLC) return EINVAL; return 0; case SERIAL_GETCFG: CE_DEBUG2 (d, ("ioctl: getcfg\n")); *(char*)data = 'c'; return 0; case SERIAL_SETCFG: CE_DEBUG2 (d, ("ioctl: setcfg\n")); #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (*((char*)data) != 'c') return EINVAL; return 0; case SERIAL_GETSTAT: CE_DEBUG2 (d, ("ioctl: getstat\n")); st = (struct serial_statistics*) data; st->rintr = c->rintr; st->tintr = c->tintr; st->mintr = 0; st->ibytes = c->ibytes; st->ipkts = c->ipkts; st->obytes = c->obytes; st->opkts = c->opkts; st->ierrs = c->overrun + c->frame + c->crc; st->oerrs = c->underrun; return 0; case SERIAL_GETESTAT: CE_DEBUG2 (d, ("ioctl: getestat\n")); if (c->type != T_E1) return EINVAL; opte1 = (struct e1_statistics*) data; opte1->status = 0; if (c->status & ESTS_NOALARM) opte1->status |= E1_NOALARM; if (c->status & ESTS_LOS) opte1->status |= E1_LOS; if (c->status & ESTS_LOF) opte1->status |= E1_LOF; if (c->status & ESTS_AIS) opte1->status |= E1_AIS; if (c->status & ESTS_LOMF) opte1->status |= E1_LOMF; if (c->status & ESTS_AIS16) opte1->status |= E1_AIS16; if (c->status & ESTS_FARLOF) opte1->status |= E1_FARLOF; if (c->status & ESTS_FARLOMF) opte1->status |= E1_FARLOMF; if (c->status & ESTS_TSTREQ) opte1->status |= E1_TSTREQ; if (c->status & ESTS_TSTERR) opte1->status |= E1_TSTERR; opte1->cursec = c->cursec; opte1->totsec = c->totsec + c->cursec; opte1->currnt.bpv = c->currnt.bpv; opte1->currnt.fse = c->currnt.fse; opte1->currnt.crce = c->currnt.crce; opte1->currnt.rcrce = c->currnt.rcrce; opte1->currnt.uas = c->currnt.uas; opte1->currnt.les = c->currnt.les; opte1->currnt.es = c->currnt.es; opte1->currnt.bes = c->currnt.bes; opte1->currnt.ses = c->currnt.ses; opte1->currnt.oofs = c->currnt.oofs; opte1->currnt.css = c->currnt.css; opte1->currnt.dm = c->currnt.dm; opte1->total.bpv = c->total.bpv + c->currnt.bpv; opte1->total.fse = c->total.fse + c->currnt.fse; opte1->total.crce = c->total.crce + c->currnt.crce; opte1->total.rcrce = c->total.rcrce + c->currnt.rcrce; opte1->total.uas = c->total.uas + c->currnt.uas; opte1->total.les = c->total.les + c->currnt.les; opte1->total.es = c->total.es + c->currnt.es; opte1->total.bes = c->total.bes + c->currnt.bes; opte1->total.ses = c->total.ses + c->currnt.ses; opte1->total.oofs = c->total.oofs + c->currnt.oofs; opte1->total.css = c->total.css + c->currnt.css; opte1->total.dm = c->total.dm + c->currnt.dm; for (s=0; s<48; ++s) { opte1->interval[s].bpv = c->interval[s].bpv; opte1->interval[s].fse = c->interval[s].fse; opte1->interval[s].crce = c->interval[s].crce; opte1->interval[s].rcrce = c->interval[s].rcrce; opte1->interval[s].uas = c->interval[s].uas; opte1->interval[s].les = c->interval[s].les; opte1->interval[s].es = c->interval[s].es; opte1->interval[s].bes = c->interval[s].bes; opte1->interval[s].ses = c->interval[s].ses; opte1->interval[s].oofs = c->interval[s].oofs; opte1->interval[s].css = c->interval[s].css; opte1->interval[s].dm = c->interval[s].dm; } return 0; case SERIAL_CLRSTAT: CE_DEBUG2 (d, ("ioctl: clrstat\n")); /* Only for superuser! 
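* Clears everything reported by SERIAL_GETSTAT and SERIAL_GETESTAT:
* the interrupt and byte/packet/error counters plus the current,
* total and per-interval E1 error statistics.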
*/ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; c->rintr = 0; c->tintr = 0; c->ibytes = 0; c->obytes = 0; c->ipkts = 0; c->opkts = 0; c->overrun = 0; c->frame = 0; c->crc = 0; c->underrun = 0; bzero (&c->currnt, sizeof (c->currnt)); bzero (&c->total, sizeof (c->total)); bzero (c->interval, sizeof (c->interval)); return 0; case SERIAL_GETLOOP: CE_DEBUG2 (d, ("ioctl: getloop\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->lloop; return 0; case SERIAL_SETLOOP: CE_DEBUG2 (d, ("ioctl: setloop\n")); if (c->type != T_E1) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_lloop (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETRLOOP: CE_DEBUG2 (d, ("ioctl: getrloop\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->rloop; return 0; case SERIAL_SETRLOOP: CE_DEBUG2 (d, ("ioctl: setloop\n")); if (c->type != T_E1) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_rloop (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDEBUG: CE_DEBUG2 (d, ("ioctl: getdebug\n")); *(int*)data = d->chan->debug; return 0; case SERIAL_SETDEBUG: CE_DEBUG2 (d, ("ioctl: setdebug\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; #ifndef NETGRAPH /* * The debug_shadow is always greater than zero for logic * simplicity. For switching debug off the IFF_DEBUG is * responsible. */ d->chan->debug_shadow = (*(int*)data) ? (*(int*)data) : 1; if (d->ifp->if_flags & IFF_DEBUG) d->chan->debug = d->chan->debug_shadow; #else d->chan->debug = *(int*)data; #endif return 0; case SERIAL_GETBAUD: CE_DEBUG2 (d, ("ioctl: getbaud\n")); *(long*)data = c->baud; return 0; case SERIAL_SETBAUD: CE_DEBUG2 (d, ("ioctl: setbaud\n")); if (c->type != T_E1 || !c->unfram) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_baud (c, *(long*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETTIMESLOTS: CE_DEBUG2 (d, ("ioctl: gettimeslots\n")); if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; *(u_long*)data = c->ts; return 0; case SERIAL_SETTIMESLOTS: CE_DEBUG2 (d, ("ioctl: settimeslots\n")); /* Only for superuser! 
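* The argument is a bit mask of E1 timeslots, bit N selecting
* timeslot N (timeslot 0, which carries the E1 framing, is skipped).
* For example, a mask of 0x3e selects timeslots 1-5, which
* format_timeslots() below renders as "1-5".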
*/ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_ts (c, *(u_long*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETHIGAIN: CE_DEBUG2 (d, ("ioctl: gethigain\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->higain; return 0; case SERIAL_SETHIGAIN: CE_DEBUG2 (d, ("ioctl: sethigain\n")); if (c->type != T_E1) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_higain (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETPHONY: CE_DEBUG2 (d, ("ioctl: getphony\n")); *(int*)data = c->phony; return 0; case SERIAL_SETPHONY: CE_DEBUG2 (d, ("ioctl: setphony\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_phony (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETUNFRAM: CE_DEBUG2 (d, ("ioctl: getunfram\n")); if (c->type != T_E1 || c->num != 0) return EINVAL; *(int*)data = c->unfram; return 0; case SERIAL_SETUNFRAM: CE_DEBUG2 (d, ("ioctl: setunfram\n")); if (c->type != T_E1 || c->num != 0) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_unfram (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSCRAMBLER: CE_DEBUG2 (d, ("ioctl: getscrambler\n")); if (!c->unfram) return EINVAL; *(int*)data = c->scrambler; return 0; case SERIAL_SETSCRAMBLER: CE_DEBUG2 (d, ("ioctl: setscrambler\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (!c->unfram) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_scrambler (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETMONITOR: CE_DEBUG2 (d, ("ioctl: getmonitor\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->monitor; return 0; case SERIAL_SETMONITOR: CE_DEBUG2 (d, ("ioctl: setmonitor\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_monitor (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETUSE16: CE_DEBUG2 (d, ("ioctl: getuse16\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->use16; return 0; case SERIAL_SETUSE16: CE_DEBUG2 (d, ("ioctl: setuse16\n")); /* Only for superuser! 
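* Timeslot 16 of a framed E1 is normally reserved for CAS
* signalling; the use16 option makes it available as an ordinary
* data slot instead.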
*/ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_use16 (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCRC4: CE_DEBUG2 (d, ("ioctl: getcrc4\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->crc4; return 0; case SERIAL_SETCRC4: CE_DEBUG2 (d, ("ioctl: setcrc4\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (c->type != T_E1 || c->unfram) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_crc4 (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCLK: CE_DEBUG2 (d, ("ioctl: getclk\n")); if (c->type != T_E1) return EINVAL; switch (c->gsyn) { default: *(int*)data = E1CLK_INTERNAL; break; case GSYN_RCV: *(int*)data = E1CLK_RECEIVE; break; case GSYN_RCV0: *(int*)data = E1CLK_RECEIVE_CHAN0; break; case GSYN_RCV1: *(int*)data = E1CLK_RECEIVE_CHAN1; break; } return 0; case SERIAL_SETCLK: CE_DEBUG2 (d, ("ioctl: setclk\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CE_LOCK (bd); switch (*(int*)data) { default: ce_set_gsyn (c, GSYN_INT); break; case E1CLK_RECEIVE: ce_set_gsyn (c, GSYN_RCV); break; case E1CLK_RECEIVE_CHAN0: ce_set_gsyn (c, GSYN_RCV0); break; case E1CLK_RECEIVE_CHAN1: ce_set_gsyn (c, GSYN_RCV1); break; } CE_UNLOCK (bd); splx (s); return 0; #if 0 case SERIAL_RESET: CE_DEBUG2 (d, ("ioctl: reset\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); /* ce_reset (c->board, 0, 0);*/ CE_UNLOCK (bd); splx (s); return 0; case SERIAL_HARDRESET: CE_DEBUG2 (d, ("ioctl: hardreset\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); /* hard_reset (c->board); */ CE_UNLOCK (bd); splx (s); return 0; #endif case SERIAL_GETCABLE: CE_DEBUG2 (d, ("ioctl: getcable\n")); if (c->type != T_E1) return EINVAL; s = splimp (); CE_LOCK (bd); *(int*)data = CABLE_TP; CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDIR: CE_DEBUG2 (d, ("ioctl: getdir\n")); if (c->type != T_E1 && c->type != T_DATA) return EINVAL; *(int*)data = c->dir; return 0; case SERIAL_SETDIR: CE_DEBUG2 (d, ("ioctl: setdir\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_dir (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case TIOCSDTR: /* Set DTR */ s = splimp (); CE_LOCK (bd); ce_set_dtr (c, 1); CE_UNLOCK (bd); splx (s); return 0; case TIOCCDTR: /* Clear DTR */ s = splimp (); CE_LOCK (bd); ce_set_dtr (c, 0); CE_UNLOCK (bd); splx (s); return 0; case TIOCMSET: /* Set DTR/RTS */ s = splimp (); CE_LOCK (bd); ce_set_dtr (c, (*(int*)data & TIOCM_DTR) ? 
1 : 0); ce_set_rts (c, (*(int*)data & TIOCM_RTS) ? 1 : 0); CE_UNLOCK (bd); splx (s); return 0; case TIOCMBIS: /* Add DTR/RTS */ s = splimp (); CE_LOCK (bd); if (*(int*)data & TIOCM_DTR) ce_set_dtr (c, 1); if (*(int*)data & TIOCM_RTS) ce_set_rts (c, 1); CE_UNLOCK (bd); splx (s); return 0; case TIOCMBIC: /* Clear DTR/RTS */ s = splimp (); CE_LOCK (bd); if (*(int*)data & TIOCM_DTR) ce_set_dtr (c, 0); if (*(int*)data & TIOCM_RTS) ce_set_rts (c, 0); CE_UNLOCK (bd); splx (s); return 0; case TIOCMGET: /* Get modem status */ *(int*)data = ce_modem_status (c); return 0; } return ENOTTY; } #ifdef NETGRAPH #if __FreeBSD_version >= 500000 static int ng_ce_constructor (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); #else static int ng_ce_constructor (node_p *node) { drv_t *d = (*node)->private; #endif CE_DEBUG (d, ("Constructor\n")); return EINVAL; } static int ng_ce_newhook (node_p node, hook_p hook, const char *name) { int s; #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (node); #else drv_t *d = node->private; #endif bdrv_t *bd = d->board->sys; CE_DEBUG (d, ("Newhook\n")); /* Attach debug hook */ if (strcmp (name, NG_CE_HOOK_DEBUG) == 0) { #if __FreeBSD_version >= 500000 NG_HOOK_SET_PRIVATE (hook, NULL); #else hook->private = 0; #endif d->debug_hook = hook; return 0; } /* Check for raw hook */ if (strcmp (name, NG_CE_HOOK_RAW) != 0) return EINVAL; #if __FreeBSD_version >= 500000 NG_HOOK_SET_PRIVATE (hook, d); #else hook->private = d; #endif d->hook = hook; s = splimp (); CE_LOCK (bd); ce_up (d); CE_UNLOCK (bd); splx (s); return 0; } static char *format_timeslots (u_long s) { static char buf [100]; char *p = buf; int i; for (i=1; i<32; ++i) if ((s >> i) & 1) { int prev = (i > 1) & (s >> (i-1)); int next = (i < 31) & (s >> (i+1)); if (prev) { if (next) continue; *p++ = '-'; } else if (p > buf) *p++ = ','; if (i >= 10) *p++ = '0' + i / 10; *p++ = '0' + i % 10; } *p = 0; return buf; } static int print_modems (char *s, ce_chan_t *c, int need_header) { int status = ce_modem_status (c); int length = 0; if (need_header) length += sprintf (s + length, " LE DTR DSR RTS CTS CD\n"); length += sprintf (s + length, "%4s %4s %4s %4s %4s %4s\n", status & TIOCM_LE ? "On" : "-", status & TIOCM_DTR ? "On" : "-", status & TIOCM_DSR ? "On" : "-", status & TIOCM_RTS ? "On" : "-", status & TIOCM_CTS ? "On" : "-", status & TIOCM_CD ? 
"On" : "-"); return length; } static int print_stats (char *s, ce_chan_t *c, int need_header) { int length = 0; if (need_header) length += sprintf (s + length, " Rintr Tintr Mintr Ibytes Ipkts Ierrs Obytes Opkts Oerrs\n"); length += sprintf (s + length, "%7ld %7ld %7ld %8lu %7ld %7ld %8lu %7ld %7ld\n", c->rintr, c->tintr, 0l, (unsigned long) c->ibytes, c->ipkts, c->overrun + c->frame + c->crc, (unsigned long) c->obytes, c->opkts, c->underrun); return length; } static char *format_e1_status (u_char status) { static char buf [80]; if (status & E1_NOALARM) return "Ok"; buf[0] = 0; if (status & E1_LOS) strcat (buf, ",LOS"); if (status & E1_AIS) strcat (buf, ",AIS"); if (status & E1_LOF) strcat (buf, ",LOF"); if (status & E1_LOMF) strcat (buf, ",LOMF"); if (status & E1_FARLOF) strcat (buf, ",FARLOF"); if (status & E1_AIS16) strcat (buf, ",AIS16"); if (status & E1_FARLOMF) strcat (buf, ",FARLOMF"); if (status & E1_TSTREQ) strcat (buf, ",TSTREQ"); if (status & E1_TSTERR) strcat (buf, ",TSTERR"); if (buf[0] == ',') return buf+1; return "Unknown"; } static int print_frac (char *s, int leftalign, u_long numerator, u_long divider) { int n, length = 0; if (numerator < 1 || divider < 1) { length += sprintf (s+length, leftalign ? "/- " : " -"); return length; } n = (int) (0.5 + 1000.0 * numerator / divider); if (n < 1000) { length += sprintf (s+length, leftalign ? "/.%-3d" : " .%03d", n); return length; } *(s + length) = leftalign ? '/' : ' '; length ++; if (n >= 1000000) n = (n+500) / 1000 * 1000; else if (n >= 100000) n = (n+50) / 100 * 100; else if (n >= 10000) n = (n+5) / 10 * 10; switch (n) { case 1000: length += printf (s+length, ".999"); return length; case 10000: n = 9990; break; case 100000: n = 99900; break; case 1000000: n = 999000; break; } if (n < 10000) length += sprintf (s+length, "%d.%d", n/1000, n/10%100); else if (n < 100000) length += sprintf (s+length, "%d.%d", n/1000, n/100%10); else if (n < 1000000) length += sprintf (s+length, "%d.", n/1000); else length += sprintf (s+length, "%d", n/1000); return length; } static int print_e1_stats (char *s, ce_chan_t *c) { struct e1_counters total; u_long totsec; int length = 0; totsec = c->totsec + c->cursec; total.bpv = c->total.bpv + c->currnt.bpv; total.fse = c->total.fse + c->currnt.fse; total.crce = c->total.crce + c->currnt.crce; total.rcrce = c->total.rcrce + c->currnt.rcrce; total.uas = c->total.uas + c->currnt.uas; total.les = c->total.les + c->currnt.les; total.es = c->total.es + c->currnt.es; total.bes = c->total.bes + c->currnt.bes; total.ses = c->total.ses + c->currnt.ses; total.oofs = c->total.oofs + c->currnt.oofs; total.css = c->total.css + c->currnt.css; total.dm = c->total.dm + c->currnt.dm; length += sprintf (s + length, " Unav/Degr Bpv/Fsyn CRC/RCRC Err/Lerr Sev/Bur Oof/Slp Status\n"); /* Unavailable seconds, degraded minutes */ length += print_frac (s + length, 0, c->currnt.uas, c->cursec); length += print_frac (s + length, 1, 60 * c->currnt.dm, c->cursec); /* Bipolar violations, frame sync errors */ length += print_frac (s + length, 0, c->currnt.bpv, c->cursec); length += print_frac (s + length, 1, c->currnt.fse, c->cursec); /* CRC errors, remote CRC errors (E-bit) */ length += print_frac (s + length, 0, c->currnt.crce, c->cursec); length += print_frac (s + length, 1, c->currnt.rcrce, c->cursec); /* Errored seconds, line errored seconds */ length += print_frac (s + length, 0, c->currnt.es, c->cursec); length += print_frac (s + length, 1, c->currnt.les, c->cursec); /* Severely errored seconds, burst errored seconds */ 
length += print_frac (s + length, 0, c->currnt.ses, c->cursec); length += print_frac (s + length, 1, c->currnt.bes, c->cursec); /* Out of frame seconds, controlled slip seconds */ length += print_frac (s + length, 0, c->currnt.oofs, c->cursec); length += print_frac (s + length, 1, c->currnt.css, c->cursec); length += sprintf (s + length, " %s\n", format_e1_status (c->status)); /* Print total statistics. */ length += print_frac (s + length, 0, total.uas, totsec); length += print_frac (s + length, 1, 60 * total.dm, totsec); length += print_frac (s + length, 0, total.bpv, totsec); length += print_frac (s + length, 1, total.fse, totsec); length += print_frac (s + length, 0, total.crce, totsec); length += print_frac (s + length, 1, total.rcrce, totsec); length += print_frac (s + length, 0, total.es, totsec); length += print_frac (s + length, 1, total.les, totsec); length += print_frac (s + length, 0, total.ses, totsec); length += print_frac (s + length, 1, total.bes, totsec); length += print_frac (s + length, 0, total.oofs, totsec); length += print_frac (s + length, 1, total.css, totsec); length += sprintf (s + length, " -- Total\n"); return length; } static int print_chan (char *s, ce_chan_t *c) { drv_t *d = c->sys; int length = 0; length += sprintf (s + length, "ce%d", c->board->num * NCHAN + c->num); if (d->chan->debug) length += sprintf (s + length, " debug=%d", d->chan->debug); if (c->board->mux) { length += sprintf (s + length, " cfg=C"); } else { length += sprintf (s + length, " cfg=A"); } if (c->baud) length += sprintf (s + length, " %ld", c->baud); else length += sprintf (s + length, " extclock"); if (c->type == T_E1) switch (c->gsyn) { case GSYN_INT : length += sprintf (s + length, " syn=int"); break; case GSYN_RCV : length += sprintf (s + length, " syn=rcv"); break; case GSYN_RCV0 : length += sprintf (s + length, " syn=rcv0"); break; case GSYN_RCV1 : length += sprintf (s + length, " syn=rcv1"); break; } if (c->type == T_E1) length += sprintf (s + length, " higain=%s", c->higain ? "on" : "off"); length += sprintf (s + length, " loop=%s", c->lloop ? "on" : "off"); if (c->type == T_E1) length += sprintf (s + length, " ts=%s", format_timeslots (c->ts)); length += sprintf (s + length, "\n"); return length; } #if __FreeBSD_version >= 500000 static int ng_ce_rcvmsg (node_p node, item_p item, hook_p lasthook) { drv_t *d = NG_NODE_PRIVATE (node); struct ng_mesg *msg; #else static int ng_ce_rcvmsg (node_p node, struct ng_mesg *msg, const char *retaddr, struct ng_mesg **rptr) { drv_t *d = node->private; #endif struct ng_mesg *resp = NULL; int error = 0; CE_DEBUG (d, ("Rcvmsg\n")); #if __FreeBSD_version >= 500000 NGI_GET_MSG (item, msg); #endif switch (msg->header.typecookie) { default: error = EINVAL; break; case NGM_CE_COOKIE: printf ("Not implemented yet\n"); error = EINVAL; break; case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { default: error = EINVAL; break; case NGM_TEXT_STATUS: { char *s; int l = 0; int dl = sizeof (struct ng_mesg) + 730; #if __FreeBSD_version >= 500000 NG_MKRESPONSE (resp, msg, dl, M_NOWAIT); if (! resp) { error = ENOMEM; break; } #else resp = malloc (M_NETGRAPH, M_NOWAIT); if (! 
resp) { error = ENOMEM; break; } bzero (resp, dl); #endif s = (resp)->data; if (d) { l += print_chan (s + l, d->chan); l += print_stats (s + l, d->chan, 1); l += print_modems (s + l, d->chan, 1); l += print_e1_stats (s + l, d->chan); } else l += sprintf (s + l, "Error: node not connect to channel"); #if __FreeBSD_version < 500000 (resp)->header.version = NG_VERSION; (resp)->header.arglen = strlen (s) + 1; (resp)->header.token = msg->header.token; (resp)->header.typecookie = NGM_CE_COOKIE; (resp)->header.cmd = msg->header.cmd; #endif strncpy ((resp)->header.cmdstr, "status", NG_CMDSTRSIZ); } break; } break; } #if __FreeBSD_version >= 500000 NG_RESPOND_MSG (error, node, item, resp); NG_FREE_MSG (msg); #else *rptr = resp; free (msg, M_NETGRAPH); #endif return error; } #if __FreeBSD_version >= 500000 static int ng_ce_rcvdata (hook_p hook, item_p item) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE(hook)); struct mbuf *m; #if __FreeBSD_version < 502120 meta_p meta; #else struct ng_tag_prio *ptag; #endif #else static int ng_ce_rcvdata (hook_p hook, struct mbuf *m, meta_p meta) { drv_t *d = hook->node->private; #endif bdrv_t *bd = d->board->sys; struct ifqueue *q; int s; CE_DEBUG2 (d, ("Rcvdata\n")); #if __FreeBSD_version >= 500000 NGI_GET_M (item, m); #if __FreeBSD_version < 502120 NGI_GET_META (item, meta); #endif NG_FREE_ITEM (item); if (! NG_HOOK_PRIVATE (hook) || ! d) { NG_FREE_M (m); #if __FreeBSD_version < 502120 NG_FREE_META (meta); #endif #else if (! hook->private || ! d) { NG_FREE_DATA (m,meta); #endif return ENETDOWN; } #if __FreeBSD_version >= 502120 /* Check for high priority data */ if ((ptag = (struct ng_tag_prio *)m_tag_locate(m, NGM_GENERIC_COOKIE, NG_TAG_PRIO, NULL)) != NULL && (ptag->priority > NG_PRIO_CUTOFF) ) q = &d->hi_queue; else q = &d->queue; #else q = (meta && meta->priority > 0) ? 
&d->hi_queue : &d->queue; #endif s = splimp (); CE_LOCK (bd); #if __FreeBSD_version >= 500000 IF_LOCK (q); if (_IF_QFULL (q)) { IF_UNLOCK (q); CE_UNLOCK (bd); splx (s); NG_FREE_M (m); #if __FreeBSD_version < 502120 NG_FREE_META (meta); #endif return ENOBUFS; } _IF_ENQUEUE (q, m); IF_UNLOCK (q); #else if (IF_QFULL (q)) { IF_DROP (q); CE_UNLOCK (bd); splx (s); NG_FREE_DATA (m, meta); return ENOBUFS; } IF_ENQUEUE (q, m); #endif ce_start (d); CE_UNLOCK (bd); splx (s); return 0; } static int ng_ce_rmnode (node_p node) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (node); CE_DEBUG (d, ("Rmnode\n")); if (d && d->running) { bdrv_t *bd = d->board->sys; int s = splimp (); CE_LOCK (bd); ce_down (d); CE_UNLOCK (bd); splx (s); } #ifdef KLD_MODULE #if __FreeBSD_version >= 502120 if (node->nd_flags & NGF_REALLY_DIE) { #else if (node->nd_flags & NG_REALLY_DIE) { #endif NG_NODE_SET_PRIVATE (node, NULL); NG_NODE_UNREF (node); } #if __FreeBSD_version >= 502120 NG_NODE_REVIVE(node); /* Persistent node */ #else node->nd_flags &= ~NG_INVALID; #endif #endif #else /* __FreeBSD_version < 500000 */ drv_t *d = node->private; if (d && d->running) { bdrv_t *bd = d->board->sys; int s = splimp (); CE_LOCK (bd); ce_down (d); CE_UNLOCK (bd); splx (s); } node->flags |= NG_INVALID; ng_cutlinks (node); #ifdef KLD_MODULE ng_unname (node); ng_unref (node); #endif #endif return 0; } static int ng_ce_connect (hook_p hook) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); #else drv_t *d = hook->node->private; #endif if (d) { CE_DEBUG (d, ("Connect\n")); callout_reset (&d->timeout_handle, hz, ce_watchdog_timer, d); } return 0; } static int ng_ce_disconnect (hook_p hook) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); #else drv_t *d = hook->node->private; #endif if (d) { CE_DEBUG (d, ("Disconnect\n")); #if __FreeBSD_version >= 500000 if (NG_HOOK_PRIVATE (hook)) #else if (hook->private) #endif { bdrv_t *bd = d->board->sys; int s = splimp (); CE_LOCK (bd); ce_down (d); CE_UNLOCK (bd); splx (s); } /* If it was rearmed while we waited for it to drain, just stop it. */ if (!callout_drain (&d->timeout_handle)) callout_stop (&d->timeout_handle); } return 0; } #endif static int ce_modevent (module_t mod, int type, void *unused) { #if __FreeBSD_version < 500000 dev_t dev; struct cdevsw *cdsw; #endif static int load_count = 0; #if __FreeBSD_version < 500000 dev = makedev (CDEV_MAJOR, 0); #endif switch (type) { case MOD_LOAD: #if __FreeBSD_version < 500000 if (dev != NODEV && (cdsw = devsw (dev)) && cdsw->d_maj == CDEV_MAJOR) { printf ("Tau32-PCI driver is already in system\n"); return (ENXIO); } #endif #if __FreeBSD_version >= 500000 && defined NETGRAPH if (ng_newtype (&typestruct)) printf ("Failed to register ng_ce\n"); #endif ++load_count; #if __FreeBSD_version <= 500000 cdevsw_add (&ce_cdevsw); #endif #if __FreeBSD_version >= 500000 callout_init (&timeout_handle, CALLOUT_MPSAFE); #else callout_init (&timeout_handle); #endif callout_reset (&timeout_handle, hz*5, ce_timeout, 0); break; case MOD_UNLOAD: if (load_count == 1) { printf ("Removing device entry for Tau32-PCI\n"); #if __FreeBSD_version <= 500000 cdevsw_remove (&ce_cdevsw); #endif #if __FreeBSD_version >= 500000 && defined NETGRAPH ng_rmtype (&typestruct); #endif } /* If it was rearmed while we waited for it to drain, just stop it. * Actually we shouldn't get this condition. But the code could be * changed in the future, so just be a little paranoid. 
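* (Unlike callout_stop(), callout_drain() also waits for a handler
* that is already running to finish before it returns.)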
*/ if (!callout_drain (&timeout_handle)) callout_stop (&timeout_handle); --load_count; break; case MOD_SHUTDOWN: break; } return 0; } #ifdef NETGRAPH #if __FreeBSD_version >= 502100 static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_CE_NODE_TYPE, .constructor = ng_ce_constructor, .rcvmsg = ng_ce_rcvmsg, .shutdown = ng_ce_rmnode, .newhook = ng_ce_newhook, .connect = ng_ce_connect, .rcvdata = ng_ce_rcvdata, .disconnect = ng_ce_disconnect, }; #else /* __FreeBSD_version < 502100 */ static struct ng_type typestruct = { #if __FreeBSD_version >= 500000 NG_ABI_VERSION, #else NG_VERSION, #endif NG_CE_NODE_TYPE, ce_modevent, ng_ce_constructor, ng_ce_rcvmsg, ng_ce_rmnode, ng_ce_newhook, NULL, ng_ce_connect, ng_ce_rcvdata, #if __FreeBSD_version < 500000 NULL, #endif ng_ce_disconnect, NULL }; #endif /* __FreeBSD_version < 502100 */ #endif /*NETGRAPH*/ #if __FreeBSD_version >= 500000 #ifdef NETGRAPH MODULE_DEPEND (ng_ce, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); #else MODULE_DEPEND (ce, sppp, 1, 1, 1); #endif #ifdef KLD_MODULE DRIVER_MODULE (cemod, pci, ce_driver, ce_devclass, ce_modevent, NULL); #else DRIVER_MODULE (ce, pci, ce_driver, ce_devclass, ce_modevent, NULL); #endif #else /* if __FreeBSD_version < 500000*/ #ifdef NETGRAPH DRIVER_MODULE (ce, pci, ce_driver, ce_devclass, ng_mod_event, &typestruct); #else DRIVER_MODULE (ce, pci, ce_driver, ce_devclass, ce_modevent, NULL); #endif #endif /* __FreeBSD_version < 500000 */ #endif /* NPCI */ Index: head/sys/dev/cm/smc90cx6.c =================================================================== --- head/sys/dev/cm/smc90cx6.c (revision 276749) +++ head/sys/dev/cm/smc90cx6.c (revision 276750) @@ -1,927 +1,924 @@ /* $NetBSD: smc90cx6.c,v 1.38 2001/07/07 15:57:53 thorpej Exp $ */ #include __FBSDID("$FreeBSD$"); /*- * Copyright (c) 1994, 1995, 1998 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Ignatios Souvatzis. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Chip core driver for the SMC90c26 / SMC90c56 (and SMC90c66 in '56 * compatibility mode) boards */ /* #define CMSOFTCOPY */ #define CMRETRANSMIT /**/ /* #define CM_DEBUG */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(if_cm, arcnet, 1, 1, 1); /* these should be elsewhere */ #define ARC_MIN_LEN 1 #define ARC_MIN_FORBID_LEN 254 #define ARC_MAX_FORBID_LEN 256 #define ARC_MAX_LEN 508 #define ARC_ADDR_LEN 1 /* for watchdog timer. This should be more than enough. */ #define ARCTIMEOUT (5*IFNET_SLOWHZ) devclass_t cm_devclass; /* * This currently uses 2 bufs for tx, 2 for rx * * New rx protocol: * * rx has a fillcount variable. If fillcount > (NRXBUF-1), * rx can be switched off from rx hard int. * Else rx is restarted on the other receiver. * rx soft int counts down. if it is == (NRXBUF-1), it restarts * the receiver. * To ensure packet ordering (we need that for 1201 later), we have a counter * which is incremented modulo 256 on each receive and a per buffer * variable, which is set to the counter on filling. The soft int can * compare both values to determine the older packet. * * Transmit direction: * * cm_start checks tx_fillcount * case 2: return * * else fill tx_act ^ 1 && inc tx_fillcount * * check tx_fillcount again. * case 2: set IFF_DRV_OACTIVE to stop arc_output from filling us. * case 1: start tx * * tint clears IFF_OACTIVE, decrements and checks tx_fillcount * case 1: start tx on tx_act ^ 1, softcall cm_start * case 0: softcall cm_start * * #define fill(i) get mbuf && copy mbuf to chip(i) */ void cm_init(void *); static void cm_init_locked(struct cm_softc *); static void cm_reset_locked(struct cm_softc *); void cm_start(struct ifnet *); void cm_start_locked(struct ifnet *); int cm_ioctl(struct ifnet *, unsigned long, caddr_t); void cm_watchdog(void *); void cm_srint_locked(void *vsc); static void cm_tint_locked(struct cm_softc *, int); void cm_reconwatch_locked(void *); /* * Release all resources */ void cm_release_resources(dev) device_t dev; { struct cm_softc *sc = device_get_softc(dev); if (sc->port_res != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port_res); sc->port_res = NULL; } if (sc->mem_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); sc->mem_res = NULL; } if (sc->irq_res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); sc->irq_res = NULL; } } int cm_attach(dev) device_t dev; { struct cm_softc *sc = device_get_softc(dev); struct ifnet *ifp; u_int8_t linkaddress; ifp = sc->sc_ifp = if_alloc(IFT_ARCNET); if (ifp == NULL) return (ENOSPC); /* * read the arcnet address from the board */ GETREG(CMRESET); do { DELAY(200); } while (!(GETREG(CMSTAT) & CM_POR)); linkaddress = GETMEM(CMMACOFF); /* clear the int mask... 
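* (writing zero to CMSTAT masks every interrupt source; the sources
* are enabled again step by step in cm_reset_locked())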
*/ sc->sc_intmask = 0; PUTREG(CMSTAT, 0); PUTREG(CMCMD, CM_CONF(CONF_LONG)); PUTREG(CMCMD, CM_CLR(CLR_POR|CLR_RECONFIG)); sc->sc_recontime = sc->sc_reconcount = 0; /* * set interface to stopped condition (reset) */ cm_stop_locked(sc); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_output = arc_output; ifp->if_start = cm_start; ifp->if_ioctl = cm_ioctl; ifp->if_init = cm_init; /* XXX IFQ_SET_READY(&ifp->if_snd); */ ifp->if_snd.ifq_maxlen = ifqmaxlen; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX; arc_ifattach(ifp, linkaddress); #ifdef CMSOFTCOPY sc->sc_rxcookie = softintr_establish(IPL_SOFTNET, cm_srint, sc); sc->sc_txcookie = softintr_establish(IPL_SOFTNET, (void (*)(void *))cm_start, ifp); #endif callout_init_mtx(&sc->sc_recon_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_watchdog_timer, &sc->sc_mtx, 0); if_printf(ifp, "link addr 0x%02x (%d)\n", linkaddress, linkaddress); return 0; } /* * Initialize device * */ void cm_init(xsc) void *xsc; { struct cm_softc *sc = (struct cm_softc *)xsc; CM_LOCK(sc); cm_init_locked(sc); CM_UNLOCK(sc); } static void cm_init_locked(struct cm_softc *sc) { struct ifnet *ifp = sc->sc_ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { ifp->if_drv_flags |= IFF_DRV_RUNNING; cm_reset_locked(sc); } } /* * Reset the interface... * * Assumes that it is called with sc_mtx held */ void cm_reset_locked(sc) struct cm_softc *sc; { struct ifnet *ifp; int linkaddress; ifp = sc->sc_ifp; #ifdef CM_DEBUG if_printf(ifp, "reset\n"); #endif /* stop and restart hardware */ GETREG(CMRESET); do { DELAY(200); } while (!(GETREG(CMSTAT) & CM_POR)); linkaddress = GETMEM(CMMACOFF); #if defined(CM_DEBUG) && (CM_DEBUG > 2) if_printf(ifp, "reset: card reset, link addr = 0x%02x (%d)\n", linkaddress, linkaddress); #endif /* tell the routing level about the (possibly changed) link address */ arc_storelladdr(ifp, linkaddress); arc_frag_init(ifp); /* POR is NMI, but we need it below: */ sc->sc_intmask = CM_RECON|CM_POR; PUTREG(CMSTAT, sc->sc_intmask); PUTREG(CMCMD, CM_CONF(CONF_LONG)); #ifdef CM_DEBUG if_printf(ifp, "reset: chip configured, status=0x%02x\n", GETREG(CMSTAT)); #endif PUTREG(CMCMD, CM_CLR(CLR_POR|CLR_RECONFIG)); #ifdef CM_DEBUG if_printf(ifp, "reset: bits cleared, status=0x%02x\n", GETREG(CMSTAT)); #endif sc->sc_reconcount_excessive = ARC_EXCESSIVE_RECONS; /* start receiver */ sc->sc_intmask |= CM_RI; sc->sc_rx_fillcount = 0; sc->sc_rx_act = 2; PUTREG(CMCMD, CM_RXBC(2)); PUTREG(CMSTAT, sc->sc_intmask); #ifdef CM_DEBUG if_printf(ifp, "reset: started receiver, status=0x%02x\n", GETREG(CMSTAT)); #endif /* and init transmitter status */ sc->sc_tx_act = 0; sc->sc_tx_fillcount = 0; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->sc_watchdog_timer, hz, cm_watchdog, sc); cm_start_locked(ifp); } /* * Take interface offline */ void cm_stop_locked(sc) struct cm_softc *sc; { /* Stop the interrupts */ PUTREG(CMSTAT, 0); /* Stop the interface */ GETREG(CMRESET); /* Stop watchdog timer */ callout_stop(&sc->sc_watchdog_timer); sc->sc_timer = 0; } void cm_start(struct ifnet *ifp) { struct cm_softc *sc = ifp->if_softc; CM_LOCK(sc); cm_start_locked(ifp); CM_UNLOCK(sc); } /* * Start output on interface. 
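* (This is the producer half of the two-buffer transmit scheme
* described near the top of the file: fill buffer tx_act ^ 1, bump
* tx_fillcount, and set IFF_DRV_OACTIVE only once both transmit
* buffers are busy.)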
Get another datagram to send * off the interface queue, and copy it to the * interface before starting the output * * Assumes that sc_mtx is held */ void cm_start_locked(ifp) struct ifnet *ifp; { struct cm_softc *sc = ifp->if_softc; struct mbuf *m, *mp; int cm_ram_ptr; int len, tlen, offset, buffer; #ifdef CMTIMINGS u_long copystart, lencopy, perbyte; #endif #if defined(CM_DEBUG) && (CM_DEBUG > 3) if_printf(ifp, "start(%p)\n", ifp); #endif if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; if (sc->sc_tx_fillcount >= 2) return; m = arc_frag_next(ifp); buffer = sc->sc_tx_act ^ 1; if (m == 0) return; #ifdef CM_DEBUG if (m->m_len < ARC_HDRLEN) m = m_pullup(m, ARC_HDRLEN);/* gcc does structure padding */ if_printf(ifp, "start: filling %d from %d to %d type %d\n", buffer, mtod(m, u_char *)[0], mtod(m, u_char *)[1], mtod(m, u_char *)[2]); #else if (m->m_len < 2) m = m_pullup(m, 2); #endif cm_ram_ptr = buffer * 512; if (m == 0) return; /* write the addresses to RAM and throw them away */ /* * Hardware does this: Yet Another Microsecond Saved. * (btw, timing code says usually 2 microseconds) * PUTMEM(cm_ram_ptr + 0, mtod(m, u_char *)[0]); */ PUTMEM(cm_ram_ptr + 1, mtod(m, u_char *)[1]); m_adj(m, 2); /* get total length left at this point */ tlen = m->m_pkthdr.len; if (tlen < ARC_MIN_FORBID_LEN) { offset = 256 - tlen; PUTMEM(cm_ram_ptr + 2, offset); } else { PUTMEM(cm_ram_ptr + 2, 0); if (tlen <= ARC_MAX_FORBID_LEN) offset = 255; /* !!! */ else { if (tlen > ARC_MAX_LEN) tlen = ARC_MAX_LEN; offset = 512 - tlen; } PUTMEM(cm_ram_ptr + 3, offset); } cm_ram_ptr += offset; /* let's loop through the mbuf chain */ for (mp = m; mp; mp = mp->m_next) { if ((len = mp->m_len)) { /* YAMS */ bus_space_write_region_1( rman_get_bustag(sc->mem_res), rman_get_bushandle(sc->mem_res), cm_ram_ptr, mtod(mp, caddr_t), len); cm_ram_ptr += len; } } sc->sc_broadcast[buffer] = (m->m_flags & M_BCAST) != 0; sc->sc_retransmits[buffer] = (m->m_flags & M_BCAST) ? 1 : 5; if (++sc->sc_tx_fillcount > 1) { /* * We are filled up to the rim. No more bufs for the moment, * please. */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; } else { #ifdef CM_DEBUG if_printf(ifp, "start: starting transmitter on buffer %d\n", buffer); #endif /* Transmitter was off, start it */ sc->sc_tx_act = buffer; /* * We still can accept another buf, so don't: * ifp->if_drv_flags |= IFF_DRV_OACTIVE; */ sc->sc_intmask |= CM_TA; PUTREG(CMCMD, CM_TX(buffer)); PUTREG(CMSTAT, sc->sc_intmask); sc->sc_timer = ARCTIMEOUT; } m_freem(m); /* * After 10 times reading the docs, I realized * that in the case the receiver NAKs the buffer request, * the hardware retries till shutdown. * This is integrated now in the code above. */ } #ifdef CMSOFTCOPY void cm_srint(void *vsc) { struct cm_softc *sc = (struct cm_softc *)vsc; CM_LOCK(sc); cm_srint_locked(vsc); CM_UNLOCK(sc); } #endif /* * Arcnet interface receiver soft interrupt: * get the stuff out of any filled buffer we find. */ void cm_srint_locked(vsc) void *vsc; { struct cm_softc *sc = (struct cm_softc *)vsc; int buffer, len, offset, type; int cm_ram_ptr; struct mbuf *m; struct arc_header *ah; struct ifnet *ifp; ifp = sc->sc_ifp; buffer = sc->sc_rx_act ^ 1; /* Allocate header mbuf */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == 0) { /* * in case something goes wrong with memory, drop the frame * to make sure the receiver can be started again; * count it as an input error (we don't have any other * detectable kind) */ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto cleanup; } m->m_pkthdr.rcvif = ifp; /* * Align so that IP packet will be longword aligned. 
Here we * assume that m_data of new packet is longword aligned. * When implementing PHDS, we might have to change it to 2, * (2*sizeof(ulong) - CM_HDRNEWLEN)), packet type dependent. */ cm_ram_ptr = buffer * 512; offset = GETMEM(cm_ram_ptr + 2); if (offset) len = 256 - offset; else { offset = GETMEM(cm_ram_ptr + 3); len = 512 - offset; } /* * first +2 bytes for align fixup below * second +2 bytes are for src/dst addresses */ if ((len + 2 + 2) > MHLEN) { /* attach an mbuf cluster */ - MCLGET(m, M_NOWAIT); - - /* Insist on getting a cluster */ - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto cleanup; } } if (m == 0) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto cleanup; } type = GETMEM(cm_ram_ptr + offset); m->m_data += 1 + arc_isphds(type); /* mbuf filled with ARCnet addresses */ m->m_pkthdr.len = m->m_len = len + 2; ah = mtod(m, struct arc_header *); ah->arc_shost = GETMEM(cm_ram_ptr + 0); ah->arc_dhost = GETMEM(cm_ram_ptr + 1); bus_space_read_region_1( rman_get_bustag(sc->mem_res), rman_get_bushandle(sc->mem_res), cm_ram_ptr + offset, mtod(m, u_char *) + 2, len); CM_UNLOCK(sc); arc_input(ifp, m); CM_LOCK(sc); m = NULL; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); cleanup: if (m != NULL) m_freem(m); /* mark buffer as invalid by source id 0 */ PUTMEM(buffer << 9, 0); if (--sc->sc_rx_fillcount == 2 - 1) { /* was off, restart it on buffer just emptied */ sc->sc_rx_act = buffer; sc->sc_intmask |= CM_RI; /* this also clears the RI flag interrupt: */ PUTREG(CMCMD, CM_RXBC(buffer)); PUTREG(CMSTAT, sc->sc_intmask); #ifdef CM_DEBUG if_printf(ifp, "srint: restarted rx on buf %d\n", buffer); #endif } } static inline void cm_tint_locked(sc, isr) struct cm_softc *sc; int isr; { struct ifnet *ifp; int buffer; #ifdef CMTIMINGS int clknow; #endif ifp = sc->sc_ifp; buffer = sc->sc_tx_act; /* * retransmit code: * Normal situtations first for fast path: * If acknowledgement received ok or broadcast, we're ok. * else if */ if (isr & CM_TMA || sc->sc_broadcast[buffer]) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); #ifdef CMRETRANSMIT else if (ifp->if_flags & IFF_LINK2 && sc->sc_timer > 0 && --sc->sc_retransmits[buffer] > 0) { /* retransmit same buffer */ PUTREG(CMCMD, CM_TX(buffer)); return; } #endif else if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* We know we can accept another buffer at this point. */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (--sc->sc_tx_fillcount > 0) { /* * start tx on other buffer. * This also clears the int flag */ buffer ^= 1; sc->sc_tx_act = buffer; /* * already given: * sc->sc_intmask |= CM_TA; * PUTREG(CMSTAT, sc->sc_intmask); */ PUTREG(CMCMD, CM_TX(buffer)); /* init watchdog timer */ sc->sc_timer = ARCTIMEOUT; #if defined(CM_DEBUG) && (CM_DEBUG > 1) if_printf(ifp, "tint: starting tx on buffer %d, status 0x%02x\n", buffer, GETREG(CMSTAT)); #endif } else { /* have to disable TX interrupt */ sc->sc_intmask &= ~CM_TA; PUTREG(CMSTAT, sc->sc_intmask); /* ... 
and watchdog timer */ sc->sc_timer = 0; #ifdef CM_DEBUG if_printf(ifp, "tint: no more buffers to send, status 0x%02x\n", GETREG(CMSTAT)); #endif } /* XXXX TODO */ #ifdef CMSOFTCOPY /* schedule soft int to fill a new buffer for us */ softintr_schedule(sc->sc_txcookie); #else /* call it directly */ cm_start_locked(ifp); #endif } /* * Our interrupt routine */ void cmintr(arg) void *arg; { struct cm_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; u_char isr, maskedisr; int buffer; u_long newsec; CM_LOCK(sc); isr = GETREG(CMSTAT); maskedisr = isr & sc->sc_intmask; if (!maskedisr || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { CM_UNLOCK(sc); return; } do { #if defined(CM_DEBUG) && (CM_DEBUG > 1) if_printf(ifp, "intr: status 0x%02x, intmask 0x%02x\n", isr, sc->sc_intmask); #endif if (maskedisr & CM_POR) { /* * XXX We should never see this. Don't bother to store * the address. * sc->sc_ifp->if_l2com->ac_anaddr = GETMEM(CMMACOFF); */ PUTREG(CMCMD, CM_CLR(CLR_POR)); log(LOG_WARNING, "%s: intr: got spurious power on reset int\n", ifp->if_xname); } if (maskedisr & CM_RECON) { /* * we dont need to: * PUTREG(CMCMD, CM_CONF(CONF_LONG)); */ PUTREG(CMCMD, CM_CLR(CLR_RECONFIG)); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); /* * If less than 2 seconds per reconfig: * If ARC_EXCESSIVE_RECONFIGS * since last burst, complain and set treshold for * warnings to ARC_EXCESSIVE_RECONS_REWARN. * * This allows for, e.g., new stations on the cable, or * cable switching as long as it is over after * (normally) 16 seconds. * * XXX TODO: check timeout bits in status word and * double time if necessary. */ callout_stop(&sc->sc_recon_ch); newsec = time_second; if ((newsec - sc->sc_recontime <= 2) && (++sc->sc_reconcount == ARC_EXCESSIVE_RECONS)) { log(LOG_WARNING, "%s: excessive token losses, " "cable problem?\n", ifp->if_xname); } sc->sc_recontime = newsec; callout_reset(&sc->sc_recon_ch, 15 * hz, cm_reconwatch_locked, (void *)sc); } if (maskedisr & CM_RI) { #if defined(CM_DEBUG) && (CM_DEBUG > 1) if_printf(ifp, "intr: hard rint, act %d\n", sc->sc_rx_act); #endif buffer = sc->sc_rx_act; /* look if buffer is marked invalid: */ if (GETMEM(buffer * 512) == 0) { /* * invalid marked buffer (or illegally * configured sender) */ log(LOG_WARNING, "%s: spurious RX interrupt or sender 0 " " (ignored)\n", ifp->if_xname); /* * restart receiver on same buffer. * XXX maybe better reset interface? */ PUTREG(CMCMD, CM_RXBC(buffer)); } else { if (++sc->sc_rx_fillcount > 1) { sc->sc_intmask &= ~CM_RI; PUTREG(CMSTAT, sc->sc_intmask); } else { buffer ^= 1; sc->sc_rx_act = buffer; /* * Start receiver on other receive * buffer. This also clears the RI * interrupt flag. 
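* While fewer than two receive buffers are full we simply flip to
* the other buffer and keep receiving; once both are full, CM_RI
* stays masked until cm_srint_locked() has drained one and
* restarted reception.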
if (maskedisr & CM_RI) { #if defined(CM_DEBUG) && (CM_DEBUG > 1) if_printf(ifp, "intr: hard rint, act %d\n", sc->sc_rx_act); #endif buffer = sc->sc_rx_act; /* look if buffer is marked invalid: */ if (GETMEM(buffer * 512) == 0) { /* * invalid marked buffer (or illegally * configured sender) */ log(LOG_WARNING, "%s: spurious RX interrupt or sender 0 " " (ignored)\n", ifp->if_xname); /* * restart receiver on same buffer. * XXX maybe better reset interface? */ PUTREG(CMCMD, CM_RXBC(buffer)); } else { if (++sc->sc_rx_fillcount > 1) { sc->sc_intmask &= ~CM_RI; PUTREG(CMSTAT, sc->sc_intmask); } else { buffer ^= 1; sc->sc_rx_act = buffer; /* * Start receiver on other receive * buffer. This also clears the RI * interrupt flag. */ PUTREG(CMCMD, CM_RXBC(buffer)); /* in RX intr, so mask is ok for RX */ #ifdef CM_DEBUG if_printf(ifp, "strt rx for buf %d, " "stat 0x%02x\n", sc->sc_rx_act, GETREG(CMSTAT)); #endif } #ifdef CMSOFTCOPY /* * this one starts a soft int to copy out * of the hw */ softintr_schedule(sc->sc_rxcookie); #else /* this one does the copy here */ cm_srint_locked(sc); #endif } } if (maskedisr & CM_TA) { cm_tint_locked(sc, isr); } isr = GETREG(CMSTAT); maskedisr = isr & sc->sc_intmask; } while (maskedisr); #if defined(CM_DEBUG) && (CM_DEBUG > 1) if_printf(ifp, "intr (exit): status 0x%02x, intmask 0x%02x\n", isr, sc->sc_intmask); #endif CM_UNLOCK(sc); } void cm_reconwatch_locked(arg) void *arg; { struct cm_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if (sc->sc_reconcount >= ARC_EXCESSIVE_RECONS) { sc->sc_reconcount = 0; log(LOG_WARNING, "%s: token valid again.\n", ifp->if_xname); } sc->sc_reconcount = 0; } /* * Process an ioctl request. * This code needs some work - it looks pretty ugly. */ int cm_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct cm_softc *sc; int error; error = 0; sc = ifp->if_softc; #if defined(CM_DEBUG) && (CM_DEBUG > 2) if_printf(ifp, "ioctl() called, cmd = 0x%lx\n", command); #endif switch (command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCSIFMTU: error = arc_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: CM_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { /* * If interface is marked down and it is running, * then stop it. */ cm_stop_locked(sc); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } else if ((ifp->if_flags & IFF_UP) != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { /* * If interface is marked up and it is stopped, then * start it. */ cm_init_locked(sc); } CM_UNLOCK(sc); break; default: error = EINVAL; break; } return (error); } /* * watchdog routine for transmitter. * * We need this because a receiver whose hardware is alive, but whose * software has not enabled the Receiver, would make our hardware wait forever. * Discovered this after reading the docs 20 times. * * The only thing we do is disable the transmitter. We'll get a transmit * timeout, and the int handler will have to decide not to retransmit (in case * retransmission is implemented). */ void cm_watchdog(void *arg) { struct cm_softc *sc; sc = arg; callout_reset(&sc->sc_watchdog_timer, hz, cm_watchdog, sc); if (sc->sc_timer == 0 || --sc->sc_timer > 0) return; PUTREG(CMCMD, CM_TXDIS); } Index: head/sys/dev/cp/if_cp.c =================================================================== --- head/sys/dev/cp/if_cp.c (revision 276749) +++ head/sys/dev/cp/if_cp.c (revision 276750) @@ -1,2271 +1,2270 @@ /*- * Cronyx-Tau-PCI adapter driver for FreeBSD. * Supports PPP/HDLC, Cisco/HDLC and FrameRelay protocol in synchronous mode, * and asynchronous channels with full modem control. * Keepalive protocol implemented in both Cisco and PPP modes. * * Copyright (C) 1999-2004 Cronyx Engineering. * Author: Kurakin Roman, * * Copyright (C) 1999-2002 Cronyx Engineering. * Author: Serge Vakulenko, * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Authors grant any other persons or organisations a permission to use, * modify and redistribute this software in source and binary forms, * as long as this message is kept with the software, all derivative * works or modified versions.
* * Cronyx Id: if_cp.c,v 1.1.2.41 2004/06/23 17:09:13 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_ng_cronyx.h" #ifdef NETGRAPH_CRONYX # include "opt_netgraph.h" # ifndef NETGRAPH # error #option NETGRAPH missed from configuration # endif # include # include # include #else # include # include #include # define PP_CISCO IFF_LINK2 # include #endif #include #include #include #include #include /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR #define PP_FR 0 #endif #define CP_DEBUG(d,s) ({if (d->chan->debug) {\ printf ("%s: ", d->name); printf s;}}) #define CP_DEBUG2(d,s) ({if (d->chan->debug>1) {\ printf ("%s: ", d->name); printf s;}}) #define CP_LOCK_NAME "cpX" #define CP_LOCK(_bd) mtx_lock (&(_bd)->cp_mtx) #define CP_UNLOCK(_bd) mtx_unlock (&(_bd)->cp_mtx) #define CP_LOCK_ASSERT(_bd) mtx_assert (&(_bd)->cp_mtx, MA_OWNED) static int cp_probe __P((device_t)); static int cp_attach __P((device_t)); static int cp_detach __P((device_t)); static device_method_t cp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cp_probe), DEVMETHOD(device_attach, cp_attach), DEVMETHOD(device_detach, cp_detach), DEVMETHOD_END }; typedef struct _cp_dma_mem_t { unsigned long phys; void *virt; size_t size; bus_dma_tag_t dmat; bus_dmamap_t mapp; } cp_dma_mem_t; typedef struct _drv_t { char name [8]; int running; cp_chan_t *chan; cp_board_t *board; cp_dma_mem_t dmamem; #ifdef NETGRAPH char nodename [NG_NODESIZE]; hook_p hook; hook_p debug_hook; node_p node; struct ifqueue queue; struct ifqueue hi_queue; #else struct ifqueue queue; struct ifnet *ifp; #endif short timeout; struct callout timeout_handle; struct cdev *devt; } drv_t; typedef struct _bdrv_t { cp_board_t *board; struct resource *cp_res; struct resource *cp_irq; void *cp_intrhand; cp_dma_mem_t dmamem; drv_t channel [NCHAN]; struct mtx cp_mtx; } bdrv_t; static driver_t cp_driver = { "cp", cp_methods, sizeof(bdrv_t), }; static devclass_t cp_devclass; static void cp_receive (cp_chan_t *c, unsigned char *data, int len); static void cp_transmit (cp_chan_t *c, void *attachment, int len); static void cp_error (cp_chan_t *c, int data); static void cp_up (drv_t *d); static void cp_start (drv_t *d); static void cp_down (drv_t *d); static void cp_watchdog (drv_t *d); static void cp_watchdog_timer (void *arg); #ifdef NETGRAPH extern struct ng_type typestruct; #else static void cp_ifstart (struct ifnet *ifp); static void cp_tlf (struct sppp *sp); static void cp_tls (struct sppp *sp); static int cp_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data); static void cp_initialize (void *softc); #endif static cp_board_t *adapter [NBRD]; static drv_t *channel [NBRD*NCHAN]; static struct callout led_timo [NBRD]; static struct callout timeout_handle; static int cp_destroy = 0; static int cp_open (struct cdev *dev, int oflags, int devtype, struct thread *td); static int cp_close (struct cdev *dev, int fflag, int devtype, struct thread *td); static int cp_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td); static struct cdevsw cp_cdevsw = { .d_version = D_VERSION, .d_open = cp_open, .d_close = cp_close, .d_ioctl = cp_ioctl, .d_name = "cp", }; /* * Make an mbuf from data. 
*/ static struct mbuf *makembuf (void *buf, unsigned len) { struct mbuf *m; MGETHDR (m, M_NOWAIT, MT_DATA); if (! m) return 0; - MCLGET (m, M_NOWAIT); - if (! (m->m_flags & M_EXT)) { + if (!(MCLGET (m, M_NOWAIT))) { m_freem (m); return 0; } m->m_pkthdr.len = m->m_len = len; bcopy (buf, mtod (m, caddr_t), len); return m; } static int cp_probe (device_t dev) { if ((pci_get_vendor (dev) == cp_vendor_id) && (pci_get_device (dev) == cp_device_id)) { device_set_desc (dev, "Cronyx-Tau-PCI serial adapter"); return BUS_PROBE_DEFAULT; } return ENXIO; } static void cp_timeout (void *arg) { drv_t *d; int s, i, k; for (i = 0; i < NBRD; ++i) { if (adapter[i] == NULL) continue; for (k = 0; k < NCHAN; ++k) { s = splimp (); if (cp_destroy) { splx (s); return; } d = channel[i * NCHAN + k]; if (!d) { splx (s); continue; } CP_LOCK ((bdrv_t *)d->board->sys); switch (d->chan->type) { case T_G703: cp_g703_timer (d->chan); break; case T_E1: cp_e1_timer (d->chan); break; case T_E3: case T_T3: case T_STS1: cp_e3_timer (d->chan); break; default: break; } CP_UNLOCK ((bdrv_t *)d->board->sys); splx (s); } } s = splimp (); if (!cp_destroy) callout_reset (&timeout_handle, hz, cp_timeout, 0); splx (s); } static void cp_led_off (void *arg) { cp_board_t *b = arg; bdrv_t *bd = (bdrv_t *) b->sys; int s; s = splimp (); if (cp_destroy) { splx (s); return; } CP_LOCK (bd); cp_led (b, 0); CP_UNLOCK (bd); splx (s); } static void cp_intr (void *arg) { bdrv_t *bd = arg; cp_board_t *b = bd->board; #ifndef NETGRAPH int i; #endif int s = splimp (); if (cp_destroy) { splx (s); return; } CP_LOCK (bd); /* Check if we are ready */ if (b->sys == NULL) { /* No we are not, just clean up. */ cp_interrupt_poll (b, 1); CP_UNLOCK (bd); return; } /* Turn LED on. */ cp_led (b, 1); cp_interrupt (b); /* Turn LED off 50 msec later.
*/ callout_reset (&led_timo[b->num], hz/20, cp_led_off, b); CP_UNLOCK (bd); splx (s); #ifndef NETGRAPH /* Pass packets in a lock-free state */ for (i = 0; i < NCHAN && b->chan[i].type; i++) { drv_t *d = b->chan[i].sys; struct mbuf *m; if (!d || !d->running) continue; while (_IF_QLEN(&d->queue)) { IF_DEQUEUE (&d->queue,m); if (!m) continue; sppp_input (d->ifp, m); } } #endif } static void cp_bus_dmamap_addr (void *arg, bus_dma_segment_t *segs, int nseg, int error) { unsigned long *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int cp_bus_dma_mem_alloc (int bnum, int cnum, cp_dma_mem_t *dmem) { int error; error = bus_dma_tag_create (NULL, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, dmem->size, 1, dmem->size, 0, NULL, NULL, &dmem->dmat); if (error) { if (cnum >= 0) printf ("cp%d-%d: ", bnum, cnum); else printf ("cp%d: ", bnum); printf ("couldn't allocate tag for dma memory\n"); return 0; } error = bus_dmamem_alloc (dmem->dmat, (void **)&dmem->virt, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dmem->mapp); if (error) { if (cnum >= 0) printf ("cp%d-%d: ", bnum, cnum); else printf ("cp%d: ", bnum); printf ("couldn't allocate mem for dma memory\n"); bus_dma_tag_destroy (dmem->dmat); return 0; } error = bus_dmamap_load (dmem->dmat, dmem->mapp, dmem->virt, dmem->size, cp_bus_dmamap_addr, &dmem->phys, 0); if (error) { if (cnum >= 0) printf ("cp%d-%d: ", bnum, cnum); else printf ("cp%d: ", bnum); printf ("couldn't load mem map for dma memory\n"); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); return 0; } return 1; } static void cp_bus_dma_mem_free (cp_dma_mem_t *dmem) { bus_dmamap_unload (dmem->dmat, dmem->mapp); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); } /* * Called if the probe succeeded. */ static int cp_attach (device_t dev) { bdrv_t *bd = device_get_softc (dev); int unit = device_get_unit (dev); char *cp_ln = CP_LOCK_NAME; unsigned short res; vm_offset_t vbase; int rid, error; cp_board_t *b; cp_chan_t *c; drv_t *d; int s = splimp (); b = malloc (sizeof(cp_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("cp%d: couldn't allocate memory\n", unit); splx (s); return (ENXIO); } bzero (b, sizeof(cp_board_t)); bd->board = b; rid = PCIR_BAR(0); bd->cp_res = bus_alloc_resource (dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (! bd->cp_res) { printf ("cp%d: cannot map memory\n", unit); free (b, M_DEVBUF); splx (s); return (ENXIO); } vbase = (vm_offset_t) rman_get_virtual (bd->cp_res); cp_ln[2] = '0' + unit; mtx_init (&bd->cp_mtx, cp_ln, MTX_NETWORK_LOCK, MTX_DEF|MTX_RECURSE); res = cp_init (b, unit, (u_char*) vbase); if (res) { printf ("cp%d: can't init, error code:%x\n", unit, res); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); free (b, M_DEVBUF); splx (s); return (ENXIO); } bd->dmamem.size = sizeof(cp_qbuf_t); if (! cp_bus_dma_mem_alloc (unit, -1, &bd->dmamem)) { free (b, M_DEVBUF); splx (s); return (ENXIO); } CP_LOCK (bd); cp_reset (b, bd->dmamem.virt, bd->dmamem.phys); CP_UNLOCK (bd); rid = 0; bd->cp_irq = bus_alloc_resource (dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (! 
bd->cp_irq) { cp_destroy = 1; printf ("cp%d: cannot map interrupt\n", unit); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); mtx_destroy (&bd->cp_mtx); free (b, M_DEVBUF); splx (s); return (ENXIO); } callout_init (&led_timo[unit], CALLOUT_MPSAFE); error = bus_setup_intr (dev, bd->cp_irq, INTR_TYPE_NET|INTR_MPSAFE, NULL, cp_intr, bd, &bd->cp_intrhand); if (error) { cp_destroy = 1; printf ("cp%d: cannot set up irq\n", unit); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->cp_irq); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); mtx_destroy (&bd->cp_mtx); free (b, M_DEVBUF); splx (s); return (ENXIO); } printf ("cp%d: %s, clock %ld MHz\n", unit, b->name, b->osc / 1000000); for (c = b->chan; c < b->chan + NCHAN; ++c) { if (! c->type) continue; d = &bd->channel[c->num]; d->dmamem.size = sizeof(cp_buf_t); if (! cp_bus_dma_mem_alloc (unit, c->num, &d->dmamem)) continue; channel [b->num*NCHAN + c->num] = d; sprintf (d->name, "cp%d.%d", b->num, c->num); d->board = b; d->chan = c; c->sys = d; callout_init (&d->timeout_handle, CALLOUT_MPSAFE); #ifdef NETGRAPH if (ng_make_node_common (&typestruct, &d->node) != 0) { printf ("%s: cannot make common node\n", d->name); d->node = NULL; continue; } NG_NODE_SET_PRIVATE (d->node, d); sprintf (d->nodename, "%s%d", NG_CP_NODE_TYPE, c->board->num*NCHAN + c->num); if (ng_name_node (d->node, d->nodename)) { printf ("%s: cannot name node\n", d->nodename); NG_NODE_UNREF (d->node); continue; } d->queue.ifq_maxlen = ifqmaxlen; d->hi_queue.ifq_maxlen = ifqmaxlen; mtx_init (&d->queue.ifq_mtx, "cp_queue", NULL, MTX_DEF); mtx_init (&d->hi_queue.ifq_mtx, "cp_queue_hi", NULL, MTX_DEF); #else /*NETGRAPH*/ d->ifp = if_alloc(IFT_PPP); if (d->ifp == NULL) { printf ("%s: cannot if_alloc() interface\n", d->name); continue; } d->ifp->if_softc = d; if_initname (d->ifp, "cp", b->num * NCHAN + c->num); d->ifp->if_mtu = PP_MTU; d->ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; d->ifp->if_ioctl = cp_sioctl; d->ifp->if_start = cp_ifstart; d->ifp->if_init = cp_initialize; d->queue.ifq_maxlen = NRBUF; mtx_init (&d->queue.ifq_mtx, "cp_queue", NULL, MTX_DEF); sppp_attach (d->ifp); if_attach (d->ifp); IFP2SP(d->ifp)->pp_tlf = cp_tlf; IFP2SP(d->ifp)->pp_tls = cp_tls; /* If BPF is in the kernel, call the attach for it. * The header size of PPP or Cisco/HDLC is 4 bytes. */ bpfattach (d->ifp, DLT_PPP, 4); #endif /*NETGRAPH*/ cp_start_e1 (c); cp_start_chan (c, 1, 1, d->dmamem.virt, d->dmamem.phys); /* Register callback functions. */ cp_register_transmit (c, &cp_transmit); cp_register_receive (c, &cp_receive); cp_register_error (c, &cp_error); d->devt = make_dev (&cp_cdevsw, b->num*NCHAN+c->num, UID_ROOT, GID_WHEEL, 0600, "cp%d", b->num*NCHAN+c->num); } CP_LOCK (bd); b->sys = bd; adapter[unit] = b; CP_UNLOCK (bd); splx (s); return 0; } static int cp_detach (device_t dev) { bdrv_t *bd = device_get_softc (dev); cp_board_t *b = bd->board; cp_chan_t *c; int s; KASSERT (mtx_initialized (&bd->cp_mtx), ("cp mutex not initialized")); s = splimp (); CP_LOCK (bd); /* Check if the device is busy (open). */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; if (d->running) { CP_UNLOCK (bd); splx (s); return EBUSY; } } /* Ok, we can unload driver */ /* At first we should stop all channels */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; cp_stop_chan (c); cp_stop_e1 (c); cp_set_dtr (d->chan, 0); cp_set_rts (d->chan, 0); } /* Reset the adapter. 
*/ cp_destroy = 1; cp_interrupt_poll (b, 1); cp_led_off (b); cp_reset (b, 0, 0); callout_stop (&led_timo[b->num]); /* Disable the interrupt request. */ bus_teardown_intr (dev, bd->cp_irq, bd->cp_intrhand); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; callout_stop (&d->timeout_handle); #ifndef NETGRAPH /* Detach from the packet filter list of interfaces. */ bpfdetach (d->ifp); /* Detach from the sync PPP list. */ sppp_detach (d->ifp); /* Detach from the system list of interfaces. */ if_detach (d->ifp); if_free (d->ifp); IF_DRAIN (&d->queue); mtx_destroy (&d->queue.ifq_mtx); #else if (d->node) { ng_rmnode_self (d->node); NG_NODE_UNREF (d->node); d->node = NULL; } mtx_destroy (&d->queue.ifq_mtx); mtx_destroy (&d->hi_queue.ifq_mtx); #endif destroy_dev (d->devt); } b->sys = NULL; CP_UNLOCK (bd); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->cp_irq); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); CP_LOCK (bd); cp_led_off (b); CP_UNLOCK (bd); callout_drain (&led_timo[b->num]); splx (s); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; callout_drain (&d->timeout_handle); channel [b->num*NCHAN + c->num] = 0; /* Deallocate buffers. */ cp_bus_dma_mem_free (&d->dmamem); } adapter [b->num] = 0; cp_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); mtx_destroy (&bd->cp_mtx); return 0; } #ifndef NETGRAPH static void cp_ifstart (struct ifnet *ifp) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; CP_LOCK (bd); cp_start (d); CP_UNLOCK (bd); } static void cp_tlf (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CP_DEBUG2 (d, ("cp_tlf\n")); /* XXXRIK: Don't forget to protect them by LOCK, or kill them. */ /* cp_set_dtr (d->chan, 0);*/ /* cp_set_rts (d->chan, 0);*/ if (!(sp->pp_flags & PP_FR) && !(d->ifp->if_flags & PP_CISCO)) sp->pp_down (sp); } static void cp_tls (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CP_DEBUG2 (d, ("cp_tls\n")); if (!(sp->pp_flags & PP_FR) && !(d->ifp->if_flags & PP_CISCO)) sp->pp_up (sp); } /* * Process an ioctl request. */ static int cp_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; int error, s, was_up, should_be_up; was_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; error = sppp_ioctl (ifp, cmd, data); if (error) return error; if (! (ifp->if_flags & IFF_DEBUG)) d->chan->debug = 0; else d->chan->debug = d->chan->debug_shadow; switch (cmd) { default: CP_DEBUG2 (d, ("ioctl 0x%lx\n", cmd)); return 0; case SIOCADDMULTI: CP_DEBUG2 (d, ("ioctl SIOCADDMULTI\n")); return 0; case SIOCDELMULTI: CP_DEBUG2 (d, ("ioctl SIOCDELMULTI\n")); return 0; case SIOCSIFFLAGS: CP_DEBUG2 (d, ("ioctl SIOCSIFFLAGS\n")); break; case SIOCSIFADDR: CP_DEBUG2 (d, ("ioctl SIOCSIFADDR\n")); break; } /* We get here only in case of SIFFLAGS or SIFADDR. */ s = splimp (); CP_LOCK (bd); should_be_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; if (! was_up && should_be_up) { /* Interface goes up -- start it. */ cp_up (d); cp_start (d); } else if (was_up && ! should_be_up) { /* Interface is going down -- stop it. */ /* if ((IFP2SP(ifp)->pp_flags & PP_FR) || (ifp->if_flags & PP_CISCO))*/ cp_down (d); } CP_DEBUG (d, ("ioctl 0x%lx p4\n", cmd)); CP_UNLOCK (bd); splx (s); return 0; } /* * Initialization of interface. * It seems to never be called by the upper level?
*/ static void cp_initialize (void *softc) { drv_t *d = softc; CP_DEBUG (d, ("cp_initialize\n")); } #endif /*NETGRAPH*/ /* * Stop the interface. Called on splimp(). */ static void cp_down (drv_t *d) { CP_DEBUG (d, ("cp_down\n")); /* Interface is going down -- stop it. */ cp_set_dtr (d->chan, 0); cp_set_rts (d->chan, 0); d->running = 0; callout_stop (&d->timeout_handle); } /* * Start the interface. Called on splimp(). */ static void cp_up (drv_t *d) { CP_DEBUG (d, ("cp_up\n")); cp_set_dtr (d->chan, 1); cp_set_rts (d->chan, 1); d->running = 1; } /* * Start output on the interface. Get another datagram to send * off of the interface queue, and copy it to the interface * before starting the output. */ static void cp_send (drv_t *d) { struct mbuf *m; u_short len; CP_DEBUG2 (d, ("cp_send, tn=%d te=%d\n", d->chan->tn, d->chan->te)); /* No output if the interface is down. */ if (! d->running) return; /* No output if the modem is off. */ if (! (d->chan->lloop || d->chan->type != T_SERIAL || cp_get_dsr (d->chan))) return; while (cp_transmit_space (d->chan)) { /* Get the packet to send. */ #ifdef NETGRAPH IF_DEQUEUE (&d->hi_queue, m); if (! m) IF_DEQUEUE (&d->queue, m); #else m = sppp_dequeue (d->ifp); #endif if (! m) return; #ifndef NETGRAPH BPF_MTAP (d->ifp, m); #endif len = m_length (m, NULL); if (len >= BUFSZ) printf ("%s: too long packet: %d bytes: ", d->name, len); else if (! m->m_next) cp_send_packet (d->chan, (u_char*) mtod (m, caddr_t), len, 0); else { u_char *buf = d->chan->tbuf[d->chan->te]; m_copydata (m, 0, len, buf); cp_send_packet (d->chan, buf, len, 0); } m_freem (m); /* Set up transmit timeout, if the transmit ring is not empty.*/ d->timeout = 10; } #ifndef NETGRAPH d->ifp->if_drv_flags |= IFF_DRV_OACTIVE; #endif } /* * Start output on the interface. * Always called on splimp(). */ static void cp_start (drv_t *d) { if (d->running) { if (! d->chan->dtr) cp_set_dtr (d->chan, 1); if (! d->chan->rts) cp_set_rts (d->chan, 1); cp_send (d); callout_reset (&d->timeout_handle, hz, cp_watchdog_timer, d); } } /* * Handle transmit timeouts. * Recover after lost transmit interrupts. * Always called on splimp(). */ static void cp_watchdog (drv_t *d) { CP_DEBUG (d, ("device timeout\n")); if (d->running) { cp_stop_chan (d->chan); cp_stop_e1 (d->chan); cp_start_e1 (d->chan); cp_start_chan (d->chan, 1, 1, 0, 0); cp_set_dtr (d->chan, 1); cp_set_rts (d->chan, 1); cp_start (d); } } static void cp_watchdog_timer (void *arg) { drv_t *d = arg; bdrv_t *bd = d->board->sys; CP_LOCK (bd); if (d->timeout == 1) cp_watchdog (d); if (d->timeout) d->timeout--; callout_reset (&d->timeout_handle, hz, cp_watchdog_timer, d); CP_UNLOCK (bd); } static void cp_transmit (cp_chan_t *c, void *attachment, int len) { drv_t *d = c->sys; d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OPACKETS, 1); d->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #endif cp_start (d); } static void cp_receive (cp_chan_t *c, unsigned char *data, int len) { drv_t *d = c->sys; struct mbuf *m; #ifdef NETGRAPH int error; #endif if (! d->running) return; m = makembuf (data, len); if (! m) { CP_DEBUG (d, ("no memory for packet\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IQDROPS, 1); #endif return; } if (c->debug > 1) m_print (m, 0); #ifdef NETGRAPH m->m_pkthdr.rcvif = 0; NG_SEND_DATA_ONLY (error, d->hook, m); #else if_inc_counter(d->ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = d->ifp; /* Check if there's a BPF listener on this interface. * If so, hand off the raw packet to bpf. 
*/ BPF_MTAP(d->ifp, m); IF_ENQUEUE (&d->queue, m); #endif } static void cp_error (cp_chan_t *c, int data) { drv_t *d = c->sys; switch (data) { case CP_FRAME: CP_DEBUG (d, ("frame error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CP_CRC: CP_DEBUG (d, ("crc error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CP_OVERRUN: CP_DEBUG (d, ("overrun error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_COLLISIONS, 1); if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CP_OVERFLOW: CP_DEBUG (d, ("overflow error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CP_UNDERRUN: CP_DEBUG (d, ("underrun error\n")); d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OERRORS, 1); d->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #endif cp_start (d); break; default: CP_DEBUG (d, ("error #%d\n", data)); break; } } /* * You also need read, write, open, close routines. * This should get you started */ static int cp_open (struct cdev *dev, int oflags, int devtype, struct thread *td) { int unit = dev2unit (dev); drv_t *d; if (unit >= NBRD*NCHAN || ! (d = channel[unit])) return ENXIO; CP_DEBUG2 (d, ("cp_open\n")); return 0; } /* * Only called on the LAST close. */ static int cp_close (struct cdev *dev, int fflag, int devtype, struct thread *td) { drv_t *d = channel [dev2unit (dev)]; CP_DEBUG2 (d, ("cp_close\n")); return 0; } static int cp_modem_status (cp_chan_t *c) { drv_t *d = c->sys; bdrv_t *bd = d->board->sys; int status, s; status = d->running ? TIOCM_LE : 0; s = splimp (); CP_LOCK (bd); if (cp_get_cd (c)) status |= TIOCM_CD; if (cp_get_cts (c)) status |= TIOCM_CTS; if (cp_get_dsr (c)) status |= TIOCM_DSR; if (c->dtr) status |= TIOCM_DTR; if (c->rts) status |= TIOCM_RTS; CP_UNLOCK (bd); splx (s); return status; } static int cp_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { drv_t *d = channel [dev2unit (dev)]; bdrv_t *bd = d->board->sys; cp_chan_t *c = d->chan; struct serial_statistics *st; struct e1_statistics *opte1; struct e3_statistics *opte3; int error, s; char mask[16]; switch (cmd) { case SERIAL_GETREGISTERED: CP_DEBUG2 (d, ("ioctl: getregistered\n")); bzero (mask, sizeof(mask)); for (s=0; s<NBRD*NCHAN; ++s) if (channel [s]) mask [s/8] |= 1 << (s & 7); bcopy (mask, data, sizeof (mask)); return 0; #ifndef NETGRAPH case SERIAL_GETPROTO: CP_DEBUG2 (d, ("ioctl: getproto\n")); strcpy ((char*)data, (IFP2SP(d->ifp)->pp_flags & PP_FR) ? "fr" : (d->ifp->if_flags & PP_CISCO) ? "cisco" : "ppp"); return 0; case SERIAL_SETPROTO: CP_DEBUG2 (d, ("ioctl: setproto\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (d->ifp->if_drv_flags & IFF_DRV_RUNNING) return EBUSY; if (! strcmp ("cisco", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~(PP_FR); IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; d->ifp->if_flags |= PP_CISCO; #if PP_FR != 0 } else if (! strcmp ("fr", (char*)data)) { d->ifp->if_flags &= ~(PP_CISCO); IFP2SP(d->ifp)->pp_flags |= PP_FR | PP_KEEPALIVE; #endif } else if (! strcmp ("ppp", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~PP_FR; IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; d->ifp->if_flags &= ~(PP_CISCO); } else return EINVAL; return 0; case SERIAL_GETKEEPALIVE: CP_DEBUG2 (d, ("ioctl: getkeepalive\n")); if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; *(int*)data = (IFP2SP(d->ifp)->pp_flags & PP_KEEPALIVE) ? 1 : 0; return 0; case SERIAL_SETKEEPALIVE: CP_DEBUG2 (d, ("ioctl: setkeepalive\n")); /* Only for superuser!
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; s = splimp (); CP_LOCK (bd); if (*(int*)data) IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; else IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; CP_UNLOCK (bd); splx (s); return 0; #endif /*NETGRAPH*/ case SERIAL_GETMODE: CP_DEBUG2 (d, ("ioctl: getmode\n")); *(int*)data = SERIAL_HDLC; return 0; case SERIAL_SETMODE: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (*(int*)data != SERIAL_HDLC) return EINVAL; return 0; case SERIAL_GETCFG: CP_DEBUG2 (d, ("ioctl: getcfg\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(char*)data = c->board->mux ? 'c' : 'a'; return 0; case SERIAL_SETCFG: CP_DEBUG2 (d, ("ioctl: setcfg\n")); error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_mux (c->board, *((char*)data) == 'c'); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSTAT: CP_DEBUG2 (d, ("ioctl: getstat\n")); st = (struct serial_statistics*) data; st->rintr = c->rintr; st->tintr = c->tintr; st->mintr = 0; st->ibytes = c->ibytes; st->ipkts = c->ipkts; st->obytes = c->obytes; st->opkts = c->opkts; st->ierrs = c->overrun + c->frame + c->crc; st->oerrs = c->underrun; return 0; case SERIAL_GETESTAT: CP_DEBUG2 (d, ("ioctl: getestat\n")); if (c->type != T_E1 && c->type != T_G703) return EINVAL; opte1 = (struct e1_statistics*) data; opte1->status = c->status; opte1->cursec = c->cursec; opte1->totsec = c->totsec + c->cursec; opte1->currnt.bpv = c->currnt.bpv; opte1->currnt.fse = c->currnt.fse; opte1->currnt.crce = c->currnt.crce; opte1->currnt.rcrce = c->currnt.rcrce; opte1->currnt.uas = c->currnt.uas; opte1->currnt.les = c->currnt.les; opte1->currnt.es = c->currnt.es; opte1->currnt.bes = c->currnt.bes; opte1->currnt.ses = c->currnt.ses; opte1->currnt.oofs = c->currnt.oofs; opte1->currnt.css = c->currnt.css; opte1->currnt.dm = c->currnt.dm; opte1->total.bpv = c->total.bpv + c->currnt.bpv; opte1->total.fse = c->total.fse + c->currnt.fse; opte1->total.crce = c->total.crce + c->currnt.crce; opte1->total.rcrce = c->total.rcrce + c->currnt.rcrce; opte1->total.uas = c->total.uas + c->currnt.uas; opte1->total.les = c->total.les + c->currnt.les; opte1->total.es = c->total.es + c->currnt.es; opte1->total.bes = c->total.bes + c->currnt.bes; opte1->total.ses = c->total.ses + c->currnt.ses; opte1->total.oofs = c->total.oofs + c->currnt.oofs; opte1->total.css = c->total.css + c->currnt.css; opte1->total.dm = c->total.dm + c->currnt.dm; for (s=0; s<48; ++s) { opte1->interval[s].bpv = c->interval[s].bpv; opte1->interval[s].fse = c->interval[s].fse; opte1->interval[s].crce = c->interval[s].crce; opte1->interval[s].rcrce = c->interval[s].rcrce; opte1->interval[s].uas = c->interval[s].uas; opte1->interval[s].les = c->interval[s].les; opte1->interval[s].es = c->interval[s].es; opte1->interval[s].bes = c->interval[s].bes; opte1->interval[s].ses = c->interval[s].ses; opte1->interval[s].oofs = c->interval[s].oofs; opte1->interval[s].css = c->interval[s].css; opte1->interval[s].dm = c->interval[s].dm; } return 0; case SERIAL_GETE3STAT: CP_DEBUG2 (d, ("ioctl: gete3stat\n")); if (c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; opte3 = (struct e3_statistics*) data; opte3->status = c->e3status; opte3->cursec = (c->e3csec_5 * 2 + 1) / 10; opte3->totsec = c->e3tsec + opte3->cursec; opte3->ccv = c->e3ccv; opte3->tcv = c->e3tcv + opte3->ccv; 
for (s = 0; s < 48; ++s) { opte3->icv[s] = c->e3icv[s]; } return 0; case SERIAL_CLRSTAT: CP_DEBUG2 (d, ("ioctl: clrstat\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; c->rintr = 0; c->tintr = 0; c->ibytes = 0; c->obytes = 0; c->ipkts = 0; c->opkts = 0; c->overrun = 0; c->frame = 0; c->crc = 0; c->underrun = 0; bzero (&c->currnt, sizeof (c->currnt)); bzero (&c->total, sizeof (c->total)); bzero (c->interval, sizeof (c->interval)); c->e3ccv = 0; c->e3tcv = 0; bzero (c->e3icv, sizeof (c->e3icv)); return 0; case SERIAL_GETBAUD: CP_DEBUG2 (d, ("ioctl: getbaud\n")); *(long*)data = c->baud; return 0; case SERIAL_SETBAUD: CP_DEBUG2 (d, ("ioctl: setbaud\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_baud (c, *(long*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETLOOP: CP_DEBUG2 (d, ("ioctl: getloop\n")); *(int*)data = c->lloop; return 0; case SERIAL_SETLOOP: CP_DEBUG2 (d, ("ioctl: setloop\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_lloop (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDPLL: CP_DEBUG2 (d, ("ioctl: getdpll\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->dpll; return 0; case SERIAL_SETDPLL: CP_DEBUG2 (d, ("ioctl: setdpll\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_dpll (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETNRZI: CP_DEBUG2 (d, ("ioctl: getnrzi\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->nrzi; return 0; case SERIAL_SETNRZI: CP_DEBUG2 (d, ("ioctl: setnrzi\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_nrzi (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDEBUG: CP_DEBUG2 (d, ("ioctl: getdebug\n")); *(int*)data = d->chan->debug; return 0; case SERIAL_SETDEBUG: CP_DEBUG2 (d, ("ioctl: setdebug\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; #ifndef NETGRAPH /* * The debug_shadow is always greater than zero for logic * simplicity. For switching debug off the IFF_DEBUG is * responsible. */ d->chan->debug_shadow = (*(int*)data) ? (*(int*)data) : 1; if (d->ifp->if_flags & IFF_DEBUG) d->chan->debug = d->chan->debug_shadow; #else d->chan->debug = *(int*)data; #endif return 0; case SERIAL_GETHIGAIN: CP_DEBUG2 (d, ("ioctl: gethigain\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->higain; return 0; case SERIAL_SETHIGAIN: CP_DEBUG2 (d, ("ioctl: sethigain\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_higain (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETPHONY: CP_DEBUG2 (d, ("ioctl: getphony\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->phony; return 0; case SERIAL_SETPHONY: CP_DEBUG2 (d, ("ioctl: setphony\n")); /* Only for superuser! 
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_phony (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETUNFRAM: CP_DEBUG2 (d, ("ioctl: getunfram\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->unfram; return 0; case SERIAL_SETUNFRAM: CP_DEBUG2 (d, ("ioctl: setunfram\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_unfram (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSCRAMBLER: CP_DEBUG2 (d, ("ioctl: getscrambler\n")); if (c->type != T_G703 && !c->unfram) return EINVAL; *(int*)data = c->scrambler; return 0; case SERIAL_SETSCRAMBLER: CP_DEBUG2 (d, ("ioctl: setscrambler\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_G703 && !c->unfram) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_scrambler (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETMONITOR: CP_DEBUG2 (d, ("ioctl: getmonitor\n")); if (c->type != T_E1 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; *(int*)data = c->monitor; return 0; case SERIAL_SETMONITOR: CP_DEBUG2 (d, ("ioctl: setmonitor\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_monitor (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETUSE16: CP_DEBUG2 (d, ("ioctl: getuse16\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->use16; return 0; case SERIAL_SETUSE16: CP_DEBUG2 (d, ("ioctl: setuse16\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_use16 (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCRC4: CP_DEBUG2 (d, ("ioctl: getcrc4\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->crc4; return 0; case SERIAL_SETCRC4: CP_DEBUG2 (d, ("ioctl: setcrc4\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_crc4 (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCLK: CP_DEBUG2 (d, ("ioctl: getclk\n")); if (c->type != T_E1 && c->type != T_G703 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; switch (c->gsyn) { default: *(int*)data = E1CLK_INTERNAL; break; case GSYN_RCV: *(int*)data = E1CLK_RECEIVE; break; case GSYN_RCV0: *(int*)data = E1CLK_RECEIVE_CHAN0; break; case GSYN_RCV1: *(int*)data = E1CLK_RECEIVE_CHAN1; break; case GSYN_RCV2: *(int*)data = E1CLK_RECEIVE_CHAN2; break; case GSYN_RCV3: *(int*)data = E1CLK_RECEIVE_CHAN3; break; } return 0; case SERIAL_SETCLK: CP_DEBUG2 (d, ("ioctl: setclk\n")); /* Only for superuser! 
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1 && c->type != T_G703 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; s = splimp (); CP_LOCK (bd); switch (*(int*)data) { default: cp_set_gsyn (c, GSYN_INT); break; case E1CLK_RECEIVE: cp_set_gsyn (c, GSYN_RCV); break; case E1CLK_RECEIVE_CHAN0: cp_set_gsyn (c, GSYN_RCV0); break; case E1CLK_RECEIVE_CHAN1: cp_set_gsyn (c, GSYN_RCV1); break; case E1CLK_RECEIVE_CHAN2: cp_set_gsyn (c, GSYN_RCV2); break; case E1CLK_RECEIVE_CHAN3: cp_set_gsyn (c, GSYN_RCV3); break; } CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETTIMESLOTS: CP_DEBUG2 (d, ("ioctl: gettimeslots\n")); if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; *(u_long*)data = c->ts; return 0; case SERIAL_SETTIMESLOTS: CP_DEBUG2 (d, ("ioctl: settimeslots\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_ts (c, *(u_long*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETINVCLK: CP_DEBUG2 (d, ("ioctl: getinvclk\n")); #if 1 return EINVAL; #else if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->invtxc; return 0; #endif case SERIAL_SETINVCLK: CP_DEBUG2 (d, ("ioctl: setinvclk\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_invtxc (c, *(int*)data); cp_set_invrxc (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETINVTCLK: CP_DEBUG2 (d, ("ioctl: getinvtclk\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->invtxc; return 0; case SERIAL_SETINVTCLK: CP_DEBUG2 (d, ("ioctl: setinvtclk\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_invtxc (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETINVRCLK: CP_DEBUG2 (d, ("ioctl: getinvrclk\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->invrxc; return 0; case SERIAL_SETINVRCLK: CP_DEBUG2 (d, ("ioctl: setinvrclk\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_invrxc (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETLEVEL: CP_DEBUG2 (d, ("ioctl: getlevel\n")); if (c->type != T_G703) return EINVAL; s = splimp (); CP_LOCK (bd); *(int*)data = cp_get_lq (c); CP_UNLOCK (bd); splx (s); return 0; #if 0 case SERIAL_RESET: CP_DEBUG2 (d, ("ioctl: reset\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_reset (c->board, 0, 0); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_HARDRESET: CP_DEBUG2 (d, ("ioctl: hardreset\n")); /* Only for superuser! 
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); /* hard_reset (c->board); */ CP_UNLOCK (bd); splx (s); return 0; #endif case SERIAL_GETCABLE: CP_DEBUG2 (d, ("ioctl: getcable\n")); if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); *(int*)data = cp_get_cable (c); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDIR: CP_DEBUG2 (d, ("ioctl: getdir\n")); if (c->type != T_E1 && c->type != T_DATA) return EINVAL; *(int*)data = c->dir; return 0; case SERIAL_SETDIR: CP_DEBUG2 (d, ("ioctl: setdir\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_dir (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETRLOOP: CP_DEBUG2 (d, ("ioctl: getrloop\n")); if (c->type != T_G703 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; *(int*)data = cp_get_rloop (c); return 0; case SERIAL_SETRLOOP: CP_DEBUG2 (d, ("ioctl: setrloop\n")); if (c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_rloop (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCABLEN: CP_DEBUG2 (d, ("ioctl: getcablen\n")); if (c->type != T_T3 && c->type != T_STS1) return EINVAL; *(int*)data = c->cablen; return 0; case SERIAL_SETCABLEN: CP_DEBUG2 (d, ("ioctl: setcablen\n")); if (c->type != T_T3 && c->type != T_STS1) return EINVAL; /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_cablen (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case TIOCSDTR: /* Set DTR */ s = splimp (); CP_LOCK (bd); cp_set_dtr (c, 1); CP_UNLOCK (bd); splx (s); return 0; case TIOCCDTR: /* Clear DTR */ s = splimp (); CP_LOCK (bd); cp_set_dtr (c, 0); CP_UNLOCK (bd); splx (s); return 0; case TIOCMSET: /* Set DTR/RTS */ s = splimp (); CP_LOCK (bd); cp_set_dtr (c, (*(int*)data & TIOCM_DTR) ? 1 : 0); cp_set_rts (c, (*(int*)data & TIOCM_RTS) ? 1 : 0); CP_UNLOCK (bd); splx (s); return 0; case TIOCMBIS: /* Add DTR/RTS */ s = splimp (); CP_LOCK (bd); if (*(int*)data & TIOCM_DTR) cp_set_dtr (c, 1); if (*(int*)data & TIOCM_RTS) cp_set_rts (c, 1); CP_UNLOCK (bd); splx (s); return 0; case TIOCMBIC: /* Clear DTR/RTS */ s = splimp (); CP_LOCK (bd); if (*(int*)data & TIOCM_DTR) cp_set_dtr (c, 0); if (*(int*)data & TIOCM_RTS) cp_set_rts (c, 0); CP_UNLOCK (bd); splx (s); return 0; case TIOCMGET: /* Get modem status */ *(int*)data = cp_modem_status (c); return 0; } return ENOTTY; }
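/*
 * Userland sketch (illustrative; the device path, open flags and missing
 * error checking are assumptions, not taken from this driver): raising
 * DTR/RTS and reading the modem lines back through the TIOCMBIS and
 * TIOCMGET cases above.
 *
 *	int fd = open("/dev/cp0", O_RDWR);
 *	int bits = TIOCM_DTR | TIOCM_RTS;
 *
 *	ioctl(fd, TIOCMBIS, &bits);
 *	ioctl(fd, TIOCMGET, &bits);
 */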
#ifdef NETGRAPH static int ng_cp_constructor (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); CP_DEBUG (d, ("Constructor\n")); return EINVAL; } static int ng_cp_newhook (node_p node, hook_p hook, const char *name) { int s; drv_t *d = NG_NODE_PRIVATE (node); bdrv_t *bd = d->board->sys; CP_DEBUG (d, ("Newhook\n")); /* Attach debug hook */ if (strcmp (name, NG_CP_HOOK_DEBUG) == 0) { NG_HOOK_SET_PRIVATE (hook, NULL); d->debug_hook = hook; return 0; } /* Check for raw hook */ if (strcmp (name, NG_CP_HOOK_RAW) != 0) return EINVAL; NG_HOOK_SET_PRIVATE (hook, d); d->hook = hook; s = splimp (); CP_LOCK (bd); cp_up (d); CP_UNLOCK (bd); splx (s); return 0; } static char *format_timeslots (u_long s) { static char buf [100]; char *p = buf; int i; for (i=1; i<32; ++i) if ((s >> i) & 1) { int prev = (i > 1) & (s >> (i-1)); int next = (i < 31) & (s >> (i+1)); if (prev) { if (next) continue; *p++ = '-'; } else if (p > buf) *p++ = ','; if (i >= 10) *p++ = '0' + i / 10; *p++ = '0' + i % 10; } *p = 0; return buf; } static int print_modems (char *s, cp_chan_t *c, int need_header) { int status = cp_modem_status (c); int length = 0; if (need_header) length += sprintf (s + length, " LE DTR DSR RTS CTS CD\n"); length += sprintf (s + length, "%4s %4s %4s %4s %4s %4s\n", status & TIOCM_LE ? "On" : "-", status & TIOCM_DTR ? "On" : "-", status & TIOCM_DSR ? "On" : "-", status & TIOCM_RTS ? "On" : "-", status & TIOCM_CTS ? "On" : "-", status & TIOCM_CD ? "On" : "-"); return length; } static int print_stats (char *s, cp_chan_t *c, int need_header) { int length = 0; if (need_header) length += sprintf (s + length, " Rintr Tintr Mintr Ibytes Ipkts Ierrs Obytes Opkts Oerrs\n"); length += sprintf (s + length, "%7ld %7ld %7ld %8lu %7ld %7ld %8lu %7ld %7ld\n", c->rintr, c->tintr, 0l, (unsigned long) c->ibytes, c->ipkts, c->overrun + c->frame + c->crc, (unsigned long) c->obytes, c->opkts, c->underrun); return length; } static char *format_e1_status (u_char status) { static char buf [80]; if (status & E1_NOALARM) return "Ok"; buf[0] = 0; if (status & E1_LOS) strcat (buf, ",LOS"); if (status & E1_AIS) strcat (buf, ",AIS"); if (status & E1_LOF) strcat (buf, ",LOF"); if (status & E1_LOMF) strcat (buf, ",LOMF"); if (status & E1_FARLOF) strcat (buf, ",FARLOF"); if (status & E1_AIS16) strcat (buf, ",AIS16"); if (status & E1_FARLOMF) strcat (buf, ",FARLOMF"); if (status & E1_TSTREQ) strcat (buf, ",TSTREQ"); if (status & E1_TSTERR) strcat (buf, ",TSTERR"); if (buf[0] == ',') return buf+1; return "Unknown"; } static int print_frac (char *s, int leftalign, u_long numerator, u_long divider) { int n, length = 0; if (numerator < 1 || divider < 1) { length += sprintf (s+length, leftalign ? "/- " : " -"); return length; } n = (int) (0.5 + 1000.0 * numerator / divider); if (n < 1000) { length += sprintf (s+length, leftalign ? "/.%-3d" : " .%03d", n); return length; }
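/*
 * The cases below handle ratios of 1.0 and above (n holds the ratio in
 * thousandths): n is rounded to at most three significant digits, and
 * borderline values are clamped so the printed field never gains an extra
 * digit; a rounded 1.000 prints as ".999" and a rounded 10.000 as "9.99".
 * For example, n = 1234 prints "1.23" and n = 123456 prints "123.".
 */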
*(s + length) = leftalign ? '/' : ' '; length ++; if (n >= 1000000) n = (n+500) / 1000 * 1000; else if (n >= 100000) n = (n+50) / 100 * 100; else if (n >= 10000) n = (n+5) / 10 * 10; switch (n) { case 1000: length += sprintf (s+length, ".999"); return length; case 10000: n = 9990; break; case 100000: n = 99900; break; case 1000000: n = 999000; break; } if (n < 10000) length += sprintf (s+length, "%d.%d", n/1000, n/10%100); else if (n < 100000) length += sprintf (s+length, "%d.%d", n/1000, n/100%10); else if (n < 1000000) length += sprintf (s+length, "%d.", n/1000); else length += sprintf (s+length, "%d", n/1000); return length; } static int print_e1_stats (char *s, cp_chan_t *c) { struct e1_counters total; u_long totsec; int length = 0; totsec = c->totsec + c->cursec; total.bpv = c->total.bpv + c->currnt.bpv; total.fse = c->total.fse + c->currnt.fse; total.crce = c->total.crce + c->currnt.crce; total.rcrce = c->total.rcrce + c->currnt.rcrce; total.uas = c->total.uas + c->currnt.uas; total.les = c->total.les + c->currnt.les; total.es = c->total.es + c->currnt.es; total.bes = c->total.bes + c->currnt.bes; total.ses = c->total.ses + c->currnt.ses; total.oofs = c->total.oofs + c->currnt.oofs; total.css = c->total.css + c->currnt.css; total.dm = c->total.dm + c->currnt.dm; length += sprintf (s + length, " Unav/Degr Bpv/Fsyn CRC/RCRC Err/Lerr Sev/Bur Oof/Slp Status\n"); /* Unavailable seconds, degraded minutes */ length += print_frac (s + length, 0, c->currnt.uas, c->cursec); length += print_frac (s + length, 1, 60 * c->currnt.dm, c->cursec); /* Bipolar violations, frame sync errors */ length += print_frac (s + length, 0, c->currnt.bpv, c->cursec); length += print_frac (s + length, 1, c->currnt.fse, c->cursec); /* CRC errors, remote CRC errors (E-bit) */ length += print_frac (s + length, 0, c->currnt.crce, c->cursec); length += print_frac (s + length, 1, c->currnt.rcrce, c->cursec); /* Errored seconds, line errored seconds */ length += print_frac (s + length, 0, c->currnt.es, c->cursec); length += print_frac (s + length, 1, c->currnt.les, c->cursec); /* Severely errored seconds, burst errored seconds */ length += print_frac (s + length, 0, c->currnt.ses, c->cursec); length += print_frac (s + length, 1, c->currnt.bes, c->cursec); /* Out of frame seconds, controlled slip seconds */ length += print_frac (s + length, 0, c->currnt.oofs, c->cursec); length += print_frac (s + length, 1, c->currnt.css, c->cursec); length += sprintf (s + length, " %s\n", format_e1_status (c->status)); /* Print total statistics.
*/ length += print_frac (s + length, 0, total.uas, totsec); length += print_frac (s + length, 1, 60 * total.dm, totsec); length += print_frac (s + length, 0, total.bpv, totsec); length += print_frac (s + length, 1, total.fse, totsec); length += print_frac (s + length, 0, total.crce, totsec); length += print_frac (s + length, 1, total.rcrce, totsec); length += print_frac (s + length, 0, total.es, totsec); length += print_frac (s + length, 1, total.les, totsec); length += print_frac (s + length, 0, total.ses, totsec); length += print_frac (s + length, 1, total.bes, totsec); length += print_frac (s + length, 0, total.oofs, totsec); length += print_frac (s + length, 1, total.css, totsec); length += sprintf (s + length, " -- Total\n"); return length; } static int print_chan (char *s, cp_chan_t *c) { drv_t *d = c->sys; bdrv_t *bd = d->board->sys; int length = 0; length += sprintf (s + length, "cp%d", c->board->num * NCHAN + c->num); if (d->chan->debug) length += sprintf (s + length, " debug=%d", d->chan->debug); if (c->board->mux) { length += sprintf (s + length, " cfg=C"); } else { length += sprintf (s + length, " cfg=A"); } if (c->baud) length += sprintf (s + length, " %ld", c->baud); else length += sprintf (s + length, " extclock"); if (c->type == T_E1 || c->type == T_G703) switch (c->gsyn) { case GSYN_INT : length += sprintf (s + length, " syn=int"); break; case GSYN_RCV : length += sprintf (s + length, " syn=rcv"); break; case GSYN_RCV0 : length += sprintf (s + length, " syn=rcv0"); break; case GSYN_RCV1 : length += sprintf (s + length, " syn=rcv1"); break; case GSYN_RCV2 : length += sprintf (s + length, " syn=rcv2"); break; case GSYN_RCV3 : length += sprintf (s + length, " syn=rcv3"); break; } if (c->type == T_SERIAL) { length += sprintf (s + length, " dpll=%s", c->dpll ? "on" : "off"); length += sprintf (s + length, " nrzi=%s", c->nrzi ? "on" : "off"); length += sprintf (s + length, " invclk=%s", c->invtxc ? "on" : "off"); } if (c->type == T_E1) length += sprintf (s + length, " higain=%s", c->higain ? "on" : "off"); length += sprintf (s + length, " loop=%s", c->lloop ? "on" : "off"); if (c->type == T_E1) length += sprintf (s + length, " ts=%s", format_timeslots (c->ts)); if (c->type == T_G703) { int lq, x; x = splimp (); CP_LOCK (bd); lq = cp_get_lq (c); CP_UNLOCK (bd); splx (x); length += sprintf (s + length, " (level=-%.1fdB)", lq / 10.0); } length += sprintf (s + length, "\n"); return length; } static int ng_cp_rcvmsg (node_p node, item_p item, hook_p lasthook) { drv_t *d = NG_NODE_PRIVATE (node); struct ng_mesg *msg; struct ng_mesg *resp = NULL; int error = 0; CP_DEBUG (d, ("Rcvmsg\n")); NGI_GET_MSG (item, msg); switch (msg->header.typecookie) { default: error = EINVAL; break; case NGM_CP_COOKIE: printf ("Not implemented yet\n"); error = EINVAL; break; case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { default: error = EINVAL; break; case NGM_TEXT_STATUS: { char *s; int l = 0; int dl = sizeof (struct ng_mesg) + 730; NG_MKRESPONSE (resp, msg, dl, M_NOWAIT); if (! 
resp) { error = ENOMEM; break; } s = (resp)->data; if (d) { l += print_chan (s + l, d->chan); l += print_stats (s + l, d->chan, 1); l += print_modems (s + l, d->chan, 1); l += print_e1_stats (s + l, d->chan); } else l += sprintf (s + l, "Error: node not connected to channel"); strncpy ((resp)->header.cmdstr, "status", NG_CMDSTRSIZ); } break; } break; } NG_RESPOND_MSG (error, node, item, resp); NG_FREE_MSG (msg); return error; } static int ng_cp_rcvdata (hook_p hook, item_p item) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE(hook)); struct mbuf *m; struct ng_tag_prio *ptag; bdrv_t *bd = d->board->sys; struct ifqueue *q; int s; CP_DEBUG2 (d, ("Rcvdata\n")); NGI_GET_M (item, m); NG_FREE_ITEM (item); if (! NG_HOOK_PRIVATE (hook) || ! d) { NG_FREE_M (m); return ENETDOWN; } /* Check for high priority data */ if ((ptag = (struct ng_tag_prio *)m_tag_locate(m, NGM_GENERIC_COOKIE, NG_TAG_PRIO, NULL)) != NULL && (ptag->priority > NG_PRIO_CUTOFF) ) q = &d->hi_queue; else q = &d->queue; s = splimp (); CP_LOCK (bd); IF_LOCK (q); if (_IF_QFULL (q)) { IF_UNLOCK (q); CP_UNLOCK (bd); splx (s); NG_FREE_M (m); return ENOBUFS; } _IF_ENQUEUE (q, m); IF_UNLOCK (q); cp_start (d); CP_UNLOCK (bd); splx (s); return 0; } static int ng_cp_rmnode (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); CP_DEBUG (d, ("Rmnode\n")); if (d && d->running) { bdrv_t *bd = d->board->sys; int s = splimp (); CP_LOCK (bd); cp_down (d); CP_UNLOCK (bd); splx (s); } #ifdef KLD_MODULE if (node->nd_flags & NGF_REALLY_DIE) { NG_NODE_SET_PRIVATE (node, NULL); NG_NODE_UNREF (node); } NG_NODE_REVIVE(node); /* Persistent node */ #endif return 0; } static int ng_cp_connect (hook_p hook) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); if (d) { CP_DEBUG (d, ("Connect\n")); callout_reset (&d->timeout_handle, hz, cp_watchdog_timer, d); } return 0; } static int ng_cp_disconnect (hook_p hook) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); if (d) { CP_DEBUG (d, ("Disconnect\n")); if (NG_HOOK_PRIVATE (hook)) { bdrv_t *bd = d->board->sys; int s = splimp (); CP_LOCK (bd); cp_down (d); CP_UNLOCK (bd); splx (s); } /* If it was waiting, it may have been reasserted by now; just stop it. */ if (!callout_drain (&d->timeout_handle)) callout_stop (&d->timeout_handle); } return 0; } #endif static int cp_modevent (module_t mod, int type, void *unused) { static int load_count = 0; switch (type) { case MOD_LOAD: #ifdef NETGRAPH if (ng_newtype (&typestruct)) printf ("Failed to register ng_cp\n"); #endif ++load_count; callout_init (&timeout_handle, CALLOUT_MPSAFE); callout_reset (&timeout_handle, hz*5, cp_timeout, 0); break; case MOD_UNLOAD: if (load_count == 1) { printf ("Removing device entry for Tau-PCI\n"); #ifdef NETGRAPH ng_rmtype (&typestruct); #endif } /* If it was waiting, it may have been reasserted by now; just stop it. * Actually we shouldn't get this condition. But code could be * changed in the future, so just be a little paranoid.
*/ if (!callout_drain (&timeout_handle)) callout_stop (&timeout_handle); --load_count; break; case MOD_SHUTDOWN: break; } return 0; } #ifdef NETGRAPH static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_CP_NODE_TYPE, .constructor = ng_cp_constructor, .rcvmsg = ng_cp_rcvmsg, .shutdown = ng_cp_rmnode, .newhook = ng_cp_newhook, .connect = ng_cp_connect, .rcvdata = ng_cp_rcvdata, .disconnect = ng_cp_disconnect, }; #endif /*NETGRAPH*/ #ifdef NETGRAPH MODULE_DEPEND (ng_cp, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); #else MODULE_DEPEND (cp, sppp, 1, 1, 1); #endif DRIVER_MODULE (cp, pci, cp_driver, cp_devclass, cp_modevent, NULL); MODULE_VERSION (cp, 1); Index: head/sys/dev/cs/if_cs.c =================================================================== --- head/sys/dev/cs/if_cs.c (revision 276749) +++ head/sys/dev/cs/if_cs.c (revision 276750) @@ -1,1226 +1,1225 @@ /*- * Copyright (c) 1997,1998 Maxim Bolotin and Oleg Sharoiko. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * * Device driver for Crystal Semiconductor CS8920 based ethernet * adapters. 
By Maxim Bolotin and Oleg Sharoiko, 27-April-1997 */ /* #define CS_DEBUG */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CS_USE_64K_DMA #define CS_DMA_BUFFER_SIZE 65536 #else #define CS_DMA_BUFFER_SIZE 16384 #endif static void cs_init(void *); static void cs_init_locked(struct cs_softc *); static int cs_ioctl(struct ifnet *, u_long, caddr_t); static void cs_start(struct ifnet *); static void cs_start_locked(struct ifnet *); static void cs_stop(struct cs_softc *); static void cs_reset(struct cs_softc *); static void cs_watchdog(void *); static int cs_mediachange(struct ifnet *); static void cs_mediastatus(struct ifnet *, struct ifmediareq *); static int cs_mediaset(struct cs_softc *, int); static void cs_write_mbufs(struct cs_softc*, struct mbuf*); static void cs_xmit_buf(struct cs_softc*); static int cs_get_packet(struct cs_softc*); static void cs_setmode(struct cs_softc*); static int get_eeprom_data(struct cs_softc *sc, int, int, uint16_t *); static int get_eeprom_cksum(int, int, uint16_t *); static int wait_eeprom_ready( struct cs_softc *); static void control_dc_dc( struct cs_softc *, int ); static int enable_tp(struct cs_softc *); static int enable_aui(struct cs_softc *); static int enable_bnc(struct cs_softc *); static int cs_duplex_auto(struct cs_softc *); devclass_t cs_devclass; driver_intr_t csintr; /* sysctl vars */ static SYSCTL_NODE(_hw, OID_AUTO, cs, CTLFLAG_RD, 0, "cs device parameters"); int cs_ignore_cksum_failure = 0; SYSCTL_INT(_hw_cs, OID_AUTO, ignore_checksum_failure, CTLFLAG_RWTUN, &cs_ignore_cksum_failure, 0, "ignore checksum errors in cs card EEPROM"); static int cs_recv_delay = 570; SYSCTL_INT(_hw_cs, OID_AUTO, recv_delay, CTLFLAG_RWTUN, &cs_recv_delay, 570, ""); static int cs8900_eeint2irq[16] = { 10, 11, 12, 5, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }; static int cs8900_irq2eeint[16] = { 255, 255, 255, 255, 255, 3, 255, 255, 255, 0, 1, 2, 255, 255, 255, 255 }; static int get_eeprom_data(struct cs_softc *sc, int off, int len, uint16_t *buffer) { int i; #ifdef CS_DEBUG device_printf(sc->dev, "EEPROM data from %x for %x:\n", off, len); #endif for (i=0; i < len; i++) { if (wait_eeprom_ready(sc) < 0) return (-1); /* Send command to EEPROM to read */ cs_writereg(sc, PP_EECMD, (off + i) | EEPROM_READ_CMD); if (wait_eeprom_ready(sc) < 0) return (-1); buffer[i] = cs_readreg(sc, PP_EEData); #ifdef CS_DEBUG printf("%04x ",buffer[i]); #endif } #ifdef CS_DEBUG printf("\n"); #endif return (0); } static int get_eeprom_cksum(int off, int len, uint16_t *buffer) { int i; uint16_t cksum=0; for (i = 0; i < len; i++) cksum += buffer[i]; cksum &= 0xffff; if (cksum == 0 || cs_ignore_cksum_failure) return (0); return (-1); }
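/*
 * The EEPROM contents are accepted when the 16-bit words of the
 * checksummed area sum to zero modulo 2^16; setting the
 * hw.cs.ignore_checksum_failure sysctl (cs_ignore_cksum_failure above)
 * admits cards whose EEPROM fails that test.
 */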
cs_readreg takes ~2us. */ for (i = 0; i < 15000; i++) /* 30ms max */ if (!(cs_readreg(sc, PP_SelfST) & SI_BUSY)) return (0); return (1); } static void control_dc_dc(struct cs_softc *sc, int on_not_off) { unsigned int self_control = HCB1_ENBL; if (((sc->adapter_cnf & A_CNF_DC_DC_POLARITY)!=0) ^ on_not_off) self_control |= HCB1; else self_control &= ~HCB1; cs_writereg(sc, PP_SelfCTL, self_control); DELAY(500000); /* Bad! */ } static int cs_duplex_auto(struct cs_softc *sc) { int i, error=0; cs_writereg(sc, PP_AutoNegCTL, RE_NEG_NOW | ALLOW_FDX | AUTO_NEG_ENABLE); for (i=0; cs_readreg(sc, PP_AutoNegST) & AUTO_NEG_BUSY; i++) { if (i > 4000) { device_printf(sc->dev, "full/half duplex auto negotiation timeout\n"); error = ETIMEDOUT; break; } DELAY(1000); } return (error); } static int enable_tp(struct cs_softc *sc) { cs_writereg(sc, PP_LineCTL, sc->line_ctl & ~AUI_ONLY); control_dc_dc(sc, 0); return (0); } static int enable_aui(struct cs_softc *sc) { cs_writereg(sc, PP_LineCTL, (sc->line_ctl & ~AUTO_AUI_10BASET) | AUI_ONLY); control_dc_dc(sc, 0); return (0); } static int enable_bnc(struct cs_softc *sc) { cs_writereg(sc, PP_LineCTL, (sc->line_ctl & ~AUTO_AUI_10BASET) | AUI_ONLY); control_dc_dc(sc, 1); return (0); } int cs_cs89x0_probe(device_t dev) { int i; int error; u_long irq, junk; struct cs_softc *sc = device_get_softc(dev); unsigned rev_type = 0; uint16_t id; char chip_revision; uint16_t eeprom_buff[CHKSUM_LEN]; int chip_type, pp_isaint; sc->dev = dev; error = cs_alloc_port(dev, 0, CS_89x0_IO_PORTS); if (error) return (error); if ((cs_inw(sc, ADD_PORT) & ADD_MASK) != ADD_SIG) { /* Chip not detected. Let's try to reset it */ if (bootverbose) device_printf(dev, "trying to reset the chip.\n"); cs_outw(sc, ADD_PORT, PP_SelfCTL); i = cs_inw(sc, DATA_PORT); cs_outw(sc, ADD_PORT, PP_SelfCTL); cs_outw(sc, DATA_PORT, i | POWER_ON_RESET); if ((cs_inw(sc, ADD_PORT) & ADD_MASK) != ADD_SIG) return (ENXIO); } for (i = 0; i < 10000; i++) { id = cs_readreg(sc, PP_ChipID); if (id == CHIP_EISA_ID_SIG) break; } if (i == 10000) return (ENXIO); rev_type = cs_readreg(sc, PRODUCT_ID_ADD); chip_type = rev_type & ~REVISON_BITS; chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A'; sc->chip_type = chip_type; if (chip_type == CS8900) { pp_isaint = PP_CS8900_ISAINT; sc->send_cmd = TX_CS8900_AFTER_ALL; } else { pp_isaint = PP_CS8920_ISAINT; sc->send_cmd = TX_CS8920_AFTER_ALL; } /* * Clear some fields so that an EEPROM failure will leave them clean */ sc->auto_neg_cnf = 0; sc->adapter_cnf = 0; sc->isa_config = 0; /* * If no interrupt specified, use what the board tells us. */ error = bus_get_resource(dev, SYS_RES_IRQ, 0, &irq, &junk); /* * Get data from EEPROM */ if((cs_readreg(sc, PP_SelfST) & EEPROM_PRESENT) == 0) { device_printf(dev, "No EEPROM, assuming defaults.\n"); } else if (get_eeprom_data(sc,START_EEPROM_DATA,CHKSUM_LEN, eeprom_buff)<0) { device_printf(dev, "EEPROM read failed, assuming defaults.\n"); } else if (get_eeprom_cksum(START_EEPROM_DATA,CHKSUM_LEN, eeprom_buff)<0) { device_printf(dev, "EEPROM checksum bad, assuming defaults.\n"); } else { sc->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET]; sc->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET]; sc->isa_config = eeprom_buff[ISA_CNF_OFFSET]; for (i=0; i<ETHER_ADDR_LEN/2; i++) { sc->enaddr[i*2] = eeprom_buff[i]; sc->enaddr[i*2+1] = eeprom_buff[i] >> 8; } /* * If no interrupt specified, use what the * board tells us.
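The EEPROM helpers above poll SI_BUSY with an upper bound instead of the unconditional 30ms DELAY() the comment in wait_eeprom_ready() complains about. The shape of that fix, as a standalone sketch (foo_softc and read_status() stand in for a real softc and a register accessor such as cs_readreg(sc, PP_SelfST)):

/*
 * Bounded busy-wait: probe roughly every 2us up to max_us total,
 * instead of sleeping for the worst case up front.
 */
static int
poll_until_ready(struct foo_softc *sc, uint16_t busy_bit, int max_us)
{
	int us;

	for (us = 0; us < max_us; us += 2) {
		if ((read_status(sc) & busy_bit) == 0)
			return (0);	/* device is ready */
		DELAY(2);
	}
	return (ETIMEDOUT);
}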
*/ if (error) { irq = sc->isa_config & INT_NO_MASK; error = 0; if (chip_type == CS8900) { irq = cs8900_eeint2irq[irq]; } else { if (irq > CS8920_NO_INTS) irq = 255; } if (irq == 255) { device_printf(dev, "invalid irq in EEPROM.\n"); error = EINVAL; } if (!error) bus_set_resource(dev, SYS_RES_IRQ, 0, irq, 1); } } if (!error && !(sc->flags & CS_NO_IRQ)) { if (chip_type == CS8900) { if (irq < 16) irq = cs8900_irq2eeint[irq]; else irq = 255; } else { if (irq > CS8920_NO_INTS) irq = 255; } if (irq == 255) error = EINVAL; } if (error) { device_printf(dev, "Unknown or invalid irq\n"); return (error); } if (!(sc->flags & CS_NO_IRQ)) cs_writereg(sc, pp_isaint, irq); if (bootverbose) device_printf(dev, "CS89%c0%s rev %c media%s%s%s\n", chip_type == CS8900 ? '0' : '2', chip_type == CS8920M ? "M" : "", chip_revision, (sc->adapter_cnf & A_CNF_10B_T) ? " TP" : "", (sc->adapter_cnf & A_CNF_AUI) ? " AUI" : "", (sc->adapter_cnf & A_CNF_10B_2) ? " BNC" : ""); if ((sc->adapter_cnf & A_CNF_EXTND_10B_2) && (sc->adapter_cnf & A_CNF_LOW_RX_SQUELCH)) sc->line_ctl = LOW_RX_SQUELCH; else sc->line_ctl = 0; return (0); } /* * Allocate a port resource with the given resource id. */ int cs_alloc_port(device_t dev, int rid, int size) { struct cs_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0ul, ~0ul, size, RF_ACTIVE); if (res == NULL) return (ENOENT); sc->port_rid = rid; sc->port_res = res; return (0); } /* * Allocate an irq resource with the given resource id. */ int cs_alloc_irq(device_t dev, int rid) { struct cs_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (res == NULL) return (ENOENT); sc->irq_rid = rid; sc->irq_res = res; return (0); } /* * Release all resources */ void cs_release_resources(device_t dev) { struct cs_softc *sc = device_get_softc(dev); if (sc->port_res) { bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); sc->port_res = 0; } if (sc->irq_res) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = 0; } } /* * Install the interface into kernel networking data structures */ int cs_attach(device_t dev) { int error, media=0; struct cs_softc *sc = device_get_softc(dev); struct ifnet *ifp; sc->dev = dev; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); cs_release_resources(dev); return (ENOMEM); } mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->timer, &sc->lock, 0); CS_LOCK(sc); cs_stop(sc); CS_UNLOCK(sc); ifp->if_softc=sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_start=cs_start; ifp->if_ioctl=cs_ioctl; ifp->if_init=cs_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_flags=(IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); /* * this code still in progress (DMA support) * sc->recv_ring=malloc(CS_DMA_BUFFER_SIZE<<1, M_DEVBUF, M_NOWAIT); if (sc->recv_ring == NULL) { log(LOG_ERR, "%s: Couldn't allocate memory for NIC\n", ifp->if_xname); return(0); } if ((sc->recv_ring-(sc->recv_ring & 0x1FFFF)) < (128*1024-CS_DMA_BUFFER_SIZE)) sc->recv_ring+=16*1024; */ sc->buffer=malloc(ETHER_MAX_LEN-ETHER_CRC_LEN,M_DEVBUF,M_NOWAIT); if (sc->buffer == NULL) { device_printf(sc->dev, "Couldn't allocate memory for NIC\n"); if_free(ifp); mtx_destroy(&sc->lock); cs_release_resources(dev); return(ENOMEM); } /* * Initialize the media structures. 
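cs_alloc_port(), cs_alloc_irq() and cs_release_resources() above follow the standard newbus pairing: stash the rid and the resource pointer on success so that detach, or a later attach failure, can hand back exactly the same pair to bus_release_resource(). Condensed (xx_softc and the xx_* names are hypothetical):

static int
xx_alloc_irq(device_t dev, struct xx_softc *sc, int rid)
{
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL)
		return (ENOENT);
	sc->irq_rid = rid;	/* remembered for the release below */
	return (0);
}

static void
xx_free_irq(device_t dev, struct xx_softc *sc)
{
	if (sc->irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq_res);
		sc->irq_res = NULL;
	}
}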
*/ ifmedia_init(&sc->media, 0, cs_mediachange, cs_mediastatus); if (sc->adapter_cnf & A_CNF_10B_T) { ifmedia_add(&sc->media, IFM_ETHER|IFM_10_T, 0, NULL); if (sc->chip_type != CS8900) { ifmedia_add(&sc->media, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); } } if (sc->adapter_cnf & A_CNF_10B_2) ifmedia_add(&sc->media, IFM_ETHER|IFM_10_2, 0, NULL); if (sc->adapter_cnf & A_CNF_AUI) ifmedia_add(&sc->media, IFM_ETHER|IFM_10_5, 0, NULL); if (sc->adapter_cnf & A_CNF_MEDIA) ifmedia_add(&sc->media, IFM_ETHER|IFM_AUTO, 0, NULL); /* Set default media from EEPROM */ switch (sc->adapter_cnf & A_CNF_MEDIA_TYPE) { case A_CNF_MEDIA_AUTO: media = IFM_ETHER|IFM_AUTO; break; case A_CNF_MEDIA_10B_T: media = IFM_ETHER|IFM_10_T; break; case A_CNF_MEDIA_10B_2: media = IFM_ETHER|IFM_10_2; break; case A_CNF_MEDIA_AUI: media = IFM_ETHER|IFM_10_5; break; default: device_printf(sc->dev, "no media, assuming 10baseT\n"); sc->adapter_cnf |= A_CNF_10B_T; ifmedia_add(&sc->media, IFM_ETHER|IFM_10_T, 0, NULL); if (sc->chip_type != CS8900) { ifmedia_add(&sc->media, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); } media = IFM_ETHER | IFM_10_T; break; } ifmedia_set(&sc->media, media); cs_mediaset(sc, media); ether_ifattach(ifp, sc->enaddr); error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, csintr, sc, &sc->irq_handle); if (error) { ether_ifdetach(ifp); free(sc->buffer, M_DEVBUF); if_free(ifp); mtx_destroy(&sc->lock); cs_release_resources(dev); return (error); } return (0); } int cs_detach(device_t dev) { struct cs_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->ifp; CS_LOCK(sc); cs_stop(sc); CS_UNLOCK(sc); callout_drain(&sc->timer); ether_ifdetach(ifp); bus_teardown_intr(dev, sc->irq_res, sc->irq_handle); cs_release_resources(dev); free(sc->buffer, M_DEVBUF); if_free(ifp); mtx_destroy(&sc->lock); return (0); } /* * Initialize the board */ static void cs_init(void *xsc) { struct cs_softc *sc=(struct cs_softc *)xsc; CS_LOCK(sc); cs_init_locked(sc); CS_UNLOCK(sc); } static void cs_init_locked(struct cs_softc *sc) { struct ifnet *ifp = sc->ifp; int i, rx_cfg; /* * reset watchdog timer */ sc->tx_timeout = 0; sc->buf_len = 0; /* * Hardware initialization of cs */ /* Enable receiver and transmitter */ cs_writereg(sc, PP_LineCTL, cs_readreg(sc, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON); /* Configure the receiver mode */ cs_setmode(sc); /* * This defines what type of frames will cause interrupts * Bad frames should generate interrupts so that the driver * could track statistics of discarded packets */ rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL; if (sc->isa_config & STREAM_TRANSFER) rx_cfg |= RX_STREAM_ENBL; cs_writereg(sc, PP_RxCFG, rx_cfg); cs_writereg(sc, PP_TxCFG, TX_LOST_CRS_ENBL | TX_SQE_ERROR_ENBL | TX_OK_ENBL | TX_LATE_COL_ENBL | TX_JBR_ENBL | TX_ANY_COL_ENBL | TX_16_COL_ENBL); cs_writereg(sc, PP_BufCFG, READY_FOR_TX_ENBL | RX_MISS_COUNT_OVRFLOW_ENBL | TX_COL_COUNT_OVRFLOW_ENBL | TX_UNDERRUN_ENBL /*| RX_DMA_ENBL*/); /* Write MAC address into IA filter */ for (i=0; ienaddr[i * 2] | (sc->enaddr[i * 2 + 1] << 8) ); /* * Now enable everything */ /* #ifdef CS_USE_64K_DMA cs_writereg(sc, PP_BusCTL, ENABLE_IRQ | RX_DMA_SIZE_64K); #else cs_writereg(sc, PP_BusCTL, ENABLE_IRQ); #endif */ cs_writereg(sc, PP_BusCTL, ENABLE_IRQ); /* * Set running and clear output active flags */ sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->ifp->if_drv_flags &= 
~IFF_DRV_OACTIVE; callout_reset(&sc->timer, hz, cs_watchdog, sc); /* * Start sending process */ cs_start_locked(ifp); } /* * Get the packet from the board and send it to the upper layer. */ static int cs_get_packet(struct cs_softc *sc) { struct ifnet *ifp = sc->ifp; int status, length; struct mbuf *m; #ifdef CS_DEBUG int i; #endif status = cs_inw(sc, RX_FRAME_PORT); length = cs_inw(sc, RX_FRAME_PORT); #ifdef CS_DEBUG device_printf(sc->dev, "rcvd: stat %x, len %d\n", status, length); #endif if (!(status & RX_OK)) { #ifdef CS_DEBUG device_printf(sc->dev, "bad pkt stat %x\n", status); #endif if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return (-1); } MGETHDR(m, M_NOWAIT, MT_DATA); if (m==NULL) return (-1); if (length > MHLEN) { - MCLGET(m, M_NOWAIT); - if (!(m->m_flags & M_EXT)) { + if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); return (-1); } } /* Initialize packet's header info */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = length; m->m_len = length; /* Get the data */ bus_read_multi_2(sc->port_res, RX_FRAME_PORT, mtod(m, uint16_t *), (length + 1) >> 1); #ifdef CS_DEBUG for (i=0;im_data+i))); printf( "\n" ); #endif if (status & (RX_IA | RX_BROADCAST) || (ifp->if_flags & IFF_MULTICAST && status & RX_HASHED)) { /* Feed the packet to the upper layer */ (*ifp->if_input)(ifp, m); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); if (length == ETHER_MAX_LEN-ETHER_CRC_LEN) DELAY(cs_recv_delay); } else { m_freem(m); } return (0); } /* * Handle interrupts */ void csintr(void *arg) { struct cs_softc *sc = (struct cs_softc*) arg; struct ifnet *ifp = sc->ifp; int status; #ifdef CS_DEBUG device_printf(sc->dev, "Interrupt.\n"); #endif CS_LOCK(sc); while ((status=cs_inw(sc, ISQ_PORT))) { #ifdef CS_DEBUG device_printf(sc->dev, "from ISQ: %04x\n", status); #endif switch (status & ISQ_EVENT_MASK) { case ISQ_RECEIVER_EVENT: cs_get_packet(sc); break; case ISQ_TRANSMITTER_EVENT: if (status & TX_OK) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); else if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->tx_timeout = 0; break; case ISQ_BUFFER_EVENT: if (status & READY_FOR_TX) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->tx_timeout = 0; } if (status & TX_UNDERRUN) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->tx_timeout = 0; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } break; case ISQ_RX_MISS_EVENT: if_inc_counter(ifp, IFCOUNTER_IERRORS, status >> 6); break; case ISQ_TX_COL_EVENT: if_inc_counter(ifp, IFCOUNTER_COLLISIONS, status >> 6); break; } } if (!(ifp->if_drv_flags & IFF_DRV_OACTIVE)) { cs_start_locked(ifp); } CS_UNLOCK(sc); } /* * Save the data in buffer */ static void cs_write_mbufs( struct cs_softc *sc, struct mbuf *m ) { int len; struct mbuf *mp; unsigned char *data, *buf; for (mp=m, buf=sc->buffer, sc->buf_len=0; mp != NULL; mp=mp->m_next) { len = mp->m_len; /* * Ignore empty parts */ if (!len) continue; /* * Find actual data address */ data = mtod(mp, caddr_t); bcopy((caddr_t) data, (caddr_t) buf, len); buf += len; sc->buf_len += len; } } static void cs_xmit_buf( struct cs_softc *sc ) { bus_write_multi_2(sc->port_res, TX_FRAME_PORT, (uint16_t *)sc->buffer, (sc->buf_len + 1) >> 1); sc->buf_len = 0; } static void cs_start(struct ifnet *ifp) { struct cs_softc *sc = ifp->if_softc; CS_LOCK(sc); cs_start_locked(ifp); CS_UNLOCK(sc); } static void cs_start_locked(struct ifnet *ifp) { int length; struct mbuf *m, *mp; struct cs_softc *sc = ifp->if_softc; for (;;) { if (sc->buf_len) length = sc->buf_len; else { IF_DEQUEUE( &ifp->if_snd, m ); if (m==NULL) { return; } for (length=0, mp=m; mp != NULL; 
mp=mp->m_next) length += mp->m_len; /* Skip zero-length packets */ if (length == 0) { m_freem(m); continue; } cs_write_mbufs(sc, m); BPF_MTAP(ifp, m); m_freem(m); } /* * Issue a SEND command */ cs_outw(sc, TX_CMD_PORT, sc->send_cmd); cs_outw(sc, TX_LEN_PORT, length ); /* * If there's no free space in the buffer then leave * this packet for the next time: indicate output active * and return. */ if (!(cs_readreg(sc, PP_BusST) & READY_FOR_TX_NOW)) { sc->tx_timeout = sc->buf_len; ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } cs_xmit_buf(sc); /* * Set the watchdog timer in case we never hear * from board again. (I don't know about correct * value for this timeout) */ sc->tx_timeout = length; ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } } /* * Stop everything on the interface */ static void cs_stop(struct cs_softc *sc) { CS_ASSERT_LOCKED(sc); cs_writereg(sc, PP_RxCFG, 0); cs_writereg(sc, PP_TxCFG, 0); cs_writereg(sc, PP_BufCFG, 0); cs_writereg(sc, PP_BusCTL, 0); sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->tx_timeout = 0; callout_stop(&sc->timer); } /* * Reset the interface */ static void cs_reset(struct cs_softc *sc) { CS_ASSERT_LOCKED(sc); cs_stop(sc); cs_init_locked(sc); } static uint16_t cs_hash_index(struct sockaddr_dl *addr) { uint32_t crc; uint16_t idx; caddr_t lla; lla = LLADDR(addr); crc = ether_crc32_le(lla, ETHER_ADDR_LEN); idx = crc >> 26; return (idx); } static void cs_setmode(struct cs_softc *sc) { int rx_ctl; uint16_t af[4]; uint16_t port, mask, index; struct ifnet *ifp = sc->ifp; struct ifmultiaddr *ifma; /* Stop the receiver while changing filters */ cs_writereg(sc, PP_LineCTL, cs_readreg(sc, PP_LineCTL) & ~SERIAL_RX_ON); if (ifp->if_flags & IFF_PROMISC) { /* Turn on promiscuous mode. */ rx_ctl = RX_OK_ACCEPT | RX_PROM_ACCEPT; } else if (ifp->if_flags & IFF_MULTICAST) { /* Allow receiving frames with multicast addresses */ rx_ctl = RX_IA_ACCEPT | RX_BROADCAST_ACCEPT | RX_OK_ACCEPT | RX_MULTCAST_ACCEPT; /* Start with an empty filter */ af[0] = af[1] = af[2] = af[3] = 0x0000; if (ifp->if_flags & IFF_ALLMULTI) { /* Accept all multicast frames */ af[0] = af[1] = af[2] = af[3] = 0xffff; } else { /* * Set up the filter to only accept multicast * frames we're interested in. */ if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { struct sockaddr_dl *dl = (struct sockaddr_dl *)ifma->ifma_addr; index = cs_hash_index(dl); port = (u_int16_t) (index >> 4); mask = (u_int16_t) (1 << (index & 0xf)); af[port] |= mask; } if_maddr_runlock(ifp); } cs_writereg(sc, PP_LAF + 0, af[0]); cs_writereg(sc, PP_LAF + 2, af[1]); cs_writereg(sc, PP_LAF + 4, af[2]); cs_writereg(sc, PP_LAF + 6, af[3]); } else { /* * Receive only good frames addressed for us and * good broadcasts. */ rx_ctl = RX_IA_ACCEPT | RX_BROADCAST_ACCEPT | RX_OK_ACCEPT; } /* Set up the filter */ cs_writereg(sc, PP_RxCTL, RX_DEF_ACCEPT | rx_ctl); /* Turn on receiver */ cs_writereg(sc, PP_LineCTL, cs_readreg(sc, PP_LineCTL) | SERIAL_RX_ON); } static int cs_ioctl(register struct ifnet *ifp, u_long command, caddr_t data) { struct cs_softc *sc=ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error=0; #ifdef CS_DEBUG if_printf(ifp, "%s command=%lx\n", __func__, command); #endif switch (command) { case SIOCSIFFLAGS: /* * Switch interface state between "running" and * "stopped", reflecting the UP flag. 
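cs_write_mbufs() above linearizes a transmit chain by walking m_next and bcopy()ing each fragment into sc->buffer. Assuming a large enough bounce buffer, the stock mbuf routines express the same thing more compactly; a sketch:

/* Copy an entire mbuf chain into a contiguous buffer. */
static int
chain_to_buf(struct mbuf *m, unsigned char *buf, int buflen)
{
	int len;

	len = m_length(m, NULL);	/* total bytes across the chain */
	if (len > buflen)
		return (-1);
	m_copydata(m, 0, len, (caddr_t)buf);
	return (len);
}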
*/ CS_LOCK(sc); if (sc->ifp->if_flags & IFF_UP) { if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING)==0) { cs_init_locked(sc); } } else { if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING)!=0) { cs_stop(sc); } } /* * Promiscuous and/or multicast flags may have changed, * so reprogram the multicast filter and/or receive mode. * * See note about multicasts in cs_setmode */ cs_setmode(sc); CS_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware filter * accordingly. * * See note about multicasts in cs_setmode */ CS_LOCK(sc); cs_setmode(sc); CS_UNLOCK(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->media, command); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } /* * Device timeout/watchdog routine. Entered if the device neglects to * generate an interrupt after a transmit has been started on it. */ static void cs_watchdog(void *arg) { struct cs_softc *sc = arg; struct ifnet *ifp = sc->ifp; CS_ASSERT_LOCKED(sc); if (sc->tx_timeout && --sc->tx_timeout == 0) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); log(LOG_ERR, "%s: device timeout\n", ifp->if_xname); /* Reset the interface */ if (ifp->if_flags & IFF_UP) cs_reset(sc); else cs_stop(sc); } callout_reset(&sc->timer, hz, cs_watchdog, sc); } static int cs_mediachange(struct ifnet *ifp) { struct cs_softc *sc = ifp->if_softc; struct ifmedia *ifm = &sc->media; int error; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); CS_LOCK(sc); error = cs_mediaset(sc, ifm->ifm_media); CS_UNLOCK(sc); return (error); } static void cs_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { int line_status; struct cs_softc *sc = ifp->if_softc; CS_LOCK(sc); ifmr->ifm_active = IFM_ETHER; line_status = cs_readreg(sc, PP_LineST); if (line_status & TENBASET_ON) { ifmr->ifm_active |= IFM_10_T; if (sc->chip_type != CS8900) { if (cs_readreg(sc, PP_AutoNegST) & FDX_ACTIVE) ifmr->ifm_active |= IFM_FDX; if (cs_readreg(sc, PP_AutoNegST) & HDX_ACTIVE) ifmr->ifm_active |= IFM_HDX; } ifmr->ifm_status = IFM_AVALID; if (line_status & LINK_OK) ifmr->ifm_status |= IFM_ACTIVE; } else { if (line_status & AUI_ON) { cs_writereg(sc, PP_SelfCTL, cs_readreg(sc, PP_SelfCTL) | HCB1_ENBL); if (((sc->adapter_cnf & A_CNF_DC_DC_POLARITY)!=0)^ (cs_readreg(sc, PP_SelfCTL) & HCB1)) ifmr->ifm_active |= IFM_10_2; else ifmr->ifm_active |= IFM_10_5; } } CS_UNLOCK(sc); } static int cs_mediaset(struct cs_softc *sc, int media) { int error = 0; /* Stop the receiver & transmitter */ cs_writereg(sc, PP_LineCTL, cs_readreg(sc, PP_LineCTL) & ~(SERIAL_RX_ON | SERIAL_TX_ON)); #ifdef CS_DEBUG device_printf(sc->dev, "%s media=%x\n", __func__, media); #endif switch (IFM_SUBTYPE(media)) { default: case IFM_AUTO: /* * This chip makes it a little hard to support this, so treat * it as IFM_10_T, auto duplex. 
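cs_watchdog() above is the callout-era replacement for the old if_watchdog hook: the transmit path arms sc->tx_timeout, the callout fires every second, and only the transition to zero is treated as a real timeout. The skeleton, with hypothetical xx_* names:

static void
xx_watchdog(void *arg)
{
	struct xx_softc *sc = arg;

	XX_ASSERT_LOCKED(sc);	/* hypothetical lock assertion */
	if (sc->tx_timeout && --sc->tx_timeout == 0) {
		log(LOG_ERR, "%s: device timeout\n", sc->ifp->if_xname);
		xx_reset(sc);	/* hypothetical recovery path */
	}
	callout_reset(&sc->timer, hz, xx_watchdog, sc);
}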
*/ enable_tp(sc); cs_duplex_auto(sc); break; case IFM_10_T: enable_tp(sc); if (media & IFM_FDX) cs_duplex_full(sc); else if (media & IFM_HDX) cs_duplex_half(sc); else error = cs_duplex_auto(sc); break; case IFM_10_2: enable_bnc(sc); break; case IFM_10_5: enable_aui(sc); break; } /* * Turn the transmitter & receiver back on */ cs_writereg(sc, PP_LineCTL, cs_readreg(sc, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON); return (error); } Index: head/sys/dev/ctau/if_ct.c =================================================================== --- head/sys/dev/ctau/if_ct.c (revision 276749) +++ head/sys/dev/ctau/if_ct.c (revision 276750) @@ -1,2207 +1,2206 @@ /*- * Cronyx-Tau adapter driver for FreeBSD. * Supports PPP/HDLC and Cisco/HDLC protocol in synchronous mode, * and asynchronous channels with full modem control. * Keepalive protocol implemented in both Cisco and PPP modes. * * Copyright (C) 1994-2002 Cronyx Engineering. * Author: Serge Vakulenko, * * Copyright (C) 1999-2004 Cronyx Engineering. * Author: Roman Kurakin, * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Authors grant any other persons or organisations a permission to use, * modify and redistribute this software in source and binary forms, * as long as this message is kept with the software, all derivative * works or modified versions. * * Cronyx Id: if_ct.c,v 1.1.2.31 2004/06/23 17:09:13 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_ng_cronyx.h" #ifdef NETGRAPH_CRONYX # include "opt_netgraph.h" # include # include # include #else # include # include # define PP_CISCO IFF_LINK2 # include #endif #define NCTAU 1 /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR #define PP_FR 0 #endif #define CT_DEBUG(d,s) ({if (d->chan->debug) {\ printf ("%s: ", d->name); printf s;}}) #define CT_DEBUG2(d,s) ({if (d->chan->debug>1) {\ printf ("%s: ", d->name); printf s;}}) #define CT_LOCK_NAME "ctX" #define CT_LOCK(_bd) mtx_lock (&(_bd)->ct_mtx) #define CT_UNLOCK(_bd) mtx_unlock (&(_bd)->ct_mtx) #define CT_LOCK_ASSERT(_bd) mtx_assert (&(_bd)->ct_mtx, MA_OWNED) static void ct_identify __P((driver_t *, device_t)); static int ct_probe __P((device_t)); static int ct_attach __P((device_t)); static int ct_detach __P((device_t)); static device_method_t ct_isa_methods [] = { DEVMETHOD(device_identify, ct_identify), DEVMETHOD(device_probe, ct_probe), DEVMETHOD(device_attach, ct_attach), DEVMETHOD(device_detach, ct_detach), DEVMETHOD_END }; typedef struct _ct_dma_mem_t { unsigned long phys; void *virt; size_t size; bus_dma_tag_t dmat; bus_dmamap_t mapp; } ct_dma_mem_t; typedef struct _drv_t { char name [8]; ct_chan_t *chan; ct_board_t *board; struct _bdrv_t *bd; ct_dma_mem_t dmamem; int running; #ifdef NETGRAPH char nodename [NG_NODESIZ]; hook_p hook; hook_p debug_hook; node_p node; struct ifqueue queue; struct ifqueue hi_queue; #else struct ifqueue queue; struct ifnet *ifp; #endif short timeout; struct callout timeout_handle; struct cdev *devt; } drv_t; typedef struct _bdrv_t { ct_board_t *board; struct resource *base_res; struct resource *drq_res; struct resource *irq_res; int base_rid; int drq_rid; int irq_rid; void *intrhand; drv_t channel [NCHAN]; 
struct mtx ct_mtx; } bdrv_t; static driver_t ct_isa_driver = { "ct", ct_isa_methods, sizeof (bdrv_t), }; static devclass_t ct_devclass; static void ct_receive (ct_chan_t *c, char *data, int len); static void ct_transmit (ct_chan_t *c, void *attachment, int len); static void ct_error (ct_chan_t *c, int data); static void ct_up (drv_t *d); static void ct_start (drv_t *d); static void ct_down (drv_t *d); static void ct_watchdog (drv_t *d); static void ct_watchdog_timer (void *arg); #ifdef NETGRAPH extern struct ng_type typestruct; #else static void ct_ifstart (struct ifnet *ifp); static void ct_tlf (struct sppp *sp); static void ct_tls (struct sppp *sp); static int ct_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data); static void ct_initialize (void *softc); #endif static ct_board_t *adapter [NCTAU]; static drv_t *channel [NCTAU*NCHAN]; static struct callout led_timo [NCTAU]; static struct callout timeout_handle; static int ct_open (struct cdev *dev, int oflags, int devtype, struct thread *td); static int ct_close (struct cdev *dev, int fflag, int devtype, struct thread *td); static int ct_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td); static struct cdevsw ct_cdevsw = { .d_version = D_VERSION, .d_open = ct_open, .d_close = ct_close, .d_ioctl = ct_ioctl, .d_name = "ct", }; /* * Make an mbuf from data. */ static struct mbuf *makembuf (void *buf, u_int len) { struct mbuf *m; MGETHDR (m, M_NOWAIT, MT_DATA); if (! m) return 0; - MCLGET (m, M_NOWAIT); - if (! (m->m_flags & M_EXT)) { + if (!(MCLGET(m, M_NOWAIT))) { m_freem (m); return 0; } m->m_pkthdr.len = m->m_len = len; bcopy (buf, mtod (m, caddr_t), len); return m; } static void ct_timeout (void *arg) { drv_t *d; int s, i, k; for (i = 0; i < NCTAU; ++i) { if (adapter[i] == NULL) continue; for (k = 0; k < NCHAN; k++) { d = channel[i * NCHAN + k]; if (! d) continue; if (d->chan->mode != M_G703) continue; s = splimp (); CT_LOCK ((bdrv_t *)d->bd); ct_g703_timer (d->chan); CT_UNLOCK ((bdrv_t *)d->bd); splx (s); } } callout_reset (&timeout_handle, hz, ct_timeout, 0); } static void ct_led_off (void *arg) { ct_board_t *b = arg; bdrv_t *bd = ((drv_t *)b->chan->sys)->bd; int s = splimp (); CT_LOCK (bd); ct_led (b, 0); CT_UNLOCK (bd); splx (s); } /* * Activate interrupt handler from DDK. */ static void ct_intr (void *arg) { bdrv_t *bd = arg; ct_board_t *b = bd->board; #ifndef NETGRAPH int i; #endif int s = splimp (); CT_LOCK (bd); /* Turn LED on. */ ct_led (b, 1); ct_int_handler (b); /* Turn LED off 50 msec later. */ callout_reset (&led_timo[b->num], hz/20, ct_led_off, b); CT_UNLOCK (bd); splx (s); #ifndef NETGRAPH /* Pass packets in a lock-free state */ for (i = 0; i < NCHAN && b->chan[i].type; i++) { drv_t *d = b->chan[i].sys; struct mbuf *m; if (!d || !d->running) continue; while (_IF_QLEN(&d->queue)) { IF_DEQUEUE (&d->queue,m); if (!m) continue; sppp_input (d->ifp, m); } } #endif } static int probe_irq (ct_board_t *b, int irq) { int mask, busy, cnt; /* Clear pending irq, if any. */ ct_probe_irq (b, -irq); DELAY (100); for (cnt=0; cnt<5; ++cnt) { /* Get the mask of pending irqs, assuming they are busy. * Activate the adapter on given irq. */ busy = ct_probe_irq (b, irq); DELAY (1000); /* Get the mask of active irqs. * Deactivate our irq. 
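Both hunks in this revision, the one in cs_get_packet() earlier and the one in makembuf() above, switch to the same idiom: MCLGET() now yields a testable result (per the mbuf(9) change at the top of this commit), so the separate re-check of m->m_flags for M_EXT disappears. A self-contained sketch of the new pattern:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

/* Allocate a packet-header mbuf backed by a cluster. */
static struct mbuf *
alloc_cluster_mbuf(void)
{
	struct mbuf *m;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	if (!(MCLGET(m, M_NOWAIT))) {
		/* No cluster attached; the plain mbuf must be freed. */
		m_freem(m);
		return (NULL);
	}
	return (m);
}

Where a fresh mbuf is wanted anyway, m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR) collapses the two steps into a single allocation.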
*/ mask = ct_probe_irq (b, -irq); DELAY (100); if ((mask & ~busy) == 1 << irq) { ct_probe_irq (b, 0); /* printf ("ct%d: irq %d ok, mask=0x%04x, busy=0x%04x\n", b->num, irq, mask, busy); */ return 1; } } /* printf ("ct%d: irq %d not functional, mask=0x%04x, busy=0x%04x\n", b->num, irq, mask, busy); */ ct_probe_irq (b, 0); return 0; } static short porttab [] = { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0x3c0, 0x3e0, 0 }; static char dmatab [] = { 7, 6, 5, 0 }; static char irqtab [] = { 5, 10, 11, 7, 3, 15, 12, 0 }; static int ct_is_free_res (device_t dev, int rid, int type, u_long start, u_long end, u_long count) { struct resource *res; if (!(res = bus_alloc_resource (dev, type, &rid, start, end, count, 0))) return 0; bus_release_resource (dev, type, rid, res); return 1; } static void ct_identify (driver_t *driver, device_t dev) { u_long iobase, rescount; int devcount; device_t *devices; device_t child; devclass_t my_devclass; int i, k; if ((my_devclass = devclass_find ("ct")) == NULL) return; devclass_get_devices (my_devclass, &devices, &devcount); if (devcount == 0) { /* We should find all devices by ourselves. We could alter other * devices, but we don't have a choice */ for (i = 0; (iobase = porttab [i]) != 0; i++) { if (!ct_is_free_res (dev, 0, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (ct_probe_board (iobase, -1, -1) == 0) continue; devcount++; child = BUS_ADD_CHILD (dev, ISA_ORDER_SPECULATIVE, "ct", -1); if (child == NULL) return; device_set_desc_copy (child, "Cronyx Tau-ISA"); device_set_driver (child, driver); bus_set_resource (child, SYS_RES_IOPORT, 0, iobase, NPORT); if (devcount >= NCTAU) break; } } else { static short porttab [] = { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0x3c0, 0x3e0, 0 }; /* Let's check the user's choice.
*/ for (k = 0; k < devcount; k++) { if (bus_get_resource (devices[k], SYS_RES_IOPORT, 0, &iobase, &rescount) != 0) continue; for (i = 0; porttab [i] != 0; i++) { if (porttab [i] != iobase) continue; if (!ct_is_free_res (devices[k], 0, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (ct_probe_board (iobase, -1, -1) == 0) continue; porttab [i] = -1; device_set_desc_copy (devices[k], "Cronyx Tau-ISA"); break; } if (porttab [i] == 0) { device_delete_child ( device_get_parent (devices[k]), devices [k]); devices[k] = 0; continue; } } for (k = 0; k < devcount; k++) { if (devices[k] == 0) continue; if (bus_get_resource (devices[k], SYS_RES_IOPORT, 0, &iobase, &rescount) == 0) continue; for (i = 0; (iobase = porttab [i]) != 0; i++) { if (porttab [i] == -1) continue; if (!ct_is_free_res (devices[k], 0, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (ct_probe_board (iobase, -1, -1) == 0) continue; bus_set_resource (devices[k], SYS_RES_IOPORT, 0, iobase, NPORT); porttab [i] = -1; device_set_desc_copy (devices[k], "Cronyx Tau-ISA"); break; } if (porttab [i] == 0) { device_delete_child ( device_get_parent (devices[k]), devices [k]); } } free (devices, M_TEMP); } return; } static int ct_probe (device_t dev) { int unit = device_get_unit (dev); u_long iobase, rescount; if (!device_get_desc (dev) || strcmp (device_get_desc (dev), "Cronyx Tau-ISA")) return ENXIO; /* KASSERT ((bd != NULL), ("ct%d: NULL device softc\n", unit));*/ if (bus_get_resource (dev, SYS_RES_IOPORT, 0, &iobase, &rescount) != 0) { printf ("ct%d: Couldn't get IOPORT\n", unit); return ENXIO; } if (!ct_is_free_res (dev, 0, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) { printf ("ct%d: Resource IOPORT isn't free\n", unit); return ENXIO; } if (!ct_probe_board (iobase, -1, -1)) { printf ("ct%d: probing for Tau-ISA at %lx failed\n", unit, iobase); return ENXIO; } return 0; } static void ct_bus_dmamap_addr (void *arg, bus_dma_segment_t *segs, int nseg, int error) { unsigned long *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int ct_bus_dma_mem_alloc (int bnum, int cnum, ct_dma_mem_t *dmem) { int error; error = bus_dma_tag_create (NULL, 16, 0, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, NULL, NULL, dmem->size, 1, dmem->size, 0, NULL, NULL, &dmem->dmat); if (error) { if (cnum >= 0) printf ("ct%d-%d: ", bnum, cnum); else printf ("ct%d: ", bnum); printf ("couldn't allocate tag for dma memory\n"); return 0; } error = bus_dmamem_alloc (dmem->dmat, (void **)&dmem->virt, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dmem->mapp); if (error) { if (cnum >= 0) printf ("ct%d-%d: ", bnum, cnum); else printf ("ct%d: ", bnum); printf ("couldn't allocate mem for dma memory\n"); bus_dma_tag_destroy (dmem->dmat); return 0; } error = bus_dmamap_load (dmem->dmat, dmem->mapp, dmem->virt, dmem->size, ct_bus_dmamap_addr, &dmem->phys, 0); if (error) { if (cnum >= 0) printf ("ct%d-%d: ", bnum, cnum); else printf ("ct%d: ", bnum); printf ("couldn't load mem map for dma memory\n"); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); return 0; } return 1; } static void ct_bus_dma_mem_free (ct_dma_mem_t *dmem) { bus_dmamap_unload (dmem->dmat, dmem->mapp); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); } /* * The adapter is present, initialize the driver structures.
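ct_bus_dma_mem_alloc() above is the canonical tag/alloc/load staircase, each failure step unwinding its predecessors. The one subtle piece is that bus_dmamap_load() reports the bus address through a callback; for a tag created with nsegments = 1, as here, the contract reduces to:

/* Capture the single segment's bus address from bus_dmamap_load(). */
static void
one_seg_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;
	/* nseg == 1 because the tag allows only one segment. */
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

The ct code passes &dmem->phys as the callback argument and relies on the load of wired kernel memory completing synchronously, which is why no EINPROGRESS handling appears.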
*/ static int ct_attach (device_t dev) { bdrv_t *bd = device_get_softc (dev); u_long iobase, drq, irq, rescount; int unit = device_get_unit (dev); char *ct_ln = CT_LOCK_NAME; ct_board_t *b; ct_chan_t *c; drv_t *d; int i; int s; KASSERT ((bd != NULL), ("ct%d: NULL device softc\n", unit)); bus_get_resource (dev, SYS_RES_IOPORT, 0, &iobase, &rescount); bd->base_rid = 0; bd->base_res = bus_alloc_resource (dev, SYS_RES_IOPORT, &bd->base_rid, iobase, iobase + NPORT, NPORT, RF_ACTIVE); if (! bd->base_res) { printf ("ct%d: cannot alloc base address\n", unit); return ENXIO; } if (bus_get_resource (dev, SYS_RES_DRQ, 0, &drq, &rescount) != 0) { for (i = 0; (drq = dmatab [i]) != 0; i++) { if (!ct_is_free_res (dev, 0, SYS_RES_DRQ, drq, drq + 1, 1)) continue; bus_set_resource (dev, SYS_RES_DRQ, 0, drq, 1); break; } if (dmatab[i] == 0) { bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); printf ("ct%d: Couldn't get DRQ\n", unit); return ENXIO; } } bd->drq_rid = 0; bd->drq_res = bus_alloc_resource (dev, SYS_RES_DRQ, &bd->drq_rid, drq, drq + 1, 1, RF_ACTIVE); if (! bd->drq_res) { printf ("ct%d: cannot allocate drq\n", unit); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } if (bus_get_resource (dev, SYS_RES_IRQ, 0, &irq, &rescount) != 0) { for (i = 0; (irq = irqtab [i]) != 0; i++) { if (!ct_is_free_res (dev, 0, SYS_RES_IRQ, irq, irq + 1, 1)) continue; bus_set_resource (dev, SYS_RES_IRQ, 0, irq, 1); break; } if (irqtab[i] == 0) { bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); printf ("ct%d: Couldn't get IRQ\n", unit); return ENXIO; } } bd->irq_rid = 0; bd->irq_res = bus_alloc_resource (dev, SYS_RES_IRQ, &bd->irq_rid, irq, irq + 1, 1, RF_ACTIVE); if (! bd->irq_res) { printf ("ct%d: Couldn't allocate irq\n", unit); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } b = malloc (sizeof (ct_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("ct:%d: Couldn't allocate memory\n", unit); return (ENXIO); } adapter[unit] = b; bzero (b, sizeof(ct_board_t)); if (! ct_open_board (b, unit, iobase, irq, drq)) { printf ("ct%d: error loading firmware\n", unit); free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } bd->board = b; ct_ln[2] = '0' + unit; mtx_init (&bd->ct_mtx, ct_ln, MTX_NETWORK_LOCK, MTX_DEF|MTX_RECURSE); if (! 
probe_irq (b, irq)) { printf ("ct%d: irq %ld not functional\n", unit, irq); bd->board = 0; adapter [unit] = 0; free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); mtx_destroy (&bd->ct_mtx); return ENXIO; } callout_init (&led_timo[unit], CALLOUT_MPSAFE); s = splimp (); if (bus_setup_intr (dev, bd->irq_res, INTR_TYPE_NET|INTR_MPSAFE, NULL, ct_intr, bd, &bd->intrhand)) { printf ("ct%d: Can't setup irq %ld\n", unit, irq); bd->board = 0; adapter [unit] = 0; free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); mtx_destroy (&bd->ct_mtx); splx (s); return ENXIO; } CT_LOCK (bd); ct_init_board (b, b->num, b->port, irq, drq, b->type, b->osc); ct_setup_board (b, 0, 0, 0); CT_UNLOCK (bd); printf ("ct%d: , clock %s MHz\n", b->num, b->name, b->osc == 20000000 ? "20" : "16.384"); for (c = b->chan; c < b->chan + NCHAN; ++c) { d = &bd->channel[c->num]; d->dmamem.size = sizeof(ct_buf_t); if (! ct_bus_dma_mem_alloc (unit, c->num, &d->dmamem)) continue; d->board = b; d->chan = c; d->bd = bd; c->sys = d; channel [b->num*NCHAN + c->num] = d; sprintf (d->name, "ct%d.%d", b->num, c->num); callout_init (&d->timeout_handle, CALLOUT_MPSAFE); #ifdef NETGRAPH if (ng_make_node_common (&typestruct, &d->node) != 0) { printf ("%s: cannot make common node\n", d->name); channel [b->num*NCHAN + c->num] = 0; c->sys = 0; ct_bus_dma_mem_free (&d->dmamem); continue; } NG_NODE_SET_PRIVATE (d->node, d); sprintf (d->nodename, "%s%d", NG_CT_NODE_TYPE, c->board->num*NCHAN + c->num); if (ng_name_node (d->node, d->nodename)) { printf ("%s: cannot name node\n", d->nodename); NG_NODE_UNREF (d->node); channel [b->num*NCHAN + c->num] = 0; c->sys = 0; ct_bus_dma_mem_free (&d->dmamem); continue; } d->queue.ifq_maxlen = ifqmaxlen; d->hi_queue.ifq_maxlen = ifqmaxlen; mtx_init (&d->queue.ifq_mtx, "ct_queue", NULL, MTX_DEF); mtx_init (&d->hi_queue.ifq_mtx, "ct_queue_hi", NULL, MTX_DEF); #else /*NETGRAPH*/ d->ifp = if_alloc(IFT_PPP); if (d->ifp == NULL) { printf ("%s: cannot if_alloc common interface\n", d->name); channel [b->num*NCHAN + c->num] = 0; c->sys = 0; ct_bus_dma_mem_free (&d->dmamem); continue; } d->ifp->if_softc = d; if_initname (d->ifp, "ct", b->num * NCHAN + c->num); d->ifp->if_mtu = PP_MTU; d->ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; d->ifp->if_ioctl = ct_sioctl; d->ifp->if_start = ct_ifstart; d->ifp->if_init = ct_initialize; d->queue.ifq_maxlen = NBUF; mtx_init (&d->queue.ifq_mtx, "ct_queue", NULL, MTX_DEF); sppp_attach (d->ifp); if_attach (d->ifp); IFP2SP(d->ifp)->pp_tlf = ct_tlf; IFP2SP(d->ifp)->pp_tls = ct_tls; /* If BPF is in the kernel, call the attach for it. * Header size is 4 bytes. 
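For the non-NETGRAPH branch above, the attach order matters: fill in the freshly allocated ifnet, let sppp_attach() install the sync-PPP machinery, publish with if_attach(), set the sppp callbacks, and only then register with BPF, whose DLT_PPP header here is 4 bytes, as the driver's own bpfattach() call just below shows. Condensed into one hypothetical helper (xx_* names assumed, error handling trimmed):

static int
xx_attach_ppp(struct xx_drv *d, int unit)
{
	struct ifnet *ifp;

	ifp = if_alloc(IFT_PPP);
	if (ifp == NULL)
		return (ENOMEM);
	ifp->if_softc = d;
	if_initname(ifp, "xx", unit);
	ifp->if_mtu = PP_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_ioctl = xx_sioctl;
	ifp->if_start = xx_ifstart;
	sppp_attach(ifp);		/* sync-PPP/HDLC state machine */
	if_attach(ifp);
	IFP2SP(ifp)->pp_tlf = xx_tlf;	/* this-layer-finished hook */
	IFP2SP(ifp)->pp_tls = xx_tls;	/* this-layer-started hook */
	bpfattach(ifp, DLT_PPP, 4);	/* 4-byte PPP framing for BPF */
	return (0);
}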
bpfattach (d->ifp, DLT_PPP, 4); #endif /*NETGRAPH*/ CT_LOCK (bd); ct_start_chan (c, d->dmamem.virt, d->dmamem.phys); ct_register_receive (c, &ct_receive); ct_register_transmit (c, &ct_transmit); ct_register_error (c, &ct_error); CT_UNLOCK (bd); d->devt = make_dev (&ct_cdevsw, b->num*NCHAN+c->num, UID_ROOT, GID_WHEEL, 0600, "ct%d", b->num*NCHAN+c->num); } splx (s); return 0; } static int ct_detach (device_t dev) { bdrv_t *bd = device_get_softc (dev); ct_board_t *b = bd->board; ct_chan_t *c; int s; KASSERT (mtx_initialized (&bd->ct_mtx), ("ct mutex not initialized")); s = splimp (); CT_LOCK (bd); /* Check if the device is busy (open). */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || !d->chan->type) continue; if (d->running) { CT_UNLOCK (bd); splx (s); return EBUSY; } } /* Deactivate the timeout routine. */ callout_stop (&led_timo[b->num]); CT_UNLOCK (bd); bus_teardown_intr (dev, bd->irq_res, bd->intrhand); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); CT_LOCK (bd); ct_close_board (b); CT_UNLOCK (bd); /* Detach the interfaces, free buffer memory. */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || !d->chan->type) continue; callout_stop (&d->timeout_handle); #ifdef NETGRAPH if (d->node) { ng_rmnode_self (d->node); NG_NODE_UNREF (d->node); d->node = NULL; } mtx_destroy (&d->queue.ifq_mtx); mtx_destroy (&d->hi_queue.ifq_mtx); #else /* Detach from the packet filter list of interfaces. */ bpfdetach (d->ifp); /* Detach from the sync PPP list. */ sppp_detach (d->ifp); if_detach (d->ifp); if_free (d->ifp); IF_DRAIN (&d->queue); mtx_destroy (&d->queue.ifq_mtx); #endif destroy_dev (d->devt); } CT_LOCK (bd); ct_led_off (b); CT_UNLOCK (bd); callout_drain (&led_timo[b->num]); splx (s); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || !d->chan->type) continue; callout_drain(&d->timeout_handle); /* Deallocate buffers. */ ct_bus_dma_mem_free (&d->dmamem); } bd->board = 0; adapter [b->num] = 0; free (b, M_DEVBUF); mtx_destroy (&bd->ct_mtx); return 0; } #ifndef NETGRAPH static void ct_ifstart (struct ifnet *ifp) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->bd; CT_LOCK (bd); ct_start (d); CT_UNLOCK (bd); } static void ct_tlf (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CT_DEBUG (d, ("ct_tlf\n")); /* ct_set_dtr (d->chan, 0);*/ /* ct_set_rts (d->chan, 0);*/ if (!(sp->pp_flags & PP_FR) && !(d->ifp->if_flags & PP_CISCO)) sp->pp_down (sp); } static void ct_tls (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CT_DEBUG (d, ("ct_tls\n")); if (!(sp->pp_flags & PP_FR) && !(d->ifp->if_flags & PP_CISCO)) sp->pp_up (sp); } /* * Initialization of the interface. * It never seems to be called by the upper level. */ static void ct_initialize (void *softc) { drv_t *d = softc; CT_DEBUG (d, ("ct_initialize\n")); } /* * Process an ioctl request. */ static int ct_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->bd; int error, s, was_up, should_be_up; was_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; error = sppp_ioctl (ifp, cmd, data); if (error) return error; if (!
(ifp->if_flags & IFF_DEBUG)) d->chan->debug = 0; else d->chan->debug = d->chan->debug_shadow; switch (cmd) { default: CT_DEBUG2 (d, ("ioctl 0x%lx\n", cmd)); return 0; case SIOCADDMULTI: CT_DEBUG2 (d, ("SIOCADDMULTI\n")); return 0; case SIOCDELMULTI: CT_DEBUG2 (d, ("SIOCDELMULTI\n")); return 0; case SIOCSIFFLAGS: CT_DEBUG2 (d, ("SIOCSIFFLAGS\n")); break; case SIOCSIFADDR: CT_DEBUG2 (d, ("SIOCSIFADDR\n")); break; } /* We get here only in case of SIFFLAGS or SIFADDR. */ s = splimp (); CT_LOCK (bd); should_be_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; if (! was_up && should_be_up) { /* Interface goes up -- start it. */ ct_up (d); ct_start (d); } else if (was_up && ! should_be_up) { /* Interface is going down -- stop it. */ /* if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (ifp->if_flags & PP_CISCO))*/ ct_down (d); } CT_UNLOCK (bd); splx (s); return 0; } #endif /*NETGRAPH*/ /* * Stop the interface. Called on splimp(). */ static void ct_down (drv_t *d) { int s = splimp (); CT_DEBUG (d, ("ct_down\n")); ct_set_dtr (d->chan, 0); ct_set_rts (d->chan, 0); d->running = 0; callout_stop (&d->timeout_handle); splx (s); } /* * Start the interface. Called on splimp(). */ static void ct_up (drv_t *d) { int s = splimp (); CT_DEBUG (d, ("ct_up\n")); ct_set_dtr (d->chan, 1); ct_set_rts (d->chan, 1); d->running = 1; splx (s); } /* * Start output on the (slave) interface. Get another datagram to send * off of the interface queue, and copy it to the interface * before starting the output. */ static void ct_send (drv_t *d) { struct mbuf *m; u_short len; CT_DEBUG2 (d, ("ct_send, tn=%d\n", d->chan->tn)); /* No output if the interface is down. */ if (! d->running) return; /* No output if the modem is off. */ if (! ct_get_dsr (d->chan) && !ct_get_loop (d->chan)) return; while (ct_buf_free (d->chan)) { /* Get the packet to send. */ #ifdef NETGRAPH IF_DEQUEUE (&d->hi_queue, m); if (! m) IF_DEQUEUE (&d->queue, m); #else m = sppp_dequeue (d->ifp); #endif if (! m) return; #ifndef NETGRAPH BPF_MTAP (d->ifp, m); #endif len = m_length (m, NULL); if (! m->m_next) ct_send_packet (d->chan, (u_char*)mtod (m, caddr_t), len, 0); else { m_copydata (m, 0, len, d->chan->tbuf[d->chan->te]); ct_send_packet (d->chan, d->chan->tbuf[d->chan->te], len, 0); } m_freem (m); /* Set up transmit timeout, if the transmit ring is not empty. * Transmit timeout is 10 seconds. */ d->timeout = 10; } #ifndef NETGRAPH d->ifp->if_drv_flags |= IFF_DRV_OACTIVE; #endif } /* * Start output on the interface. * Always called on splimp(). */ static void ct_start (drv_t *d) { int s = splimp (); if (d->running) { if (! d->chan->dtr) ct_set_dtr (d->chan, 1); if (! d->chan->rts) ct_set_rts (d->chan, 1); ct_send (d); callout_reset (&d->timeout_handle, hz, ct_watchdog_timer, d); } splx (s); } /* * Handle transmit timeouts. * Recover after lost transmit interrupts. * Always called on splimp(). */ static void ct_watchdog (drv_t *d) { CT_DEBUG (d, ("device timeout\n")); if (d->running) { ct_setup_chan (d->chan); ct_start_chan (d->chan, 0, 0); ct_set_dtr (d->chan, 1); ct_set_rts (d->chan, 1); ct_start (d); } } static void ct_watchdog_timer (void *arg) { drv_t *d = arg; bdrv_t *bd = d->bd; CT_LOCK (bd); if (d->timeout == 1) ct_watchdog (d); if (d->timeout) d->timeout--; callout_reset (&d->timeout_handle, hz, ct_watchdog_timer, d); CT_UNLOCK (bd); } /* * Transmit callback function. 
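ct_send() above handles fragmented packets by m_copydata()ing the chain into the channel's transmit buffer and sends single-mbuf packets straight from mtod(). When a driver instead needs the mbuf itself to be contiguous, m_defrag(9) is the generic tool; a sketch of that alternative (not what this driver does):

/* Return a contiguous mbuf, or NULL with the original freed. */
static struct mbuf *
xx_linearize(struct mbuf *m)
{
	struct mbuf *n;

	if (m->m_next == NULL)
		return (m);		/* already one piece */
	n = m_defrag(m, M_NOWAIT);	/* one mbuf if it fits a cluster */
	if (n == NULL)
		m_freem(m);
	return (n);
}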
*/ static void ct_transmit (ct_chan_t *c, void *attachment, int len) { drv_t *d = c->sys; if (!d) return; d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OPACKETS, 1); d->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #endif ct_start (d); } /* * Process the received packet. */ static void ct_receive (ct_chan_t *c, char *data, int len) { drv_t *d = c->sys; struct mbuf *m; #ifdef NETGRAPH int error; #endif if (!d || !d->running) return; m = makembuf (data, len); if (! m) { CT_DEBUG (d, ("no memory for packet\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IQDROPS, 1); #endif return; } if (c->debug > 1) m_print (m, 0); #ifdef NETGRAPH m->m_pkthdr.rcvif = 0; NG_SEND_DATA_ONLY (error, d->hook, m); #else if_inc_counter(d->ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = d->ifp; /* Check if there's a BPF listener on this interface. * If so, hand off the raw packet to bpf. */ BPF_MTAP(d->ifp, m); IF_ENQUEUE (&d->queue, m); #endif } /* * Error callback function. */ static void ct_error (ct_chan_t *c, int data) { drv_t *d = c->sys; if (!d) return; switch (data) { case CT_FRAME: CT_DEBUG (d, ("frame error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CT_CRC: CT_DEBUG (d, ("crc error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CT_OVERRUN: CT_DEBUG (d, ("overrun error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_COLLISIONS, 1); if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CT_OVERFLOW: CT_DEBUG (d, ("overflow error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CT_UNDERRUN: CT_DEBUG (d, ("underrun error\n")); d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OERRORS, 1); d->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #endif ct_start (d); break; default: CT_DEBUG (d, ("error #%d\n", data)); } } static int ct_open (struct cdev *dev, int oflags, int devtype, struct thread *td) { drv_t *d; if (dev2unit(dev) >= NCTAU*NCHAN || ! (d = channel[dev2unit(dev)])) return ENXIO; CT_DEBUG2 (d, ("ct_open\n")); return 0; } static int ct_close (struct cdev *dev, int fflag, int devtype, struct thread *td) { drv_t *d = channel [dev2unit(dev)]; if (!d) return 0; CT_DEBUG2 (d, ("ct_close\n")); return 0; } static int ct_modem_status (ct_chan_t *c) { drv_t *d = c->sys; bdrv_t *bd; int status, s; if (!d) return 0; bd = d->bd; status = d->running ? TIOCM_LE : 0; s = splimp (); CT_LOCK (bd); if (ct_get_cd (c)) status |= TIOCM_CD; if (ct_get_cts (c)) status |= TIOCM_CTS; if (ct_get_dsr (c)) status |= TIOCM_DSR; if (c->dtr) status |= TIOCM_DTR; if (c->rts) status |= TIOCM_RTS; CT_UNLOCK (bd); splx (s); return status; } /* * Process an ioctl request on /dev/cronyx/ctauN. */ static int ct_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { drv_t *d = channel [dev2unit (dev)]; bdrv_t *bd; ct_chan_t *c; struct serial_statistics *st; struct e1_statistics *opte1; int error, s; char mask[16]; if (!d || !d->chan) return 0; bd = d->bd; c = d->chan; switch (cmd) { case SERIAL_GETREGISTERED: bzero (mask, sizeof(mask)); for (s=0; sifp)->pp_flags & PP_FR) ? "fr" : (d->ifp->if_flags & PP_CISCO) ? "cisco" : "ppp"); return 0; case SERIAL_SETPROTO: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (d->ifp->if_drv_flags & IFF_DRV_RUNNING) return EBUSY; if (! 
strcmp ("cisco", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~(PP_FR); IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; d->ifp->if_flags |= PP_CISCO; } else if (! strcmp ("fr", (char*)data)) { d->ifp->if_flags &= ~(PP_CISCO); IFP2SP(d->ifp)->pp_flags |= PP_FR | PP_KEEPALIVE; } else if (! strcmp ("ppp", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~(PP_FR | PP_KEEPALIVE); d->ifp->if_flags &= ~(PP_CISCO); } else return EINVAL; return 0; case SERIAL_GETKEEPALIVE: if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; *(int*)data = (IFP2SP(d->ifp)->pp_flags & PP_KEEPALIVE) ? 1 : 0; return 0; case SERIAL_SETKEEPALIVE: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; if (*(int*)data) IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; else IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; return 0; #endif /*NETGRAPH*/ case SERIAL_GETMODE: *(int*)data = SERIAL_HDLC; return 0; case SERIAL_GETCFG: if (c->mode == M_HDLC) return EINVAL; switch (ct_get_config (c->board)) { default: *(char*)data = 'a'; break; case CFG_B: *(char*)data = 'b'; break; case CFG_C: *(char*)data = 'c'; break; } return 0; case SERIAL_SETCFG: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_HDLC) return EINVAL; s = splimp (); CT_LOCK (bd); switch (*(char*)data) { case 'a': ct_set_config (c->board, CFG_A); break; case 'b': ct_set_config (c->board, CFG_B); break; case 'c': ct_set_config (c->board, CFG_C); break; } CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSTAT: st = (struct serial_statistics*) data; st->rintr = c->rintr; st->tintr = c->tintr; st->mintr = c->mintr; st->ibytes = c->ibytes; st->ipkts = c->ipkts; st->ierrs = c->ierrs; st->obytes = c->obytes; st->opkts = c->opkts; st->oerrs = c->oerrs; return 0; case SERIAL_GETESTAT: opte1 = (struct e1_statistics*)data; opte1->status = c->status; opte1->cursec = c->cursec; opte1->totsec = c->totsec + c->cursec; opte1->currnt.bpv = c->currnt.bpv; opte1->currnt.fse = c->currnt.fse; opte1->currnt.crce = c->currnt.crce; opte1->currnt.rcrce = c->currnt.rcrce; opte1->currnt.uas = c->currnt.uas; opte1->currnt.les = c->currnt.les; opte1->currnt.es = c->currnt.es; opte1->currnt.bes = c->currnt.bes; opte1->currnt.ses = c->currnt.ses; opte1->currnt.oofs = c->currnt.oofs; opte1->currnt.css = c->currnt.css; opte1->currnt.dm = c->currnt.dm; opte1->total.bpv = c->total.bpv + c->currnt.bpv; opte1->total.fse = c->total.fse + c->currnt.fse; opte1->total.crce = c->total.crce + c->currnt.crce; opte1->total.rcrce = c->total.rcrce + c->currnt.rcrce; opte1->total.uas = c->total.uas + c->currnt.uas; opte1->total.les = c->total.les + c->currnt.les; opte1->total.es = c->total.es + c->currnt.es; opte1->total.bes = c->total.bes + c->currnt.bes; opte1->total.ses = c->total.ses + c->currnt.ses; opte1->total.oofs = c->total.oofs + c->currnt.oofs; opte1->total.css = c->total.css + c->currnt.css; opte1->total.dm = c->total.dm + c->currnt.dm; for (s=0; s<48; ++s) { opte1->interval[s].bpv = c->interval[s].bpv; opte1->interval[s].fse = c->interval[s].fse; opte1->interval[s].crce = c->interval[s].crce; opte1->interval[s].rcrce = c->interval[s].rcrce; opte1->interval[s].uas = c->interval[s].uas; opte1->interval[s].les = c->interval[s].les; opte1->interval[s].es = c->interval[s].es; opte1->interval[s].bes = c->interval[s].bes; opte1->interval[s].ses = c->interval[s].ses; opte1->interval[s].oofs = c->interval[s].oofs; 
opte1->interval[s].css = c->interval[s].css; opte1->interval[s].dm = c->interval[s].dm; } return 0; case SERIAL_CLRSTAT: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; c->rintr = 0; c->tintr = 0; c->mintr = 0; c->ibytes = 0; c->ipkts = 0; c->ierrs = 0; c->obytes = 0; c->opkts = 0; c->oerrs = 0; bzero (&c->currnt, sizeof (c->currnt)); bzero (&c->total, sizeof (c->total)); bzero (c->interval, sizeof (c->interval)); return 0; case SERIAL_GETBAUD: *(long*)data = ct_get_baud(c); return 0; case SERIAL_SETBAUD: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); ct_set_baud (c, *(long*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETLOOP: *(int*)data = ct_get_loop (c); return 0; case SERIAL_SETLOOP: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); ct_set_loop (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDPLL: if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; *(int*)data = ct_get_dpll (c); return 0; case SERIAL_SETDPLL: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; s = splimp (); CT_LOCK (bd); ct_set_dpll (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETNRZI: if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; *(int*)data = ct_get_nrzi (c); return 0; case SERIAL_SETNRZI: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; s = splimp (); CT_LOCK (bd); ct_set_nrzi (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDEBUG: *(int*)data = c->debug; return 0; case SERIAL_SETDEBUG: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; #ifndef NETGRAPH /* * The debug_shadow is always greater than zero for logic * simplicity. For switching debug off the IFF_DEBUG is * responsible. */ c->debug_shadow = (*(int*)data) ? (*(int*)data) : 1; if (d->ifp->if_flags & IFF_DEBUG) c->debug = c->debug_shadow; #else c->debug = *(int*)data; #endif return 0; case SERIAL_GETHIGAIN: if (c->mode != M_E1) return EINVAL; *(int*)data = ct_get_higain (c); return 0; case SERIAL_SETHIGAIN: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); ct_set_higain (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETPHONY: CT_DEBUG2 (d, ("ioctl: getphony\n")); if (c->mode != M_E1) return EINVAL; *(int*)data = c->gopt.phony; return 0; case SERIAL_SETPHONY: CT_DEBUG2 (d, ("ioctl: setphony\n")); if (c->mode != M_E1) return EINVAL; /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); ct_set_phony (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCLK: if (c->mode != M_E1 && c->mode != M_G703) return EINVAL; switch (ct_get_clk(c)) { default: *(int*)data = E1CLK_INTERNAL; break; case GCLK_RCV: *(int*)data = E1CLK_RECEIVE; break; case GCLK_RCLKO: *(int*)data = c->num ? E1CLK_RECEIVE_CHAN0 : E1CLK_RECEIVE_CHAN1; break; } return 0; case SERIAL_SETCLK: /* Only for superuser! 
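The SERIAL_SETCLK case opened here repeats the guard used by every mutating command in this switch: priv_check(9) first, hardware access after. The cdev ioctl layer has already copied the argument between user and kernel space according to the command's _IOR/_IOW encoding, which is why the handlers dereference data directly. The guard as a self-contained sketch (xx_* names hypothetical):

/* Privileged setter: reject unprivileged callers with EPERM. */
static int
xx_set_clock_source(struct thread *td, struct xx_softc *sc, int src)
{
	int error;

	error = priv_check(td, PRIV_DRIVER);
	if (error)
		return (error);
	return (xx_program_clock(sc, src));	/* hypothetical helper */
}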
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); switch (*(int*)data) { default: ct_set_clk (c, GCLK_INT); break; case E1CLK_RECEIVE: ct_set_clk (c, GCLK_RCV); break; case E1CLK_RECEIVE_CHAN0: case E1CLK_RECEIVE_CHAN1: ct_set_clk (c, GCLK_RCLKO); break; } CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETTIMESLOTS: if (c->mode != M_E1) return EINVAL; *(long*)data = ct_get_ts (c); return 0; case SERIAL_SETTIMESLOTS: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); ct_set_ts (c, *(long*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSUBCHAN: if (c->mode != M_E1) return EINVAL; *(long*)data = ct_get_subchan (c->board); return 0; case SERIAL_SETSUBCHAN: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); ct_set_subchan (c->board, *(long*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETINVCLK: case SERIAL_GETINVTCLK: if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; *(int*)data = ct_get_invtxc (c); return 0; case SERIAL_GETINVRCLK: if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; *(int*)data = ct_get_invrxc (c); return 0; case SERIAL_SETINVCLK: case SERIAL_SETINVTCLK: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; s = splimp (); CT_LOCK (bd); ct_set_invtxc (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_SETINVRCLK: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; s = splimp (); CT_LOCK (bd); ct_set_invrxc (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETLEVEL: if (c->mode != M_G703) return EINVAL; s = splimp (); CT_LOCK (bd); *(int*)data = ct_get_lq (c); CT_UNLOCK (bd); splx (s); return 0; case TIOCSDTR: /* Set DTR */ s = splimp (); CT_LOCK (bd); ct_set_dtr (c, 1); CT_UNLOCK (bd); splx (s); return 0; case TIOCCDTR: /* Clear DTR */ s = splimp (); CT_LOCK (bd); ct_set_dtr (c, 0); CT_UNLOCK (bd); splx (s); return 0; case TIOCMSET: /* Set DTR/RTS */ s = splimp (); CT_LOCK (bd); ct_set_dtr (c, (*(int*)data & TIOCM_DTR) ? 1 : 0); ct_set_rts (c, (*(int*)data & TIOCM_RTS) ? 
1 : 0); CT_UNLOCK (bd); splx (s); return 0; case TIOCMBIS: /* Add DTR/RTS */ s = splimp (); CT_LOCK (bd); if (*(int*)data & TIOCM_DTR) ct_set_dtr (c, 1); if (*(int*)data & TIOCM_RTS) ct_set_rts (c, 1); CT_UNLOCK (bd); splx (s); return 0; case TIOCMBIC: /* Clear DTR/RTS */ s = splimp (); CT_LOCK (bd); if (*(int*)data & TIOCM_DTR) ct_set_dtr (c, 0); if (*(int*)data & TIOCM_RTS) ct_set_rts (c, 0); CT_UNLOCK (bd); splx (s); return 0; case TIOCMGET: /* Get modem status */ *(int*)data = ct_modem_status (c); return 0; } return ENOTTY; } #ifdef NETGRAPH static int ng_ct_constructor (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); CT_DEBUG (d, ("Constructor\n")); return EINVAL; } static int ng_ct_newhook (node_p node, hook_p hook, const char *name) { int s; drv_t *d = NG_NODE_PRIVATE (node); if (!d) return EINVAL; bdrv_t *bd = d->bd; /* Attach debug hook */ if (strcmp (name, NG_CT_HOOK_DEBUG) == 0) { NG_HOOK_SET_PRIVATE (hook, NULL); d->debug_hook = hook; return 0; } /* Check for raw hook */ if (strcmp (name, NG_CT_HOOK_RAW) != 0) return EINVAL; NG_HOOK_SET_PRIVATE (hook, d); d->hook = hook; s = splimp (); CT_LOCK (bd); ct_up (d); CT_UNLOCK (bd); splx (s); return 0; } static char *format_timeslots (u_long s) { static char buf [100]; char *p = buf; int i; for (i=1; i<32; ++i) if ((s >> i) & 1) { int prev = (i > 1) & (s >> (i-1)); int next = (i < 31) & (s >> (i+1)); if (prev) { if (next) continue; *p++ = '-'; } else if (p > buf) *p++ = ','; if (i >= 10) *p++ = '0' + i / 10; *p++ = '0' + i % 10; } *p = 0; return buf; } static int print_modems (char *s, ct_chan_t *c, int need_header) { int status = ct_modem_status (c); int length = 0; if (need_header) length += sprintf (s + length, " LE DTR DSR RTS CTS CD\n"); length += sprintf (s + length, "%4s %4s %4s %4s %4s %4s\n", status & TIOCM_LE ? "On" : "-", status & TIOCM_DTR ? "On" : "-", status & TIOCM_DSR ? "On" : "-", status & TIOCM_RTS ? "On" : "-", status & TIOCM_CTS ? "On" : "-", status & TIOCM_CD ? "On" : "-"); return length; } static int print_stats (char *s, ct_chan_t *c, int need_header) { struct serial_statistics st; int length = 0; st.rintr = c->rintr; st.tintr = c->tintr; st.mintr = c->mintr; st.ibytes = c->ibytes; st.ipkts = c->ipkts; st.ierrs = c->ierrs; st.obytes = c->obytes; st.opkts = c->opkts; st.oerrs = c->oerrs; if (need_header) length += sprintf (s + length, " Rintr Tintr Mintr Ibytes Ipkts Ierrs Obytes Opkts Oerrs\n"); length += sprintf (s + length, "%7ld %7ld %7ld %8ld %7ld %7ld %8ld %7ld %7ld\n", st.rintr, st.tintr, st.mintr, st.ibytes, st.ipkts, st.ierrs, st.obytes, st.opkts, st.oerrs); return length; } static char *format_e1_status (u_char status) { static char buf [80]; if (status & E1_NOALARM) return "Ok"; buf[0] = 0; if (status & E1_LOS) strcat (buf, ",LOS"); if (status & E1_AIS) strcat (buf, ",AIS"); if (status & E1_LOF) strcat (buf, ",LOF"); if (status & E1_LOMF) strcat (buf, ",LOMF"); if (status & E1_FARLOF) strcat (buf, ",FARLOF"); if (status & E1_AIS16) strcat (buf, ",AIS16"); if (status & E1_FARLOMF) strcat (buf, ",FARLOMF"); if (status & E1_TSTREQ) strcat (buf, ",TSTREQ"); if (status & E1_TSTERR) strcat (buf, ",TSTERR"); if (buf[0] == ',') return buf+1; return "Unknown"; } static int print_frac (char *s, int leftalign, u_long numerator, u_long divider) { int n, length = 0; if (numerator < 1 || divider < 1) { length += sprintf (s+length, leftalign ? "/- " : " -"); return length; } n = (int) (0.5 + 1000.0 * numerator / divider); if (n < 1000) { length += sprintf (s+length, leftalign ? 
"/.%-3d" : " .%03d", n); return length; } *(s + length) = leftalign ? '/' : ' '; length ++; if (n >= 1000000) n = (n+500) / 1000 * 1000; else if (n >= 100000) n = (n+50) / 100 * 100; else if (n >= 10000) n = (n+5) / 10 * 10; switch (n) { case 1000: length += printf (s+length, ".999"); return length; case 10000: n = 9990; break; case 100000: n = 99900; break; case 1000000: n = 999000; break; } if (n < 10000) length += sprintf (s+length, "%d.%d", n/1000, n/10%100); else if (n < 100000) length += sprintf (s+length, "%d.%d", n/1000, n/100%10); else if (n < 1000000) length += sprintf (s+length, "%d.", n/1000); else length += sprintf (s+length, "%d", n/1000); return length; } static int print_e1_stats (char *s, ct_chan_t *c) { struct e1_counters total; u_long totsec; int length = 0; totsec = c->totsec + c->cursec; total.bpv = c->total.bpv + c->currnt.bpv; total.fse = c->total.fse + c->currnt.fse; total.crce = c->total.crce + c->currnt.crce; total.rcrce = c->total.rcrce + c->currnt.rcrce; total.uas = c->total.uas + c->currnt.uas; total.les = c->total.les + c->currnt.les; total.es = c->total.es + c->currnt.es; total.bes = c->total.bes + c->currnt.bes; total.ses = c->total.ses + c->currnt.ses; total.oofs = c->total.oofs + c->currnt.oofs; total.css = c->total.css + c->currnt.css; total.dm = c->total.dm + c->currnt.dm; length += sprintf (s + length, " Unav/Degr Bpv/Fsyn CRC/RCRC Err/Lerr Sev/Bur Oof/Slp Status\n"); /* Unavailable seconds, degraded minutes */ length += print_frac (s + length, 0, c->currnt.uas, c->cursec); length += print_frac (s + length, 1, 60 * c->currnt.dm, c->cursec); /* Bipolar violations, frame sync errors */ length += print_frac (s + length, 0, c->currnt.bpv, c->cursec); length += print_frac (s + length, 1, c->currnt.fse, c->cursec); /* CRC errors, remote CRC errors (E-bit) */ length += print_frac (s + length, 0, c->currnt.crce, c->cursec); length += print_frac (s + length, 1, c->currnt.rcrce, c->cursec); /* Errored seconds, line errored seconds */ length += print_frac (s + length, 0, c->currnt.es, c->cursec); length += print_frac (s + length, 1, c->currnt.les, c->cursec); /* Severely errored seconds, burst errored seconds */ length += print_frac (s + length, 0, c->currnt.ses, c->cursec); length += print_frac (s + length, 1, c->currnt.bes, c->cursec); /* Out of frame seconds, controlled slip seconds */ length += print_frac (s + length, 0, c->currnt.oofs, c->cursec); length += print_frac (s + length, 1, c->currnt.css, c->cursec); length += sprintf (s + length, " %s\n", format_e1_status (c->status)); /* Print total statistics. 
*/ length += print_frac (s + length, 0, total.uas, totsec); length += print_frac (s + length, 1, 60 * total.dm, totsec); length += print_frac (s + length, 0, total.bpv, totsec); length += print_frac (s + length, 1, total.fse, totsec); length += print_frac (s + length, 0, total.crce, totsec); length += print_frac (s + length, 1, total.rcrce, totsec); length += print_frac (s + length, 0, total.es, totsec); length += print_frac (s + length, 1, total.les, totsec); length += print_frac (s + length, 0, total.ses, totsec); length += print_frac (s + length, 1, total.bes, totsec); length += print_frac (s + length, 0, total.oofs, totsec); length += print_frac (s + length, 1, total.css, totsec); length += sprintf (s + length, " -- Total\n"); return length; } static int print_chan (char *s, ct_chan_t *c) { drv_t *d = c->sys; bdrv_t *bd = d->bd; int length = 0; length += sprintf (s + length, "ct%d", c->board->num * NCHAN + c->num); if (d->chan->debug) length += sprintf (s + length, " debug=%d", d->chan->debug); switch (ct_get_config (c->board)) { case CFG_A: length += sprintf (s + length, " cfg=A"); break; case CFG_B: length += sprintf (s + length, " cfg=B"); break; case CFG_C: length += sprintf (s + length, " cfg=C"); break; default: length += sprintf (s + length, " cfg=unknown"); break; } if (ct_get_baud (c)) length += sprintf (s + length, " %ld", ct_get_baud (c)); else length += sprintf (s + length, " extclock"); if (c->mode == M_E1 || c->mode == M_G703) switch (ct_get_clk(c)) { case GCLK_INT : length += sprintf (s + length, " syn=int"); break; case GCLK_RCV : length += sprintf (s + length, " syn=rcv"); break; case GCLK_RCLKO : length += sprintf (s + length, " syn=xrcv"); break; } if (c->mode == M_HDLC) { length += sprintf (s + length, " dpll=%s", ct_get_dpll (c) ? "on" : "off"); length += sprintf (s + length, " nrzi=%s", ct_get_nrzi (c) ? "on" : "off"); length += sprintf (s + length, " invtclk=%s", ct_get_invtxc (c) ? "on" : "off"); length += sprintf (s + length, " invrclk=%s", ct_get_invrxc (c) ? "on" : "off"); } if (c->mode == M_E1) length += sprintf (s + length, " higain=%s", ct_get_higain (c)? "on" : "off"); length += sprintf (s + length, " loop=%s", ct_get_loop (c) ? "on" : "off"); if (c->mode == M_E1) length += sprintf (s + length, " ts=%s", format_timeslots (ct_get_ts(c))); if (c->mode == M_E1 && ct_get_config (c->board) != CFG_A) length += sprintf (s + length, " pass=%s", format_timeslots (ct_get_subchan(c->board))); if (c->mode == M_G703) { int lq, x; x = splimp (); CT_LOCK (bd); lq = ct_get_lq (c); CT_UNLOCK (bd); splx (x); length += sprintf (s + length, " (level=-%.1fdB)", lq / 10.0); } length += sprintf (s + length, "\n"); return length; } static int ng_ct_rcvmsg (node_p node, item_p item, hook_p lasthook) { drv_t *d = NG_NODE_PRIVATE (node); struct ng_mesg *msg; struct ng_mesg *resp = NULL; int error = 0; if (!d) return EINVAL; CT_DEBUG (d, ("Rcvmsg\n")); NGI_GET_MSG (item, msg); switch (msg->header.typecookie) { default: error = EINVAL; break; case NGM_CT_COOKIE: printf ("Don't forget to implement\n"); error = EINVAL; break; case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { default: error = EINVAL; break; case NGM_TEXT_STATUS: { char *s; int l = 0; int dl = sizeof (struct ng_mesg) + 730; NG_MKRESPONSE (resp, msg, dl, M_NOWAIT); if (! 
resp) { error = ENOMEM; break; } s = (resp)->data; l += print_chan (s + l, d->chan); l += print_stats (s + l, d->chan, 1); l += print_modems (s + l, d->chan, 1); l += print_e1_stats (s + l, d->chan); strncpy ((resp)->header.cmdstr, "status", NG_CMDSTRSIZ); } break; } break; } NG_RESPOND_MSG (error, node, item, resp); NG_FREE_MSG (msg); return error; } static int ng_ct_rcvdata (hook_p hook, item_p item) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE(hook)); struct mbuf *m; struct ng_tag_prio *ptag; bdrv_t *bd; struct ifqueue *q; int s; if (!d) return ENETDOWN; bd = d->bd; NGI_GET_M (item, m); NG_FREE_ITEM (item); if (! NG_HOOK_PRIVATE (hook) || ! d) { NG_FREE_M (m); return ENETDOWN; } /* Check for high priority data */ if ((ptag = (struct ng_tag_prio *)m_tag_locate(m, NGM_GENERIC_COOKIE, NG_TAG_PRIO, NULL)) != NULL && (ptag->priority > NG_PRIO_CUTOFF) ) q = &d->hi_queue; else q = &d->queue; s = splimp (); CT_LOCK (bd); IF_LOCK (q); if (_IF_QFULL (q)) { IF_UNLOCK (q); CT_UNLOCK (bd); splx (s); NG_FREE_M (m); return ENOBUFS; } _IF_ENQUEUE (q, m); IF_UNLOCK (q); ct_start (d); CT_UNLOCK (bd); splx (s); return 0; } static int ng_ct_rmnode (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); bdrv_t *bd; CT_DEBUG (d, ("Rmnode\n")); if (d && d->running) { bd = d->bd; int s = splimp (); CT_LOCK (bd); ct_down (d); CT_UNLOCK (bd); splx (s); } #ifdef KLD_MODULE if (node->nd_flags & NGF_REALLY_DIE) { NG_NODE_SET_PRIVATE (node, NULL); NG_NODE_UNREF (node); } NG_NODE_REVIVE(node); /* Persistant node */ #endif return 0; } static int ng_ct_connect (hook_p hook) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); if (!d) return 0; callout_reset (&d->timeout_handle, hz, ct_watchdog_timer, d); return 0; } static int ng_ct_disconnect (hook_p hook) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); bdrv_t *bd; if (!d) return 0; bd = d->bd; CT_LOCK (bd); if (NG_HOOK_PRIVATE (hook)) ct_down (d); CT_UNLOCK (bd); /* If we were wait it than it reasserted now, just stop it. */ if (!callout_drain (&d->timeout_handle)) callout_stop (&d->timeout_handle); return 0; } #endif static int ct_modevent (module_t mod, int type, void *unused) { static int load_count = 0; switch (type) { case MOD_LOAD: #ifdef NETGRAPH if (ng_newtype (&typestruct)) printf ("Failed to register ng_ct\n"); #endif ++load_count; callout_init (&timeout_handle, CALLOUT_MPSAFE); callout_reset (&timeout_handle, hz*5, ct_timeout, 0); break; case MOD_UNLOAD: if (load_count == 1) { printf ("Removing device entry for Tau-ISA\n"); #ifdef NETGRAPH ng_rmtype (&typestruct); #endif } /* If we were wait it than it reasserted now, just stop it. 
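*/

/*
 * Sketch: how a sending node could mark an mbuf so that ng_ct_rcvdata()
 * above routes it to the high-priority queue.  Assumes the ng_tag_prio
 * layout from <netgraph/netgraph.h>; ct_tag_hi_prio() is a hypothetical
 * helper, not part of this driver.
 */
static int
ct_tag_hi_prio (struct mbuf *m)
{
	struct ng_tag_prio *ptag;

	ptag = (struct ng_tag_prio *) m_tag_alloc (NGM_GENERIC_COOKIE,
		NG_TAG_PRIO, sizeof (*ptag) - sizeof (ptag->tag), M_NOWAIT);
	if (ptag == NULL)
		return ENOMEM;
	/* Anything above NG_PRIO_CUTOFF selects d->hi_queue. */
	ptag->priority = NG_PRIO_CUTOFF + 1;
	ptag->discardability = -1;	/* illustrative; not examined here */
	m_tag_prepend (m, &ptag->tag);
	return 0;
}

/*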
*/ if (!callout_drain (&timeout_handle)) callout_stop (&timeout_handle); --load_count; break; case MOD_SHUTDOWN: break; } return 0; } #ifdef NETGRAPH static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_CT_NODE_TYPE, .constructor = ng_ct_constructor, .rcvmsg = ng_ct_rcvmsg, .shutdown = ng_ct_rmnode, .newhook = ng_ct_newhook, .connect = ng_ct_connect, .rcvdata = ng_ct_rcvdata, .disconnect = ng_ct_disconnect, }; #endif /*NETGRAPH*/ #ifdef NETGRAPH MODULE_DEPEND (ng_ct, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); #else MODULE_DEPEND (ct, sppp, 1, 1, 1); #endif DRIVER_MODULE (ct, isa, ct_isa_driver, ct_devclass, ct_modevent, NULL); MODULE_VERSION (ct, 1); Index: head/sys/dev/ed/if_ed.c =================================================================== --- head/sys/dev/ed/if_ed.c (revision 276749) +++ head/sys/dev/ed/if_ed.c (revision 276750) @@ -1,1855 +1,1852 @@ /*- * Copyright (c) 1995, David Greenman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Device driver for National Semiconductor DS8390/WD83C690 based ethernet * adapters. By David Greenman, 29-April-1993 * * Currently supports the Western Digital/SMC 8003 and 8013 series, * the SMC Elite Ultra (8216), the 3Com 3c503, the NE1000 and NE2000, * and a variety of similar clones. 
* */ #include "opt_ed.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include devclass_t ed_devclass; static void ed_init(void *); static void ed_init_locked(struct ed_softc *); static int ed_ioctl(struct ifnet *, u_long, caddr_t); static void ed_start(struct ifnet *); static void ed_start_locked(struct ifnet *); static void ed_reset(struct ifnet *); static void ed_tick(void *); static void ed_watchdog(struct ed_softc *); static void ed_ds_getmcaf(struct ed_softc *, uint32_t *); static void ed_get_packet(struct ed_softc *, bus_size_t, u_short); static void ed_stop_hw(struct ed_softc *sc); static __inline void ed_rint(struct ed_softc *); static __inline void ed_xmit(struct ed_softc *); static __inline void ed_ring_copy(struct ed_softc *, bus_size_t, char *, u_short); static void ed_setrcr(struct ed_softc *); /* * Generic probe routine for testing for the existance of a DS8390. * Must be called after the NIC has just been reset. This routine * works by looking at certain register values that are guaranteed * to be initialized a certain way after power-up or reset. Seems * not to currently work on the 83C690. * * Specifically: * * Register reset bits set bits * Command Register (CR) TXP, STA RD2, STP * Interrupt Status (ISR) RST * Interrupt Mask (IMR) All bits * Data Control (DCR) LAS * Transmit Config. (TCR) LB1, LB0 * * We only look at the CR and ISR registers, however, because looking at * the others would require changing register pages (which would be * intrusive if this isn't an 8390). * * Return 1 if 8390 was found, 0 if not. */ int ed_probe_generic8390(struct ed_softc *sc) { if ((ed_nic_inb(sc, ED_P0_CR) & (ED_CR_RD2 | ED_CR_TXP | ED_CR_STA | ED_CR_STP)) != (ED_CR_RD2 | ED_CR_STP)) return (0); if ((ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RST) != ED_ISR_RST) return (0); return (1); } void ed_disable_16bit_access(struct ed_softc *sc) { /* * Disable 16 bit access to shared memory */ if (sc->isa16bit && sc->vendor == ED_VENDOR_WD_SMC) { if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_asic_outb(sc, ED_WD_MSR, 0x00); ed_asic_outb(sc, ED_WD_LAAR, sc->wd_laar_proto & ~ED_WD_LAAR_M16EN); } } void ed_enable_16bit_access(struct ed_softc *sc) { if (sc->isa16bit && sc->vendor == ED_VENDOR_WD_SMC) { ed_asic_outb(sc, ED_WD_LAAR, sc->wd_laar_proto | ED_WD_LAAR_M16EN); if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_asic_outb(sc, ED_WD_MSR, ED_WD_MSR_MENB); } } /* * Allocate a port resource with the given resource id. */ int ed_alloc_port(device_t dev, int rid, int size) { struct ed_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0ul, ~0ul, size, RF_ACTIVE); if (res) { sc->port_res = res; sc->port_used = size; sc->port_bst = rman_get_bustag(res); sc->port_bsh = rman_get_bushandle(res); return (0); } return (ENOENT); } /* * Allocate a memory resource with the given resource id. */ int ed_alloc_memory(device_t dev, int rid, int size) { struct ed_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0ul, ~0ul, size, RF_ACTIVE); if (res) { sc->mem_res = res; sc->mem_used = size; sc->mem_bst = rman_get_bustag(res); sc->mem_bsh = rman_get_bushandle(res); return (0); } return (ENOENT); } /* * Allocate an irq resource with the given resource id. 
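*/

/*
 * Sketch: every shared-memory access on WD/SMC boards is bracketed by
 * the enable/disable pair above, as edintr() does around ed_rint().
 * ed_shmem_read_16bit() is a hypothetical composition of that pattern,
 * not part of this driver.
 */
static void
ed_shmem_read_16bit(struct ed_softc *sc, bus_size_t src, uint8_t *dst,
    uint16_t amount)
{
	ed_enable_16bit_access(sc);
	sc->readmem(sc, src, dst, amount);
	ed_disable_16bit_access(sc);
}

/*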
*/ int ed_alloc_irq(device_t dev, int rid, int flags) { struct ed_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | flags); if (res) { sc->irq_res = res; return (0); } return (ENOENT); } /* * Release all resources */ void ed_release_resources(device_t dev) { struct ed_softc *sc = device_get_softc(dev); if (sc->port_res) bus_free_resource(dev, SYS_RES_IOPORT, sc->port_res); if (sc->port_res2) bus_free_resource(dev, SYS_RES_IOPORT, sc->port_res2); if (sc->mem_res) bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res); if (sc->irq_res) bus_free_resource(dev, SYS_RES_IRQ, sc->irq_res); sc->port_res = 0; sc->port_res2 = 0; sc->mem_res = 0; sc->irq_res = 0; if (sc->ifp) if_free(sc->ifp); } /* * Install interface into kernel networking data structures */ int ed_attach(device_t dev) { struct ed_softc *sc = device_get_softc(dev); struct ifnet *ifp; sc->dev = dev; ED_LOCK_INIT(sc); ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); ED_LOCK_DESTROY(sc); return (ENOSPC); } if (sc->readmem == NULL) { if (sc->mem_shared) { if (sc->isa16bit) sc->readmem = ed_shmem_readmem16; else sc->readmem = ed_shmem_readmem8; } else { sc->readmem = ed_pio_readmem; } } if (sc->sc_write_mbufs == NULL) { device_printf(dev, "No write mbufs routine set\n"); return (ENXIO); } callout_init_mtx(&sc->tick_ch, ED_MUTEX(sc), 0); /* * Set interface to stopped condition (reset) */ ed_stop_hw(sc); /* * Initialize ifnet structure */ ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_start = ed_start; ifp->if_ioctl = ed_ioctl; ifp->if_init = ed_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ifp->if_linkmib = &sc->mibdata; ifp->if_linkmiblen = sizeof sc->mibdata; /* * XXX - should do a better job. */ if (sc->chip_type == ED_CHIP_TYPE_WD790) sc->mibdata.dot3StatsEtherChipSet = DOT3CHIPSET(dot3VendorWesternDigital, dot3ChipSetWesternDigital83C790); else sc->mibdata.dot3StatsEtherChipSet = DOT3CHIPSET(dot3VendorNational, dot3ChipSetNational8390); sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; /* * Set default state for LINK2 flag (used to disable the * tranceiver for AUI operation), based on config option. * We only set this flag before we attach the device, so there's * no race. It is convenient to allow users to turn this off * by default in the kernel config, but given our more advanced * boot time configuration options, this might no longer be needed. 
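*/

/*
 * Sketch: the LINK2 default tested just below comes from boot-time
 * configuration rather than from a runtime interface.  A device.hints
 * entry such as the following (unit and flag value illustrative) would
 * set ED_FLAGS_DISABLE_TRANCEIVER on the device's flags:
 *
 *	hint.ed.0.flags="0x1"
 */

/*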
*/ if (device_get_flags(dev) & ED_FLAGS_DISABLE_TRANCEIVER) ifp->if_flags |= IFF_LINK2; /* * Attach the interface */ ether_ifattach(ifp, sc->enaddr); /* device attach does transition from UNCONFIGURED to IDLE state */ sc->tx_mem = sc->txb_cnt * ED_PAGE_SIZE * ED_TXBUF_SIZE; sc->rx_mem = (sc->rec_page_stop - sc->rec_page_start) * ED_PAGE_SIZE; SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 0, "type", CTLFLAG_RD, sc->type_str, 0, "Type of chip in card"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1, "TxMem", CTLFLAG_RD, &sc->tx_mem, 0, "Memory set aside for transmitting packets"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 2, "RxMem", CTLFLAG_RD, &sc->rx_mem, 0, "Memory set aside for receiving packets"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 3, "Mem", CTLFLAG_RD, &sc->mem_size, 0, "Total Card Memory"); if (bootverbose) { if (sc->type_str && (*sc->type_str != 0)) device_printf(dev, "type %s ", sc->type_str); else device_printf(dev, "type unknown (0x%x) ", sc->type); #ifdef ED_HPP if (sc->vendor == ED_VENDOR_HP) printf("(%s %s IO)", (sc->hpp_id & ED_HPP_ID_16_BIT_ACCESS) ? "16-bit" : "32-bit", sc->hpp_mem_start ? "memory mapped" : "regular"); else #endif printf("%s", sc->isa16bit ? "(16 bit)" : "(8 bit)"); #if defined(ED_HPP) || defined(ED_3C503) printf("%s", (((sc->vendor == ED_VENDOR_3COM) || (sc->vendor == ED_VENDOR_HP)) && (ifp->if_flags & IFF_LINK2)) ? " tranceiver disabled" : ""); #endif printf("\n"); } return (0); } /* * Detach the driver from the hardware and other systems in the kernel. */ int ed_detach(device_t dev) { struct ed_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->ifp; if (mtx_initialized(ED_MUTEX(sc))) ED_ASSERT_UNLOCKED(sc); if (ifp) { ED_LOCK(sc); if (bus_child_present(dev)) ed_stop(sc); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ED_UNLOCK(sc); ether_ifdetach(ifp); callout_drain(&sc->tick_ch); } if (sc->irq_res != NULL && sc->irq_handle) bus_teardown_intr(dev, sc->irq_res, sc->irq_handle); ed_release_resources(dev); if (sc->miibus) device_delete_child(dev, sc->miibus); if (mtx_initialized(ED_MUTEX(sc))) ED_LOCK_DESTROY(sc); bus_generic_detach(dev); return (0); } /* * Reset interface. */ static void ed_reset(struct ifnet *ifp) { struct ed_softc *sc = ifp->if_softc; ED_ASSERT_LOCKED(sc); /* * Stop interface and re-initialize. */ ed_stop(sc); ed_init_locked(sc); } static void ed_stop_hw(struct ed_softc *sc) { int n = 5000; /* * Stop everything on the interface, and select page 0 registers. */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* * Wait for interface to enter stopped state, but limit # of checks to * 'n' (about 5ms). It shouldn't even take 5us on modern DS8390's, but * just in case it's an old one. * * The AX88x90 chips don't seem to implement this behavor. The * datasheets say it is only turned on when the chip enters a RESET * state and is silent about behavior for the stopped state we just * entered. */ if (sc->chip_type == ED_CHIP_TYPE_AX88190 || sc->chip_type == ED_CHIP_TYPE_AX88790) return; while (((ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RST) == 0) && --n) continue; if (n <= 0) device_printf(sc->dev, "ed_stop_hw RST never set\n"); } /* * Take interface offline. 
*/ void ed_stop(struct ed_softc *sc) { ED_ASSERT_LOCKED(sc); callout_stop(&sc->tick_ch); ed_stop_hw(sc); } /* * Periodic timer used to drive the watchdog and attachment-specific * tick handler. */ static void ed_tick(void *arg) { struct ed_softc *sc; sc = arg; ED_ASSERT_LOCKED(sc); if (sc->sc_tick) sc->sc_tick(sc); if (sc->tx_timer != 0 && --sc->tx_timer == 0) ed_watchdog(sc); callout_reset(&sc->tick_ch, hz, ed_tick, sc); } /* * Device timeout/watchdog routine. Entered if the device neglects to * generate an interrupt after a transmit has been started on it. */ static void ed_watchdog(struct ed_softc *sc) { struct ifnet *ifp; ifp = sc->ifp; log(LOG_ERR, "%s: device timeout\n", ifp->if_xname); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ed_reset(ifp); } /* * Initialize device. */ static void ed_init(void *xsc) { struct ed_softc *sc = xsc; ED_ASSERT_UNLOCKED(sc); ED_LOCK(sc); ed_init_locked(sc); ED_UNLOCK(sc); } static void ed_init_locked(struct ed_softc *sc) { struct ifnet *ifp = sc->ifp; int i; ED_ASSERT_LOCKED(sc); /* * Initialize the NIC in the exact order outlined in the NS manual. * This init procedure is "mandatory"...don't change what or when * things happen. */ /* reset transmitter flags */ sc->xmit_busy = 0; sc->tx_timer = 0; sc->txb_inuse = 0; sc->txb_new = 0; sc->txb_next_tx = 0; /* This variable is used below - don't move this assignment */ sc->next_packet = sc->rec_page_start + 1; /* * Set interface for page 0, Remote DMA complete, Stopped */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (sc->isa16bit) /* * Set FIFO threshold to 8, No auto-init Remote DMA, byte * order=80x86, word-wide DMA xfers, */ ed_nic_outb(sc, ED_P0_DCR, ED_DCR_FT1 | ED_DCR_WTS | ED_DCR_LS); else /* * Same as above, but byte-wide DMA xfers */ ed_nic_outb(sc, ED_P0_DCR, ED_DCR_FT1 | ED_DCR_LS); /* * Clear Remote Byte Count Registers */ ed_nic_outb(sc, ED_P0_RBCR0, 0); ed_nic_outb(sc, ED_P0_RBCR1, 0); /* * For the moment, don't store incoming packets in memory. */ ed_nic_outb(sc, ED_P0_RCR, ED_RCR_MON); /* * Place NIC in internal loopback mode */ ed_nic_outb(sc, ED_P0_TCR, ED_TCR_LB0); /* * Initialize transmit/receive (ring-buffer) Page Start */ ed_nic_outb(sc, ED_P0_TPSR, sc->tx_page_start); ed_nic_outb(sc, ED_P0_PSTART, sc->rec_page_start); /* Set lower bits of byte addressable framing to 0 */ if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_nic_outb(sc, 0x09, 0); /* * Initialize Receiver (ring-buffer) Page Stop and Boundry */ ed_nic_outb(sc, ED_P0_PSTOP, sc->rec_page_stop); ed_nic_outb(sc, ED_P0_BNRY, sc->rec_page_start); /* * Clear all interrupts. A '1' in each bit position clears the * corresponding flag. */ ed_nic_outb(sc, ED_P0_ISR, 0xff); /* * Enable the following interrupts: receive/transmit complete, * receive/transmit error, and Receiver OverWrite. * * Counter overflow and Remote DMA complete are *not* enabled. 
*/ ed_nic_outb(sc, ED_P0_IMR, ED_IMR_PRXE | ED_IMR_PTXE | ED_IMR_RXEE | ED_IMR_TXEE | ED_IMR_OVWE); /* * Program Command Register for page 1 */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 | ED_CR_STP); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* * Copy out our station address */ for (i = 0; i < ETHER_ADDR_LEN; ++i) ed_nic_outb(sc, ED_P1_PAR(i), IF_LLADDR(sc->ifp)[i]); /* * Set Current Page pointer to next_packet (initialized above) */ ed_nic_outb(sc, ED_P1_CURR, sc->next_packet); /* * Program Receiver Configuration Register and multicast filter. CR is * set to page 0 on return. */ ed_setrcr(sc); /* * Take interface out of loopback */ ed_nic_outb(sc, ED_P0_TCR, 0); if (sc->sc_mediachg) sc->sc_mediachg(sc); /* * Set 'running' flag, and clear output active flag. */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* * ...and attempt to start output */ ed_start_locked(ifp); callout_reset(&sc->tick_ch, hz, ed_tick, sc); } /* * This routine actually starts the transmission on the interface */ static __inline void ed_xmit(struct ed_softc *sc) { unsigned short len; len = sc->txb_len[sc->txb_next_tx]; /* * Set NIC for page 0 register access */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* * Set TX buffer start page */ ed_nic_outb(sc, ED_P0_TPSR, sc->tx_page_start + sc->txb_next_tx * ED_TXBUF_SIZE); /* * Set TX length */ ed_nic_outb(sc, ED_P0_TBCR0, len); ed_nic_outb(sc, ED_P0_TBCR1, len >> 8); /* * Set page 0, Remote DMA complete, Transmit Packet, and *Start* */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_TXP | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); sc->xmit_busy = 1; /* * Point to next transmit buffer slot and wrap if necessary. */ sc->txb_next_tx++; if (sc->txb_next_tx == sc->txb_cnt) sc->txb_next_tx = 0; /* * Set a timer just in case we never hear from the board again */ sc->tx_timer = 2; } /* * Start output on interface. * We make two assumptions here: * 1) that the current priority is set to splimp _before_ this code * is called *and* is returned to the appropriate priority after * return * 2) that the IFF_DRV_OACTIVE flag is checked before this code is called * (i.e. that the output part of the interface is idle) */ static void ed_start(struct ifnet *ifp) { struct ed_softc *sc = ifp->if_softc; ED_ASSERT_UNLOCKED(sc); ED_LOCK(sc); ed_start_locked(ifp); ED_UNLOCK(sc); } static void ed_start_locked(struct ifnet *ifp) { struct ed_softc *sc = ifp->if_softc; struct mbuf *m0, *m; bus_size_t buffer; int len; ED_ASSERT_LOCKED(sc); outloop: /* * First, see if there are buffered packets and an idle transmitter - * should never happen at this point. */ if (sc->txb_inuse && (sc->xmit_busy == 0)) { printf("ed: packets buffered, but transmitter idle\n"); ed_xmit(sc); } /* * See if there is room to put another packet in the buffer. */ if (sc->txb_inuse == sc->txb_cnt) { /* * No room. Indicate this to the outside world and exit. 
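*/

/*
 * Sketch: the transmit-buffer ring arithmetic used here.  Each of the
 * txb_cnt slots is ED_TXBUF_SIZE pages long and indices wrap at
 * txb_cnt; ed_start_locked() below computes the byte address of a
 * slot this way, while ed_xmit() above uses the page-unit analogue.
 * ed_txb_addr() is a hypothetical helper.
 */
static bus_size_t
ed_txb_addr(struct ed_softc *sc, int slot)
{
	return (sc->mem_start + slot * ED_TXBUF_SIZE * ED_PAGE_SIZE);
}

/*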
*/ ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == 0) { /* * We are using the !OACTIVE flag to indicate to the outside * world that we can accept an additional packet rather than * that the transmitter is _actually_ active. Indeed, the * transmitter may be active, but if we haven't filled all the * buffers with data then we still want to accept more. */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; return; } /* * Copy the mbuf chain into the transmit buffer */ m0 = m; /* txb_new points to next open buffer slot */ buffer = sc->mem_start + (sc->txb_new * ED_TXBUF_SIZE * ED_PAGE_SIZE); len = sc->sc_write_mbufs(sc, m, buffer); if (len == 0) { m_freem(m0); goto outloop; } sc->txb_len[sc->txb_new] = max(len, (ETHER_MIN_LEN-ETHER_CRC_LEN)); sc->txb_inuse++; /* * Point to next buffer slot and wrap if necessary. */ sc->txb_new++; if (sc->txb_new == sc->txb_cnt) sc->txb_new = 0; if (sc->xmit_busy == 0) ed_xmit(sc); /* * Tap off here if there is a bpf listener. */ BPF_MTAP(ifp, m0); m_freem(m0); /* * Loop back to the top to possibly buffer more packets */ goto outloop; } /* * Ethernet interface receiver interrupt. */ static __inline void ed_rint(struct ed_softc *sc) { struct ifnet *ifp = sc->ifp; u_char boundry; u_short len; struct ed_ring packet_hdr; bus_size_t packet_ptr; ED_ASSERT_LOCKED(sc); /* * Set NIC to page 1 registers to get 'current' pointer */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* * 'sc->next_packet' is the logical beginning of the ring-buffer - * i.e. it points to where new data has been buffered. The 'CURR' * (current) register points to the logical end of the ring-buffer - * i.e. it points to where additional new data will be added. We loop * here until the logical beginning equals the logical end (or in * other words, until the ring-buffer is empty). */ while (sc->next_packet != ed_nic_inb(sc, ED_P1_CURR)) { /* get pointer to this buffer's header structure */ packet_ptr = sc->mem_ring + (sc->next_packet - sc->rec_page_start) * ED_PAGE_SIZE; /* * The byte count includes a 4 byte header that was added by * the NIC. */ sc->readmem(sc, packet_ptr, (char *) &packet_hdr, sizeof(packet_hdr)); len = packet_hdr.count; if (len > (ETHER_MAX_LEN - ETHER_CRC_LEN + sizeof(struct ed_ring)) || len < (ETHER_MIN_LEN - ETHER_CRC_LEN + sizeof(struct ed_ring))) { /* * Length is a wild value. There's a good chance that * this was caused by the NIC being old and buggy. * The bug is that the length low byte is duplicated * in the high byte. Try to recalculate the length * based on the pointer to the next packet. Also, * need ot preserve offset into page. * * NOTE: sc->next_packet is pointing at the current * packet. */ len &= ED_PAGE_SIZE - 1; if (packet_hdr.next_packet >= sc->next_packet) len += (packet_hdr.next_packet - sc->next_packet) * ED_PAGE_SIZE; else len += ((packet_hdr.next_packet - sc->rec_page_start) + (sc->rec_page_stop - sc->next_packet)) * ED_PAGE_SIZE; /* * because buffers are aligned on 256-byte boundary, * the length computed above is off by 256 in almost * all cases. Fix it... */ if (len & 0xff) len -= 256; if (len > (ETHER_MAX_LEN - ETHER_CRC_LEN + sizeof(struct ed_ring))) sc->mibdata.dot3StatsFrameTooLongs++; } /* * Be fairly liberal about what we allow as a "reasonable" * length so that a [crufty] packet will make it to BPF (and * can thus be analyzed). 
Note that all that is really * important is that we have a length that will fit into one * mbuf cluster or less; the upper layer protocols can then * figure out the length from their own length field(s). But * make sure that we have at least a full ethernet header or * we would be unable to call ether_input() later. */ if ((len >= sizeof(struct ed_ring) + ETHER_HDR_LEN) && (len <= MCLBYTES) && (packet_hdr.next_packet >= sc->rec_page_start) && (packet_hdr.next_packet < sc->rec_page_stop)) { /* * Go get packet. */ ed_get_packet(sc, packet_ptr + sizeof(struct ed_ring), len - sizeof(struct ed_ring)); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); } else { /* * Really BAD. The ring pointers are corrupted. */ log(LOG_ERR, "%s: NIC memory corrupt - invalid packet length %d\n", ifp->if_xname, len); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); ed_reset(ifp); return; } /* * Update next packet pointer */ sc->next_packet = packet_hdr.next_packet; /* * Update NIC boundry pointer - being careful to keep it one * buffer behind. (as recommended by NS databook) */ boundry = sc->next_packet - 1; if (boundry < sc->rec_page_start) boundry = sc->rec_page_stop - 1; /* * Set NIC to page 0 registers to update boundry register */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_BNRY, boundry); /* * Set NIC to page 1 registers before looping to top (prepare * to get 'CURR' current pointer) */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } } /* * Ethernet interface interrupt processor */ void edintr(void *arg) { struct ed_softc *sc = (struct ed_softc*) arg; struct ifnet *ifp = sc->ifp; u_char isr; int count; ED_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { ED_UNLOCK(sc); return; } /* * Set NIC to page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* * loop until there are no more new interrupts. When the card goes * away, the hardware will read back 0xff. Looking at the interrupts, * it would appear that 0xff is impossible, or at least extremely * unlikely. */ while ((isr = ed_nic_inb(sc, ED_P0_ISR)) != 0 && isr != 0xff) { /* * reset all the bits that we are 'acknowledging' by writing a * '1' to each bit position that was set (writing a '1' * *clears* the bit) */ ed_nic_outb(sc, ED_P0_ISR, isr); /* * The AX88190 and AX88190A has problems acking an interrupt * and having them clear. This interferes with top-level loop * here. Wait for all the bits to clear. * * We limit this to 5000 iterations. At 1us per inb/outb, * this translates to about 15ms, which should be plenty of * time, and also gives protection in the card eject case. */ if (sc->chip_type == ED_CHIP_TYPE_AX88190) { count = 5000; /* 15ms */ while (count-- && (ed_nic_inb(sc, ED_P0_ISR) & isr)) { ed_nic_outb(sc, ED_P0_ISR,0); ed_nic_outb(sc, ED_P0_ISR,isr); } if (count == 0) break; } /* * Handle transmitter interrupts. Handle these first because * the receiver will reset the board under some conditions. 
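*/

/*
 * Sketch: the "keep BNRY one page behind next_packet" rule applied in
 * ed_rint() above, as a stand-alone helper (ed_prev_page() is
 * hypothetical).  E.g. with rec_page_start 0x46 and rec_page_stop 0x80
 * (values illustrative), page 0x46 wraps back to 0x7f rather than
 * stepping to 0x45.
 */
static u_char
ed_prev_page(struct ed_softc *sc, u_char page)
{
	return (page > sc->rec_page_start ?
	    page - 1 : sc->rec_page_stop - 1);
}

/*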
*/ if (isr & (ED_ISR_PTX | ED_ISR_TXE)) { u_char collisions = ed_nic_inb(sc, ED_P0_NCR) & 0x0f; /* * Check for transmit error. If a TX completed with an * error, we end up throwing the packet away. Really * the only error that is possible is excessive * collisions, and in this case it is best to allow * the automatic mechanisms of TCP to backoff the * flow. Of course, with UDP we're screwed, but this * is expected when a network is heavily loaded. */ (void) ed_nic_inb(sc, ED_P0_TSR); if (isr & ED_ISR_TXE) { u_char tsr; /* * Excessive collisions (16) */ tsr = ed_nic_inb(sc, ED_P0_TSR); if ((tsr & ED_TSR_ABT) && (collisions == 0)) { /* * When collisions total 16, the * P0_NCR will indicate 0, and the * TSR_ABT is set. */ collisions = 16; sc->mibdata.dot3StatsExcessiveCollisions++; sc->mibdata.dot3StatsCollFrequencies[15]++; } if (tsr & ED_TSR_OWC) sc->mibdata.dot3StatsLateCollisions++; if (tsr & ED_TSR_CDH) sc->mibdata.dot3StatsSQETestErrors++; if (tsr & ED_TSR_CRS) sc->mibdata.dot3StatsCarrierSenseErrors++; if (tsr & ED_TSR_FU) sc->mibdata.dot3StatsInternalMacTransmitErrors++; /* * update output errors counter */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else { /* * Update total number of successfully * transmitted packets. */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } /* * reset tx busy and output active flags */ sc->xmit_busy = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* * clear watchdog timer */ sc->tx_timer = 0; /* * Add in total number of collisions on last * transmission. */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, collisions); switch(collisions) { case 0: case 16: break; case 1: sc->mibdata.dot3StatsSingleCollisionFrames++; sc->mibdata.dot3StatsCollFrequencies[0]++; break; default: sc->mibdata.dot3StatsMultipleCollisionFrames++; sc->mibdata. dot3StatsCollFrequencies[collisions-1] ++; break; } /* * Decrement buffer in-use count if not zero (can only * be zero if a transmitter interrupt occured while * not actually transmitting). If data is ready to * transmit, start it transmitting, otherwise defer * until after handling receiver */ if (sc->txb_inuse && --sc->txb_inuse) ed_xmit(sc); } /* * Handle receiver interrupts */ if (isr & (ED_ISR_PRX | ED_ISR_RXE | ED_ISR_OVW)) { /* * Overwrite warning. In order to make sure that a * lockup of the local DMA hasn't occurred, we reset * and re-init the NIC. The NSC manual suggests only a * partial reset/re-init is necessary - but some chips * seem to want more. The DMA lockup has been seen * only with early rev chips - Methinks this bug was * fixed in later revs. -DG */ if (isr & ED_ISR_OVW) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); #ifdef DIAGNOSTIC log(LOG_WARNING, "%s: warning - receiver ring buffer overrun\n", ifp->if_xname); #endif /* * Stop/reset/re-init NIC */ ed_reset(ifp); } else { /* * Receiver Error. One or more of: CRC error, * frame alignment error FIFO overrun, or * missed packet. */ if (isr & ED_ISR_RXE) { u_char rsr; rsr = ed_nic_inb(sc, ED_P0_RSR); if (rsr & ED_RSR_CRC) sc->mibdata.dot3StatsFCSErrors++; if (rsr & ED_RSR_FAE) sc->mibdata.dot3StatsAlignmentErrors++; if (rsr & ED_RSR_FO) sc->mibdata.dot3StatsInternalMacReceiveErrors++; if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); #ifdef ED_DEBUG if_printf(ifp, "receive error %x\n", ed_nic_inb(sc, ED_P0_RSR)); #endif } /* * Go get the packet(s) XXX - Doing this on an * error is dubious because there shouldn't be * any data to get (we've configured the * interface to not accept packets with * errors). */ /* * Enable 16bit access to shared memory first * on WD/SMC boards. 
*/ ed_enable_16bit_access(sc); ed_rint(sc); ed_disable_16bit_access(sc); } } /* * If it looks like the transmitter can take more data, * attempt to start output on the interface. This is done * after handling the receiver to give the receiver priority. */ if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) ed_start_locked(ifp); /* * return NIC CR to standard state: page 0, remote DMA * complete, start (toggling the TXP bit off, even if was just * set in the transmit routine, is *okay* - it is 'edge' * triggered from low to high) */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* * If the Network Talley Counters overflow, read them to reset * them. It appears that old 8390's won't clear the ISR flag * otherwise - resulting in an infinite loop. */ if (isr & ED_ISR_CNT) { (void) ed_nic_inb(sc, ED_P0_CNTR0); (void) ed_nic_inb(sc, ED_P0_CNTR1); (void) ed_nic_inb(sc, ED_P0_CNTR2); } } ED_UNLOCK(sc); } /* * Process an ioctl request. */ static int ed_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ed_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (command) { case SIOCSIFFLAGS: /* * If the interface is marked up and stopped, then start it. * If we're up and already running, then it may be a mediachg. * If it is marked down and running, then stop it. */ ED_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ed_init_locked(sc); else if (sc->sc_mediachg) sc->sc_mediachg(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ed_stop(sc); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } } /* * Promiscuous flag may have changed, so reprogram the RCR. */ ed_setrcr(sc); ED_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware filter * accordingly. */ ED_LOCK(sc); ed_setrcr(sc); ED_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->sc_media_ioctl == NULL) { error = EINVAL; break; } sc->sc_media_ioctl(sc, ifr, command); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } /* * Given a source and destination address, copy 'amount' of a packet from * the ring buffer into a linear destination buffer. Takes into account * ring-wrap. */ static __inline void ed_ring_copy(struct ed_softc *sc, bus_size_t src, char *dst, u_short amount) { u_short tmp_amount; /* does copy wrap to lower addr in ring buffer? */ if (src + amount > sc->mem_end) { tmp_amount = sc->mem_end - src; /* copy amount up to end of NIC memory */ sc->readmem(sc, src, dst, tmp_amount); amount -= tmp_amount; src = sc->mem_ring; dst += tmp_amount; } sc->readmem(sc, src, dst, amount); } /* * Retreive packet from shared memory and send to the next level up via * ether_input(). */ static void ed_get_packet(struct ed_softc *sc, bus_size_t buf, u_short len) { struct ifnet *ifp = sc->ifp; struct ether_header *eh; struct mbuf *m; /* Allocate a header mbuf */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; /* * We always put the received packet in a single buffer - * either with just an mbuf header or in a cluster attached * to the header. The +2 is to compensate for the alignment * fixup below. 
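/*
 * Sketch: the allocation pattern after this change.  MCLGET() now
 * reports success or failure directly, so a caller can test its return
 * value instead of re-checking M_EXT in m->m_flags afterwards; the
 * hunk below converts ed_get_packet() to that style.  ed_get_mbuf()
 * is a hypothetical stand-alone form.
 */
static struct mbuf *
ed_get_mbuf(u_short len)
{
	struct mbuf *m;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	if (len + 2 > MHLEN && !(MCLGET(m, M_NOWAIT))) {
		m_freem(m);		/* no cluster available */
		return (NULL);
	}
	return (m);
}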
*/ if ((len + 2) > MHLEN) { /* Attach an mbuf cluster */ - MCLGET(m, M_NOWAIT); - - /* Insist on getting a cluster */ - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); return; } } /* * The +2 is to longword align the start of the real packet. * This is important for NFS. */ m->m_data += 2; eh = mtod(m, struct ether_header *); /* * Get packet, including link layer address, from interface. */ ed_ring_copy(sc, buf, (char *)eh, len); m->m_pkthdr.len = m->m_len = len; ED_UNLOCK(sc); (*ifp->if_input)(ifp, m); ED_LOCK(sc); } /* * Supporting routines */ /* * Given a NIC memory source address and a host memory destination * address, copy 'amount' from NIC to host using shared memory. * The 'amount' is rounded up to a word - okay as long as mbufs * are word sized. That's what the +1 is below. * This routine accesses things as 16 bit quantities. */ void ed_shmem_readmem16(struct ed_softc *sc, bus_size_t src, uint8_t *dst, uint16_t amount) { bus_space_read_region_2(sc->mem_bst, sc->mem_bsh, src, (uint16_t *)dst, (amount + 1) / 2); } /* * Given a NIC memory source address and a host memory destination * address, copy 'amount' from NIC to host using shared memory. * This routine accesses things as 8 bit quantities. */ void ed_shmem_readmem8(struct ed_softc *sc, bus_size_t src, uint8_t *dst, uint16_t amount) { bus_space_read_region_1(sc->mem_bst, sc->mem_bsh, src, dst, amount); } /* * Given a NIC memory source address and a host memory destination * address, copy 'amount' from NIC to host using Programmed I/O. * The 'amount' is rounded up to a word - okay as long as mbufs * are word sized. * This routine is currently Novell-specific. */ void ed_pio_readmem(struct ed_softc *sc, bus_size_t src, uint8_t *dst, uint16_t amount) { /* Regular Novell cards */ /* select page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* round up to a word */ if (amount & 1) ++amount; /* set up DMA byte count */ ed_nic_outb(sc, ED_P0_RBCR0, amount); ed_nic_outb(sc, ED_P0_RBCR1, amount >> 8); /* set up source address in NIC mem */ ed_nic_outb(sc, ED_P0_RSAR0, src); ed_nic_outb(sc, ED_P0_RSAR1, src >> 8); ed_nic_outb(sc, ED_P0_CR, ED_CR_RD0 | ED_CR_STA); if (sc->isa16bit) ed_asic_insw(sc, ED_NOVELL_DATA, dst, amount / 2); else ed_asic_insb(sc, ED_NOVELL_DATA, dst, amount); } /* * Stripped down routine for writing a linear buffer to NIC memory. * Only used in the probe routine to test the memory. 'len' must * be even. */ void ed_pio_writemem(struct ed_softc *sc, uint8_t *src, uint16_t dst, uint16_t len) { int maxwait = 200; /* about 240us */ /* select page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* reset remote DMA complete flag */ ed_nic_outb(sc, ED_P0_ISR, ED_ISR_RDC); /* set up DMA byte count */ ed_nic_outb(sc, ED_P0_RBCR0, len); ed_nic_outb(sc, ED_P0_RBCR1, len >> 8); /* set up destination address in NIC mem */ ed_nic_outb(sc, ED_P0_RSAR0, dst); ed_nic_outb(sc, ED_P0_RSAR1, dst >> 8); /* set remote DMA write */ ed_nic_outb(sc, ED_P0_CR, ED_CR_RD1 | ED_CR_STA); if (sc->isa16bit) ed_asic_outsw(sc, ED_NOVELL_DATA, src, len / 2); else ed_asic_outsb(sc, ED_NOVELL_DATA, src, len); /* * Wait for remote DMA complete. 
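*/

/*
 * Sketch: the bounded remote-DMA-complete wait used by
 * ed_pio_writemem() above and ed_pio_write_mbufs() below;
 * ed_wait_rdc() is a hypothetical helper, not part of this driver.
 */
static int
ed_wait_rdc(struct ed_softc *sc, int maxwait)
{
	while ((ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RDC) != ED_ISR_RDC &&
	    --maxwait)
		continue;
	return (maxwait ? 0 : EIO);
}

/*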
This is necessary because on the * transmit side, data is handled internally by the NIC in bursts and * we can't start another remote DMA until this one completes. Not * waiting causes really bad things to happen - like the NIC * irrecoverably jamming the ISA bus. */ while (((ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RDC) != ED_ISR_RDC) && --maxwait) continue; } /* * Write an mbuf chain to the destination NIC memory address using * programmed I/O. */ u_short ed_pio_write_mbufs(struct ed_softc *sc, struct mbuf *m, bus_size_t dst) { struct ifnet *ifp = sc->ifp; unsigned short total_len, dma_len; struct mbuf *mp; int maxwait = 200; /* about 240us */ ED_ASSERT_LOCKED(sc); /* Regular Novell cards */ /* First, count up the total number of bytes to copy */ for (total_len = 0, mp = m; mp; mp = mp->m_next) total_len += mp->m_len; dma_len = total_len; if (sc->isa16bit && (dma_len & 1)) dma_len++; /* select page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* reset remote DMA complete flag */ ed_nic_outb(sc, ED_P0_ISR, ED_ISR_RDC); /* set up DMA byte count */ ed_nic_outb(sc, ED_P0_RBCR0, dma_len); ed_nic_outb(sc, ED_P0_RBCR1, dma_len >> 8); /* set up destination address in NIC mem */ ed_nic_outb(sc, ED_P0_RSAR0, dst); ed_nic_outb(sc, ED_P0_RSAR1, dst >> 8); /* set remote DMA write */ ed_nic_outb(sc, ED_P0_CR, ED_CR_RD1 | ED_CR_STA); /* * Transfer the mbuf chain to the NIC memory. * 16-bit cards require that data be transferred as words, and only words. * So that case requires some extra code to patch over odd-length mbufs. */ if (!sc->isa16bit) { /* NE1000s are easy */ while (m) { if (m->m_len) ed_asic_outsb(sc, ED_NOVELL_DATA, m->m_data, m->m_len); m = m->m_next; } } else { /* NE2000s are a pain */ uint8_t *data; int len, wantbyte; union { uint16_t w; uint8_t b[2]; } saveword; wantbyte = 0; while (m) { len = m->m_len; if (len) { data = mtod(m, caddr_t); /* finish the last word */ if (wantbyte) { saveword.b[1] = *data; ed_asic_outw(sc, ED_NOVELL_DATA, saveword.w); data++; len--; wantbyte = 0; } /* output contiguous words */ if (len > 1) { ed_asic_outsw(sc, ED_NOVELL_DATA, data, len >> 1); data += len & ~1; len &= 1; } /* save last byte, if necessary */ if (len == 1) { saveword.b[0] = *data; wantbyte = 1; } } m = m->m_next; } /* spit last byte */ if (wantbyte) ed_asic_outw(sc, ED_NOVELL_DATA, saveword.w); } /* * Wait for remote DMA complete. This is necessary because on the * transmit side, data is handled internally by the NIC in bursts and * we can't start another remote DMA until this one completes. Not * waiting causes really bad things to happen - like the NIC * irrecoverably jamming the ISA bus. */ while (((ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RDC) != ED_ISR_RDC) && --maxwait) continue; if (!maxwait) { log(LOG_WARNING, "%s: remote transmit DMA failed to complete\n", ifp->if_xname); ed_reset(ifp); return(0); } return (total_len); } static void ed_setrcr(struct ed_softc *sc) { struct ifnet *ifp = sc->ifp; int i; u_char reg1; ED_ASSERT_LOCKED(sc); /* Bit 6 in AX88190 RCR register must be set. 
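*/

/*
 * Sketch: the DS8390 multicast hash computed by ed_ds_getmcaf() below.
 * The top six bits of the big-endian CRC-32 of the station address
 * select one of the 64 filter bits programmed into the MAR registers.
 * ed_mcaf_set() is a hypothetical stand-alone form.
 */
static void
ed_mcaf_set(uint8_t mcaf[8], const uint8_t *lladdr)
{
	uint32_t index;

	index = ether_crc32_be(lladdr, ETHER_ADDR_LEN) >> 26;
	mcaf[index >> 3] |= 1 << (index & 7);
}

/*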
*/ if (sc->chip_type == ED_CHIP_TYPE_AX88190 || sc->chip_type == ED_CHIP_TYPE_AX88790) reg1 = ED_RCR_INTT; else reg1 = 0x00; /* set page 1 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 | ED_CR_STP); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (ifp->if_flags & IFF_PROMISC) { /* * Reconfigure the multicast filter. */ for (i = 0; i < 8; i++) ed_nic_outb(sc, ED_P1_MAR(i), 0xff); /* * And turn on promiscuous mode. Also enable reception of * runts and packets with CRC & alignment errors. */ /* Set page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_RCR, ED_RCR_PRO | ED_RCR_AM | ED_RCR_AB | ED_RCR_AR | ED_RCR_SEP | reg1); } else { /* set up multicast addresses and filter modes */ if (ifp->if_flags & IFF_MULTICAST) { uint32_t mcaf[2]; if (ifp->if_flags & IFF_ALLMULTI) { mcaf[0] = 0xffffffff; mcaf[1] = 0xffffffff; } else ed_ds_getmcaf(sc, mcaf); /* * Set multicast filter on chip. */ for (i = 0; i < 8; i++) ed_nic_outb(sc, ED_P1_MAR(i), ((u_char *) mcaf)[i]); /* Set page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_RCR, ED_RCR_AM | ED_RCR_AB | reg1); } else { /* * Initialize multicast address hashing registers to * not accept multicasts. */ for (i = 0; i < 8; ++i) ed_nic_outb(sc, ED_P1_MAR(i), 0x00); /* Set page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); ed_nic_outb(sc, ED_P0_RCR, ED_RCR_AB | reg1); } } /* * Start interface. */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); } /* * Compute the multicast address filter from the * list of multicast addresses we need to listen to. */ static void ed_ds_getmcaf(struct ed_softc *sc, uint32_t *mcaf) { uint32_t index; u_char *af = (u_char *) mcaf; struct ifmultiaddr *ifma; mcaf[0] = 0; mcaf[1] = 0; if_maddr_rlock(sc->ifp); TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; index = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; af[index >> 3] |= 1 << (index & 7); } if_maddr_runlock(sc->ifp); } int ed_isa_mem_ok(device_t dev, u_long pmem, u_int memsize) { if (pmem < 0xa0000 || pmem + memsize > 0x1000000) { device_printf(dev, "Invalid ISA memory address range " "configured: 0x%lx - 0x%lx\n", pmem, pmem + memsize); return (ENXIO); } return (0); } int ed_clear_memory(device_t dev) { struct ed_softc *sc = device_get_softc(dev); bus_size_t i; bus_space_set_region_1(sc->mem_bst, sc->mem_bsh, sc->mem_start, 0, sc->mem_size); for (i = 0; i < sc->mem_size; i++) { if (bus_space_read_1(sc->mem_bst, sc->mem_bsh, sc->mem_start + i)) { device_printf(dev, "failed to clear shared memory at " "0x%jx - check configuration\n", (uintmax_t)rman_get_start(sc->mem_res) + i); return (ENXIO); } } return (0); } u_short ed_shmem_write_mbufs(struct ed_softc *sc, struct mbuf *m, bus_size_t dst) { u_short len; /* * Special case setup for 16 bit boards... 
*/ if (sc->isa16bit) { switch (sc->vendor) { #ifdef ED_3C503 /* * For 16bit 3Com boards (which have 16k of * memory), we have the xmit buffers in a * different page of memory ('page 0') - so * change pages. */ case ED_VENDOR_3COM: ed_asic_outb(sc, ED_3COM_GACFR, ED_3COM_GACFR_RSEL); break; #endif /* * Enable 16bit access to shared memory on * WD/SMC boards. * * XXX - same as ed_enable_16bit_access() */ case ED_VENDOR_WD_SMC: ed_asic_outb(sc, ED_WD_LAAR, sc->wd_laar_proto | ED_WD_LAAR_M16EN); if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_asic_outb(sc, ED_WD_MSR, ED_WD_MSR_MENB); break; } } for (len = 0; m != NULL; m = m->m_next) { if (m->m_len == 0) continue; if (sc->isa16bit) { if (m->m_len > 1) bus_space_write_region_2(sc->mem_bst, sc->mem_bsh, dst, mtod(m, uint16_t *), m->m_len / 2); if ((m->m_len & 1) != 0) bus_space_write_1(sc->mem_bst, sc->mem_bsh, dst + m->m_len - 1, *(mtod(m, uint8_t *) + m->m_len - 1)); } else bus_space_write_region_1(sc->mem_bst, sc->mem_bsh, dst, mtod(m, uint8_t *), m->m_len); dst += m->m_len; len += m->m_len; } /* * Restore previous shared memory access */ if (sc->isa16bit) { switch (sc->vendor) { #ifdef ED_3C503 case ED_VENDOR_3COM: ed_asic_outb(sc, ED_3COM_GACFR, ED_3COM_GACFR_RSEL | ED_3COM_GACFR_MBS0); break; #endif case ED_VENDOR_WD_SMC: /* XXX - same as ed_disable_16bit_access() */ if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_asic_outb(sc, ED_WD_MSR, 0x00); ed_asic_outb(sc, ED_WD_LAAR, sc->wd_laar_proto & ~ED_WD_LAAR_M16EN); break; } } return (len); } /* * Generic ifmedia support. By default, the DP8390-based cards don't know * what their network attachment really is, or even if it is valid (except * upon successful transmission of a packet). To play nicer with dhclient, as * well as to fit in with a framework where some cards can provde more * detailed information, make sure that we use this as a fallback. */ static int ed_gen_ifmedia_ioctl(struct ed_softc *sc, struct ifreq *ifr, u_long command) { return (ifmedia_ioctl(sc->ifp, ifr, &sc->ifmedia, command)); } static int ed_gen_ifmedia_upd(struct ifnet *ifp) { return 0; } static void ed_gen_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { ifmr->ifm_active = IFM_ETHER | IFM_AUTO; ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; } void ed_gen_ifmedia_init(struct ed_softc *sc) { sc->sc_media_ioctl = &ed_gen_ifmedia_ioctl; ifmedia_init(&sc->ifmedia, 0, ed_gen_ifmedia_upd, ed_gen_ifmedia_sts); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, 0); ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_AUTO); } Index: head/sys/dev/ex/if_ex.c =================================================================== --- head/sys/dev/ex/if_ex.c (revision 276749) +++ head/sys/dev/ex/if_ex.c (revision 276750) @@ -1,1082 +1,1081 @@ /*- * Copyright (c) 1996, Javier Martín Rueda (jmrueda@diatel.upm.es) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * * MAINTAINER: Matthew N. Dodd * */ #include __FBSDID("$FreeBSD$"); /* * Intel EtherExpress Pro/10, Pro/10+ Ethernet driver * * Revision history: * * dd-mmm-yyyy: Multicast support ported from NetBSD's if_iy driver. * 30-Oct-1996: first beta version. Inet and BPF supported, but no multicast. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef EXDEBUG # define Start_End 1 # define Rcvd_Pkts 2 # define Sent_Pkts 4 # define Status 8 static int debug_mask = 0; # define DODEBUG(level, action) if (level & debug_mask) action #else # define DODEBUG(level, action) #endif devclass_t ex_devclass; char irq2eemap[] = { -1, -1, 0, 1, -1, 2, -1, -1, -1, 0, 3, 4, -1, -1, -1, -1 }; u_char ee2irqmap[] = { 9, 3, 5, 10, 11, 0, 0, 0 }; char plus_irq2eemap[] = { -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, -1, -1, -1 }; u_char plus_ee2irqmap[] = { 3, 4, 5, 7, 9, 10, 11, 12 }; /* Network Interface Functions */ static void ex_init(void *); static void ex_init_locked(struct ex_softc *); static void ex_start(struct ifnet *); static void ex_start_locked(struct ifnet *); static int ex_ioctl(struct ifnet *, u_long, caddr_t); static void ex_watchdog(void *); /* ifmedia Functions */ static int ex_ifmedia_upd(struct ifnet *); static void ex_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int ex_get_media(struct ex_softc *); static void ex_reset(struct ex_softc *); static void ex_setmulti(struct ex_softc *); static void ex_tx_intr(struct ex_softc *); static void ex_rx_intr(struct ex_softc *); void ex_get_address(struct ex_softc *sc, u_char *enaddr) { uint16_t eaddr_tmp; eaddr_tmp = ex_eeprom_read(sc, EE_Eth_Addr_Lo); enaddr[5] = eaddr_tmp & 0xff; enaddr[4] = eaddr_tmp >> 8; eaddr_tmp = ex_eeprom_read(sc, EE_Eth_Addr_Mid); enaddr[3] = eaddr_tmp & 0xff; enaddr[2] = eaddr_tmp >> 8; eaddr_tmp = ex_eeprom_read(sc, EE_Eth_Addr_Hi); enaddr[1] = eaddr_tmp & 0xff; enaddr[0] = eaddr_tmp >> 8; return; } int ex_card_type(u_char *enaddr) { if ((enaddr[0] == 0x00) && (enaddr[1] == 0xA0) && (enaddr[2] == 0xC9)) return (CARD_TYPE_EX_10_PLUS); return (CARD_TYPE_EX_10); } /* * Caller is responsible for eventually calling * ex_release_resources() on failure. 
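The contract stated in the comment above — allocation may fail partway through, and a single idempotent release routine cleans up whatever was actually obtained — is the shape ex_alloc_resources() and ex_release_resources() below follow. A minimal userland sketch of the same pattern, with malloc()/free() standing in for bus_alloc_resource_any()/bus_release_resource():

    #include <stdlib.h>

    struct res_set {
        void *ioport;    /* both NULL before the first allocation */
        void *irq;
    };

    /* Returns 0 or an error; the caller must call release either way. */
    static int
    alloc_resources(struct res_set *rs)
    {
        if ((rs->ioport = malloc(16)) == NULL)
            return (-1);
        if ((rs->irq = malloc(16)) == NULL)
            return (-1);
        return (0);
    }

    /*
     * Safe on a partially filled set: frees only what exists, then
     * clears the pointer so a second call is harmless.
     */
    static void
    release_resources(struct res_set *rs)
    {
        free(rs->ioport);
        rs->ioport = NULL;
        free(rs->irq);
        rs->irq = NULL;
    }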
*/ int ex_alloc_resources(device_t dev) { struct ex_softc * sc = device_get_softc(dev); int error = 0; sc->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->ioport_rid, RF_ACTIVE); if (!sc->ioport) { device_printf(dev, "No I/O space?!\n"); error = ENOMEM; goto bad; } sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq) { device_printf(dev, "No IRQ?!\n"); error = ENOMEM; goto bad; } bad: return (error); } void ex_release_resources(device_t dev) { struct ex_softc * sc = device_get_softc(dev); if (sc->ih) { bus_teardown_intr(dev, sc->irq, sc->ih); sc->ih = NULL; } if (sc->ioport) { bus_release_resource(dev, SYS_RES_IOPORT, sc->ioport_rid, sc->ioport); sc->ioport = NULL; } if (sc->irq) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); sc->irq = NULL; } if (sc->ifp) if_free(sc->ifp); return; } int ex_attach(device_t dev) { struct ex_softc * sc = device_get_softc(dev); struct ifnet * ifp; struct ifmedia * ifm; int error; uint16_t temp; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return (ENOSPC); } /* work out which set of irq <-> internal tables to use */ if (ex_card_type(sc->enaddr) == CARD_TYPE_EX_10_PLUS) { sc->irq2ee = plus_irq2eemap; sc->ee2irq = plus_ee2irqmap; } else { sc->irq2ee = irq2eemap; sc->ee2irq = ee2irqmap; } sc->mem_size = CARD_RAM_SIZE; /* XXX This should be read from the card itself. */ /* * Initialize the ifnet structure. */ ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; ifp->if_start = ex_start; ifp->if_ioctl = ex_ioctl; ifp->if_init = ex_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifmedia_init(&sc->ifmedia, 0, ex_ifmedia_upd, ex_ifmedia_sts); mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->timer, &sc->lock, 0); temp = ex_eeprom_read(sc, EE_W5); if (temp & EE_W5_PORT_TPE) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); if (temp & EE_W5_PORT_BNC) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL); if (temp & EE_W5_PORT_AUI) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_NONE, 0, NULL); ifmedia_set(&sc->ifmedia, ex_get_media(sc)); ifm = &sc->ifmedia; ifm->ifm_media = ifm->ifm_cur->ifm_media; ex_ifmedia_upd(ifp); /* * Attach the interface. */ ether_ifattach(ifp, sc->enaddr); error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, ex_intr, (void *)sc, &sc->ih); if (error) { device_printf(dev, "bus_setup_intr() failed!\n"); ether_ifdetach(ifp); mtx_destroy(&sc->lock); return (error); } return(0); } int ex_detach(device_t dev) { struct ex_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->ifp; EX_LOCK(sc); ex_stop(sc); EX_UNLOCK(sc); ether_ifdetach(ifp); callout_drain(&sc->timer); ex_release_resources(dev); mtx_destroy(&sc->lock); return (0); } static void ex_init(void *xsc) { struct ex_softc * sc = (struct ex_softc *) xsc; EX_LOCK(sc); ex_init_locked(sc); EX_UNLOCK(sc); } static void ex_init_locked(struct ex_softc *sc) { struct ifnet * ifp = sc->ifp; int i; unsigned short temp_reg; DODEBUG(Start_End, printf("%s: ex_init: start\n", ifp->if_xname);); sc->tx_timeout = 0; /* * Load the ethernet address into the card. 
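The station address being loaded here was assembled by ex_get_address() above from three 16-bit EEPROM words stored back to front, high byte first within each word. The unpacking in isolation, with the word order following the EE_Eth_Addr_Lo/Mid/Hi reads:

    #include <stdint.h>

    /* w[0] = EE_Eth_Addr_Lo, w[1] = EE_Eth_Addr_Mid, w[2] = EE_Eth_Addr_Hi */
    static void
    unpack_enaddr(const uint16_t w[3], uint8_t enaddr[6])
    {
        enaddr[5] = w[0] & 0xff;
        enaddr[4] = w[0] >> 8;
        enaddr[3] = w[1] & 0xff;
        enaddr[2] = w[1] >> 8;
        enaddr[1] = w[2] & 0xff;
        enaddr[0] = w[2] >> 8;
    }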
*/ CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); temp_reg = CSR_READ_1(sc, EEPROM_REG); if (temp_reg & Trnoff_Enable) CSR_WRITE_1(sc, EEPROM_REG, temp_reg & ~Trnoff_Enable); for (i = 0; i < ETHER_ADDR_LEN; i++) CSR_WRITE_1(sc, I_ADDR_REG0 + i, IF_LLADDR(sc->ifp)[i]); /* * - Setup transmit chaining and discard bad received frames. * - Match broadcast. * - Clear test mode. * - Set receiving mode. */ CSR_WRITE_1(sc, REG1, CSR_READ_1(sc, REG1) | Tx_Chn_Int_Md | Tx_Chn_ErStp | Disc_Bad_Fr); CSR_WRITE_1(sc, REG2, CSR_READ_1(sc, REG2) | No_SA_Ins | RX_CRC_InMem); CSR_WRITE_1(sc, REG3, CSR_READ_1(sc, REG3) & 0x3f /* XXX constants. */ ); /* * - Set IRQ number, if this part has it. ISA devices have this, * while PC Card devices don't seem to. Either way, we have to * switch to Bank1 as the rest of this code relies on that. */ CSR_WRITE_1(sc, CMD_REG, Bank1_Sel); if (sc->flags & HAS_INT_NO_REG) CSR_WRITE_1(sc, INT_NO_REG, (CSR_READ_1(sc, INT_NO_REG) & 0xf8) | sc->irq2ee[sc->irq_no]); /* * Divide the available memory in the card into rcv and xmt buffers. * By default, I use the first 3/4 of the memory for the rcv buffer, * and the remaining 1/4 of the memory for the xmt buffer. */ sc->rx_mem_size = sc->mem_size * 3 / 4; sc->tx_mem_size = sc->mem_size - sc->rx_mem_size; sc->rx_lower_limit = 0x0000; sc->rx_upper_limit = sc->rx_mem_size - 2; sc->tx_lower_limit = sc->rx_mem_size; sc->tx_upper_limit = sc->mem_size - 2; CSR_WRITE_1(sc, RCV_LOWER_LIMIT_REG, sc->rx_lower_limit >> 8); CSR_WRITE_1(sc, RCV_UPPER_LIMIT_REG, sc->rx_upper_limit >> 8); CSR_WRITE_1(sc, XMT_LOWER_LIMIT_REG, sc->tx_lower_limit >> 8); CSR_WRITE_1(sc, XMT_UPPER_LIMIT_REG, sc->tx_upper_limit >> 8); /* * Enable receive and transmit interrupts, and clear any pending int. */ CSR_WRITE_1(sc, REG1, CSR_READ_1(sc, REG1) | TriST_INT); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); CSR_WRITE_1(sc, MASK_REG, All_Int & ~(Rx_Int | Tx_Int)); CSR_WRITE_1(sc, STATUS_REG, All_Int); /* * Initialize receive and transmit ring buffers. */ CSR_WRITE_2(sc, RCV_BAR, sc->rx_lower_limit); sc->rx_head = sc->rx_lower_limit; CSR_WRITE_2(sc, RCV_STOP_REG, sc->rx_upper_limit | 0xfe); CSR_WRITE_2(sc, XMT_BAR, sc->tx_lower_limit); sc->tx_head = sc->tx_tail = sc->tx_lower_limit; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; DODEBUG(Status, printf("OIDLE init\n");); callout_reset(&sc->timer, hz, ex_watchdog, sc); ex_setmulti(sc); /* * Final reset of the board, and enable operation. */ CSR_WRITE_1(sc, CMD_REG, Sel_Reset_CMD); DELAY(2); CSR_WRITE_1(sc, CMD_REG, Rcv_Enable_CMD); ex_start_locked(ifp); DODEBUG(Start_End, printf("%s: ex_init: finish\n", ifp->if_xname);); } static void ex_start(struct ifnet *ifp) { struct ex_softc * sc = ifp->if_softc; EX_LOCK(sc); ex_start_locked(ifp); EX_UNLOCK(sc); } static void ex_start_locked(struct ifnet *ifp) { struct ex_softc * sc = ifp->if_softc; int i, len, data_len, avail, dest, next; unsigned char tmp16[2]; struct mbuf * opkt; struct mbuf * m; DODEBUG(Start_End, printf("ex_start%d: start\n", unit);); /* * Main loop: send outgoing packets to network card until there are no * more packets left, or the card cannot accept any more yet. */ while (((opkt = ifp->if_snd.ifq_head) != NULL) && !(ifp->if_drv_flags & IFF_DRV_OACTIVE)) { /* * Ensure there is enough free transmit buffer space for * this packet, including its header. Note: the header * cannot wrap around the end of the transmit buffer and * must be kept together, so we allow space for twice the * length of the header, just in case. 
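The space accounting described in this comment reduces to a few lines. A sketch under the same rules — data padded to an even length, a header prepended, and one extra header's worth of slack required so the header itself can never wrap (hdr_len stands for XMT_HEADER_LEN):

    static int
    tx_room_ok(int head, int tail, int mem_size, int data_len, int hdr_len)
    {
        int used, avail, len;

        len = ((data_len + 1) & ~1) + hdr_len;    /* even data + header */
        used = tail - head;
        avail = (used >= 0) ? mem_size - used : -used;
        return (avail >= len + hdr_len);    /* extra hdr_len of slack */
    }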
*/ for (len = 0, m = opkt; m != NULL; m = m->m_next) { len += m->m_len; } data_len = len; DODEBUG(Sent_Pkts, printf("1. Sending packet with %d data bytes. ", data_len);); if (len & 1) { len += XMT_HEADER_LEN + 1; } else { len += XMT_HEADER_LEN; } if ((i = sc->tx_tail - sc->tx_head) >= 0) { avail = sc->tx_mem_size - i; } else { avail = -i; } DODEBUG(Sent_Pkts, printf("i=%d, avail=%d\n", i, avail);); if (avail >= len + XMT_HEADER_LEN) { IF_DEQUEUE(&ifp->if_snd, opkt); #ifdef EX_PSA_INTR /* * Disable rx and tx interrupts, to avoid corruption * of the host address register by interrupt service * routines. * XXX Is this necessary with splimp() enabled? */ CSR_WRITE_1(sc, MASK_REG, All_Int); #endif /* * Compute the start and end addresses of this * frame in the tx buffer. */ dest = sc->tx_tail; next = dest + len; if (next > sc->tx_upper_limit) { if ((sc->tx_upper_limit + 2 - sc->tx_tail) <= XMT_HEADER_LEN) { dest = sc->tx_lower_limit; next = dest + len; } else { next = sc->tx_lower_limit + next - sc->tx_upper_limit - 2; } } /* * Build the packet frame in the card's ring buffer. */ DODEBUG(Sent_Pkts, printf("2. dest=%d, next=%d. ", dest, next);); CSR_WRITE_2(sc, HOST_ADDR_REG, dest); CSR_WRITE_2(sc, IO_PORT_REG, Transmit_CMD); CSR_WRITE_2(sc, IO_PORT_REG, 0); CSR_WRITE_2(sc, IO_PORT_REG, next); CSR_WRITE_2(sc, IO_PORT_REG, data_len); /* * Output the packet data to the card. Ensure all * transfers are 16-bit wide, even if individual * mbufs have odd length. */ for (m = opkt, i = 0; m != NULL; m = m->m_next) { DODEBUG(Sent_Pkts, printf("[%d]", m->m_len);); if (i) { tmp16[1] = *(mtod(m, caddr_t)); CSR_WRITE_MULTI_2(sc, IO_PORT_REG, (uint16_t *) tmp16, 1); } CSR_WRITE_MULTI_2(sc, IO_PORT_REG, (uint16_t *) (mtod(m, caddr_t) + i), (m->m_len - i) / 2); if ((i = (m->m_len - i) & 1) != 0) { tmp16[0] = *(mtod(m, caddr_t) + m->m_len - 1); } } if (i) CSR_WRITE_MULTI_2(sc, IO_PORT_REG, (uint16_t *) tmp16, 1); /* * If there were other frames chained, update the * chain in the last one. */ if (sc->tx_head != sc->tx_tail) { if (sc->tx_tail != dest) { CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_last + XMT_Chain_Point); CSR_WRITE_2(sc, IO_PORT_REG, dest); } CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_last + XMT_Byte_Count); i = CSR_READ_2(sc, IO_PORT_REG); CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_last + XMT_Byte_Count); CSR_WRITE_2(sc, IO_PORT_REG, i | Ch_bit); } /* * Resume normal operation of the card: * - Make a dummy read to flush the DRAM write * pipeline. * - Enable receive and transmit interrupts. * - Send Transmit or Resume_XMT command, as * appropriate. */ CSR_READ_2(sc, IO_PORT_REG); #ifdef EX_PSA_INTR CSR_WRITE_1(sc, MASK_REG, All_Int & ~(Rx_Int | Tx_Int)); #endif if (sc->tx_head == sc->tx_tail) { CSR_WRITE_2(sc, XMT_BAR, dest); CSR_WRITE_1(sc, CMD_REG, Transmit_CMD); sc->tx_head = dest; DODEBUG(Sent_Pkts, printf("Transmit\n");); } else { CSR_WRITE_1(sc, CMD_REG, Resume_XMT_List_CMD); DODEBUG(Sent_Pkts, printf("Resume\n");); } sc->tx_last = dest; sc->tx_tail = next; BPF_MTAP(ifp, opkt); sc->tx_timeout = 2; if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); m_freem(opkt); } else { ifp->if_drv_flags |= IFF_DRV_OACTIVE; DODEBUG(Status, printf("OACTIVE start\n");); } } DODEBUG(Start_End, printf("ex_start%d: finish\n", unit);); } void ex_stop(struct ex_softc *sc) { DODEBUG(Start_End, printf("ex_stop%d: start\n", unit);); EX_ASSERT_LOCKED(sc); /* * Disable card operation: * - Disable the interrupt line. * - Flush transmission and disable reception. * - Mask and clear all interrupts. * - Reset the 82595. 
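The tmp16[] juggling in the transmit loop above enforces one rule: every transfer through IO_PORT_REG is 16 bits wide, so an odd trailing byte of one mbuf is held back and paired with the first byte of the next. The same idea restated over plain byte buffers, with a hypothetical put16() in place of CSR_WRITE_MULTI_2():

    #include <stdint.h>
    #include <stddef.h>

    static void put16(const uint8_t pair[2]);    /* hypothetical 16-bit port write */

    static void
    write_bufs_16(const uint8_t **bufs, const size_t *lens, int nbufs)
    {
        uint8_t pair[2];
        int i, carry = 0;

        for (i = 0; i < nbufs; i++) {
            const uint8_t *p = bufs[i];
            size_t n = lens[i];

            if (carry && n > 0) {    /* complete the held-back byte */
                pair[1] = *p++;
                n--;
                put16(pair);
                carry = 0;
            }
            for (; n >= 2; p += 2, n -= 2)
                put16(p);
            if (n == 1) {            /* hold the odd byte for later */
                pair[0] = *p;
                carry = 1;
            }
        }
        if (carry)
            put16(pair);    /* final odd byte; pad byte is don't-care */
    }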
*/ CSR_WRITE_1(sc, CMD_REG, Bank1_Sel); CSR_WRITE_1(sc, REG1, CSR_READ_1(sc, REG1) & ~TriST_INT); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); CSR_WRITE_1(sc, CMD_REG, Rcv_Stop); sc->tx_head = sc->tx_tail = sc->tx_lower_limit; sc->tx_last = 0; /* XXX I think these two lines are not necessary, because ex_init will always be called again to reinit the interface. */ CSR_WRITE_1(sc, MASK_REG, All_Int); CSR_WRITE_1(sc, STATUS_REG, All_Int); CSR_WRITE_1(sc, CMD_REG, Reset_CMD); DELAY(200); sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->tx_timeout = 0; callout_stop(&sc->timer); DODEBUG(Start_End, printf("ex_stop%d: finish\n", unit);); return; } void ex_intr(void *arg) { struct ex_softc *sc = (struct ex_softc *)arg; struct ifnet *ifp = sc->ifp; int int_status, send_pkts; int loops = 100; DODEBUG(Start_End, printf("ex_intr%d: start\n", unit);); EX_LOCK(sc); send_pkts = 0; while (loops-- > 0 && (int_status = CSR_READ_1(sc, STATUS_REG)) & (Tx_Int | Rx_Int)) { /* don't loop forever */ if (int_status == 0xff) break; if (int_status & Rx_Int) { CSR_WRITE_1(sc, STATUS_REG, Rx_Int); ex_rx_intr(sc); } else if (int_status & Tx_Int) { CSR_WRITE_1(sc, STATUS_REG, Tx_Int); ex_tx_intr(sc); send_pkts = 1; } } if (loops == 0) printf("100 loops are not enough\n"); /* * If any packet has been transmitted, and there are queued packets to * be sent, attempt to send more packets to the network card. */ if (send_pkts && (ifp->if_snd.ifq_head != NULL)) ex_start_locked(ifp); EX_UNLOCK(sc); DODEBUG(Start_End, printf("ex_intr%d: finish\n", unit);); return; } static void ex_tx_intr(struct ex_softc *sc) { struct ifnet * ifp = sc->ifp; int tx_status; DODEBUG(Start_End, printf("ex_tx_intr%d: start\n", unit);); /* * - Cancel the watchdog. * For all packets transmitted since last transmit interrupt: * - Advance chain pointer to next queued packet. * - Update statistics. */ sc->tx_timeout = 0; while (sc->tx_head != sc->tx_tail) { CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_head); if (!(CSR_READ_2(sc, IO_PORT_REG) & Done_bit)) break; tx_status = CSR_READ_2(sc, IO_PORT_REG); sc->tx_head = CSR_READ_2(sc, IO_PORT_REG); if (tx_status & TX_OK_bit) { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } else { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } if_inc_counter(ifp, IFCOUNTER_COLLISIONS, tx_status & No_Collisions_bits); } /* * The card should be ready to accept more packets now. */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; DODEBUG(Status, printf("OIDLE tx_intr\n");); DODEBUG(Start_End, printf("ex_tx_intr%d: finish\n", unit);); return; } static void ex_rx_intr(struct ex_softc *sc) { struct ifnet * ifp = sc->ifp; int rx_status; int pkt_len; int QQQ; struct mbuf * m; struct mbuf * ipkt; struct ether_header * eh; DODEBUG(Start_End, printf("ex_rx_intr%d: start\n", unit);); /* * For all packets received since last receive interrupt: * - If packet ok, read it into a new mbuf and queue it to interface, * updating statistics. * - If packet bad, just discard it, and update statistics. * Finally, advance receive stop limit in card's memory to new location. 
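The receive path just below contains this revision's actual change to the driver: MCLGET() now yields a testable success value, so the old MCLGET-then-check-M_EXT sequence collapses into a single conditional, as the hunk below shows. The new-style allocation in isolation (kernel context assumed; the helper name is illustrative):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/mbuf.h>

    static struct mbuf *
    alloc_rx_mbuf(int pkt_len)
    {
        struct mbuf *m;

        MGETHDR(m, M_NOWAIT, MT_DATA);
        if (m == NULL)
            return (NULL);
        if (pkt_len >= MINCLSIZE) {
            /* MCLGET() now reports success directly. */
            if (!MCLGET(m, M_NOWAIT)) {
                m_freem(m);
                return (NULL);
            }
            m->m_len = MCLBYTES;
        } else
            m->m_len = MHLEN;
        return (m);
    }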
*/ CSR_WRITE_2(sc, HOST_ADDR_REG, sc->rx_head); while (CSR_READ_2(sc, IO_PORT_REG) == RCV_Done) { rx_status = CSR_READ_2(sc, IO_PORT_REG); sc->rx_head = CSR_READ_2(sc, IO_PORT_REG); QQQ = pkt_len = CSR_READ_2(sc, IO_PORT_REG); if (rx_status & RCV_OK_bit) { MGETHDR(m, M_NOWAIT, MT_DATA); ipkt = m; if (ipkt == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); } else { ipkt->m_pkthdr.rcvif = ifp; ipkt->m_pkthdr.len = pkt_len; ipkt->m_len = MHLEN; while (pkt_len > 0) { if (pkt_len >= MINCLSIZE) { - MCLGET(m, M_NOWAIT); - if (m->m_flags & M_EXT) { + if (MCLGET(m, M_NOWAIT)) { m->m_len = MCLBYTES; } else { m_freem(ipkt); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); goto rx_another; } } m->m_len = min(m->m_len, pkt_len); /* * NOTE: I'm assuming that all mbufs allocated are of even length, * except for the last one in an odd-length packet. */ CSR_READ_MULTI_2(sc, IO_PORT_REG, mtod(m, uint16_t *), m->m_len / 2); if (m->m_len & 1) { *(mtod(m, caddr_t) + m->m_len - 1) = CSR_READ_1(sc, IO_PORT_REG); } pkt_len -= m->m_len; if (pkt_len > 0) { MGET(m->m_next, M_NOWAIT, MT_DATA); if (m->m_next == NULL) { m_freem(ipkt); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); goto rx_another; } m = m->m_next; m->m_len = MLEN; } } eh = mtod(ipkt, struct ether_header *); #ifdef EXDEBUG if (debug_mask & Rcvd_Pkts) { if ((eh->ether_dhost[5] != 0xff) || (eh->ether_dhost[0] != 0xff)) { printf("Receive packet with %d data bytes: %6D -> ", QQQ, eh->ether_shost, ":"); printf("%6D\n", eh->ether_dhost, ":"); } /* QQQ */ } #endif EX_UNLOCK(sc); (*ifp->if_input)(ifp, ipkt); EX_LOCK(sc); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); } } else { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } CSR_WRITE_2(sc, HOST_ADDR_REG, sc->rx_head); rx_another: ; } if (sc->rx_head < sc->rx_lower_limit + 2) CSR_WRITE_2(sc, RCV_STOP_REG, sc->rx_upper_limit); else CSR_WRITE_2(sc, RCV_STOP_REG, sc->rx_head - 2); DODEBUG(Start_End, printf("ex_rx_intr%d: finish\n", unit);); return; } static int ex_ioctl(register struct ifnet *ifp, u_long cmd, caddr_t data) { struct ex_softc * sc = ifp->if_softc; struct ifreq * ifr = (struct ifreq *)data; int error = 0; DODEBUG(Start_End, printf("%s: ex_ioctl: start ", ifp->if_xname);); switch(cmd) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, cmd, data); break; case SIOCSIFFLAGS: DODEBUG(Start_End, printf("SIOCSIFFLAGS");); EX_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { ex_stop(sc); } else { ex_init_locked(sc); } EX_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: ex_init(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, cmd); break; default: DODEBUG(Start_End, printf("unknown");); error = EINVAL; } DODEBUG(Start_End, printf("\n%s: ex_ioctl: finish\n", ifp->if_xname);); return(error); } static void ex_setmulti(struct ex_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *maddr; uint16_t *addr; int count; int timeout, status; ifp = sc->ifp; count = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(maddr, &ifp->if_multiaddrs, ifma_link) { if (maddr->ifma_addr->sa_family != AF_LINK) continue; count++; } if_maddr_runlock(ifp); if ((ifp->if_flags & IFF_PROMISC) || (ifp->if_flags & IFF_ALLMULTI) || count > 63) { /* Interface is in promiscuous mode or there are too many * multicast addresses for the card to handle */ CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); CSR_WRITE_1(sc, REG2, CSR_READ_1(sc, REG2) | Promisc_Mode); CSR_WRITE_1(sc, REG3, CSR_READ_1(sc, REG3)); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); } else if 
((ifp->if_flags & IFF_MULTICAST) && (count > 0)) { /* Program multicast addresses plus our MAC address * into the filter */ CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); CSR_WRITE_1(sc, REG2, CSR_READ_1(sc, REG2) | Multi_IA); CSR_WRITE_1(sc, REG3, CSR_READ_1(sc, REG3)); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); /* Borrow space from TX buffer; this should be safe * as this is only called from ex_init */ CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_lower_limit); CSR_WRITE_2(sc, IO_PORT_REG, MC_Setup_CMD); CSR_WRITE_2(sc, IO_PORT_REG, 0); CSR_WRITE_2(sc, IO_PORT_REG, 0); CSR_WRITE_2(sc, IO_PORT_REG, (count + 1) * 6); if_maddr_rlock(ifp); TAILQ_FOREACH(maddr, &ifp->if_multiaddrs, ifma_link) { if (maddr->ifma_addr->sa_family != AF_LINK) continue; addr = (uint16_t*)LLADDR((struct sockaddr_dl *) maddr->ifma_addr); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); } if_maddr_runlock(ifp); /* Program our MAC address as well */ /* XXX: Is this necessary? The Linux driver does this * but the NetBSD driver does not */ addr = (uint16_t*)IF_LLADDR(sc->ifp); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_READ_2(sc, IO_PORT_REG); CSR_WRITE_2(sc, XMT_BAR, sc->tx_lower_limit); CSR_WRITE_1(sc, CMD_REG, MC_Setup_CMD); sc->tx_head = sc->tx_lower_limit; sc->tx_tail = sc->tx_head + XMT_HEADER_LEN + (count + 1) * 6; for (timeout=0; timeout<100; timeout++) { DELAY(2); if ((CSR_READ_1(sc, STATUS_REG) & Exec_Int) == 0) continue; status = CSR_READ_1(sc, CMD_REG); CSR_WRITE_1(sc, STATUS_REG, Exec_Int); break; } sc->tx_head = sc->tx_tail; } else { /* No multicast or promiscuous mode */ CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); CSR_WRITE_1(sc, REG2, CSR_READ_1(sc, REG2) & 0xDE); /* ~(Multi_IA | Promisc_Mode) */ CSR_WRITE_1(sc, REG3, CSR_READ_1(sc, REG3)); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); } } static void ex_reset(struct ex_softc *sc) { DODEBUG(Start_End, printf("ex_reset%d: start\n", unit);); EX_ASSERT_LOCKED(sc); ex_stop(sc); ex_init_locked(sc); DODEBUG(Start_End, printf("ex_reset%d: finish\n", unit);); return; } static void ex_watchdog(void *arg) { struct ex_softc * sc = arg; struct ifnet *ifp = sc->ifp; if (sc->tx_timeout && --sc->tx_timeout == 0) { DODEBUG(Start_End, if_printf(ifp, "ex_watchdog: start\n");); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; DODEBUG(Status, printf("OIDLE watchdog\n");); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ex_reset(sc); ex_start_locked(ifp); DODEBUG(Start_End, if_printf(ifp, "ex_watchdog: finish\n");); } callout_reset(&sc->timer, hz, ex_watchdog, sc); } static int ex_get_media(struct ex_softc *sc) { int current; int media; media = ex_eeprom_read(sc, EE_W5); CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); current = CSR_READ_1(sc, REG3); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); if ((current & TPE_bit) && (media & EE_W5_PORT_TPE)) return(IFM_ETHER|IFM_10_T); if ((current & BNC_bit) && (media & EE_W5_PORT_BNC)) return(IFM_ETHER|IFM_10_2); if (media & EE_W5_PORT_AUI) return (IFM_ETHER|IFM_10_5); return (IFM_ETHER|IFM_AUTO); } static int ex_ifmedia_upd(ifp) struct ifnet * ifp; { struct ex_softc * sc = ifp->if_softc; if (IFM_TYPE(sc->ifmedia.ifm_media) != IFM_ETHER) return EINVAL; return (0); } static void ex_ifmedia_sts(ifp, ifmr) struct ifnet * ifp; struct ifmediareq * ifmr; { struct ex_softc * sc = ifp->if_softc; EX_LOCK(sc); ifmr->ifm_active = ex_get_media(sc); ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; EX_UNLOCK(sc); return; } u_short ex_eeprom_read(struct ex_softc *sc, int location) { 
int i; u_short data = 0; int read_cmd = location | EE_READ_CMD; short ctrl_val = EECS; CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); CSR_WRITE_1(sc, EEPROM_REG, EECS); for (i = 8; i >= 0; i--) { short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI : ctrl_val; CSR_WRITE_1(sc, EEPROM_REG, outval); CSR_WRITE_1(sc, EEPROM_REG, outval | EESK); DELAY(3); CSR_WRITE_1(sc, EEPROM_REG, outval); DELAY(2); } CSR_WRITE_1(sc, EEPROM_REG, ctrl_val); for (i = 16; i > 0; i--) { CSR_WRITE_1(sc, EEPROM_REG, ctrl_val | EESK); DELAY(3); data = (data << 1) | ((CSR_READ_1(sc, EEPROM_REG) & EEDO) ? 1 : 0); CSR_WRITE_1(sc, EEPROM_REG, ctrl_val); DELAY(2); } ctrl_val &= ~EECS; CSR_WRITE_1(sc, EEPROM_REG, ctrl_val | EESK); DELAY(3); CSR_WRITE_1(sc, EEPROM_REG, ctrl_val); DELAY(2); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); return(data); } Index: head/sys/dev/fe/if_fe.c =================================================================== --- head/sys/dev/fe/if_fe.c (revision 276749) +++ head/sys/dev/fe/if_fe.c (revision 276750) @@ -1,2265 +1,2264 @@ /*- * All Rights Reserved, Copyright (C) Fujitsu Limited 1995 * * This software may be used, modified, copied, distributed, and sold, in * both source and binary form provided that the above copyright, these * terms and the following disclaimer are retained. The name of the author * and/or the contributor may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND THE CONTRIBUTOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR THE CONTRIBUTOR BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * * Device driver for Fujitsu MB86960A/MB86965A based Ethernet cards. * Contributed by M. Sekiguchi. * * This version is intended to be a generic template for various * MB86960A/MB86965A based Ethernet cards. It currently supports * Fujitsu FMV-180 series for ISA and Allied-Telesis AT1700/RE2000 * series for ISA, as well as Fujitsu MBH10302 PC Card. * There are some currently- * unused hooks embedded, which are primarily intended to support * other types of Ethernet cards, but the author is not sure whether * they are useful. * * This version also includes some alignments to support RE1000, * C-NET(98)P2 and so on. These cards are not for AT-compatibles, * but for NEC PC-98 bus -- a proprietary bus architecture available * only in Japan. Confusingly, it is different from the Microsoft's * PC98 architecture. :-{ * Further work for PC-98 version will be available as a part of * FreeBSD(98) project. * * This software is a derivative work of if_ed.c version 1.56 by David * Greenman available as a part of FreeBSD 2.0 RELEASE source distribution. * * The following lines are retained from the original if_ed.c: * * Copyright (C) 1993, David Greenman. 
This software may be used, modified, * copied, distributed, and sold, in both source and binary form provided * that the above copyright and these terms are retained. Under no * circumstances is the author responsible for the proper functioning * of this software, nor does the author assume any responsibility * for damages incurred with its use. */ /* * TODO: * o To support ISA PnP auto configuration for FMV-183/184. * o To support REX-9886/87(PC-98 only). * o To reconsider mbuf usage. * o To reconsider transmission buffer usage, including * transmission buffer size (currently 4KB x 2) and pros-and- * cons of multiple frame transmission. * o To test IPX codes. * o To test new-bus frontend. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Transmit just one packet per a "send" command to 86960. * This option is intended for performance test. An EXPERIMENTAL option. */ #ifndef FE_SINGLE_TRANSMISSION #define FE_SINGLE_TRANSMISSION 0 #endif /* * Maximum loops when interrupt. * This option prevents an infinite loop due to hardware failure. * (Some laptops make an infinite loop after PC Card is ejected.) */ #ifndef FE_MAX_LOOP #define FE_MAX_LOOP 0x800 #endif /* * Device configuration flags. */ /* DLCR6 settings. */ #define FE_FLAGS_DLCR6_VALUE 0x007F /* Force DLCR6 override. */ #define FE_FLAGS_OVERRIDE_DLCR6 0x0080 devclass_t fe_devclass; /* * Special filter values. */ static struct fe_filter const fe_filter_nothing = { FE_FILTER_NOTHING }; static struct fe_filter const fe_filter_all = { FE_FILTER_ALL }; /* Standard driver entry points. These can be static. */ static void fe_init (void *); static void fe_init_locked (struct fe_softc *); static driver_intr_t fe_intr; static int fe_ioctl (struct ifnet *, u_long, caddr_t); static void fe_start (struct ifnet *); static void fe_start_locked (struct ifnet *); static void fe_watchdog (void *); static int fe_medchange (struct ifnet *); static void fe_medstat (struct ifnet *, struct ifmediareq *); /* Local functions. Order of declaration is confused. FIXME. */ static int fe_get_packet ( struct fe_softc *, u_short ); static void fe_tint ( struct fe_softc *, u_char ); static void fe_rint ( struct fe_softc *, u_char ); static void fe_xmit ( struct fe_softc * ); static void fe_write_mbufs ( struct fe_softc *, struct mbuf * ); static void fe_setmode ( struct fe_softc * ); static void fe_loadmar ( struct fe_softc * ); #ifdef DIAGNOSTIC static void fe_emptybuffer ( struct fe_softc * ); #endif /* * Fe driver specific constants which relate to 86960/86965. */ /* Interrupt masks */ #define FE_TMASK ( FE_D2_COLL16 | FE_D2_TXDONE ) #define FE_RMASK ( FE_D3_OVRFLO | FE_D3_CRCERR \ | FE_D3_ALGERR | FE_D3_SRTPKT | FE_D3_PKTRDY ) /* Maximum number of iterations for a receive interrupt. */ #define FE_MAX_RECV_COUNT ( ( 65536 - 2048 * 2 ) / 64 ) /* * Maximum size of SRAM is 65536, * minimum size of transmission buffer in fe is 2x2KB, * and minimum amount of received packet including headers * added by the chip is 64 bytes. * Hence FE_MAX_RECV_COUNT is the upper limit for number * of packets in the receive buffer. */ /* * Miscellaneous definitions not directly related to hardware. */ /* The following line must be delete when "net/if_media.h" support it. */ #ifndef IFM_10_FL #define IFM_10_FL /* 13 */ IFM_10_5 #endif #if 0 /* Mapping between media bitmap (in fe_softc.mbitmap) and ifm_media. 
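fe_attach(), further below, walks this zero-terminated table twice: once adding every media word whose bit is set in the supported-media bitmap, then once more to pick the default. The pattern in miniature, with hypothetical add_media()/set_media() standing in for ifmedia_add()/ifmedia_set():

    static void add_media(int word);    /* hypothetical */
    static void set_media(int word);    /* hypothetical */

    static void
    register_media(const int *tab, int mbitmap, int defmedia)
    {
        int b;

        for (b = 0; tab[b] != 0; b++)
            if (mbitmap & (1 << b))
                add_media(tab[b]);
        for (b = 0; tab[b] != 0; b++)
            if (defmedia & (1 << b)) {
                set_media(tab[b]);    /* first default bit wins */
                break;
            }
    }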
*/ static int const bit2media [] = { IFM_HDX | IFM_ETHER | IFM_AUTO, IFM_HDX | IFM_ETHER | IFM_MANUAL, IFM_HDX | IFM_ETHER | IFM_10_T, IFM_HDX | IFM_ETHER | IFM_10_2, IFM_HDX | IFM_ETHER | IFM_10_5, IFM_HDX | IFM_ETHER | IFM_10_FL, IFM_FDX | IFM_ETHER | IFM_10_T, /* More can be come here... */ 0 }; #else /* Mapping between media bitmap (in fe_softc.mbitmap) and ifm_media. */ static int const bit2media [] = { IFM_ETHER | IFM_AUTO, IFM_ETHER | IFM_MANUAL, IFM_ETHER | IFM_10_T, IFM_ETHER | IFM_10_2, IFM_ETHER | IFM_10_5, IFM_ETHER | IFM_10_FL, IFM_ETHER | IFM_10_T, /* More can be come here... */ 0 }; #endif /* * Check for specific bits in specific registers have specific values. * A common utility function called from various sub-probe routines. */ int fe_simple_probe (struct fe_softc const * sc, struct fe_simple_probe_struct const * sp) { struct fe_simple_probe_struct const *p; int8_t bits; for (p = sp; p->mask != 0; p++) { bits = fe_inb(sc, p->port); printf("port %d, mask %x, bits %x read %x\n", p->port, p->mask, p->bits, bits); if ((bits & p->mask) != p->bits) return 0; } return 1; } /* Test if a given 6 byte value is a valid Ethernet station (MAC) address. "Vendor" is an expected vendor code (first three bytes,) or a zero when nothing expected. */ int fe_valid_Ether_p (u_char const * addr, unsigned vendor) { #ifdef FE_DEBUG printf("fe?: validating %6D against %06x\n", addr, ":", vendor); #endif /* All zero is not allowed as a vendor code. */ if (addr[0] == 0 && addr[1] == 0 && addr[2] == 0) return 0; switch (vendor) { case 0x000000: /* Legal Ethernet address (stored in ROM) must have its Group and Local bits cleared. */ if ((addr[0] & 0x03) != 0) return 0; break; case 0x020000: /* Same as above, but a local address is allowed in this context. */ if (ETHER_IS_MULTICAST(addr)) return 0; break; default: /* Make sure the vendor part matches if one is given. */ if ( addr[0] != ((vendor >> 16) & 0xFF) || addr[1] != ((vendor >> 8) & 0xFF) || addr[2] != ((vendor ) & 0xFF)) return 0; break; } /* Host part must not be all-zeros nor all-ones. */ if (addr[3] == 0xFF && addr[4] == 0xFF && addr[5] == 0xFF) return 0; if (addr[3] == 0x00 && addr[4] == 0x00 && addr[5] == 0x00) return 0; /* Given addr looks like an Ethernet address. */ return 1; } /* Fill our softc struct with default value. */ void fe_softc_defaults (struct fe_softc *sc) { /* Prepare for typical register prototypes. We assume a "typical" board has <32KB> of SRAM connected with a data lines. */ sc->proto_dlcr4 = FE_D4_LBC_DISABLE | FE_D4_CNTRL; sc->proto_dlcr5 = 0; sc->proto_dlcr6 = FE_D6_BUFSIZ_32KB | FE_D6_TXBSIZ_2x4KB | FE_D6_BBW_BYTE | FE_D6_SBW_WORD | FE_D6_SRAM_100ns; sc->proto_dlcr7 = FE_D7_BYTSWP_LH; sc->proto_bmpr13 = 0; /* Assume the probe process (to be done later) is stable. */ sc->stability = 0; /* A typical board needs no hooks. */ sc->init = NULL; sc->stop = NULL; /* Assume the board has no software-controllable media selection. */ sc->mbitmap = MB_HM; sc->defmedia = MB_HM; sc->msel = NULL; } /* Common error reporting routine used in probe routines for "soft configured IRQ"-type boards. */ void fe_irq_failure (char const *name, int unit, int irq, char const *list) { printf("fe%d: %s board is detected, but %s IRQ was given\n", unit, name, (irq == NO_IRQ ? "no" : "invalid")); if (list != NULL) { printf("fe%d: specify an IRQ from %s in kernel config\n", unit, list); } } /* * Hardware (vendor) specific hooks. */ /* * Generic media selection scheme for MB86965 based boards. 
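fe_simple_probe() above is a table-driven signature check: read each port, apply the mask, compare with the expected bits, and reject the device on the first mismatch. Restated standalone, with read8() as a hypothetical register accessor:

    #include <stdint.h>

    struct probe_entry {
        uint16_t port;
        uint8_t mask;    /* mask == 0 terminates the table */
        uint8_t bits;
    };

    static uint8_t read8(uint16_t port);    /* hypothetical */

    static int
    simple_probe(const struct probe_entry *p)
    {
        for (; p->mask != 0; p++)
            if ((read8(p->port) & p->mask) != p->bits)
                return (0);    /* mismatch: not our chip */
        return (1);
    }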
*/ void fe_msel_965 (struct fe_softc *sc) { u_char b13; /* Find the appropriate bits for BMPR13 tranceiver control. */ switch (IFM_SUBTYPE(sc->media.ifm_media)) { case IFM_AUTO: b13 = FE_B13_PORT_AUTO | FE_B13_TPTYPE_UTP; break; case IFM_10_T: b13 = FE_B13_PORT_TP | FE_B13_TPTYPE_UTP; break; default: b13 = FE_B13_PORT_AUI; break; } /* Write it into the register. It takes effect immediately. */ fe_outb(sc, FE_BMPR13, sc->proto_bmpr13 | b13); } /* * Fujitsu MB86965 JLI mode support routines. */ /* * Routines to read all bytes from the config EEPROM through MB86965A. * It is a MicroWire (3-wire) serial EEPROM with 6-bit address. * (93C06 or 93C46.) */ static void fe_strobe_eeprom_jli (struct fe_softc *sc, u_short bmpr16) { /* * We must guarantee 1us (or more) interval to access slow * EEPROMs. The following redundant code provides enough * delay with ISA timing. (Even if the bus clock is "tuned.") * Some modification will be needed on faster busses. */ fe_outb(sc, bmpr16, FE_B16_SELECT); fe_outb(sc, bmpr16, FE_B16_SELECT | FE_B16_CLOCK); fe_outb(sc, bmpr16, FE_B16_SELECT | FE_B16_CLOCK); fe_outb(sc, bmpr16, FE_B16_SELECT); } void fe_read_eeprom_jli (struct fe_softc * sc, u_char * data) { u_char n, val, bit; u_char save16, save17; /* Save the current value of the EEPROM interface registers. */ save16 = fe_inb(sc, FE_BMPR16); save17 = fe_inb(sc, FE_BMPR17); /* Read bytes from EEPROM; two bytes per an iteration. */ for (n = 0; n < JLI_EEPROM_SIZE / 2; n++) { /* Reset the EEPROM interface. */ fe_outb(sc, FE_BMPR16, 0x00); fe_outb(sc, FE_BMPR17, 0x00); /* Start EEPROM access. */ fe_outb(sc, FE_BMPR16, FE_B16_SELECT); fe_outb(sc, FE_BMPR17, FE_B17_DATA); fe_strobe_eeprom_jli(sc, FE_BMPR16); /* Pass the iteration count as well as a READ command. */ val = 0x80 | n; for (bit = 0x80; bit != 0x00; bit >>= 1) { fe_outb(sc, FE_BMPR17, (val & bit) ? FE_B17_DATA : 0); fe_strobe_eeprom_jli(sc, FE_BMPR16); } fe_outb(sc, FE_BMPR17, 0x00); /* Read a byte. */ val = 0; for (bit = 0x80; bit != 0x00; bit >>= 1) { fe_strobe_eeprom_jli(sc, FE_BMPR16); if (fe_inb(sc, FE_BMPR17) & FE_B17_DATA) val |= bit; } *data++ = val; /* Read one more byte. */ val = 0; for (bit = 0x80; bit != 0x00; bit >>= 1) { fe_strobe_eeprom_jli(sc, FE_BMPR16); if (fe_inb(sc, FE_BMPR17) & FE_B17_DATA) val |= bit; } *data++ = val; } #if 0 /* Reset the EEPROM interface, again. */ fe_outb(sc, FE_BMPR16, 0x00); fe_outb(sc, FE_BMPR17, 0x00); #else /* Make sure to restore the original value of EEPROM interface registers, since we are not yet sure we have MB86965A on the address. */ fe_outb(sc, FE_BMPR17, save17); fe_outb(sc, FE_BMPR16, save16); #endif #if 1 /* Report what we got. */ if (bootverbose) { int i; data -= JLI_EEPROM_SIZE; for (i = 0; i < JLI_EEPROM_SIZE; i += 16) { if_printf(sc->ifp, "EEPROM(JLI):%3x: %16D\n", i, data + i, " "); } } #endif } void fe_init_jli (struct fe_softc * sc) { /* "Reset" by writing into a magic location. */ DELAY(200); fe_outb(sc, 0x1E, fe_inb(sc, 0x1E)); DELAY(300); } /* * SSi 78Q8377A support routines. */ /* * Routines to read all bytes from the config EEPROM through 78Q8377A. * It is a MicroWire (3-wire) serial EEPROM with 8-bit address. (I.e., * 93C56 or 93C66.) * * As I don't have SSi manuals, (hmm, an old song again!) I'm not exactly * sure the following code is correct... It is just stolen from the * C-NET(98)P2 support routine in FreeBSD(98). 
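This routine and the JLI reader above both bit-bang the same MicroWire shape: select the chip, clock a start bit and READ opcode plus address out MSB first, then clock 16 data bits back in. A compressed sketch with hypothetical pin helpers; the exact clock edge on which data is driven or sampled differs between the parts handled in this file, so take the edges here as illustrative:

    #include <stdint.h>

    static void pin_select(int on);        /* hypothetical chip select */
    static void pin_data(int bit);         /* hypothetical data-in drive */
    static int  pin_data_out(void);        /* hypothetical data-out sample */
    static void pin_clock_pulse(void);     /* hypothetical raise-then-drop */

    static uint16_t
    microwire_read(uint16_t cmd, int cmdbits)
    {
        uint16_t val = 0;
        int i;

        pin_select(1);
        for (i = cmdbits - 1; i >= 0; i--) {    /* command goes MSB first */
            pin_data((cmd >> i) & 1);
            pin_clock_pulse();
        }
        for (i = 0; i < 16; i++) {              /* data returns MSB first */
            pin_clock_pulse();
            val = (val << 1) | pin_data_out();
        }
        pin_select(0);
        return (val);
    }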
*/ void fe_read_eeprom_ssi (struct fe_softc *sc, u_char *data) { u_char val, bit; int n; u_char save6, save7, save12; /* Save the current value for the DLCR registers we are about to destroy. */ save6 = fe_inb(sc, FE_DLCR6); save7 = fe_inb(sc, FE_DLCR7); /* Put the 78Q8377A into a state that we can access the EEPROM. */ fe_outb(sc, FE_DLCR6, FE_D6_BBW_WORD | FE_D6_SBW_WORD | FE_D6_DLC_DISABLE); fe_outb(sc, FE_DLCR7, FE_D7_BYTSWP_LH | FE_D7_RBS_BMPR | FE_D7_RDYPNS | FE_D7_POWER_UP); /* Save the current value for the BMPR12 register, too. */ save12 = fe_inb(sc, FE_DLCR12); /* Read bytes from EEPROM; two bytes per an iteration. */ for (n = 0; n < SSI_EEPROM_SIZE / 2; n++) { /* Start EEPROM access */ fe_outb(sc, FE_DLCR12, SSI_EEP); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL); /* Send the following four bits to the EEPROM in the specified order: a dummy bit, a start bit, and command bits (10) for READ. */ fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL ); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK ); /* 0 */ fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_DAT); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK | SSI_DAT); /* 1 */ fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_DAT); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK | SSI_DAT); /* 1 */ fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL ); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK ); /* 0 */ /* Pass the iteration count to the chip. */ for (bit = 0x80; bit != 0x00; bit >>= 1) { val = ( n & bit ) ? SSI_DAT : 0; fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | val); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK | val); } /* Read a byte. */ val = 0; for (bit = 0x80; bit != 0x00; bit >>= 1) { fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK); if (fe_inb(sc, FE_DLCR12) & SSI_DIN) val |= bit; } *data++ = val; /* Read one more byte. */ val = 0; for (bit = 0x80; bit != 0x00; bit >>= 1) { fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK); if (fe_inb(sc, FE_DLCR12) & SSI_DIN) val |= bit; } *data++ = val; fe_outb(sc, FE_DLCR12, SSI_EEP); } /* Reset the EEPROM interface. (For now.) */ fe_outb(sc, FE_DLCR12, 0x00); /* Restore the saved register values, for the case that we didn't have 78Q8377A at the given address. */ fe_outb(sc, FE_DLCR12, save12); fe_outb(sc, FE_DLCR7, save7); fe_outb(sc, FE_DLCR6, save6); #if 1 /* Report what we got. */ if (bootverbose) { int i; data -= SSI_EEPROM_SIZE; for (i = 0; i < SSI_EEPROM_SIZE; i += 16) { if_printf(sc->ifp, "EEPROM(SSI):%3x: %16D\n", i, data + i, " "); } } #endif } /* * TDK/LANX boards support routines. */ /* It is assumed that the CLK line is low and SDA is high (float) upon entry. 
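The LNX_CYCLE macros defined just below pack four successive (SDA, CLK) line states into one 32-bit constant, one byte per phase, and fe_eeprom_cycle_lnx() shifts them out low byte first with a settle delay between phases. The emit side reduced to a sketch, with outp()/settle() as hypothetical stand-ins for fe_outb()/DELAY():

    #include <stdint.h>

    static void outp(uint8_t lines);    /* hypothetical: drive SDA/CLK */
    static void settle(void);           /* hypothetical: ~15us pause */

    static void
    emit_cycle(uint32_t cycle)
    {
        int shift;

        for (shift = 0; shift < 32; shift += 8) {
            outp((cycle >> shift) & 0xff);    /* one phase per byte */
            settle();
        }
    }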
*/ #define LNX_PH(D,K,N) \ ((LNX_SDA_##D | LNX_CLK_##K) << N) #define LNX_CYCLE(D1,D2,D3,D4,K1,K2,K3,K4) \ (LNX_PH(D1,K1,0)|LNX_PH(D2,K2,8)|LNX_PH(D3,K3,16)|LNX_PH(D4,K4,24)) #define LNX_CYCLE_START LNX_CYCLE(HI,LO,LO,HI, HI,HI,LO,LO) #define LNX_CYCLE_STOP LNX_CYCLE(LO,LO,HI,HI, LO,HI,HI,LO) #define LNX_CYCLE_HI LNX_CYCLE(HI,HI,HI,HI, LO,HI,LO,LO) #define LNX_CYCLE_LO LNX_CYCLE(LO,LO,LO,HI, LO,HI,LO,LO) #define LNX_CYCLE_INIT LNX_CYCLE(LO,HI,HI,HI, LO,LO,LO,LO) static void fe_eeprom_cycle_lnx (struct fe_softc *sc, u_short reg20, u_long cycle) { fe_outb(sc, reg20, (cycle ) & 0xFF); DELAY(15); fe_outb(sc, reg20, (cycle >> 8) & 0xFF); DELAY(15); fe_outb(sc, reg20, (cycle >> 16) & 0xFF); DELAY(15); fe_outb(sc, reg20, (cycle >> 24) & 0xFF); DELAY(15); } static u_char fe_eeprom_receive_lnx (struct fe_softc *sc, u_short reg20) { u_char dat; fe_outb(sc, reg20, LNX_CLK_HI | LNX_SDA_FL); DELAY(15); dat = fe_inb(sc, reg20); fe_outb(sc, reg20, LNX_CLK_LO | LNX_SDA_FL); DELAY(15); return (dat & LNX_SDA_IN); } void fe_read_eeprom_lnx (struct fe_softc *sc, u_char *data) { int i; u_char n, bit, val; u_char save20; u_short reg20 = 0x14; save20 = fe_inb(sc, reg20); /* NOTE: DELAY() timing constants are approximately three times longer (slower) than the required minimum. This is to guarantee a reliable operation under some tough conditions... Fortunately, this routine is only called during the boot phase, so the speed is less important than stability. */ #if 1 /* Reset the X24C01's internal state machine and put it into the IDLE state. We usually don't need this, but *if* someone (e.g., probe routine of other driver) write some garbage into the register at 0x14, synchronization will be lost, and the normal EEPROM access protocol won't work. Moreover, as there are no easy way to reset, we need a _manoeuvre_ here. (It even lacks a reset pin, so pushing the RESET button on the PC doesn't help!) */ fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_INIT); for (i = 0; i < 10; i++) fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_START); fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_STOP); DELAY(10000); #endif /* Issue a start condition. */ fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_START); /* Send seven bits of the starting address (zero, in this case) and a command bit for READ. */ val = 0x01; for (bit = 0x80; bit != 0x00; bit >>= 1) { if (val & bit) { fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_HI); } else { fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_LO); } } /* Receive an ACK bit. */ if (fe_eeprom_receive_lnx(sc, reg20)) { /* ACK was not received. EEPROM is not present (i.e., this board was not a TDK/LANX) or not working properly. */ if (bootverbose) { if_printf(sc->ifp, "no ACK received from EEPROM(LNX)\n"); } /* Clear the given buffer to indicate we could not get any info. and return. */ bzero(data, LNX_EEPROM_SIZE); goto RET; } /* Read bytes from EEPROM. */ for (n = 0; n < LNX_EEPROM_SIZE; n++) { /* Read a byte and store it into the buffer. */ val = 0x00; for (bit = 0x80; bit != 0x00; bit >>= 1) { if (fe_eeprom_receive_lnx(sc, reg20)) val |= bit; } *data++ = val; /* Acknowledge if we have to read more. */ if (n < LNX_EEPROM_SIZE - 1) { fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_LO); } } /* Issue a STOP condition, de-activating the clock line. It will be safer to keep the clock line low than to leave it high. */ fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_STOP); RET: fe_outb(sc, reg20, save20); #if 1 /* Report what we got. 
*/ if (bootverbose) { data -= LNX_EEPROM_SIZE; for (i = 0; i < LNX_EEPROM_SIZE; i += 16) { if_printf(sc->ifp, "EEPROM(LNX):%3x: %16D\n", i, data + i, " "); } } #endif } void fe_init_lnx (struct fe_softc * sc) { /* Reset the 86960. Do we need this? FIXME. */ fe_outb(sc, 0x12, 0x06); DELAY(100); fe_outb(sc, 0x12, 0x07); DELAY(100); /* Setup IRQ control register on the ASIC. */ fe_outb(sc, 0x14, sc->priv_info); } /* * Ungermann-Bass board support routine. */ void fe_init_ubn (struct fe_softc * sc) { /* Do we need this? FIXME. */ fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_RBS_BMPR | FE_D7_POWER_UP); fe_outb(sc, 0x18, 0x00); DELAY(200); /* Setup IRQ control register on the ASIC. */ fe_outb(sc, 0x14, sc->priv_info); } /* * Install interface into kernel networking data structures */ int fe_attach (device_t dev) { struct fe_softc *sc = device_get_softc(dev); struct ifnet *ifp; int flags = device_get_flags(dev); int b, error; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not ifalloc\n"); fe_release_resource(dev); return (ENOSPC); } mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->timer, &sc->lock, 0); /* * Initialize ifnet structure */ ifp->if_softc = sc; if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_start = fe_start; ifp->if_ioctl = fe_ioctl; ifp->if_init = fe_init; ifp->if_linkmib = &sc->mibdata; ifp->if_linkmiblen = sizeof (sc->mibdata); #if 0 /* I'm not sure... */ sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS; #endif /* * Set fixed interface flags. */ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); #if FE_SINGLE_TRANSMISSION /* Override txb config to allocate minimum. */ sc->proto_dlcr6 &= ~FE_D6_TXBSIZ; sc->proto_dlcr6 |= FE_D6_TXBSIZ_2x2KB; #endif /* Modify hardware config if it is requested. */ if (flags & FE_FLAGS_OVERRIDE_DLCR6) sc->proto_dlcr6 = flags & FE_FLAGS_DLCR6_VALUE; /* Find TX buffer size, based on the hardware dependent proto. */ switch (sc->proto_dlcr6 & FE_D6_TXBSIZ) { case FE_D6_TXBSIZ_2x2KB: sc->txb_size = 2048; break; case FE_D6_TXBSIZ_2x4KB: sc->txb_size = 4096; break; case FE_D6_TXBSIZ_2x8KB: sc->txb_size = 8192; break; default: /* Oops, we can't work with single buffer configuration. */ if (bootverbose) { if_printf(sc->ifp, "strange TXBSIZ config; fixing\n"); } sc->proto_dlcr6 &= ~FE_D6_TXBSIZ; sc->proto_dlcr6 |= FE_D6_TXBSIZ_2x2KB; sc->txb_size = 2048; break; } /* Initialize the if_media interface. */ ifmedia_init(&sc->media, 0, fe_medchange, fe_medstat); for (b = 0; bit2media[b] != 0; b++) { if (sc->mbitmap & (1 << b)) { ifmedia_add(&sc->media, bit2media[b], 0, NULL); } } for (b = 0; bit2media[b] != 0; b++) { if (sc->defmedia & (1 << b)) { ifmedia_set(&sc->media, bit2media[b]); break; } } #if 0 /* Turned off; this is called later, when the interface UPs. */ fe_medchange(sc); #endif /* Attach and stop the interface. */ FE_LOCK(sc); fe_stop(sc); FE_UNLOCK(sc); ether_ifattach(sc->ifp, sc->enaddr); error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, fe_intr, sc, &sc->irq_handle); if (error) { ether_ifdetach(ifp); mtx_destroy(&sc->lock); if_free(ifp); fe_release_resource(dev); return ENXIO; } /* Print additional info when attached. */ device_printf(dev, "type %s%s\n", sc->typestr, (sc->proto_dlcr4 & FE_D4_DSC) ?
", full duplex" : ""); if (bootverbose) { int buf, txb, bbw, sbw, ram; buf = txb = bbw = sbw = ram = -1; switch ( sc->proto_dlcr6 & FE_D6_BUFSIZ ) { case FE_D6_BUFSIZ_8KB: buf = 8; break; case FE_D6_BUFSIZ_16KB: buf = 16; break; case FE_D6_BUFSIZ_32KB: buf = 32; break; case FE_D6_BUFSIZ_64KB: buf = 64; break; } switch ( sc->proto_dlcr6 & FE_D6_TXBSIZ ) { case FE_D6_TXBSIZ_2x2KB: txb = 2; break; case FE_D6_TXBSIZ_2x4KB: txb = 4; break; case FE_D6_TXBSIZ_2x8KB: txb = 8; break; } switch ( sc->proto_dlcr6 & FE_D6_BBW ) { case FE_D6_BBW_BYTE: bbw = 8; break; case FE_D6_BBW_WORD: bbw = 16; break; } switch ( sc->proto_dlcr6 & FE_D6_SBW ) { case FE_D6_SBW_BYTE: sbw = 8; break; case FE_D6_SBW_WORD: sbw = 16; break; } switch ( sc->proto_dlcr6 & FE_D6_SRAM ) { case FE_D6_SRAM_100ns: ram = 100; break; case FE_D6_SRAM_150ns: ram = 150; break; } device_printf(dev, "SRAM %dKB %dbit %dns, TXB %dKBx2, %dbit I/O\n", buf, bbw, ram, txb, sbw); } if (sc->stability & UNSTABLE_IRQ) device_printf(dev, "warning: IRQ number may be incorrect\n"); if (sc->stability & UNSTABLE_MAC) device_printf(dev, "warning: above MAC address may be incorrect\n"); if (sc->stability & UNSTABLE_TYPE) device_printf(dev, "warning: hardware type was not validated\n"); return 0; } int fe_alloc_port(device_t dev, int size) { struct fe_softc *sc = device_get_softc(dev); struct resource *res; int rid; rid = 0; res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0ul, ~0ul, size, RF_ACTIVE); if (res) { sc->port_used = size; sc->port_res = res; return (0); } return (ENOENT); } int fe_alloc_irq(device_t dev, int flags) { struct fe_softc *sc = device_get_softc(dev); struct resource *res; int rid; rid = 0; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | flags); if (res) { sc->irq_res = res; return (0); } return (ENOENT); } void fe_release_resource(device_t dev) { struct fe_softc *sc = device_get_softc(dev); if (sc->port_res) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port_res); sc->port_res = NULL; } if (sc->irq_res) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); sc->irq_res = NULL; } } /* * Reset interface, after some (hardware) trouble is deteced. */ static void fe_reset (struct fe_softc *sc) { /* Record how many packets are lost by this accident. */ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->txb_sched + sc->txb_count); sc->mibdata.dot3StatsInternalMacTransmitErrors++; /* Put the interface into known initial state. */ fe_stop(sc); if (sc->ifp->if_flags & IFF_UP) fe_init_locked(sc); } /* * Stop everything on the interface. * * All buffered packets, both transmitting and receiving, * if any, will be lost by stopping the interface. */ void fe_stop (struct fe_softc *sc) { FE_ASSERT_LOCKED(sc); /* Disable interrupts. */ fe_outb(sc, FE_DLCR2, 0x00); fe_outb(sc, FE_DLCR3, 0x00); /* Stop interface hardware. */ DELAY(200); fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_DISABLE); DELAY(200); /* Clear all interrupt status. */ fe_outb(sc, FE_DLCR0, 0xFF); fe_outb(sc, FE_DLCR1, 0xFF); /* Put the chip in stand-by mode. */ DELAY(200); fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_POWER_DOWN); DELAY(200); /* Reset transmitter variables and interface flags. */ sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); sc->tx_timeout = 0; callout_stop(&sc->timer); sc->txb_free = sc->txb_size; sc->txb_count = 0; sc->txb_sched = 0; /* MAR loading can be delayed. */ sc->filter_change = 0; /* Call a device-specific hook. */ if (sc->stop) sc->stop(sc); } /* * Device timeout/watchdog routine. 
Entered if the device neglects to * generate an interrupt after a transmit has been started on it. */ static void fe_watchdog (void *arg) { struct fe_softc *sc = arg; FE_ASSERT_LOCKED(sc); if (sc->tx_timeout && --sc->tx_timeout == 0) { struct ifnet *ifp = sc->ifp; /* A "debug" message. */ if_printf(ifp, "transmission timeout (%d+%d)%s\n", sc->txb_sched, sc->txb_count, (ifp->if_flags & IFF_UP) ? "" : " when down"); if (ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS) == 0 && ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS) == 0) if_printf(ifp, "wrong IRQ setting in config?\n"); fe_reset(sc); } callout_reset(&sc->timer, hz, fe_watchdog, sc); } /* * Initialize device. */ static void fe_init (void * xsc) { struct fe_softc *sc = xsc; FE_LOCK(sc); fe_init_locked(sc); FE_UNLOCK(sc); } static void fe_init_locked (struct fe_softc *sc) { /* Start initializing 86960. */ /* Call a hook before we start initializing the chip. */ if (sc->init) sc->init(sc); /* * Make sure to disable the chip, also. * This may also help re-programming the chip after * hot insertion of PCMCIAs. */ DELAY(200); fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_DISABLE); DELAY(200); /* Power up the chip and select register bank for DLCRs. */ DELAY(200); fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_RBS_DLCR | FE_D7_POWER_UP); DELAY(200); /* Feed the station address. */ fe_outblk(sc, FE_DLCR8, IF_LLADDR(sc->ifp), ETHER_ADDR_LEN); /* Clear multicast address filter to receive nothing. */ fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_RBS_MAR | FE_D7_POWER_UP); fe_outblk(sc, FE_MAR8, fe_filter_nothing.data, FE_FILTER_LEN); /* Select the BMPR bank for runtime register access. */ fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_RBS_BMPR | FE_D7_POWER_UP); /* Initialize registers. */ fe_outb(sc, FE_DLCR0, 0xFF); /* Clear all bits. */ fe_outb(sc, FE_DLCR1, 0xFF); /* ditto. */ fe_outb(sc, FE_DLCR2, 0x00); fe_outb(sc, FE_DLCR3, 0x00); fe_outb(sc, FE_DLCR4, sc->proto_dlcr4); fe_outb(sc, FE_DLCR5, sc->proto_dlcr5); fe_outb(sc, FE_BMPR10, 0x00); fe_outb(sc, FE_BMPR11, FE_B11_CTRL_SKIP | FE_B11_MODE1); fe_outb(sc, FE_BMPR12, 0x00); fe_outb(sc, FE_BMPR13, sc->proto_bmpr13); fe_outb(sc, FE_BMPR14, 0x00); fe_outb(sc, FE_BMPR15, 0x00); /* Enable interrupts. */ fe_outb(sc, FE_DLCR2, FE_TMASK); fe_outb(sc, FE_DLCR3, FE_RMASK); /* Select requested media, just before enabling DLC. */ if (sc->msel) sc->msel(sc); /* Enable transmitter and receiver. */ DELAY(200); fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_ENABLE); DELAY(200); #ifdef DIAGNOSTIC /* * Make sure to empty the receive buffer. * * This may be redundant, but *if* the receive buffer were full * at this point, then the driver would hang. I have experienced * some strange hang-up just after UP. I hope the following * code solve the problem. * * I have changed the order of hardware initialization. * I think the receive buffer cannot have any packets at this * point in this version. The following code *must* be * redundant now. FIXME. * * I've heard a rumore that on some PC Card implementation of * 8696x, the receive buffer can have some data at this point. * The following message helps discovering the fact. FIXME. */ if (!(fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP)) { if_printf(sc->ifp, "receive buffer has some data after reset\n"); fe_emptybuffer(sc); } /* Do we need this here? Actually, no. I must be paranoia. */ fe_outb(sc, FE_DLCR0, 0xFF); /* Clear all bits. */ fe_outb(sc, FE_DLCR1, 0xFF); /* ditto. */ #endif /* Set 'running' flag, because we are now running. 
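fe_watchdog() above and ex_watchdog() earlier share one deadman shape: a self-rearming one-second callout decrements tx_timeout, which the transmit path arms and the completion interrupt clears; reaching zero means an expected interrupt never came, so the hardware is reset. The skeleton, with reset_hw() hypothetical:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/callout.h>

    struct wd_softc {
        struct callout timer;
        int tx_timeout;    /* armed by TX start, cleared by TX intr */
    };

    static void reset_hw(struct wd_softc *sc);    /* hypothetical */

    static void
    watchdog(void *arg)
    {
        struct wd_softc *sc = arg;

        if (sc->tx_timeout > 0 && --sc->tx_timeout == 0)
            reset_hw(sc);    /* no completion within the window */
        callout_reset(&sc->timer, hz, watchdog, sc);
    }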
*/ sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; callout_reset(&sc->timer, hz, fe_watchdog, sc); /* * At this point, the interface is running properly, * except that it receives *no* packets. We then call * fe_setmode() to tell the chip which packets to * receive, based on the if_flags and multicast group * list. It completes the initialization process. */ fe_setmode(sc); #if 0 /* ...and attempt to start output queued packets. */ /* TURNED OFF, because the semi-auto media prober wants to UP the interface keeping it idle. The upper layer will soon start the interface anyway, and there is no significant delay. */ fe_start_locked(sc->ifp); #endif } /* * This routine actually starts the transmission on the interface */ static void fe_xmit (struct fe_softc *sc) { /* * Set a timer just in case we never hear from the board again. * We use a longer timeout for multiple packet transmission. * I'm not sure this timer value is appropriate. FIXME. */ sc->tx_timeout = 1 + sc->txb_count; /* Update txb variables. */ sc->txb_sched = sc->txb_count; sc->txb_count = 0; sc->txb_free = sc->txb_size; sc->tx_excolls = 0; /* Start transmitter, passing packets in TX buffer. */ fe_outb(sc, FE_BMPR10, sc->txb_sched | FE_B10_START); } /* * Start output on interface. * We make one assumption here: * 1) that the IFF_DRV_OACTIVE flag is checked before this code is called * (i.e. that the output part of the interface is idle) */ static void fe_start (struct ifnet *ifp) { struct fe_softc *sc = ifp->if_softc; FE_LOCK(sc); fe_start_locked(ifp); FE_UNLOCK(sc); } static void fe_start_locked (struct ifnet *ifp) { struct fe_softc *sc = ifp->if_softc; struct mbuf *m; #ifdef DIAGNOSTIC /* Just a sanity check. */ if ((sc->txb_count == 0) != (sc->txb_free == sc->txb_size)) { /* * Txb_count and txb_free work together to manage the * transmission buffer. Txb_count keeps track of the * used portion of the buffer, while txb_free does the unused * portion. So, as long as the driver runs properly, * txb_count is zero if and only if txb_free is the same * as txb_size (which represents the whole buffer.) */ if_printf(ifp, "inconsistent txb variables (%d, %d)\n", sc->txb_count, sc->txb_free); /* * So, what should I do, then? * * We now know txb_count and txb_free contradict each other. * We cannot, however, tell which is wrong. Moreover, * we cannot peek the 86960 transmission buffer or * reset the transmission buffer. (In fact, we can * reset the entire interface. I don't want to do it.) * * If txb_count is incorrect, leaving it as-is will cause * sending of garbage after the next interrupt. We have to * avoid it. Hence, we reset the txb_count here. If * txb_free was incorrect, resetting txb_count just loses * some packets. We can live with it. */ sc->txb_count = 0; } #endif /* * First, see if there are buffered packets and an idle * transmitter - should never happen at this point. */ if ((sc->txb_count > 0) && (sc->txb_sched == 0)) { if_printf(ifp, "transmitter idle with %d buffered packets\n", sc->txb_count); fe_xmit(sc); } /* * Stop accepting more transmission packets temporarily, when * a filter change request is delayed. Updating the MARs on * 86960 flushes the transmission buffer, so it is delayed * until all buffered transmission packets have been sent * out. */ if (sc->filter_change) { /* * Filter change request is delayed only when the DLC is * working. The DLC soon raises an interrupt after finishing * the work. */ goto indicate_active; } for (;;) { /* * See if there is room to put another packet in the buffer.
* We *could* do a better job by peeking at the send queue to * learn the length of the next packet. The current version just * tests against the worst case (i.e., the longest packet). FIXME. * * When adding the packet-peek feature, don't forget to add a * test on txb_count against QUEUEING_MAX. * There is a small chance that the packet count exceeds * the limit. Assume the transmission buffer is 8KB (2x8KB * configuration) and an application sends a bunch of small * (i.e., minimum packet sized) packets rapidly. An 8KB * buffer can hold 130 blocks of 62 bytes each... */ if (sc->txb_free < ETHER_MAX_LEN - ETHER_CRC_LEN + FE_DATA_LEN_LEN) { /* No room. */ goto indicate_active; } #if FE_SINGLE_TRANSMISSION if (sc->txb_count > 0) { /* Just one packet per transmission buffer. */ goto indicate_active; } #endif /* * Get the next mbuf chain for a packet to send. */ IF_DEQUEUE(&sc->ifp->if_snd, m); if (m == NULL) { /* No more packets to send. */ goto indicate_inactive; } /* * Copy the mbuf chain into the transmission buffer. * txb_* variables are updated as necessary. */ fe_write_mbufs(sc, m); /* Start transmitter if it's idle. */ if ((sc->txb_count > 0) && (sc->txb_sched == 0)) fe_xmit(sc); /* * Tap off here if there is a bpf listener, * and the device is *not* in promiscuous mode. * (86960 receives self-generated packets if * and only if it is in "receive everything" * mode.) */ if (!(sc->ifp->if_flags & IFF_PROMISC)) BPF_MTAP(sc->ifp, m); m_freem(m); } indicate_inactive: /* * We are using the !OACTIVE flag to indicate to * the outside world that we can accept an * additional packet rather than that the * transmitter is _actually_ active. Indeed, the * transmitter may be active, but if we haven't * filled all the buffers with data then we still * want to accept more. */ sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; return; indicate_active: /* * The transmitter is active, and there is no room for * more outgoing packets in the transmission buffer. */ sc->ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } /* * Drop (skip) a packet from the receive buffer in 86960 memory. */ static void fe_droppacket (struct fe_softc * sc, int len) { int i; /* * The 86960 manual says that we have to read 8 bytes from the buffer * before skipping a packet, and that there must be more than 8 bytes * remaining in the buffer when issuing a skip command. * Remember, we have already read 4 bytes before coming here. */ if (len > 12) { /* Read 4 more bytes, and skip the rest of the packet. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { (void) fe_inb(sc, FE_BMPR8); (void) fe_inb(sc, FE_BMPR8); (void) fe_inb(sc, FE_BMPR8); (void) fe_inb(sc, FE_BMPR8); } else { (void) fe_inw(sc, FE_BMPR8); (void) fe_inw(sc, FE_BMPR8); } fe_outb(sc, FE_BMPR14, FE_B14_SKIP); } else { /* We should not get here unless we are receiving RUNTs. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { for (i = 0; i < len; i++) (void) fe_inb(sc, FE_BMPR8); } else { for (i = 0; i < len; i += 2) (void) fe_inw(sc, FE_BMPR8); } } } #ifdef DIAGNOSTIC /* * Empty the receive buffer. */ static void fe_emptybuffer (struct fe_softc * sc) { int i; u_char saved_dlcr5; #ifdef FE_DEBUG if_printf(sc->ifp, "emptying receive buffer\n"); #endif /* * Stop receiving packets, temporarily. */ saved_dlcr5 = fe_inb(sc, FE_DLCR5); fe_outb(sc, FE_DLCR5, sc->proto_dlcr5); DELAY(1300); /* * When we come here, the receive buffer management may * be broken, so we cannot use the skip operation. * Just discard everything in the buffer.
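* (Recapping the arithmetic behind fe_droppacket() above: the status and length words — 4 bytes — were already consumed in fe_rint() before the call, the routine reads 4 more to satisfy the read-8-first rule, and len > 12 guarantees that more than 8 bytes (len - 4 > 8) still remain when the skip command is issued; anything shorter is a RUNT and is simply drained by hand.)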
*/ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { for (i = 0; i < 65536; i++) { if (fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP) break; (void) fe_inb(sc, FE_BMPR8); } } else { for (i = 0; i < 65536; i += 2) { if (fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP) break; (void) fe_inw(sc, FE_BMPR8); } } /* * Double check. */ if (fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP) { if_printf(sc->ifp, "could not empty receive buffer\n"); /* Hmm. What should I do if this happens? FIXME. */ } /* * Restart receiving packets. */ fe_outb(sc, FE_DLCR5, saved_dlcr5); } #endif /* * Transmission interrupt handler * The control flow of this function looks silly. FIXME. */ static void fe_tint (struct fe_softc * sc, u_char tstat) { int left; int col; /* * Handle "excessive collision" interrupt. */ if (tstat & FE_D0_COLL16) { /* * Find how many packets (including this collided one) * are left unsent in transmission buffer. */ left = fe_inb(sc, FE_BMPR10); if_printf(sc->ifp, "excessive collision (%d/%d)\n", left, sc->txb_sched); /* * Clear the collision flag (in 86960) here * to avoid confusing statistics. */ fe_outb(sc, FE_DLCR0, FE_D0_COLLID); /* * Restart transmitter, skipping the * collided packet. * * We *must* skip the packet to keep network running * properly. Excessive collision error is an * indication of the network overload. If we * tried sending the same packet after excessive * collision, the network would be filled with * out-of-time packets. Packets belonging * to reliable transport (such as TCP) are resent * by some upper layer. */ fe_outb(sc, FE_BMPR11, FE_B11_CTRL_SKIP | FE_B11_MODE1); /* Update statistics. */ sc->tx_excolls++; } /* * Handle "transmission complete" interrupt. */ if (tstat & FE_D0_TXDONE) { /* * Add in total number of collisions on last * transmission. We also clear "collision occurred" flag * here. * * 86960 has a design flaw on collision count on multiple * packet transmission. When we send two or more packets * with one start command (that's what we do when the * transmission queue is crowded), 86960 informs us number * of collisions occurred on the last packet on the * transmission only. Number of collisions on previous * packets are lost. I have told that the fact is clearly * stated in the Fujitsu document. * * I considered not to mind it seriously. Collision * count is not so important, anyway. Any comments? FIXME. */ if (fe_inb(sc, FE_DLCR0) & FE_D0_COLLID) { /* Clear collision flag. */ fe_outb(sc, FE_DLCR0, FE_D0_COLLID); /* Extract collision count from 86960. */ col = fe_inb(sc, FE_DLCR4); col = (col & FE_D4_COL) >> FE_D4_COL_SHIFT; if (col == 0) { /* * Status register indicates collisions, * while the collision count is zero. * This can happen after multiple packet * transmission, indicating that one or more * previous packet(s) had been collided. * * Since the accurate number of collisions * has been lost, we just guess it as 1; * Am I too optimistic? FIXME. */ col = 1; } if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, col); if (col == 1) sc->mibdata.dot3StatsSingleCollisionFrames++; else sc->mibdata.dot3StatsMultipleCollisionFrames++; sc->mibdata.dot3StatsCollFrequencies[col-1]++; } /* * Update transmission statistics. * Be sure to reflect number of excessive collisions. 
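* (The bookkeeping below in one line: of the txb_sched packets handed to the chip in this batch, col = tx_excolls were dropped after 16 collisions each, so OPACKETS is credited txb_sched - col, OERRORS and dot3StatsExcessiveCollisions get col, and COLLISIONS gets a further col * 16 on top of the per-packet counts added above.)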
*/ col = sc->tx_excolls; if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, sc->txb_sched - col); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, col); if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, col * 16); sc->mibdata.dot3StatsExcessiveCollisions += col; sc->mibdata.dot3StatsCollFrequencies[15] += col; sc->txb_sched = 0; /* * The transmitter is no longer active. * Reset the output active flag and the watchdog timer. */ sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->tx_timeout = 0; /* * If more data is ready to transmit in the buffer, start * transmitting it. Otherwise keep the transmitter idle, * even if more data is queued. This gives the receive * process a slight priority. */ if (sc->txb_count > 0) fe_xmit(sc); } } /* * Ethernet interface receiver interrupt. */ static void fe_rint (struct fe_softc * sc, u_char rstat) { u_short len; u_char status; int i; /* * Update statistics if this interrupt is caused by an error. * Note that, when the system is not sufficiently fast, the * receive interrupt might not be acknowledged immediately. If * one or more erroneous frames were received before this routine * was scheduled, they are ignored, and the following error stats * under-report the real values. */ if (rstat & (FE_D1_OVRFLO | FE_D1_CRCERR | FE_D1_ALGERR | FE_D1_SRTPKT)) { if (rstat & FE_D1_OVRFLO) sc->mibdata.dot3StatsInternalMacReceiveErrors++; if (rstat & FE_D1_CRCERR) sc->mibdata.dot3StatsFCSErrors++; if (rstat & FE_D1_ALGERR) sc->mibdata.dot3StatsAlignmentErrors++; #if 0 /* The reference MAC receiver defined in 802.3 silently ignores short frames (RUNTs) without notifying the upper layer. RFC 1650 (dot3 MIB) is based on 802.3, and it has no stats entry for RUNTs... */ if (rstat & FE_D1_SRTPKT) sc->mibdata.dot3StatsFrameTooShorts++; /* :-) */ #endif if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); } /* * MB86960 has a flag indicating "receive queue empty." * We just loop, checking the flag, to pull out all received * packets. * * We limit the number of iterations to avoid an infinite loop. * The upper bound is set to an unrealistically high value. */ for (i = 0; i < FE_MAX_RECV_COUNT * 2; i++) { /* Stop the iteration if 86960 indicates no packets. */ if (fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP) return; /* * Extract a receive status byte. * When our 86960 is in 16 bit bus access mode, we have to * use inw() to get the status byte. The significant * value is returned in the lower 8 bits. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { status = fe_inb(sc, FE_BMPR8); (void) fe_inb(sc, FE_BMPR8); } else { status = (u_char) fe_inw(sc, FE_BMPR8); } /* * Extract the packet length. * It is the sum of the header (14 bytes) and the payload. * The CRC has been stripped off by the 86960. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { len = fe_inb(sc, FE_BMPR8); len |= (fe_inb(sc, FE_BMPR8) << 8); } else { len = fe_inw(sc, FE_BMPR8); } /* * As our 86960 is programmed to ignore errored frames, * we must not see any error indication in the * receive buffer. So, any error condition is a * serious error, e.g., the receive buffer pointers * being out of sync. */ if ((status & 0xF0) != 0x20 || len > ETHER_MAX_LEN - ETHER_CRC_LEN || len < ETHER_MIN_LEN - ETHER_CRC_LEN) { if_printf(sc->ifp, "RX buffer out-of-sync\n"); if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); sc->mibdata.dot3StatsInternalMacReceiveErrors++; fe_reset(sc); return; } /* * Go get a packet. */ if (fe_get_packet(sc, len) < 0) { /* * Negative return from fe_get_packet() * indicates no available mbuf. We stop * receiving packets, even if there are more * in the buffer.
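* (For reference, the out-of-sync window tested above, with constants expanded: a healthy status byte carries 0x20 in its high nibble, and since the 86960 strips the 4-byte CRC a sane length lies between ETHER_MIN_LEN - ETHER_CRC_LEN = 60 and ETHER_MAX_LEN - ETHER_CRC_LEN = 1514 inclusive; anything else means the buffer pointers have drifted and only fe_reset() can recover.)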
We hope we can get more * mbuf next time. */ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); sc->mibdata.dot3StatsMissedFrames++; fe_droppacket(sc, len); return; } /* Successfully received a packet. Update stat. */ if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1); } /* Maximum number of frames has been received. Something strange is happening here... */ if_printf(sc->ifp, "unusual receive flood\n"); sc->mibdata.dot3StatsInternalMacReceiveErrors++; fe_reset(sc); } /* * Ethernet interface interrupt processor */ static void fe_intr (void *arg) { struct fe_softc *sc = arg; u_char tstat, rstat; int loop_count = FE_MAX_LOOP; FE_LOCK(sc); /* Loop until there are no more new interrupt conditions. */ while (loop_count-- > 0) { /* * Get interrupt conditions, masking unneeded flags. */ tstat = fe_inb(sc, FE_DLCR0) & FE_TMASK; rstat = fe_inb(sc, FE_DLCR1) & FE_RMASK; if (tstat == 0 && rstat == 0) { FE_UNLOCK(sc); return; } /* * Reset the conditions we are acknowledging. */ fe_outb(sc, FE_DLCR0, tstat); fe_outb(sc, FE_DLCR1, rstat); /* * Handle transmitter interrupts. */ if (tstat) fe_tint(sc, tstat); /* * Handle receiver interrupts */ if (rstat) fe_rint(sc, rstat); /* * Update the multicast address filter if it is * needed and possible. We do it now, because * we can make sure the transmission buffer is empty, * and there is a good chance that the receive queue * is empty. It will minimize the possibility of * packet loss. */ if (sc->filter_change && sc->txb_count == 0 && sc->txb_sched == 0) { fe_loadmar(sc); sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } /* * If it looks like the transmitter can take more data, * attempt to start output on the interface. This is done * after handling the receiver interrupt to give the * receive operation priority. * * BTW, I'm not sure in what case the OACTIVE is on at * this point. Is the following test redundant? * * No. This routine polls for both transmitter and * receiver interrupts. 86960 can raise a receiver * interrupt when the transmission buffer is full. */ if ((sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) fe_start_locked(sc->ifp); } FE_UNLOCK(sc); if_printf(sc->ifp, "too many loops\n"); } /* * Process an ioctl request. This code needs some work - it looks * pretty ugly. */ static int fe_ioctl (struct ifnet * ifp, u_long command, caddr_t data) { struct fe_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (command) { case SIOCSIFFLAGS: /* * Switch interface state between "running" and * "stopped", reflecting the UP flag. */ FE_LOCK(sc); if (sc->ifp->if_flags & IFF_UP) { if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) fe_init_locked(sc); } else { if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) fe_stop(sc); } /* * Promiscuous and/or multicast flags may have changed, * so reprogram the multicast filter and/or receive mode. */ fe_setmode(sc); FE_UNLOCK(sc); /* Done. */ break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware filter * accordingly. */ FE_LOCK(sc); fe_setmode(sc); FE_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: /* Let if_media to handle these commands and to call us back. */ error = ifmedia_ioctl(ifp, ifr, &sc->media, command); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } /* * Retrieve packet from receive buffer and send to the next level up via * ether_input(). * Returns 0 if success, -1 if error (i.e., mbuf allocation failure). 
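*
* Note that the cluster-attach step in this function carries the one functional hunk this revision makes to fe(4): MCLGET() now yields a success/failure value, so callers no longer probe m_flags for M_EXT by hand. The old idiom was
*
*	MCLGET(m, M_NOWAIT);
*	if (!(m->m_flags & M_EXT)) {
*		m_freem(m);
*		return -1;
*	}
*
* versus the new form, where the macro itself reports the result:
*
*	if (!(MCLGET(m, M_NOWAIT))) {
*		m_freem(m);
*		return -1;
*	}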
*/ static int fe_get_packet (struct fe_softc * sc, u_short len) { struct ifnet *ifp = sc->ifp; struct ether_header *eh; struct mbuf *m; FE_ASSERT_LOCKED(sc); /* * NFS wants the data to be aligned to a word (4 byte) * boundary. The Ethernet header is 14 bytes long. There is a * 2-byte gap. */ #define NFS_MAGIC_OFFSET 2 /* * This function assumes that an Ethernet packet fits in an * mbuf (with a cluster attached when necessary.) On FreeBSD * 2.0 for x86, which is the primary target of this driver, an * mbuf cluster has 4096 bytes, and we are happy. On ancient * BSDs, such as vanilla 4.3 for 386, the cluster size was 1024, * however. If the following #error message is printed at * compile time, you need to rewrite this function. */ #if ( MCLBYTES < ETHER_MAX_LEN - ETHER_CRC_LEN + NFS_MAGIC_OFFSET ) #error "Too small MCLBYTES to use fe driver." #endif /* * Our strategy has one more problem. There is a policy on * mbuf cluster allocation. It says that we must have at * least MINCLSIZE (208 bytes on FreeBSD 2.0 for x86) to * allocate a cluster. For a packet of a size between * (MHLEN - 2) and (MINCLSIZE - 2), our code violates the rule... * On the other hand, the current code is short, simple, * and fast. It does nothing harmful; it just wastes * some memory. Any comments? FIXME. */ /* Allocate an mbuf with packet header info. */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return -1; /* Attach a cluster if this packet doesn't fit in a normal mbuf. */ if (len > MHLEN - NFS_MAGIC_OFFSET) { - MCLGET(m, M_NOWAIT); - if (!(m->m_flags & M_EXT)) { + if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); return -1; } } /* Initialize packet header info. */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = len; /* Set the length of this packet. */ m->m_len = len; /* The following silliness is to make NFS happy */ m->m_data += NFS_MAGIC_OFFSET; /* Get (actually just point to) the header part. */ eh = mtod(m, struct ether_header *); /* Get a packet. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { fe_insb(sc, FE_BMPR8, (u_int8_t *)eh, len); } else { fe_insw(sc, FE_BMPR8, (u_int16_t *)eh, (len + 1) >> 1); } /* Feed the packet to the upper layer. */ FE_UNLOCK(sc); (*ifp->if_input)(ifp, m); FE_LOCK(sc); return 0; } /* * Write an mbuf chain to the transmission buffer memory using 16 bit PIO. * The space consumed in the buffer, including the length word, is charged * against txb_free. * * If an mbuf chain is too long for an Ethernet frame, it is not sent. * Packets shorter than the Ethernet minimum are legal, and we pad them * before sending them out. An exception is "partial" packets which are * shorter than the mandatory Ethernet header. */ static void fe_write_mbufs (struct fe_softc *sc, struct mbuf *m) { u_short length, len; struct mbuf *mp; u_char *data; u_short savebyte; /* WARNING: Architecture dependent! */ #define NO_PENDING_BYTE 0xFFFF static u_char padding [ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_HDR_LEN]; #ifdef DIAGNOSTIC /* First, count up the total number of bytes to copy */ length = 0; for (mp = m; mp != NULL; mp = mp->m_next) length += mp->m_len; /* Check if this matches the one in the packet header. */ if (length != m->m_pkthdr.len) { if_printf(sc->ifp, "packet length mismatch? (%d/%d)\n", length, m->m_pkthdr.len); } #else /* Just use the length value in the packet header. */ length = m->m_pkthdr.len; #endif #ifdef DIAGNOSTIC /* * We should never send big packets. If such a packet is passed, * it is a bug in an upper layer. We just ignore it. * Partial (too-short) packets are not sent, either.
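*
* (Looking ahead to the 16-bit copy loop below: the FIFO takes only whole words, so an odd trailing byte of one mbuf must be paired with the leading byte of the next. A self-contained user-space model of that pairing — push_bytes() and emit_word() are hypothetical stand-ins for the driver's loop and fe_outw(sc, FE_BMPR8, ...):
*
*	#define NO_PENDING_BYTE	0xFFFF
*	static unsigned int pending = NO_PENDING_BYTE;
*
*	static void
*	push_bytes(const unsigned char *p, int n, void (*emit_word)(unsigned int))
*	{
*		if (pending != NO_PENDING_BYTE && n > 0) {
*			emit_word(pending | (*p << 8));
*			p++, n--;
*			pending = NO_PENDING_BYTE;
*		}
*		for (; n > 1; p += 2, n -= 2)
*			emit_word(p[0] | (p[1] << 8));
*		if (n == 1)
*			pending = *p;
*	}
*
* After the whole chain has been pushed, a leftover pending byte is flushed with one final word write, exactly as the driver does with savebyte.)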
*/ if (length < ETHER_HDR_LEN || length > ETHER_MAX_LEN - ETHER_CRC_LEN) { if_printf(sc->ifp, "got an out-of-spec packet (%u bytes) to send\n", length); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); sc->mibdata.dot3StatsInternalMacTransmitErrors++; return; } #endif /* * Put the length word for this frame. * Does 86960 accept odd length? -- Yes. * Do we need to pad the length to minimum size by ourselves? * -- Generally yes. But for (or will be) the last * packet in the transmission buffer, we can skip the * padding process. It may gain performance slightly. FIXME. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { len = max(length, ETHER_MIN_LEN - ETHER_CRC_LEN); fe_outb(sc, FE_BMPR8, len & 0x00ff); fe_outb(sc, FE_BMPR8, (len & 0xff00) >> 8); } else { fe_outw(sc, FE_BMPR8, max(length, ETHER_MIN_LEN - ETHER_CRC_LEN)); } /* * Update buffer status now. * Truncate the length up to an even number, since we use outw(). */ if ((sc->proto_dlcr6 & FE_D6_SBW) != FE_D6_SBW_BYTE) { length = (length + 1) & ~1; } sc->txb_free -= FE_DATA_LEN_LEN + max(length, ETHER_MIN_LEN - ETHER_CRC_LEN); sc->txb_count++; /* * Transfer the data from mbuf chain to the transmission buffer. * MB86960 seems to require that data be transferred as words, and * only words. So that we require some extra code to patch * over odd-length mbufs. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { /* 8-bit cards are easy. */ for (mp = m; mp != 0; mp = mp->m_next) { if (mp->m_len) fe_outsb(sc, FE_BMPR8, mtod(mp, caddr_t), mp->m_len); } } else { /* 16-bit cards are a pain. */ savebyte = NO_PENDING_BYTE; for (mp = m; mp != 0; mp = mp->m_next) { /* Ignore empty mbuf. */ len = mp->m_len; if (len == 0) continue; /* Find the actual data to send. */ data = mtod(mp, caddr_t); /* Finish the last byte. */ if (savebyte != NO_PENDING_BYTE) { fe_outw(sc, FE_BMPR8, savebyte | (*data << 8)); data++; len--; savebyte = NO_PENDING_BYTE; } /* output contiguous words */ if (len > 1) { fe_outsw(sc, FE_BMPR8, (u_int16_t *)data, len >> 1); data += len & ~1; len &= 1; } /* Save a remaining byte, if there is one. */ if (len > 0) savebyte = *data; } /* Spit the last byte, if the length is odd. */ if (savebyte != NO_PENDING_BYTE) fe_outw(sc, FE_BMPR8, savebyte); } /* Pad to the Ethernet minimum length, if the packet is too short. */ if (length < ETHER_MIN_LEN - ETHER_CRC_LEN) { if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { fe_outsb(sc, FE_BMPR8, padding, ETHER_MIN_LEN - ETHER_CRC_LEN - length); } else { fe_outsw(sc, FE_BMPR8, (u_int16_t *)padding, (ETHER_MIN_LEN - ETHER_CRC_LEN - length) >> 1); } } } /* * Compute the multicast address filter from the * list of multicast addresses we need to listen to. */ static struct fe_filter fe_mcaf ( struct fe_softc *sc ) { int index; struct fe_filter filter; struct ifmultiaddr *ifma; filter = fe_filter_nothing; if_maddr_rlock(sc->ifp); TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; index = ether_crc32_le(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; #ifdef FE_DEBUG if_printf(sc->ifp, "hash(%6D) == %d\n", enm->enm_addrlo , ":", index); #endif filter.data[index >> 3] |= 1 << (index & 7); } if_maddr_runlock(sc->ifp); return ( filter ); } /* * Calculate a new "multicast packet filter" and put the 86960 * receiver in appropriate mode. 
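*
* A condensed model of how fe_mcaf() above computes each filter bit; the two statements are from the code, only the framing is ours:
*
*	index = ether_crc32_le(lladdr, ETHER_ADDR_LEN) >> 26;
*	filter.data[index >> 3] |= 1 << (index & 7);
*
* The top six bits of the little-endian CRC32 of the address pick one of 64 filter bits (index 0..63); index >> 3 selects the byte within the 8-byte MAR image and index & 7 the bit within that byte.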
*/ static void fe_setmode (struct fe_softc *sc) { /* * If the interface is not running, we postpone the update * process for receive modes and multicast address filter * until the interface is restarted. It reduces some * complicated job on maintaining chip states. (Earlier versions * of this driver had a bug on that point...) * * To complete the trick, fe_init() calls fe_setmode() after * restarting the interface. */ if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) return; /* * Promiscuous mode is handled separately. */ if (sc->ifp->if_flags & IFF_PROMISC) { /* * Program 86960 to receive all packets on the segment * including those directed to other stations. * Multicast filter stored in MARs are ignored * under this setting, so we don't need to update it. * * Promiscuous mode in FreeBSD 2 is used solely by * BPF, and BPF only listens to valid (no error) packets. * So, we ignore erroneous ones even in this mode. * (Older versions of fe driver mistook the point.) */ fe_outb(sc, FE_DLCR5, sc->proto_dlcr5 | FE_D5_AFM0 | FE_D5_AFM1); sc->filter_change = 0; return; } /* * Turn the chip to the normal (non-promiscuous) mode. */ fe_outb(sc, FE_DLCR5, sc->proto_dlcr5 | FE_D5_AFM1); /* * Find the new multicast filter value. */ if (sc->ifp->if_flags & IFF_ALLMULTI) sc->filter = fe_filter_all; else sc->filter = fe_mcaf(sc); sc->filter_change = 1; /* * We have to update the multicast filter in the 86960, A.S.A.P. * * Note that the DLC (Data Link Control unit, i.e. transmitter * and receiver) must be stopped when feeding the filter, and * DLC trashes all packets in both transmission and receive * buffers when stopped. * * To reduce the packet loss, we delay the filter update * process until buffers are empty. */ if (sc->txb_sched == 0 && sc->txb_count == 0 && !(fe_inb(sc, FE_DLCR1) & FE_D1_PKTRDY)) { /* * Buffers are (apparently) empty. Load * the new filter value into MARs now. */ fe_loadmar(sc); } else { /* * Buffers are not empty. Mark that we have to update * the MARs. The new filter will be loaded by feintr() * later. */ } } /* * Load a new multicast address filter into MARs. * * The caller must have acquired the softc lock before fe_loadmar. * This function starts the DLC upon return. So it can be called only * when the chip is working, i.e., from the driver's point of view, when * a device is RUNNING. (I mistook the point in previous versions.) */ static void fe_loadmar (struct fe_softc * sc) { /* Stop the DLC (transmitter and receiver). */ DELAY(200); fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_DISABLE); DELAY(200); /* Select register bank 1 for MARs. */ fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_RBS_MAR | FE_D7_POWER_UP); /* Copy filter value into the registers. */ fe_outblk(sc, FE_MAR8, sc->filter.data, FE_FILTER_LEN); /* Restore the bank selection for BMPRs (i.e., runtime registers). */ fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_RBS_BMPR | FE_D7_POWER_UP); /* Restart the DLC. */ DELAY(200); fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_ENABLE); DELAY(200); /* We have just updated the filter. */ sc->filter_change = 0; } /* Change the media selection. */ static int fe_medchange (struct ifnet *ifp) { struct fe_softc *sc = (struct fe_softc *)ifp->if_softc; #ifdef DIAGNOSTIC /* If_media should not pass any request for a media which this interface doesn't support. 
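* (Returning to fe_setmode() above, the load-now-or-defer decision compresses to one condition: the MARs are written immediately only when sc->txb_sched == 0 && sc->txb_count == 0 && !(fe_inb(sc, FE_DLCR1) & FE_D1_PKTRDY) — nothing scheduled, nothing buffered, nothing waiting in the receive buffer; otherwise filter_change stays set and the interrupt handler performs the load once the buffers drain.)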
*/ int b; for (b = 0; bit2media[b] != 0; b++) { if (bit2media[b] == sc->media.ifm_media) break; } if (((1 << b) & sc->mbitmap) == 0) { if_printf(sc->ifp, "got an unsupported media request (0x%x)\n", sc->media.ifm_media); return EINVAL; } #endif /* We don't actually change media when the interface is down. fe_init() will do the job, instead. Should we also wait until the transmission buffer being empty? Changing the media when we are sending a frame will cause two garbages on wires, one on old media and another on new. FIXME */ FE_LOCK(sc); if (sc->ifp->if_flags & IFF_UP) { if (sc->msel) sc->msel(sc); } FE_UNLOCK(sc); return 0; } /* I don't know how I can support media status callback... FIXME. */ static void fe_medstat (struct ifnet *ifp, struct ifmediareq *ifmr) { struct fe_softc *sc = ifp->if_softc; ifmr->ifm_active = sc->media.ifm_media; } Index: head/sys/dev/hifn/hifn7751.c =================================================================== --- head/sys/dev/hifn/hifn7751.c (revision 276749) +++ head/sys/dev/hifn/hifn7751.c (revision 276750) @@ -1,2931 +1,2929 @@ /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */ /*- * Invertex AEON / Hifn 7751 driver * Copyright (c) 1999 Invertex Inc. All rights reserved. * Copyright (c) 1999 Theo de Raadt * Copyright (c) 2000-2001 Network Security Technologies, Inc. * http://www.netsec.net * Copyright (c) 2003 Hifn Inc. * * This driver is based on a previous driver by Invertex, for which they * requested: Please send any comments, feedback, bug-fixes, or feature * requests to software@invertex.com. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. */ #include __FBSDID("$FreeBSD$"); /* * Driver for various Hifn encryption processors. 
*/ #include "opt_hifn.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #ifdef HIFN_RNDTEST #include #endif #include #include #ifdef HIFN_VULCANDEV #include #include static struct cdevsw vulcanpk_cdevsw; /* forward declaration */ #endif /* * Prototypes and count for the pci_device structure */ static int hifn_probe(device_t); static int hifn_attach(device_t); static int hifn_detach(device_t); static int hifn_suspend(device_t); static int hifn_resume(device_t); static int hifn_shutdown(device_t); static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *); static int hifn_freesession(device_t, u_int64_t); static int hifn_process(device_t, struct cryptop *, int); static device_method_t hifn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hifn_probe), DEVMETHOD(device_attach, hifn_attach), DEVMETHOD(device_detach, hifn_detach), DEVMETHOD(device_suspend, hifn_suspend), DEVMETHOD(device_resume, hifn_resume), DEVMETHOD(device_shutdown, hifn_shutdown), /* crypto device methods */ DEVMETHOD(cryptodev_newsession, hifn_newsession), DEVMETHOD(cryptodev_freesession,hifn_freesession), DEVMETHOD(cryptodev_process, hifn_process), DEVMETHOD_END }; static driver_t hifn_driver = { "hifn", hifn_methods, sizeof (struct hifn_softc) }; static devclass_t hifn_devclass; DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0); MODULE_DEPEND(hifn, crypto, 1, 1, 1); #ifdef HIFN_RNDTEST MODULE_DEPEND(hifn, rndtest, 1, 1, 1); #endif static void hifn_reset_board(struct hifn_softc *, int); static void hifn_reset_puc(struct hifn_softc *); static void hifn_puc_wait(struct hifn_softc *); static int hifn_enable_crypto(struct hifn_softc *); static void hifn_set_retry(struct hifn_softc *sc); static void hifn_init_dma(struct hifn_softc *); static void hifn_init_pci_registers(struct hifn_softc *); static int hifn_sramsize(struct hifn_softc *); static int hifn_dramsize(struct hifn_softc *); static int hifn_ramtype(struct hifn_softc *); static void hifn_sessions(struct hifn_softc *); static void hifn_intr(void *); static u_int hifn_write_command(struct hifn_command *, u_int8_t *); static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt); static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *); static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int); static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *); static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *); static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *); static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *); static int hifn_init_pubrng(struct hifn_softc *); static void hifn_rng(void *); static void hifn_tick(void *); static void hifn_abort(struct hifn_softc *); static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *); static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t); static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t); static __inline u_int32_t READ_REG_0(struct hifn_softc *sc, bus_size_t reg) { u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg); sc->sc_bar0_lastreg = (bus_size_t) -1; return (v); } #define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val) static __inline u_int32_t READ_REG_1(struct hifn_softc *sc, bus_size_t reg) { u_int32_t v = 
bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg); sc->sc_bar1_lastreg = (bus_size_t) -1; return (v); } #define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val) static SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters"); #ifdef HIFN_DEBUG static int hifn_debug = 0; SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug, 0, "control debugging msgs"); #endif static struct hifn_stats hifnstats; SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats, hifn_stats, "driver statistics"); static int hifn_maxbatch = 1; SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch, 0, "max ops to batch w/o interrupt"); /* * Probe for a supported device. The PCI vendor and device * IDs are used to detect devices we know how to handle. */ static int hifn_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX && pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON) return (BUS_PROBE_DEFAULT); if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)) return (BUS_PROBE_DEFAULT); if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751) return (BUS_PROBE_DEFAULT); return (ENXIO); } static void hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static const char* hifn_partname(struct hifn_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_HIFN: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_HIFN_6500: return "Hifn 6500"; case PCI_PRODUCT_HIFN_7751: return "Hifn 7751"; case PCI_PRODUCT_HIFN_7811: return "Hifn 7811"; case PCI_PRODUCT_HIFN_7951: return "Hifn 7951"; case PCI_PRODUCT_HIFN_7955: return "Hifn 7955"; case PCI_PRODUCT_HIFN_7956: return "Hifn 7956"; } return "Hifn unknown-part"; case PCI_VENDOR_INVERTEX: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON"; } return "Invertex unknown-part"; case PCI_VENDOR_NETSEC: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751"; } return "NetSec unknown-part"; } return "Unknown-vendor unknown-part"; } static void default_harvest(struct rndtest_state *rsp, void *buf, u_int count) { random_harvest(buf, count, count*NBBY/2, RANDOM_PURE_HIFN); } static u_int checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max) { if (v > max) { device_printf(dev, "Warning, %s %u out of range, " "using max %u\n", what, v, max); v = max; } else if (v < min) { device_printf(dev, "Warning, %s %u out of range, " "using min %u\n", what, v, min); v = min; } return v; } /* * Select PLL configuration for 795x parts. This is complicated in * that we cannot determine the optimal parameters without user input. * The reference clock is derived from an external clock through a * multiplier. The external clock is either the host bus (i.e. PCI) * or an external clock generator. When using the PCI bus we assume * the clock is either 33 or 66 MHz; for an external source we cannot * tell the speed. * * PLL configuration is done with a string: "pci" for PCI bus, or "ext" * for an external source, followed by the frequency. We calculate * the appropriate multiplier and PLL register contents accordingly. 
* When no configuration is given we default to "pci66" since that * always will allow the card to work. If a card is using the PCI * bus clock and in a 33MHz slot then it will be operating at half * speed until the correct information is provided. * * We use a default setting of "ext66" because according to Mike Ham * of HiFn, almost every board in existence has an external crystal * populated at 66Mhz. Using PCI can be a problem on modern motherboards, * because PCI33 can have clocks from 0 to 33Mhz, and some have * non-PCI-compliant spread-spectrum clocks, which can confuse the pll. */ static void hifn_getpllconfig(device_t dev, u_int *pll) { const char *pllspec; u_int freq, mul, fl, fh; u_int32_t pllconfig; char *nxt; if (resource_string_value("hifn", device_get_unit(dev), "pllconfig", &pllspec)) pllspec = "ext66"; fl = 33, fh = 66; pllconfig = 0; if (strncmp(pllspec, "ext", 3) == 0) { pllspec += 3; pllconfig |= HIFN_PLL_REF_SEL; switch (pci_get_device(dev)) { case PCI_PRODUCT_HIFN_7955: case PCI_PRODUCT_HIFN_7956: fl = 20, fh = 100; break; #ifdef notyet case PCI_PRODUCT_HIFN_7954: fl = 20, fh = 66; break; #endif } } else if (strncmp(pllspec, "pci", 3) == 0) pllspec += 3; freq = strtoul(pllspec, &nxt, 10); if (nxt == pllspec) freq = 66; else freq = checkmaxmin(dev, "frequency", freq, fl, fh); /* * Calculate multiplier. We target a Fck of 266 MHz, * allowing only even values, possibly rounded down. * Multipliers > 8 must set the charge pump current. */ mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12); pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT; if (mul > 8) pllconfig |= HIFN_PLL_IS; *pll = pllconfig; } /* * Attach an interface that successfully probed. */ static int hifn_attach(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); caddr_t kva; int rseg, rid; char rbase; u_int16_t ena, rev; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF); /* XXX handle power management */ /* * The 7951 and 795x have a random number generator and * public key support; note this. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC; /* * The 7811 has a random number generator and * we also note it's identity 'cuz of some quirks. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && pci_get_device(dev) == PCI_PRODUCT_HIFN_7811) sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG; /* * The 795x parts support AES. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) { sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES; /* * Select PLL configuration. This depends on the * bus and board design and must be manually configured * if the default setting is unacceptable. */ hifn_getpllconfig(dev, &sc->sc_pllconfig); } /* * Setup PCI resources. Note that we record the bus * tag and handle for each register mapping, this is * used by the READ_REG_0, WRITE_REG_0, READ_REG_1, * and WRITE_REG_1 macros throughout the driver. 
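* (Stepping back to hifn_getpllconfig() above, worked numbers for the multiplier: the target Fck is 266 MHz and mul = (266 / freq) & ~1 is clamped to [2, 12]. For "pci66", 266 / 66 = 4, so mul = 4 and the ND field becomes 4 / 2 - 1 = 1; for "ext20", 266 / 20 = 13 rounds down to 12, and since mul > 8 the charge-pump bit HIFN_PLL_IS is set as well. The "%ux mult" figure printed at attach time is 2 + 2 * ND, i.e. 4x for the pci66 case.)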
*/ pci_enable_busmaster(dev); rid = HIFN_BAR0; sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_bar0res == NULL) { device_printf(dev, "cannot map bar%d register space\n", 0); goto fail_pci; } sc->sc_st0 = rman_get_bustag(sc->sc_bar0res); sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res); sc->sc_bar0_lastreg = (bus_size_t) -1; rid = HIFN_BAR1; sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_bar1res == NULL) { device_printf(dev, "cannot map bar%d register space\n", 1); goto fail_io0; } sc->sc_st1 = rman_get_bustag(sc->sc_bar1res); sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res); sc->sc_bar1_lastreg = (bus_size_t) -1; hifn_set_retry(sc); /* * Setup the area where the Hifn DMA's descriptors * and associated data structures. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* PCI parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HIFN_MAX_DMALEN, /* maxsize */ MAX_SCATTER, /* nsegments */ HIFN_MAX_SEGLEN, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &sc->sc_dmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto fail_io1; } if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { device_printf(dev, "cannot create dma map\n"); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { device_printf(dev, "cannot alloc dma buffer\n"); bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva, sizeof (*sc->sc_dma), hifn_dmamap_cb, &sc->sc_dma_physaddr, BUS_DMA_NOWAIT)) { device_printf(dev, "cannot load dma map\n"); bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } sc->sc_dma = (struct hifn_dma *)kva; bzero(sc->sc_dma, sizeof(*sc->sc_dma)); KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!")); KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!")); KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!")); KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!")); /* * Reset the board and do the ``secret handshake'' * to enable the crypto support. Then complete the * initialization procedure by setting up the interrupt * and hooking in to the system crypto support so we'll * get used for system services like the crypto device, * IPsec, RNG device, etc. */ hifn_reset_board(sc, 0); if (hifn_enable_crypto(sc) != 0) { device_printf(dev, "crypto enabling failed\n"); goto fail_mem; } hifn_reset_puc(sc); hifn_init_dma(sc); hifn_init_pci_registers(sc); /* XXX can't dynamically determine ram type for 795x; force dram */ if (sc->sc_flags & HIFN_IS_7956) sc->sc_drammodel = 1; else if (hifn_ramtype(sc)) goto fail_mem; if (sc->sc_drammodel == 0) hifn_sramsize(sc); else hifn_dramsize(sc); /* * Workaround for NetSec 7751 rev A: half ram size because two * of the address lines were left floating */ if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 && pci_get_revid(dev) == 0x61) /*XXX???*/ sc->sc_ramsize >>= 1; /* * Arrange the interrupt line. 
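* (One step back: the descriptor-area setup above is the canonical four-step busdma sequence — bus_dma_tag_create(), bus_dmamap_create(), bus_dmamem_alloc(), bus_dmamap_load() — where each failure path tears down exactly what the earlier steps built, one cleanup call more per step. The load callback can be trivial because the area is allocated as a single segment; from earlier in this file:
*
*	static void
*	hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
*	{
*		bus_addr_t *paddr = (bus_addr_t *) arg;
*		*paddr = segs->ds_addr;
*	}
*
* so the loaded bus address lands directly in sc->sc_dma_physaddr.)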
*/ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto fail_mem; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is marked appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, hifn_intr, sc, &sc->sc_intrhand)) { device_printf(dev, "could not setup interrupt\n"); goto fail_intr2; } hifn_sessions(sc); /* * NB: Keep only the low 16 bits; this masks the chip id * from the 7951. */ rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff; rseg = sc->sc_ramsize / 1024; rbase = 'K'; if (sc->sc_ramsize >= (1024 * 1024)) { rbase = 'M'; rseg /= 1024; } device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram", hifn_partname(sc), rev, rseg, rbase, sc->sc_drammodel ? 'd' : 's'); if (sc->sc_flags & HIFN_IS_7956) printf(", pll=0x%x<%s clk, %ux mult>", sc->sc_pllconfig, sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci", 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11)); printf("\n"); sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto fail_intr; } WRITE_REG_0(sc, HIFN_0_PUCNFG, READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID); ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; switch (ena) { case HIFN_PUSTAT_ENA_2: crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0); if (sc->sc_flags & HIFN_HAS_AES) crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); /*FALLTHROUGH*/ case HIFN_PUSTAT_ENA_1: crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); break; } bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) hifn_init_pubrng(sc); callout_init(&sc->sc_tickto, CALLOUT_MPSAFE); callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); return (0); fail_intr: bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); fail_intr2: /* XXX don't store rid */ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); fail_mem: bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); /* Turn off DMA polling */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); fail_io1: bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); fail_io0: bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); fail_pci: mtx_destroy(&sc->sc_mtx); return (ENXIO); } /* * Detach an interface that successfully probed. */ static int hifn_detach(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); KASSERT(sc != NULL, ("hifn_detach: null software carrier!")); /* disable interrupts */ WRITE_REG_1(sc, HIFN_1_DMA_IER, 0); /*XXX other resources */ callout_stop(&sc->sc_tickto); callout_stop(&sc->sc_rngto); #ifdef HIFN_RNDTEST if (sc->sc_rndtest) rndtest_detach(sc->sc_rndtest); #endif /* Turn off DMA polling */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); crypto_unregister_all(sc->sc_cid); bus_generic_detach(dev); /*XXX should be no children, right? 
*/ bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); /* XXX don't store rid */ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); mtx_destroy(&sc->sc_mtx); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int hifn_shutdown(device_t dev) { #ifdef notyet hifn_stop(device_get_softc(dev)); #endif return (0); } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int hifn_suspend(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); #ifdef notyet hifn_stop(sc); #endif sc->sc_suspended = 1; return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int hifn_resume(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); #ifdef notyet /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) rl_init(sc); #endif sc->sc_suspended = 0; return (0); } static int hifn_init_pubrng(struct hifn_softc *sc) { u_int32_t r; int i; #ifdef HIFN_RNDTEST sc->sc_rndtest = rndtest_attach(sc->sc_dev); if (sc->sc_rndtest) sc->sc_harvest = rndtest_harvest; else sc->sc_harvest = default_harvest; #else sc->sc_harvest = default_harvest; #endif if ((sc->sc_flags & HIFN_IS_7811) == 0) { /* Reset 7951 public key/rng engine */ WRITE_REG_1(sc, HIFN_1_PUB_RESET, READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET); for (i = 0; i < 100; i++) { DELAY(1000); if ((READ_REG_1(sc, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0) break; } if (i == 100) { device_printf(sc->sc_dev, "public key init failed\n"); return (1); } } /* Enable the rng, if available */ if (sc->sc_flags & HIFN_HAS_RNG) { if (sc->sc_flags & HIFN_IS_7811) { r = READ_REG_1(sc, HIFN_1_7811_RNGENA); if (r & HIFN_7811_RNGENA_ENA) { r &= ~HIFN_7811_RNGENA_ENA; WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); } WRITE_REG_1(sc, HIFN_1_7811_RNGCFG, HIFN_7811_RNGCFG_DEFL); r |= HIFN_7811_RNGENA_ENA; WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); } else WRITE_REG_1(sc, HIFN_1_RNG_CONFIG, READ_REG_1(sc, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA); sc->sc_rngfirst = 1; if (hz >= 100) sc->sc_rnghz = hz / 100; else sc->sc_rnghz = 1; callout_init(&sc->sc_rngto, CALLOUT_MPSAFE); callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); } /* Enable public key engine, if available */ if (sc->sc_flags & HIFN_HAS_PUBLIC) { WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); sc->sc_dmaier |= HIFN_DMAIER_PUBDONE; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); #ifdef HIFN_VULCANDEV sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "vulcanpk"); sc->sc_pkdev->si_drv1 = sc; #endif } return (0); } static void hifn_rng(void *vsc) { #define RANDOM_BITS(n) (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0 struct hifn_softc *sc = vsc; u_int32_t sts, num[2]; int i; if (sc->sc_flags & HIFN_IS_7811) { /* ONLY VALID ON 7811!!!! 
*/ for (i = 0; i < 5; i++) { sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS); if (sts & HIFN_7811_RNGSTS_UFL) { device_printf(sc->sc_dev, "RNG underflow: disabling\n"); return; } if ((sts & HIFN_7811_RNGSTS_RDY) == 0) break; /* * There are at least two words in the RNG FIFO * at this point. */ num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); /* NB: discard first data read */ if (sc->sc_rngfirst) sc->sc_rngfirst = 0; else (*sc->sc_harvest)(sc->sc_rndtest, num, sizeof (num)); } } else { num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA); /* NB: discard first data read */ if (sc->sc_rngfirst) sc->sc_rngfirst = 0; else (*sc->sc_harvest)(sc->sc_rndtest, num, sizeof (num[0])); } callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); #undef RANDOM_BITS } static void hifn_puc_wait(struct hifn_softc *sc) { int i; int reg = HIFN_0_PUCTRL; if (sc->sc_flags & HIFN_IS_7956) { reg = HIFN_0_PUCTRL2; } for (i = 5000; i > 0; i--) { DELAY(1); if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET)) break; } if (!i) device_printf(sc->sc_dev, "proc unit did not reset\n"); } /* * Reset the processing unit. */ static void hifn_reset_puc(struct hifn_softc *sc) { /* Reset processing unit */ int reg = HIFN_0_PUCTRL; if (sc->sc_flags & HIFN_IS_7956) { reg = HIFN_0_PUCTRL2; } WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA); hifn_puc_wait(sc); } /* * Set the Retry and TRDY registers; note that we set them to * zero because the 7811 locks up when forced to retry (section * 3.6 of "Specification Update SU-0014-04"). It is not clear whether we * should do this for all Hifn parts, but it doesn't seem to hurt. */ static void hifn_set_retry(struct hifn_softc *sc) { /* NB: RETRY only responds to 8-bit reads/writes */ pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1); pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 1); } /* * Reset the board. Values in the registers are left as-is * from the reset (i.e. initial values are assigned elsewhere). */ static void hifn_reset_board(struct hifn_softc *sc, int full) { u_int32_t reg; /* * Set polling in the DMA configuration register to zero. 0x7 avoids * resetting the board and zeros out the other fields. */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); /* * Now that polling has been disabled, we have to wait 1 ms * before resetting the board.
*/ DELAY(1000); /* Reset the DMA unit */ if (full) { WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); DELAY(1000); } else { WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET); hifn_reset_puc(sc); } KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!")); bzero(sc->sc_dma, sizeof(*sc->sc_dma)); /* Bring dma unit out of reset */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); hifn_puc_wait(sc); hifn_set_retry(sc); if (sc->sc_flags & HIFN_IS_7811) { for (reg = 0; reg < 1000; reg++) { if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) & HIFN_MIPSRST_CRAMINIT) break; DELAY(1000); } if (reg == 1000) printf(": cram init timeout\n"); } else { /* set up DMA configuration register #2 */ /* turn off all PK and BAR0 swaps */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG2, (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)| (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)| (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)| (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT)); } } static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt) { int i; u_int32_t v; for (i = 0; i < cnt; i++) { /* get the parity */ v = a & 0x80080125; v ^= v >> 16; v ^= v >> 8; v ^= v >> 4; v ^= v >> 2; v ^= v >> 1; a = (v & 1) ^ (a << 1); } return a; } struct pci2id { u_short pci_vendor; u_short pci_prod; char card_id[13]; }; static struct pci2id pci2id[] = { { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { /* * Other vendors share this PCI ID as well, such as * http://www.powercrypt.com, and obviously they also * use the same key. */ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, }; /* * Checks to see if crypto is already enabled. If crypto isn't enable, * "hifn_enable_crypto" is called to enable it. The check is important, * as enabling crypto twice will lock the board. */ static int hifn_enable_crypto(struct hifn_softc *sc) { u_int32_t dmacfg, ramcfg, encl, addr, i; char *offtbl = NULL; for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) { if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) && pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) { offtbl = pci2id[i].card_id; break; } } if (offtbl == NULL) { device_printf(sc->sc_dev, "Unknown card!\n"); return (1); } ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG); dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG); /* * The RAM config register's encrypt level bit needs to be set before * every read performed on the encryption level register. */ WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; /* * Make sure we don't re-unlock. Two unlocks kills chip until the * next reboot. 
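* (On the unlock handshake below: each card_id byte is folded in through hifn_next_signature(), defined above, which is one step of a 32-bit LFSR. Per iteration it masks the taps, v = a & 0x80080125, computes that word's parity by xor-folding — v ^= v >> 16, then >> 8, >> 4, >> 2, >> 1 leaves the parity in bit 0 — and shifts it in as the new low bit of a.)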
*/ if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) { #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "Strong crypto already enabled!\n"); #endif goto report; } if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) { #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "Unknown encryption level 0x%x\n", encl); #endif return 1; } WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); DELAY(1000); addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1); DELAY(1000); WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0); DELAY(1000); for (i = 0; i <= 12; i++) { addr = hifn_next_signature(addr, offtbl[i] + 0x101); WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr); DELAY(1000); } WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; #ifdef HIFN_DEBUG if (hifn_debug) { if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2) device_printf(sc->sc_dev, "Engine is permanently " "locked until next system reset!\n"); else device_printf(sc->sc_dev, "Engine enabled " "successfully!\n"); } #endif report: WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg); WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg); switch (encl) { case HIFN_PUSTAT_ENA_1: case HIFN_PUSTAT_ENA_2: break; case HIFN_PUSTAT_ENA_0: default: device_printf(sc->sc_dev, "disabled"); break; } return 0; } /* * Give initial values to the registers listed in the "Register Space" * section of the HIFN Software Development reference manual. */ static void hifn_init_pci_registers(struct hifn_softc *sc) { /* write fixed values needed by the Initialization registers */ WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD); WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER); /* write all 4 ring address registers */ WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); DELAY(2000); /* write status register */ WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST | HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST | HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST | HIFN_DMACSR_S_WAIT | HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST | HIFN_DMACSR_C_WAIT | HIFN_DMACSR_ENGINE | ((sc->sc_flags & HIFN_HAS_PUBLIC) ? HIFN_DMACSR_PUBDONE : 0) | ((sc->sc_flags & HIFN_IS_7811) ? HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0)); sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0; sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT | HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER | HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT | ((sc->sc_flags & HIFN_IS_7811) ? 
HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0); sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); if (sc->sc_flags & HIFN_IS_7956) { u_int32_t pll; WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | HIFN_PUCNFG_TCALLPHASES | HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32); /* turn off the clocks and insure bypass is set */ pll = READ_REG_1(sc, HIFN_1_PLL); pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL)) | HIFN_PLL_BP | HIFN_PLL_MBSET; WRITE_REG_1(sc, HIFN_1_PLL, pll); DELAY(10*1000); /* 10ms */ /* change configuration */ pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig; WRITE_REG_1(sc, HIFN_1_PLL, pll); DELAY(10*1000); /* 10ms */ /* disable bypass */ pll &= ~HIFN_PLL_BP; WRITE_REG_1(sc, HIFN_1_PLL, pll); /* enable clocks with new configuration */ pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL; WRITE_REG_1(sc, HIFN_1_PLL, pll); } else { WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES | HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 | (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM)); } WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST | ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) | ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL)); } /* * The maximum number of sessions supported by the card * is dependent on the amount of context ram, which * encryption algorithms are enabled, and how compression * is configured. This should be configured before this * routine is called. */ static void hifn_sessions(struct hifn_softc *sc) { u_int32_t pucnfg; int ctxsize; pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG); if (pucnfg & HIFN_PUCNFG_COMPSING) { if (pucnfg & HIFN_PUCNFG_ENCCNFG) ctxsize = 128; else ctxsize = 512; /* * 7955/7956 has internal context memory of 32K */ if (sc->sc_flags & HIFN_IS_7956) sc->sc_maxses = 32768 / ctxsize; else sc->sc_maxses = 1 + ((sc->sc_ramsize - 32768) / ctxsize); } else sc->sc_maxses = sc->sc_ramsize / 16384; if (sc->sc_maxses > 2048) sc->sc_maxses = 2048; } /* * Determine ram type (sram or dram). Board should be just out of a reset * state when this is called. 
*/ static int hifn_ramtype(struct hifn_softc *sc) { u_int8_t data[8], dataexpect[8]; int i; for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = 0x55; if (hifn_writeramaddr(sc, 0, data)) return (-1); if (hifn_readramaddr(sc, 0, data)) return (-1); if (bcmp(data, dataexpect, sizeof(data)) != 0) { sc->sc_drammodel = 1; return (0); } for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = 0xaa; if (hifn_writeramaddr(sc, 0, data)) return (-1); if (hifn_readramaddr(sc, 0, data)) return (-1); if (bcmp(data, dataexpect, sizeof(data)) != 0) { sc->sc_drammodel = 1; return (0); } return (0); } #define HIFN_SRAM_MAX (32 << 20) #define HIFN_SRAM_STEP_SIZE 16384 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE) static int hifn_sramsize(struct hifn_softc *sc) { u_int32_t a; u_int8_t data[8]; u_int8_t dataexpect[sizeof(data)]; int32_t i; for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = i ^ 0x5a; for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) { a = i * HIFN_SRAM_STEP_SIZE; bcopy(&i, data, sizeof(i)); hifn_writeramaddr(sc, a, data); } for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) { a = i * HIFN_SRAM_STEP_SIZE; bcopy(&i, dataexpect, sizeof(i)); if (hifn_readramaddr(sc, a, data) < 0) return (0); if (bcmp(data, dataexpect, sizeof(data)) != 0) return (0); sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE; } return (0); } /* * XXX For dram boards, one should really try all of the * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG * is already set up correctly. */ static int hifn_dramsize(struct hifn_softc *sc) { u_int32_t cnfg; if (sc->sc_flags & HIFN_IS_7956) { /* * 7955/7956 have a fixed internal ram of only 32K. */ sc->sc_ramsize = 32768; } else { cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) & HIFN_PUCNFG_DRAMMASK; sc->sc_ramsize = 1 << ((cnfg >> 13) + 18); } return (0); } static void hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp) { struct hifn_dma *dma = sc->sc_dma; if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) { sc->sc_cmdi = 0; dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *cmdp = sc->sc_cmdi++; sc->sc_cmdk = sc->sc_cmdi; if (sc->sc_srci == HIFN_D_SRC_RSIZE) { sc->sc_srci = 0; dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *srcp = sc->sc_srci++; sc->sc_srck = sc->sc_srci; if (sc->sc_dsti == HIFN_D_DST_RSIZE) { sc->sc_dsti = 0; dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *dstp = sc->sc_dsti++; sc->sc_dstk = sc->sc_dsti; if (sc->sc_resi == HIFN_D_RES_RSIZE) { sc->sc_resi = 0; dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *resp = sc->sc_resi++; sc->sc_resk = sc->sc_resi; } static int hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) { struct hifn_dma *dma = sc->sc_dma; hifn_base_command_t wc; const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; int r, cmdi, resi, srci, dsti; wc.masks = htole16(3 << 13); wc.session_num = htole16(addr >> 14); wc.total_source_count = htole16(8); wc.total_dest_count = htole16(addr & 0x3fff); hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); WRITE_REG_1(sc, HIFN_1_DMA_CSR, 
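The descending-then-ascending probe order in hifn_sramsize() is what bounds the fitted RAM: each 16K step is stamped with its own index from high addresses down, so on a part whose address lines wrap, the low steps are written last and the ascending readback mismatches at the first aliased step. A self-contained simulation of that idea (editor's sketch; the 256K figure and the ram_read/ram_write helpers are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STEP    16384
#define MAXMEM  (1 << 20)               /* largest size probed for */
#define REALMEM (256 * 1024)            /* pretend fitted SRAM; wraps above */

static uint8_t sram[REALMEM];

static void
ram_write(uint32_t a, const uint8_t b[8])
{
        memcpy(&sram[a % REALMEM], b, 8);       /* modulo models aliasing */
}

static void
ram_read(uint32_t a, uint8_t b[8])
{
        memcpy(b, &sram[a % REALMEM], 8);
}

int
main(void)
{
        uint8_t data[8], expect[8];
        uint32_t size = 0;
        int32_t i;

        for (i = MAXMEM / STEP - 1; i >= 0; i--) {      /* stamp high->low */
                memset(data, 0x5a, sizeof(data));
                memcpy(data, &i, sizeof(i));
                ram_write(i * STEP, data);
        }
        for (i = 0; i < MAXMEM / STEP; i++) {           /* verify low->high */
                memset(expect, 0x5a, sizeof(expect));
                memcpy(expect, &i, sizeof(i));
                ram_read(i * STEP, data);
                if (memcmp(data, expect, sizeof(data)) != 0)
                        break;                          /* first aliased step */
                size = (i + 1) * STEP;
        }
        printf("detected %u bytes\n", size);            /* prints 262144 */
        return (0);
}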
HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); /* build write command */ bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc; bcopy(data, &dma->test_src, sizeof(dma->test_src)); dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_src)); dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_dst)); dma->cmdr[cmdi].l = htole32(16 | masks); dma->srcr[srci].l = htole32(8 | masks); dma->dstr[dsti].l = htole32(4 | masks); dma->resr[resi].l = htole32(4 | masks); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); for (r = 10000; r >= 0; r--) { DELAY(10); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) break; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } if (r == 0) { device_printf(sc->sc_dev, "writeramaddr -- " "result[%d](addr %d) still valid\n", resi, addr); r = -1; return (-1); } else r = 0; WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); return (r); } static int hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) { struct hifn_dma *dma = sc->sc_dma; hifn_base_command_t rc; const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; int r, cmdi, srci, dsti, resi; rc.masks = htole16(2 << 13); rc.session_num = htole16(addr >> 14); rc.total_source_count = htole16(addr & 0x3fff); rc.total_dest_count = htole16(8); hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc; dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_src)); dma->test_src = 0; dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_dst)); dma->test_dst = 0; dma->cmdr[cmdi].l = htole32(8 | masks); dma->srcr[srci].l = htole32(8 | masks); dma->dstr[dsti].l = htole32(8 | masks); dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); for (r = 10000; r >= 0; r--) { DELAY(10); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) break; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } if (r == 0) { device_printf(sc->sc_dev, "readramaddr -- " "result[%d](addr %d) still valid\n", resi, addr); r = -1; } else { r = 0; bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); } WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); return (r); } /* * Initialize the descriptor rings. 
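Both RAM test routines share one completion poll: arm the descriptors, then spin until the chip clears HIFN_D_VALID in the result descriptor, bracketing every peek with bus_dmamap_sync(). Note in passing that the driver's r == 0 test does not line up exactly with the loop bounds (a true timeout falls through with r == -1); the sketch below uses explicit returns instead (editor's restatement, with DELAY and the syncs elided):

#include <stdint.h>

#define VALID   0x80000000u     /* stands in for htole32(HIFN_D_VALID) */

static int
poll_result(volatile uint32_t *resr_l)
{
        int r;

        for (r = 10000; r >= 0; r--) {
                /* DELAY(10) plus bus_dmamap_sync() in the driver */
                if ((*resr_l & VALID) == 0)
                        return (0);     /* chip consumed the descriptor */
        }
        return (-1);                    /* still valid: timed out */
}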
*/ static void hifn_init_dma(struct hifn_softc *sc) { struct hifn_dma *dma = sc->sc_dma; int i; hifn_set_retry(sc); /* initialize static pointer values */ for (i = 0; i < HIFN_D_CMD_RSIZE; i++) dma->cmdr[i].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, command_bufs[i][0])); for (i = 0; i < HIFN_D_RES_RSIZE; i++) dma->resr[i].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, result_bufs[i][0])); dma->cmdr[HIFN_D_CMD_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); dma->srcr[HIFN_D_SRC_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); dma->dstr[HIFN_D_DST_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); dma->resr[HIFN_D_RES_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); sc->sc_cmdu = sc->sc_srcu = sc->sc_dstu = sc->sc_resu = 0; sc->sc_cmdi = sc->sc_srci = sc->sc_dsti = sc->sc_resi = 0; sc->sc_cmdk = sc->sc_srck = sc->sc_dstk = sc->sc_resk = 0; } /* * Writes out the raw command buffer space. Returns the * command buffer size. */ static u_int hifn_write_command(struct hifn_command *cmd, u_int8_t *buf) { u_int8_t *buf_pos; hifn_base_command_t *base_cmd; hifn_mac_command_t *mac_cmd; hifn_crypt_command_t *cry_cmd; int using_mac, using_crypt, len, ivlen; u_int32_t dlen, slen; buf_pos = buf; using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC; using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT; base_cmd = (hifn_base_command_t *)buf_pos; base_cmd->masks = htole16(cmd->base_masks); slen = cmd->src_mapsize; if (cmd->sloplen) dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t); else dlen = cmd->dst_mapsize; base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO); base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO); dlen >>= 16; slen >>= 16; base_cmd->session_num = htole16( ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) | ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M)); buf_pos += sizeof(hifn_base_command_t); if (using_mac) { mac_cmd = (hifn_mac_command_t *)buf_pos; dlen = cmd->maccrd->crd_len; mac_cmd->source_count = htole16(dlen & 0xffff); dlen >>= 16; mac_cmd->masks = htole16(cmd->mac_masks | ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M)); mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip); mac_cmd->reserved = 0; buf_pos += sizeof(hifn_mac_command_t); } if (using_crypt) { cry_cmd = (hifn_crypt_command_t *)buf_pos; dlen = cmd->enccrd->crd_len; cry_cmd->source_count = htole16(dlen & 0xffff); dlen >>= 16; cry_cmd->masks = htole16(cmd->cry_masks | ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M)); cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip); cry_cmd->reserved = 0; buf_pos += sizeof(hifn_crypt_command_t); } if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) { bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH); buf_pos += HIFN_MAC_KEY_LENGTH; } if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) { switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { case HIFN_CRYPT_CMD_ALG_3DES: bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH); buf_pos += HIFN_3DES_KEY_LENGTH; break; case HIFN_CRYPT_CMD_ALG_DES: bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH); buf_pos += HIFN_DES_KEY_LENGTH; break; case HIFN_CRYPT_CMD_ALG_RC4: len = 256; do { int clen; clen = MIN(cmd->cklen, len); bcopy(cmd->ck, buf_pos, clen); len -= clen; buf_pos += clen; } while (len > 0); bzero(buf_pos, 4); buf_pos += 4; break; case HIFN_CRYPT_CMD_ALG_AES: /* * AES keys are variable 128, 192 and * 256 bits (16, 
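The 32-bit source/destination byte counts in hifn_write_command() do not fit the 16-bit command fields, so the low 16 bits go into total_{source,dest}_count and the overflow bits are folded into session_num through the SRCLEN/DSTLEN shift-and-mask pairs. A round trip of that packing (editor's sketch; the shift and mask values here are placeholders, not the chip's real layout):

#include <stdint.h>
#include <stdio.h>

#define SRCLEN_S        14      /* placeholder for HIFN_BASE_CMD_SRCLEN_S */
#define SRCLEN_M        0xc000u /* placeholder for HIFN_BASE_CMD_SRCLEN_M */

int
main(void)
{
        uint32_t slen = 0x2abcd;        /* an 18-bit source length */
        uint16_t lo = slen & 0xffff;    /* -> total_source_count */
        uint16_t hi = ((slen >> 16) << SRCLEN_S) & SRCLEN_M;
        uint32_t rt = ((uint32_t)(hi >> SRCLEN_S) << 16) | lo;

        printf("0x%x -> lo 0x%x hi 0x%x -> 0x%x\n", slen, lo, hi, rt);
        return (0);
}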
24 and 32 bytes). */ bcopy(cmd->ck, buf_pos, cmd->cklen); buf_pos += cmd->cklen; break; } } if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) { switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { case HIFN_CRYPT_CMD_ALG_AES: ivlen = HIFN_AES_IV_LENGTH; break; default: ivlen = HIFN_IV_LENGTH; break; } bcopy(cmd->iv, buf_pos, ivlen); buf_pos += ivlen; } if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) { bzero(buf_pos, 8); buf_pos += 8; } return (buf_pos - buf); } static int hifn_dmamap_aligned(struct hifn_operand *op) { int i; for (i = 0; i < op->nsegs; i++) { if (op->segs[i].ds_addr & 3) return (0); if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3)) return (0); } return (1); } static __inline int hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx) { struct hifn_dma *dma = sc->sc_dma; if (++idx == HIFN_D_DST_RSIZE) { dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); idx = 0; } return (idx); } static int hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd) { struct hifn_dma *dma = sc->sc_dma; struct hifn_operand *dst = &cmd->dst; u_int32_t p, l; int idx, used = 0, i; idx = sc->sc_dsti; for (i = 0; i < dst->nsegs - 1; i++) { dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; idx = hifn_dmamap_dstwrap(sc, idx); } if (cmd->sloplen == 0) { p = dst->segs[i].ds_addr; l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | dst->segs[i].ds_len; } else { p = sc->sc_dma_physaddr + offsetof(struct hifn_dma, slop[cmd->slopidx]); l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | sizeof(u_int32_t); if ((dst->segs[i].ds_len - cmd->sloplen) != 0) { dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_MASKDONEIRQ | (dst->segs[i].ds_len - cmd->sloplen)); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; idx = hifn_dmamap_dstwrap(sc, idx); } } dma->dstr[idx].p = htole32(p); dma->dstr[idx].l = htole32(l); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; idx = hifn_dmamap_dstwrap(sc, idx); sc->sc_dsti = idx; sc->sc_dstu += used; return (idx); } static __inline int hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx) { struct hifn_dma *dma = sc->sc_dma; if (++idx == HIFN_D_SRC_RSIZE) { dma->srcr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); idx = 0; } return (idx); } static int hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd) { struct hifn_dma *dma = sc->sc_dma; struct hifn_operand *src = &cmd->src; int idx, i; u_int32_t last = 0; idx = sc->sc_srci; for (i = 0; i < src->nsegs; i++) { if (i == src->nsegs - 1) last = HIFN_D_LAST; dma->srcr[idx].p = htole32(src->segs[i].ds_addr); dma->srcr[idx].l = htole32(src->segs[i].ds_len | HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last); HIFN_SRCR_SYNC(sc, idx, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); idx = hifn_dmamap_srcwrap(sc, idx); } sc->sc_srci = idx; sc->sc_srcu += src->nsegs; return (idx); } static void hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error) { struct hifn_operand *op = arg; KASSERT(nsegs <= MAX_SCATTER, ("hifn_op_cb: too many DMA segments (%u > %u) " "returned when mapping operand", nsegs, 
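The *wrap helpers above encode the ring discipline used throughout this driver: each ring carries one descriptor beyond its nominal size, kept armed as VALID|JUMP back to slot 0, so the chip follows the chain without the driver ever touching a ring-base register mid-flight. Stripped of the DMA syncs, the index arithmetic is just (editor's sketch):

static __inline int
ring_wrap(int idx, int rsize)
{
        /* slot 'rsize' is the jump descriptor; the driver re-arms it here */
        if (++idx == rsize)
                idx = 0;
        return (idx);
}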
MAX_SCATTER)); op->mapsize = mapsize; op->nsegs = nsegs; bcopy(seg, op->segs, nsegs * sizeof (seg[0])); } static int hifn_crypto( struct hifn_softc *sc, struct hifn_command *cmd, struct cryptop *crp, int hint) { struct hifn_dma *dma = sc->sc_dma; u_int32_t cmdlen, csr; int cmdi, resi, err = 0; /* * need 1 cmd, and 1 res * * NB: check this first since it's easy. */ HIFN_LOCK(sc); if ((sc->sc_cmdu + 1) > HIFN_D_CMD_RSIZE || (sc->sc_resu + 1) > HIFN_D_RES_RSIZE) { #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "cmd/result exhaustion, cmdu %u resu %u\n", sc->sc_cmdu, sc->sc_resu); } #endif hifnstats.hst_nomem_cr++; HIFN_UNLOCK(sc); return (ERESTART); } if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) { hifnstats.hst_nomem_map++; HIFN_UNLOCK(sc); return (ENOMEM); } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map, cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_load++; err = ENOMEM; goto err_srcmap1; } } else if (crp->crp_flags & CRYPTO_F_IOV) { if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map, cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_load++; err = ENOMEM; goto err_srcmap1; } } else { err = EINVAL; goto err_srcmap1; } if (hifn_dmamap_aligned(&cmd->src)) { cmd->sloplen = cmd->src_mapsize & 3; cmd->dst = cmd->src; } else { if (crp->crp_flags & CRYPTO_F_IOV) { err = EINVAL; goto err_srcmap; } else if (crp->crp_flags & CRYPTO_F_IMBUF) { int totlen, len; struct mbuf *m, *m0, *mlast; KASSERT(cmd->dst_m == cmd->src_m, ("hifn_crypto: dst_m initialized improperly")); hifnstats.hst_unaligned++; /* * Source is not aligned on a longword boundary. * Copy the data to insure alignment. If we fail * to allocate mbufs or clusters while doing this * we return ERESTART so the operation is requeued * at the crypto later, but only if there are * ops already posted to the hardware; otherwise we * have no guarantee that we'll be re-entered. */ totlen = cmd->src_mapsize; if (cmd->src_m->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m0, M_NOWAIT, MT_DATA); if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_NOWAIT)) { m_free(m0); m0 = NULL; } } else { len = MLEN; MGET(m0, M_NOWAIT, MT_DATA); } if (m0 == NULL) { hifnstats.hst_nomem_mbuf++; err = sc->sc_cmdu ? ERESTART : ENOMEM; goto err_srcmap; } if (totlen >= MINCLSIZE) { - MCLGET(m0, M_NOWAIT); - if ((m0->m_flags & M_EXT) == 0) { + if (!(MCLGET(m0, M_NOWAIT))) { hifnstats.hst_nomem_mcl++; err = sc->sc_cmdu ? ERESTART : ENOMEM; m_freem(m0); goto err_srcmap; } len = MCLBYTES; } totlen -= len; m0->m_pkthdr.len = m0->m_len = len; mlast = m0; while (totlen > 0) { MGET(m, M_NOWAIT, MT_DATA); if (m == NULL) { hifnstats.hst_nomem_mbuf++; err = sc->sc_cmdu ? ERESTART : ENOMEM; m_freem(m0); goto err_srcmap; } len = MLEN; if (totlen >= MINCLSIZE) { - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { hifnstats.hst_nomem_mcl++; err = sc->sc_cmdu ? 
ERESTART : ENOMEM; mlast->m_next = m; m_freem(m0); goto err_srcmap; } len = MCLBYTES; } m->m_len = len; m0->m_pkthdr.len += len; totlen -= len; mlast->m_next = m; mlast = m; } cmd->dst_m = m0; } } if (cmd->dst_map == NULL) { if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) { hifnstats.hst_nomem_map++; err = ENOMEM; goto err_srcmap; } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_map++; err = ENOMEM; goto err_dstmap1; } } else if (crp->crp_flags & CRYPTO_F_IOV) { if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map, cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_load++; err = ENOMEM; goto err_dstmap1; } } } #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n", READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu, cmd->src_nsegs, cmd->dst_nsegs); } #endif if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_PREREAD); } /* * need N src, and N dst */ if ((sc->sc_srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE || (sc->sc_dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) { #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "src/dst exhaustion, srcu %u+%u dstu %u+%u\n", sc->sc_srcu, cmd->src_nsegs, sc->sc_dstu, cmd->dst_nsegs); } #endif hifnstats.hst_nomem_sd++; err = ERESTART; goto err_dstmap; } if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) { sc->sc_cmdi = 0; dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } cmdi = sc->sc_cmdi++; cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]); HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE); /* .p for command/result already set */ dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); sc->sc_cmdu++; /* * We don't worry about missing an interrupt (which a "command wait" * interrupt salvages us from), unless there is more than one command * in the queue. */ if (sc->sc_cmdu > 1) { sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); } hifnstats.hst_ipackets++; hifnstats.hst_ibytes += cmd->src_mapsize; hifn_dmamap_load_src(sc, cmd); /* * Unlike other descriptors, we don't mask done interrupt from * result descriptor. 
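The two hunks just above are the point of this change for hifn(4): MCLGET() now reports whether a cluster was actually attached, so callers test its value directly instead of re-checking m->m_flags for M_EXT afterwards. The converted allocation pattern in isolation (editor's sketch of the new idiom, kernel context assumed):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

static int
alloc_cluster_mbuf(struct mbuf **mp)
{
        struct mbuf *m;

        MGETHDR(m, M_NOWAIT, MT_DATA);
        if (m == NULL)
                return (ENOMEM);
        if (!(MCLGET(m, M_NOWAIT))) {   /* new style: use the return value */
                m_freem(m);             /* no cluster; discard the mbuf */
                return (ENOMEM);
        }
        *mp = m;
        return (0);
}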
*/ #ifdef HIFN_DEBUG if (hifn_debug) printf("load res\n"); #endif if (sc->sc_resi == HIFN_D_RES_RSIZE) { sc->sc_resi = 0; dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } resi = sc->sc_resi++; KASSERT(sc->sc_hifn_commands[resi] == NULL, ("hifn_crypto: command slot %u busy", resi)); sc->sc_hifn_commands[resi] = cmd; HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) { dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); sc->sc_curbatch++; if (sc->sc_curbatch > hifnstats.hst_maxbatch) hifnstats.hst_maxbatch = sc->sc_curbatch; hifnstats.hst_totbatch++; } else { dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_VALID | HIFN_D_LAST); sc->sc_curbatch = 0; } HIFN_RESR_SYNC(sc, resi, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->sc_resu++; if (cmd->sloplen) cmd->slopidx = resi; hifn_dmamap_load_dst(sc, cmd); csr = 0; if (sc->sc_c_busy == 0) { csr |= HIFN_DMACSR_C_CTRL_ENA; sc->sc_c_busy = 1; } if (sc->sc_s_busy == 0) { csr |= HIFN_DMACSR_S_CTRL_ENA; sc->sc_s_busy = 1; } if (sc->sc_r_busy == 0) { csr |= HIFN_DMACSR_R_CTRL_ENA; sc->sc_r_busy = 1; } if (sc->sc_d_busy == 0) { csr |= HIFN_DMACSR_D_CTRL_ENA; sc->sc_d_busy = 1; } if (csr) WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr); #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "command: stat %8x ier %8x\n", READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER)); } #endif sc->sc_active = 5; HIFN_UNLOCK(sc); KASSERT(err == 0, ("hifn_crypto: success with error %u", err)); return (err); /* success */ err_dstmap: if (cmd->src_map != cmd->dst_map) bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); err_dstmap1: if (cmd->src_map != cmd->dst_map) bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); err_srcmap: if (crp->crp_flags & CRYPTO_F_IMBUF) { if (cmd->src_m != cmd->dst_m) m_freem(cmd->dst_m); } bus_dmamap_unload(sc->sc_dmat, cmd->src_map); err_srcmap1: bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); HIFN_UNLOCK(sc); return (err); } static void hifn_tick(void* vsc) { struct hifn_softc *sc = vsc; HIFN_LOCK(sc); if (sc->sc_active == 0) { u_int32_t r = 0; if (sc->sc_cmdu == 0 && sc->sc_c_busy) { sc->sc_c_busy = 0; r |= HIFN_DMACSR_C_CTRL_DIS; } if (sc->sc_srcu == 0 && sc->sc_s_busy) { sc->sc_s_busy = 0; r |= HIFN_DMACSR_S_CTRL_DIS; } if (sc->sc_dstu == 0 && sc->sc_d_busy) { sc->sc_d_busy = 0; r |= HIFN_DMACSR_D_CTRL_DIS; } if (sc->sc_resu == 0 && sc->sc_r_busy) { sc->sc_r_busy = 0; r |= HIFN_DMACSR_R_CTRL_DIS; } if (r) WRITE_REG_1(sc, HIFN_1_DMA_CSR, r); } else sc->sc_active--; HIFN_UNLOCK(sc); callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); } static void hifn_intr(void *arg) { struct hifn_softc *sc = arg; struct hifn_dma *dma; u_int32_t dmacsr, restart; int i, u; dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR); /* Nothing in the DMA unit interrupted */ if ((dmacsr & sc->sc_dmaier) == 0) return; HIFN_LOCK(sc); dma = sc->sc_dma; #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n", dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier, sc->sc_cmdi, sc->sc_srci, sc->sc_dsti, sc->sc_resi, sc->sc_cmdk, sc->sc_srck, sc->sc_dstk, sc->sc_resk, sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu); } #endif WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier); if ((sc->sc_flags & HIFN_HAS_PUBLIC) && (dmacsr & HIFN_DMACSR_PUBDONE)) WRITE_REG_1(sc, 
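The CRYPTO_HINT_MORE branch above implements interrupt coalescing: when the caller promises more requests, the result descriptor keeps HIFN_D_MASKDONEIRQ set, so up to hifn_maxbatch completions ride on a single interrupt, and the batch counter resets whenever a descriptor is posted with the done interrupt unmasked. Restated (editor's sketch; the mask value is a placeholder):

#include <stdint.h>

#define MASKDONEIRQ     0x02000000u     /* placeholder for HIFN_D_MASKDONEIRQ */

static uint32_t
result_flags(int more_coming, int *batchp, int maxbatch)
{
        if (more_coming && *batchp < maxbatch) {
                (*batchp)++;            /* coalesce: no IRQ for this one */
                return (MASKDONEIRQ);
        }
        *batchp = 0;                    /* let this completion interrupt */
        return (0);
}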
HIFN_1_PUB_STATUS, READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE); restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER); if (restart) device_printf(sc->sc_dev, "overrun %x\n", dmacsr); if (sc->sc_flags & HIFN_IS_7811) { if (dmacsr & HIFN_DMACSR_ILLR) device_printf(sc->sc_dev, "illegal read\n"); if (dmacsr & HIFN_DMACSR_ILLW) device_printf(sc->sc_dev, "illegal write\n"); } restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); if (restart) { device_printf(sc->sc_dev, "abort, resetting.\n"); hifnstats.hst_abort++; hifn_abort(sc); HIFN_UNLOCK(sc); return; } if ((dmacsr & HIFN_DMACSR_C_WAIT) && (sc->sc_cmdu == 0)) { /* * If no slots to process and we receive a "waiting on * command" interrupt, we disable the "waiting on command" * (by clearing it). */ sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); } /* clear the rings */ i = sc->sc_resk; u = sc->sc_resu; while (u != 0) { HIFN_RESR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->resr[i].l & htole32(HIFN_D_VALID)) { HIFN_RESR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } if (i != HIFN_D_RES_RSIZE) { struct hifn_command *cmd; u_int8_t *macbuf = NULL; HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD); cmd = sc->sc_hifn_commands[i]; KASSERT(cmd != NULL, ("hifn_intr: null command slot %u", i)); sc->sc_hifn_commands[i] = NULL; if (cmd->base_masks & HIFN_BASE_CMD_MAC) { macbuf = dma->result_bufs[i]; macbuf += 12; } hifn_callback(sc, cmd, macbuf); hifnstats.hst_opackets++; u--; } if (++i == (HIFN_D_RES_RSIZE + 1)) i = 0; } sc->sc_resk = i; sc->sc_resu = u; i = sc->sc_srck; u = sc->sc_srcu; while (u != 0) { if (i == HIFN_D_SRC_RSIZE) i = 0; HIFN_SRCR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->srcr[i].l & htole32(HIFN_D_VALID)) { HIFN_SRCR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } i++, u--; } sc->sc_srck = i; sc->sc_srcu = u; i = sc->sc_cmdk; u = sc->sc_cmdu; while (u != 0) { HIFN_CMDR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) { HIFN_CMDR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } if (i != HIFN_D_CMD_RSIZE) { u--; HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE); } if (++i == (HIFN_D_CMD_RSIZE + 1)) i = 0; } sc->sc_cmdk = i; sc->sc_cmdu = u; HIFN_UNLOCK(sc); if (sc->sc_needwakeup) { /* XXX check high watermark */ int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "wakeup crypto (%x) u %d/%d/%d/%d\n", sc->sc_needwakeup, sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu); #endif sc->sc_needwakeup &= ~wakeup; crypto_unblock(sc->sc_cid, wakeup); } } /* * Allocate a new 'session' and return an encoded session id. 'sidp' * contains our registration id, and should contain an encoded session * id on successful allocation. 
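The three cleanup loops in hifn_intr() above are instances of one pattern: walk forward from the consumer index (sc_*k) while the outstanding count (sc_*u) is nonzero, stopping at the first descriptor still marked VALID, which is where the engine is currently working; the index arithmetic steps over the extra jump slot at position RSIZE without counting it. In miniature (editor's sketch; the ring array has RSIZE + 1 entries):

#include <stdint.h>

#define RSIZE   4
#define VALID   0x80000000u

static void
ring_drain(const uint32_t *ring, int *kp, int *up)
{
        int i = *kp, u = *up;

        while (u != 0 && (ring[i] & VALID) == 0) {
                if (i != RSIZE) {
                        /* slot i completed: deliver its result here */
                        u--;
                }
                if (++i == RSIZE + 1)   /* step over the jump descriptor */
                        i = 0;
        }
        *kp = i;
        *up = u;
}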
*/ static int hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri) { struct hifn_softc *sc = device_get_softc(dev); struct cryptoini *c; int mac = 0, cry = 0, sesn; struct hifn_session *ses = NULL; KASSERT(sc != NULL, ("hifn_newsession: null softc")); if (sidp == NULL || cri == NULL || sc == NULL) return (EINVAL); HIFN_LOCK(sc); if (sc->sc_sessions == NULL) { ses = sc->sc_sessions = (struct hifn_session *)malloc( sizeof(*ses), M_DEVBUF, M_NOWAIT); if (ses == NULL) { HIFN_UNLOCK(sc); return (ENOMEM); } sesn = 0; sc->sc_nsessions = 1; } else { for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { if (!sc->sc_sessions[sesn].hs_used) { ses = &sc->sc_sessions[sesn]; break; } } if (ses == NULL) { sesn = sc->sc_nsessions; ses = (struct hifn_session *)malloc((sesn + 1) * sizeof(*ses), M_DEVBUF, M_NOWAIT); if (ses == NULL) { HIFN_UNLOCK(sc); return (ENOMEM); } bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses)); bzero(sc->sc_sessions, sesn * sizeof(*ses)); free(sc->sc_sessions, M_DEVBUF); sc->sc_sessions = ses; ses = &sc->sc_sessions[sesn]; sc->sc_nsessions++; } } HIFN_UNLOCK(sc); bzero(ses, sizeof(*ses)); ses->hs_used = 1; for (c = cri; c != NULL; c = c->cri_next) { switch (c->cri_alg) { case CRYPTO_MD5: case CRYPTO_SHA1: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: if (mac) return (EINVAL); mac = 1; ses->hs_mlen = c->cri_mlen; if (ses->hs_mlen == 0) { switch (c->cri_alg) { case CRYPTO_MD5: case CRYPTO_MD5_HMAC: ses->hs_mlen = 16; break; case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: ses->hs_mlen = 20; break; } } break; case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: case CRYPTO_AES_CBC: /* XXX this may read fewer, does it matter? */ read_random(ses->hs_iv, c->cri_alg == CRYPTO_AES_CBC ? HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); /*FALLTHROUGH*/ case CRYPTO_ARC4: if (cry) return (EINVAL); cry = 1; break; default: return (EINVAL); } } if (mac == 0 && cry == 0) return (EINVAL); *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn); return (0); } /* * Deallocate a session. * XXX this routine should run a zero'd mac/encrypt key into context ram. * XXX to blow away any keys already stored there. */ static int hifn_freesession(device_t dev, u_int64_t tid) { struct hifn_softc *sc = device_get_softc(dev); int session, error; u_int32_t sid = CRYPTO_SESID2LID(tid); KASSERT(sc != NULL, ("hifn_freesession: null softc")); if (sc == NULL) return (EINVAL); HIFN_LOCK(sc); session = HIFN_SESSION(sid); if (session < sc->sc_nsessions) { bzero(&sc->sc_sessions[session], sizeof(struct hifn_session)); error = 0; } else error = EINVAL; HIFN_UNLOCK(sc); return (error); } static int hifn_process(device_t dev, struct cryptop *crp, int hint) { struct hifn_softc *sc = device_get_softc(dev); struct hifn_command *cmd = NULL; int session, err, ivlen; struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; if (crp == NULL || crp->crp_callback == NULL) { hifnstats.hst_invalid++; return (EINVAL); } session = HIFN_SESSION(crp->crp_sid); if (sc == NULL || session >= sc->sc_nsessions) { err = EINVAL; goto errout; } cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO); if (cmd == NULL) { hifnstats.hst_nomem++; err = ENOMEM; goto errout; } if (crp->crp_flags & CRYPTO_F_IMBUF) { cmd->src_m = (struct mbuf *)crp->crp_buf; cmd->dst_m = (struct mbuf *)crp->crp_buf; } else if (crp->crp_flags & CRYPTO_F_IOV) { cmd->src_io = (struct uio *)crp->crp_buf; cmd->dst_io = (struct uio *)crp->crp_buf; } else { err = EINVAL; goto errout; /* XXX we don't handle contiguous buffers! 
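hifn_newsession() grows the session array by exactly one element per allocation and, notably, zeroes the old array before freeing it so key material never lingers in freed kernel memory. The grow-and-scrub step in isolation (editor's sketch in userland C; the kernel malloc flags are elided):

#include <stdlib.h>
#include <string.h>

static void *
grow_scrub(void *old, size_t nelem, size_t elsize)
{
        void *p = malloc((nelem + 1) * elsize);

        if (p == NULL)
                return (NULL);          /* caller keeps the old array */
        if (old != NULL) {
                memcpy(p, old, nelem * elsize);
                memset(old, 0, nelem * elsize); /* scrub keys before free */
                free(old);
        }
        return (p);                     /* slot nelem left for the caller */
}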
*/ } crd1 = crp->crp_desc; if (crd1 == NULL) { err = EINVAL; goto errout; } crd2 = crd1->crd_next; if (crd2 == NULL) { if (crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_SHA1 || crd1->crd_alg == CRYPTO_MD5) { maccrd = crd1; enccrd = NULL; } else if (crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC || crd1->crd_alg == CRYPTO_ARC4) { if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0) cmd->base_masks |= HIFN_BASE_CMD_DECODE; maccrd = NULL; enccrd = crd1; } else { err = EINVAL; goto errout; } } else { if ((crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_MD5 || crd1->crd_alg == CRYPTO_SHA1) && (crd2->crd_alg == CRYPTO_DES_CBC || crd2->crd_alg == CRYPTO_3DES_CBC || crd2->crd_alg == CRYPTO_AES_CBC || crd2->crd_alg == CRYPTO_ARC4) && ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { cmd->base_masks = HIFN_BASE_CMD_DECODE; maccrd = crd1; enccrd = crd2; } else if ((crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_ARC4 || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC) && (crd2->crd_alg == CRYPTO_MD5_HMAC || crd2->crd_alg == CRYPTO_SHA1_HMAC || crd2->crd_alg == CRYPTO_MD5 || crd2->crd_alg == CRYPTO_SHA1) && (crd1->crd_flags & CRD_F_ENCRYPT)) { enccrd = crd1; maccrd = crd2; } else { /* * We cannot order the 7751 as requested */ err = EINVAL; goto errout; } } if (enccrd) { cmd->enccrd = enccrd; cmd->base_masks |= HIFN_BASE_CMD_CRYPT; switch (enccrd->crd_alg) { case CRYPTO_ARC4: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4; break; case CRYPTO_DES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; case CRYPTO_3DES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; case CRYPTO_AES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; default: err = EINVAL; goto errout; } if (enccrd->crd_alg != CRYPTO_ARC4) { ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ? HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); if (enccrd->crd_flags & CRD_F_ENCRYPT) { if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, cmd->iv, ivlen); else bcopy(sc->sc_sessions[session].hs_iv, cmd->iv, ivlen); if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivlen, cmd->iv); } } else { if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, cmd->iv, ivlen); else { crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivlen, cmd->iv); } } } if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; cmd->ck = enccrd->crd_key; cmd->cklen = enccrd->crd_klen >> 3; cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; /* * Need to specify the size for the AES key in the masks. 
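The IV plumbing above is dense; for the CBC modes it picks the IV from one of four places depending on direction and on the CRD_F_IV_EXPLICIT/CRD_F_IV_PRESENT flags. A compact restatement, plus the length helper the switch below implies (editor's sketch; hifn_ivlen is a hypothetical name, CRYPTO_AES_CBC comes from opencrypto/cryptodev.h):

/*
 * encrypt, IV_EXPLICIT : IV = crd_iv
 * encrypt, otherwise   : IV = per-session hs_iv
 *     ...and unless IV_PRESENT is set, the chosen IV is also written
 *     into the packet at crd_inject for the peer to find.
 * decrypt, IV_EXPLICIT : IV = crd_iv
 * decrypt, otherwise   : IV = read from the packet at crd_inject
 */
static int
hifn_ivlen(int alg)
{
        /* AES-CBC carries a 16-byte IV; DES/3DES use 8 bytes */
        return (alg == CRYPTO_AES_CBC ? 16 : 8);
}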
*/ if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) == HIFN_CRYPT_CMD_ALG_AES) { switch (cmd->cklen) { case 16: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128; break; case 24: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192; break; case 32: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256; break; default: err = EINVAL; goto errout; } } } if (maccrd) { cmd->maccrd = maccrd; cmd->base_masks |= HIFN_BASE_CMD_MAC; switch (maccrd->crd_alg) { case CRYPTO_MD5: cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | HIFN_MAC_CMD_POS_IPSEC; break; case CRYPTO_MD5_HMAC: cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; break; case CRYPTO_SHA1: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | HIFN_MAC_CMD_POS_IPSEC; break; case CRYPTO_SHA1_HMAC: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; break; } if (maccrd->crd_alg == CRYPTO_SHA1_HMAC || maccrd->crd_alg == CRYPTO_MD5_HMAC) { cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3); bzero(cmd->mac + (maccrd->crd_klen >> 3), HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3)); } } cmd->crp = crp; cmd->session_num = session; cmd->softc = sc; err = hifn_crypto(sc, cmd, crp, hint); if (!err) { return 0; } else if (err == ERESTART) { /* * There weren't enough resources to dispatch the request * to the part. Notify the caller so they'll requeue this * request and resubmit it again soon. */ #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "requeue request\n"); #endif free(cmd, M_DEVBUF); sc->sc_needwakeup |= CRYPTO_SYMQ; return (err); } errout: if (cmd != NULL) free(cmd, M_DEVBUF); if (err == EINVAL) hifnstats.hst_invalid++; else hifnstats.hst_nomem++; crp->crp_etype = err; crypto_done(crp); return (err); } static void hifn_abort(struct hifn_softc *sc) { struct hifn_dma *dma = sc->sc_dma; struct hifn_command *cmd; struct cryptop *crp; int i, u; i = sc->sc_resk; u = sc->sc_resu; while (u != 0) { cmd = sc->sc_hifn_commands[i]; KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i)); sc->sc_hifn_commands[i] = NULL; crp = cmd->crp; if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) { /* Salvage what we can. */ u_int8_t *macbuf; if (cmd->base_masks & HIFN_BASE_CMD_MAC) { macbuf = dma->result_bufs[i]; macbuf += 12; } else macbuf = NULL; hifnstats.hst_opackets++; hifn_callback(sc, cmd, macbuf); } else { if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_POSTREAD); } if (cmd->src_m != cmd->dst_m) { m_freem(cmd->src_m); crp->crp_buf = (caddr_t)cmd->dst_m; } /* non-shared buffers cannot be restarted */ if (cmd->src_map != cmd->dst_map) { /* * XXX should be EAGAIN, delayed until * after the reset. 
*/ crp->crp_etype = ENOMEM; bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); } else crp->crp_etype = ENOMEM; bus_dmamap_unload(sc->sc_dmat, cmd->src_map); bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); free(cmd, M_DEVBUF); if (crp->crp_etype != EAGAIN) crypto_done(crp); } if (++i == HIFN_D_RES_RSIZE) i = 0; u--; } sc->sc_resk = i; sc->sc_resu = u; hifn_reset_board(sc, 1); hifn_init_dma(sc); hifn_init_pci_registers(sc); } static void hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf) { struct hifn_dma *dma = sc->sc_dma; struct cryptop *crp = cmd->crp; struct cryptodesc *crd; struct mbuf *m; int totlen, i, u, ivlen; if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_POSTREAD); } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (cmd->src_m != cmd->dst_m) { crp->crp_buf = (caddr_t)cmd->dst_m; totlen = cmd->src_mapsize; for (m = cmd->dst_m; m != NULL; m = m->m_next) { if (totlen < m->m_len) { m->m_len = totlen; totlen = 0; } else totlen -= m->m_len; } cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len; m_freem(cmd->src_m); } } if (cmd->sloplen != 0) { crypto_copyback(crp->crp_flags, crp->crp_buf, cmd->src_mapsize - cmd->sloplen, cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]); } i = sc->sc_dstk; u = sc->sc_dstu; while (u != 0) { if (i == HIFN_D_DST_RSIZE) i = 0; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } i++, u--; } sc->sc_dstk = i; sc->sc_dstu = u; hifnstats.hst_obytes += cmd->dst_mapsize; if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) == HIFN_BASE_CMD_CRYPT) { for (crd = crp->crp_desc; crd; crd = crd->crd_next) { if (crd->crd_alg != CRYPTO_DES_CBC && crd->crd_alg != CRYPTO_3DES_CBC && crd->crd_alg != CRYPTO_AES_CBC) continue; ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ? HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip + crd->crd_len - ivlen, ivlen, cmd->softc->sc_sessions[cmd->session_num].hs_iv); break; } } if (macbuf != NULL) { for (crd = crp->crp_desc; crd; crd = crd->crd_next) { int len; if (crd->crd_alg != CRYPTO_MD5 && crd->crd_alg != CRYPTO_SHA1 && crd->crd_alg != CRYPTO_MD5_HMAC && crd->crd_alg != CRYPTO_SHA1_HMAC) { continue; } len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen; crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, len, macbuf); break; } } if (cmd->src_map != cmd->dst_map) { bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); } bus_dmamap_unload(sc->sc_dmat, cmd->src_map); bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); free(cmd, M_DEVBUF); crypto_done(crp); } /* * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0 * and Group 1 registers; avoid conditions that could create * burst writes by doing a read in between the writes. * * NB: The read we interpose is always to the same register; * we do this because reading from an arbitrary (e.g. last) * register may not always work. 
*/ static void hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) { if (sc->sc_flags & HIFN_IS_7811) { if (sc->sc_bar0_lastreg == reg - 4) bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG); sc->sc_bar0_lastreg = reg; } bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); } static void hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) { if (sc->sc_flags & HIFN_IS_7811) { if (sc->sc_bar1_lastreg == reg - 4) bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); sc->sc_bar1_lastreg = reg; } bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); } #ifdef HIFN_VULCANDEV /* * this code provides support for mapping the PK engine's register * into a userspace program. * */ static int vulcanpk_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr) { struct hifn_softc *sc; vm_paddr_t pd; void *b; sc = dev->si_drv1; pd = rman_get_start(sc->sc_bar1res); b = rman_get_virtual(sc->sc_bar1res); #if 0 printf("vpk mmap: %p(%016llx) offset=%lld\n", b, (unsigned long long)pd, offset); hexdump(b, HIFN_1_PUB_MEMEND, "vpk", 0); #endif if (offset == 0) { *paddr = pd; return (0); } return (-1); } static struct cdevsw vulcanpk_cdevsw = { .d_version = D_VERSION, .d_mmap = vulcanpk_mmap, .d_name = "vulcanpk", }; #endif /* HIFN_VULCANDEV */ Index: head/sys/dev/ie/if_ie.c =================================================================== --- head/sys/dev/ie/if_ie.c (revision 276749) +++ head/sys/dev/ie/if_ie.c (revision 276750) @@ -1,1701 +1,1700 @@ /*- * Copyright (c) 1992, 1993, University of Vermont and State * Agricultural College. * Copyright (c) 1992, 1993, Garrett A. Wollman. * * Portions: * Copyright (c) 1990, 1991, William F. Jolitz * Copyright (c) 1990, The Regents of the University of California * * 3Com 3C507 support: * Copyright (c) 1993, 1994, Charles M. Hannum * * EtherExpress 16 support: * Copyright (c) 1993, 1994, 1995, Rodney W. Grimes * Copyright (c) 1997, Aaron C. Smith * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * Vermont and State Agricultural College and Garrett A. Wollman, by * William F. Jolitz, by the University of California, Berkeley, * Lawrence Berkeley Laboratory, and their contributors, by * Charles M. Hannum, by Rodney W. Grimes, and by Aaron C. Smith. * 4. Neither the names of the Universities nor the names of the authors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OR AUTHORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * MAINTAINER: Matthew N. Dodd */ #include __FBSDID("$FreeBSD$"); /* * Intel 82586 Ethernet chip * Register, bit, and structure definitions. * * Written by GAW with reference to the Clarkson Packet Driver code for this * chip written by Russ Nelson and others. * * Intel EtherExpress 16 support from if_ix.c, written by Rodney W. Grimes. */ /* * The i82586 is a very versatile chip, found in many implementations. * Programming this chip is mostly the same, but certain details differ * from card to card. This driver is written so that different cards * can be automatically detected at run-time. */ /* * Mode of operation: * * We run the 82586 in a standard Ethernet mode. We keep NFRAMES * received frame descriptors around for the receiver to use, and * NRXBUFS associated receive buffer descriptors, both in a circular * list. Whenever a frame is received, we rotate both lists as * necessary. (The 586 treats both lists as a simple queue.) We also * keep a transmit command around so that packets can be sent off * quickly. * * We configure the adapter in AL-LOC = 1 mode, which means that the * Ethernet/802.3 MAC header is placed at the beginning of the receive * buffer rather than being split off into various fields in the RFD. * This also means that we must include this header in the transmit * buffer as well. * * By convention, all transmit commands, and only transmit commands, * shall have the I (IE_CMD_INTR) bit set in the command. This way, * when an interrupt arrives at ieintr(), it is immediately possible * to tell what precisely caused it. ANY OTHER command-sending routines * should run at splimp(), and should post an acknowledgement to every * interrupt they generate. * * The 82586 has a 24-bit address space internally, and the adaptor's * memory is located at the top of this region. However, the value * we are given in configuration is normally the *bottom* of the adaptor * RAM. So, we must go through a few gyrations to come up with a * kernel virtual address which represents the actual beginning of the * 586 address space. First, we autosize the RAM by running through * several possible sizes and trying to initialize the adapter under * the assumption that the selected size is correct. Then, knowing * the correct RAM size, we set up our pointers in the softc `iomem' * represents the computed base of the 586 address space. `iomembot' * represents the actual configured base of adapter RAM. Finally, * `iosize' represents the calculated size of 586 RAM. Then, when * laying out commands, we use the interval [iomembot, iomembot + * iosize); to make 24-pointers, we subtract iomem, and to make * 16-pointers, we subtract iomem and and with 0xffff. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEBUG #define IED_RINT 0x01 #define IED_TINT 0x02 #define IED_RNR 0x04 #define IED_CNA 0x08 #define IED_READFRAME 0x10 static int ie_debug = IED_RNR; #endif #define IE_BUF_LEN ETHER_MAX_LEN /* length of transmit buffer */ /* XXX this driver uses `volatile' and `caddr_t' to a fault. */ typedef volatile char *v_caddr_t; /* core address, pointer to volatile */ /* Forward declaration */ struct ie_softc; static void ieinit (void *); static void ieinit_locked (struct ie_softc *); static void ie_stop (struct ie_softc *); static int ieioctl (struct ifnet *, u_long, caddr_t); static void iestart (struct ifnet *); static void iestart_locked (struct ifnet *); static __inline void ee16_interrupt_enable (struct ie_softc *); static __inline void ie_ack (struct ie_softc *, u_int); static void iereset (struct ie_softc *); static void ie_readframe (struct ie_softc *, int); static void ie_drop_packet_buffer (struct ie_softc *); static int command_and_wait (struct ie_softc *, int, void volatile *, int); static void run_tdr (struct ie_softc *, volatile struct ie_tdr_cmd *); static int ierint (struct ie_softc *); static int ietint (struct ie_softc *); static int iernr (struct ie_softc *); static void start_receiver (struct ie_softc *); static __inline int ieget (struct ie_softc *, struct mbuf **); static v_caddr_t setup_rfa (struct ie_softc *, v_caddr_t); static int mc_setup (struct ie_softc *); static void ie_mc_reset (struct ie_softc *); #ifdef DEBUG static void print_rbd (volatile struct ie_recv_buf_desc * rbd); static int in_ierint = 0; static int in_ietint = 0; #endif static const char *ie_hardware_names[] = { "None", "StarLAN 10", "EN100", "StarLAN Fiber", "3C507", "NI5210", "EtherExpress 16", "Unknown" }; /* * sizeof(iscp) == 1+1+2+4 == 8 * sizeof(scb) == 2+2+2+2+2+2+2+2 == 16 * NFRAMES * sizeof(rfd) == NFRAMES*(2+2+2+2+6+6+2+2) == NFRAMES*24 == 384 * sizeof(xmit_cmd) == 2+2+2+2+6+2 == 18 * sizeof(transmit buffer) == 1512 * sizeof(transmit buffer desc) == 8 * ----- * 1946 * * NRXBUFS * sizeof(rbd) == NRXBUFS*(2+2+4+2+2) == NRXBUFS*12 * NRXBUFS * IE_RBUF_SIZE == NRXBUFS*256 * * NRXBUFS should be (16384 - 1946) / (256 + 12) == 14438 / 268 == 53 * * With NRXBUFS == 48, this leaves us 1574 bytes for another command or * more buffers. Another transmit command would be 18+8+1512 == 1538 * ---just barely fits! * * Obviously all these would have to be reduced for smaller memory sizes. * With a larger memory, it would be possible to roughly double the number * of both transmit and receive buffers. */ #define NFRAMES 4 /* number of receive frames */ #define NRXBUFS 24 /* number of buffers to allocate */ #define IE_RBUF_SIZE 256 /* size of each buffer, MUST BE POWER OF TWO */ #define NTXBUFS 1 /* number of transmit commands */ #define IE_TBUF_SIZE ETHER_MAX_LEN /* size of transmit buffer */ #define MK_24(base, ptr) ((caddr_t)((uintptr_t)ptr - (uintptr_t)base)) #define MK_16(base, ptr) ((u_short)(uintptr_t)MK_24(base, ptr)) void ee16_shutdown(struct ie_softc *sc) { ee16_reset_586(sc); outb(PORT(sc) + IEE16_ECTRL, IEE16_RESET_ASIC); outb(PORT(sc) + IEE16_ECTRL, 0); } /* * Taken almost exactly from Bill's if_is.c, then modified beyond recognition. 
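The MK_24()/MK_16() macros above implement the pointer gymnastics described in the header comment: subtract the computed 586 base to get a 24-bit bus offset, then truncate to 16 bits for the short descriptor links. A quick demonstration of the truncation (editor's sketch with made-up addresses):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uintptr_t iomem = 0xc0100000;           /* computed 586 base (fake) */
        uintptr_t obj = iomem + 0x11234;        /* some command structure */

        printf("24-bit %#lx, 16-bit %#x\n",
            (unsigned long)(obj - iomem),       /* MK_24: 0x11234 */
            (uint16_t)(obj - iomem));           /* MK_16: 0x1234  */
        return (0);
}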
*/ int ie_attach(device_t dev) { struct ie_softc * sc; struct ifnet * ifp; size_t allocsize; int error, factor; sc = device_get_softc(dev); ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc->dev, "can not if_alloc()\n"); return (ENOSPC); } sc->dev = dev; mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); /* * based on the amount of memory we have, allocate our tx and rx * resources. */ factor = rman_get_size(sc->mem_res) / 8192; sc->nframes = factor * NFRAMES; sc->nrxbufs = factor * NRXBUFS; sc->ntxbufs = factor * NTXBUFS; /* * Since all of these guys are arrays of pointers, allocate as one * big chunk and dole out accordingly. */ allocsize = sizeof(void *) * (sc->nframes + (sc->nrxbufs * 2) + (sc->ntxbufs * 3)); sc->rframes = (volatile struct ie_recv_frame_desc **) malloc(allocsize, M_DEVBUF, M_NOWAIT); if (sc->rframes == NULL) { mtx_destroy(&sc->lock); return (ENXIO); } sc->rbuffs = (volatile struct ie_recv_buf_desc **)&sc->rframes[sc->nframes]; sc->cbuffs = (volatile u_char **)&sc->rbuffs[sc->nrxbufs]; sc->xmit_cmds = (volatile struct ie_xmit_cmd **)&sc->cbuffs[sc->nrxbufs]; sc->xmit_buffs = (volatile struct ie_xmit_buf **)&sc->xmit_cmds[sc->ntxbufs]; sc->xmit_cbuffs = (volatile u_char **)&sc->xmit_buffs[sc->ntxbufs]; if (bootverbose) device_printf(sc->dev, "hardware type %s, revision %d\n", ie_hardware_names[sc->hard_type], sc->hard_vers + 1); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = iestart; ifp->if_ioctl = ieioctl; ifp->if_init = ieinit; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ether_ifattach(ifp, sc->enaddr); error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ie_intr, sc, &sc->irq_ih); if (error) { device_printf(dev, "Unable to register interrupt handler\n"); mtx_destroy(&sc->lock); return (error); } return (0); } static __inline void ie_ack(struct ie_softc *sc, u_int mask) { sc->scb->ie_command = sc->scb->ie_status & mask; (*sc->ie_chan_attn) (sc); } /* * What to do upon receipt of an interrupt. */ void ie_intr(void *xsc) { struct ie_softc *sc = (struct ie_softc *)xsc; u_short status; IE_LOCK(sc); /* Clear the interrupt latch on the 3C507. */ if (sc->hard_type == IE_3C507 && (inb(PORT(sc) + IE507_CTRL) & EL_CTRL_INTL)) outb(PORT(sc) + IE507_ICTRL, 1); /* disable interrupts on the EE16. */ if (sc->hard_type == IE_EE16) outb(PORT(sc) + IEE16_IRQ, sc->irq_encoded); status = sc->scb->ie_status; loop: /* Don't ack interrupts which we didn't receive */ ie_ack(sc, IE_ST_WHENCE & status); if (status & (IE_ST_RECV | IE_ST_RNR)) { #ifdef DEBUG in_ierint++; if (ie_debug & IED_RINT) if_printf(sc->ifp, "rint\n"); #endif ierint(sc); #ifdef DEBUG in_ierint--; #endif } if (status & IE_ST_DONE) { #ifdef DEBUG in_ietint++; if (ie_debug & IED_TINT) if_printf(sc->ifp, "tint\n"); #endif ietint(sc); #ifdef DEBUG in_ietint--; #endif } if (status & IE_ST_RNR) { #ifdef DEBUG if (ie_debug & IED_RNR) if_printf(sc->ifp, "rnr\n"); #endif iernr(sc); } #ifdef DEBUG if ((status & IE_ST_ALLDONE) && (ie_debug & IED_CNA)) if_printf(sc->ifp, "cna\n"); #endif if ((status = sc->scb->ie_status) & IE_ST_WHENCE) goto loop; /* Clear the interrupt latch on the 3C507. */ if (sc->hard_type == IE_3C507) outb(PORT(sc) + IE507_ICTRL, 1); /* enable interrupts on the EE16. */ if (sc->hard_type == IE_EE16) outb(PORT(sc) + IEE16_IRQ, sc->irq_encoded | IEE16_IRQ_ENABLE); IE_UNLOCK(sc); } /* * Process a received-frame interrupt. 
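ie_attach() above scales every compile-time count by factor = memsize / 8192, so the NFRAMES/NRXBUFS/NTXBUFS defines describe an 8K board and larger boards get proportionally more of everything, carved out of one malloc of pointers. Worked numbers (editor's sketch):

#include <stdio.h>

int
main(void)
{
        int memsize = 16384;                    /* a 16K board */
        int factor = memsize / 8192;            /* == 2 */
        int nframes = factor * 4;               /* NFRAMES */
        int nrxbufs = factor * 24;              /* NRXBUFS */
        int ntxbufs = factor * 1;               /* NTXBUFS */
        size_t allocsize = sizeof(void *) *
            (nframes + nrxbufs * 2 + ntxbufs * 3);

        /* prints: 8 frames, 48 rx buffers, 2 tx buffers */
        printf("%d/%d/%d, %zu bytes of pointers\n",
            nframes, nrxbufs, ntxbufs, allocsize);
        return (0);
}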
*/ static int ierint(struct ie_softc *sc) { int i, status; static int timesthru = 1024; i = sc->rfhead; while (1) { status = sc->rframes[i]->ie_fd_status; if ((status & IE_FD_COMPLETE) && (status & IE_FD_OK)) { if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1); if (!--timesthru) { if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, sc->scb->ie_err_crc + sc->scb->ie_err_align + sc->scb->ie_err_resource + sc->scb->ie_err_overrun); sc->scb->ie_err_crc = 0; sc->scb->ie_err_align = 0; sc->scb->ie_err_resource = 0; sc->scb->ie_err_overrun = 0; timesthru = 1024; } ie_readframe(sc, i); } else { if (status & IE_FD_RNR) { if (!(sc->scb->ie_status & IE_RU_READY)) { sc->rframes[0]->ie_fd_next = MK_16(MEM(sc), sc->rbuffs[0]); sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]); command_and_wait(sc, IE_RU_START, 0, 0); } } break; } i = (i + 1) % sc->nframes; } return (0); } /* * Process a command-complete interrupt. These are only generated by * the transmission of frames. This routine is deceptively simple, since * most of the real work is done by iestart(). */ static int ietint(struct ie_softc *sc) { struct ifnet *ifp = sc->ifp; int status; int i; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; for (i = 0; i < sc->xmit_count; i++) { status = sc->xmit_cmds[i]->ie_xmit_status; if (status & IE_XS_LATECOLL) { if_printf(ifp, "late collision\n"); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else if (status & IE_XS_NOCARRIER) { if_printf(ifp, "no carrier\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else if (status & IE_XS_LOSTCTS) { if_printf(ifp, "lost CTS\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else if (status & IE_XS_UNDERRUN) { if_printf(ifp, "DMA underrun\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else if (status & IE_XS_EXCMAX) { if_printf(ifp, "too many collisions\n"); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, status & IE_XS_MAXCOLL); } } sc->xmit_count = 0; /* * If multicast addresses were added or deleted while we were * transmitting, ie_mc_reset() set the want_mcsetup flag indicating * that we should do it. */ if (sc->want_mcsetup) { mc_setup(sc); sc->want_mcsetup = 0; } /* Wish I knew why this seems to be necessary... */ sc->xmit_cmds[0]->ie_xmit_status |= IE_STAT_COMPL; iestart_locked(ifp); return (0); /* shouldn't be necessary */ } /* * Process a receiver-not-ready interrupt. I believe that we get these * when there aren't enough buffers to go around. For now (FIXME), we * just restart the receiver, and hope everything's ok. */ static int iernr(struct ie_softc *sc) { #ifdef doesnt_work setup_rfa(sc, (v_caddr_t) sc->rframes[0]); sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]); command_and_wait(sc, IE_RU_START, 0, 0); #else /* This doesn't work either, but it doesn't hang either. */ command_and_wait(sc, IE_RU_DISABLE, 0, 0); /* just in case */ setup_rfa(sc, (v_caddr_t) sc->rframes[0]); /* ignore cast-qual */ sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]); command_and_wait(sc, IE_RU_START, 0, 0); /* was ENABLE */ #endif ie_ack(sc, IE_ST_WHENCE); if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); return (0); } /* * Compare two Ether/802 addresses for equality, inlined and * unrolled for speed. I'd love to have an inline assembler * version of this... 
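The hand-unrolled ether_equal() below predates compilers that inline and vectorize memcmp(); a modern equivalent would simply be the following (editor's note, not a proposed change to the driver):

#include <string.h>

static __inline int
ether_equal_alt(const unsigned char *one, const unsigned char *two)
{
        return (memcmp(one, two, 6) == 0);      /* 6 == ETHER_ADDR_LEN */
}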
*/ static __inline int ether_equal(u_char * one, u_char * two) { if (one[0] != two[0]) return (0); if (one[1] != two[1]) return (0); if (one[2] != two[2]) return (0); if (one[3] != two[3]) return (0); if (one[4] != two[4]) return (0); if (one[5] != two[5]) return (0); return 1; } /* * Determine quickly whether we should bother reading in this packet. * This depends on whether BPF and/or bridging is enabled, whether we * are receiving multicast address, and whether promiscuous mode is enabled. * We assume that if IFF_PROMISC is set, then *somebody* wants to see * all incoming packets. */ static __inline int check_eh(struct ie_softc *sc, struct ether_header *eh) { /* Optimize the common case: normal operation. We've received either a unicast with our dest or a multicast packet. */ if (sc->promisc == 0) { int i; /* If not multicast, it's definitely for us */ if ((eh->ether_dhost[0] & 1) == 0) return (1); /* Accept broadcasts (loose but fast check) */ if (eh->ether_dhost[0] == 0xff) return (1); /* Compare against our multicast addresses */ for (i = 0; i < sc->mcast_count; i++) { if (ether_equal(eh->ether_dhost, (u_char *)&sc->mcast_addrs[i])) return (1); } return (0); } /* Always accept packets when in promiscuous mode */ if ((sc->promisc & IFF_PROMISC) != 0) return (1); /* Always accept packets directed at us */ if (ether_equal(eh->ether_dhost, IF_LLADDR(sc->ifp))) return (1); /* Must have IFF_ALLMULTI but not IFF_PROMISC set. The chip is actually in promiscuous mode, so discard unicast packets. */ return((eh->ether_dhost[0] & 1) != 0); } /* * We want to isolate the bits that have meaning... This assumes that * IE_RBUF_SIZE is an even power of two. If somehow the act_len exceeds * the size of the buffer, then we are screwed anyway. */ static __inline int ie_buflen(struct ie_softc *sc, int head) { return (sc->rbuffs[head]->ie_rbd_actual & (IE_RBUF_SIZE | (IE_RBUF_SIZE - 1))); } static __inline int ie_packet_len(struct ie_softc *sc) { int i; int head = sc->rbhead; int acc = 0; do { if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) { #ifdef DEBUG print_rbd(sc->rbuffs[sc->rbhead]); #endif log(LOG_ERR, "%s: receive descriptors out of sync at %d\n", sc->ifp->if_xname, sc->rbhead); iereset(sc); return (-1); } i = sc->rbuffs[head]->ie_rbd_actual & IE_RBD_LAST; acc += ie_buflen(sc, head); head = (head + 1) % sc->nrxbufs; } while (!i); return (acc); } /* * Read data off the interface, and turn it into an mbuf chain. * * This code is DRAMATICALLY different from the previous version; this * version tries to allocate the entire mbuf chain up front, given the * length of the data available. This enables us to allocate mbuf * clusters in many situations where before we would have had a long * chain of partially-full mbufs. This should help to speed up the * operation considerably. (Provided that it works, of course.) */ static __inline int ieget(struct ie_softc *sc, struct mbuf **mp) { struct ether_header eh; struct mbuf *m, *top, **mymp; int offset; int totlen, resid; int thismboff; int head; totlen = ie_packet_len(sc); if (totlen <= 0) return (-1); /* * Snarf the Ethernet header. */ bcopy(sc->cbuffs[sc->rbhead], &eh, sizeof(struct ether_header)); /* ignore cast-qual warning here */ /* * As quickly as possible, check if this packet is for us. If not, * don't waste a single cycle copying the rest of the packet in. * This is only a consideration when FILTER is defined; i.e., when * we are either running BPF or doing multicasting. 
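ie_buflen() masks with IE_RBUF_SIZE | (IE_RBUF_SIZE - 1) rather than IE_RBUF_SIZE - 1 because a completely full buffer reports an actual length equal to the buffer size, which needs one bit more than size - 1; the OR keeps that bit while still stripping the chip's status bits above it (editor's sketch):

#include <stdio.h>

int
main(void)
{
        unsigned actual = 0xc100;               /* status bits + length 0x100 */
        unsigned mask = 256 | (256 - 1);        /* == 0x1ff, not 0xff */

        /* a completely full 256-byte buffer keeps bit 8 of its length */
        printf("%u\n", actual & mask);          /* prints 256 */
        return (0);
}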
*/ if (!check_eh(sc, &eh)) { ie_drop_packet_buffer(sc); return (-1); } MGETHDR(m, M_NOWAIT, MT_DATA); if (!m) { ie_drop_packet_buffer(sc); return (-1); } *mp = m; m->m_pkthdr.rcvif = sc->ifp; m->m_len = MHLEN; resid = m->m_pkthdr.len = totlen; top = 0; mymp = &top; /* * This loop goes through and allocates mbufs for all the data we * will be copying in. It does not actually do the copying yet. */ do { /* while(resid > 0) */ /* * Try to allocate an mbuf to hold the data that we have. * If we already allocated one, just get another one and * stick it on the end (eventually). If we don't already * have one, try to allocate an mbuf cluster big enough to * hold the whole packet, if we think it's reasonable, or a * single mbuf which may or may not be big enough. Got that? */ if (top) { MGET(m, M_NOWAIT, MT_DATA); if (!m) { m_freem(top); ie_drop_packet_buffer(sc); return (-1); } m->m_len = MLEN; } if (resid >= MINCLSIZE) { - MCLGET(m, M_NOWAIT); - if (m->m_flags & M_EXT) + if (MCLGET(m, M_NOWAIT)) m->m_len = min(resid, MCLBYTES); } else { if (resid < m->m_len) { if (!top && resid + max_linkhdr <= m->m_len) m->m_data += max_linkhdr; m->m_len = resid; } } resid -= m->m_len; *mymp = m; mymp = &m->m_next; } while (resid > 0); resid = totlen; /* remaining data */ offset = 0; /* packet offset */ thismboff = 0; /* offset in m */ m = top; /* current mbuf */ head = sc->rbhead; /* current rx buffer */ /* * Now we take the mbuf chain (hopefully only one mbuf most of the * time) and stuff the data into it. There are no possible failures * at or after this point. */ while (resid > 0) { /* while there's stuff left */ int thislen = ie_buflen(sc, head) - offset; /* * If too much data for the current mbuf, then fill the * current one up, go to the next one, and try again. */ if (thislen > m->m_len - thismboff) { int newlen = m->m_len - thismboff; bcopy((v_caddr_t) (sc->cbuffs[head] + offset), mtod(m, caddr_t) + thismboff, (unsigned) newlen); /* ignore cast-qual warning */ m = m->m_next; thismboff = 0; /* new mbuf, so no offset */ offset += newlen; /* we are now this far into * the packet */ resid -= newlen; /* so there is this much left * to get */ continue; } /* * If there is more than enough space in the mbuf to hold * the contents of this buffer, copy everything in, advance * pointers, and so on. */ if (thislen < m->m_len - thismboff) { bcopy((v_caddr_t) (sc->cbuffs[head] + offset), mtod(m, caddr_t) + thismboff, (unsigned) thislen); thismboff += thislen; /* we are this far into the * mbuf */ resid -= thislen; /* and this much is left */ goto nextbuf; } /* * Otherwise, there is exactly enough space to put this * buffer's contents into the current mbuf. Do the * combination of the above actions. */ bcopy((v_caddr_t) (sc->cbuffs[head] + offset), mtod(m, caddr_t) + thismboff, (unsigned) thislen); m = m->m_next; thismboff = 0; /* new mbuf, start at the beginning */ resid -= thislen; /* and we are this far through */ /* * Advance all the pointers. We can get here from either of * the last two cases, but never the first. */ nextbuf: offset = 0; sc->rbuffs[head]->ie_rbd_actual = 0; sc->rbuffs[head]->ie_rbd_length |= IE_RBD_LAST; sc->rbhead = head = (head + 1) % sc->nrxbufs; sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST; sc->rbtail = (sc->rbtail + 1) % sc->nrxbufs; } /* * Unless something changed strangely while we were doing the copy, * we have now copied everything in from the shared memory. This * means that we are done. */ return (0); } /* * Read frame NUM from unit UNIT (pre-cached as IE).
* * This routine reads the RFD at NUM, and copies in the buffers from * the list of RBD, then rotates the RBD and RFD lists so that the receiver * doesn't start complaining. Trailers are DROPPED---there's no point * in wasting time on confusing code to deal with them. Hopefully, * this machine will never ARP for trailers anyway. */ static void ie_readframe(struct ie_softc *sc, int num/* frame number to read */) { struct ifnet *ifp = sc->ifp; struct ie_recv_frame_desc rfd; struct mbuf *m = 0; #ifdef DEBUG struct ether_header *eh; #endif bcopy((v_caddr_t) (sc->rframes[num]), &rfd, sizeof(struct ie_recv_frame_desc)); /* * Immediately advance the RFD list, since we have copied ours * now. */ sc->rframes[num]->ie_fd_status = 0; sc->rframes[num]->ie_fd_last |= IE_FD_LAST; sc->rframes[sc->rftail]->ie_fd_last &= ~IE_FD_LAST; sc->rftail = (sc->rftail + 1) % sc->nframes; sc->rfhead = (sc->rfhead + 1) % sc->nframes; if (rfd.ie_fd_status & IE_FD_OK) { if (ieget(sc, &m)) { if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); /* this counts as an * error */ return; } } /* * Bad frame or no mbuf chain; check before the DEBUG code below * dereferences m. */ if (!m) return; #ifdef DEBUG eh = mtod(m, struct ether_header *); if (ie_debug & IED_READFRAME) { if_printf(ifp, "frame from ether %6D type %x\n", eh->ether_shost, ":", (unsigned) eh->ether_type); } if (ntohs(eh->ether_type) > ETHERTYPE_TRAIL && ntohs(eh->ether_type) < (ETHERTYPE_TRAIL + ETHERTYPE_NTRAILER)) printf("received trailer!\n"); #endif /* * Finally pass this packet up to higher layers. */ IE_UNLOCK(sc); (*ifp->if_input)(ifp, m); IE_LOCK(sc); } static void ie_drop_packet_buffer(struct ie_softc *sc) { int i; do { /* * This means we are somehow out of sync. So, we reset the * adapter. */ if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) { #ifdef DEBUG print_rbd(sc->rbuffs[sc->rbhead]); #endif log(LOG_ERR, "%s: receive descriptors out of sync at %d\n", sc->ifp->if_xname, sc->rbhead); iereset(sc); return; } i = sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_LAST; sc->rbuffs[sc->rbhead]->ie_rbd_length |= IE_RBD_LAST; sc->rbuffs[sc->rbhead]->ie_rbd_actual = 0; sc->rbhead = (sc->rbhead + 1) % sc->nrxbufs; sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST; sc->rbtail = (sc->rbtail + 1) % sc->nrxbufs; } while (!i); } /* * Start transmission on an interface. */ static void iestart(struct ifnet *ifp) { struct ie_softc *sc = ifp->if_softc; IE_LOCK(sc); iestart_locked(ifp); IE_UNLOCK(sc); } static void iestart_locked(struct ifnet *ifp) { struct ie_softc *sc = ifp->if_softc; struct mbuf *m0, *m; volatile unsigned char *buffer; u_short len; /* * This is not really volatile, in this routine, but it makes gcc * happy.
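 * bptr is the usual list-append cursor: it points at whichever word
 * should receive the offset of the next command, first the SCB's
 * command-list head, then each queued command's link field.  The
 * pattern in the loop below is, in miniature (cmd standing in for
 * sc->xmit_cmds[n]):
 *
 *	*bptr = MK_16(sc->iomem, cmd);	(append this command)
 *	bptr = &cmd->com.ie_cmd_link;	(next append goes here)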
*/ volatile u_short *bptr = &sc->scb->ie_command_list; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; do { IF_DEQUEUE(&sc->ifp->if_snd, m); if (!m) break; BPF_MTAP(ifp, m); buffer = sc->xmit_cbuffs[sc->xmit_count]; len = 0; for (m0 = m; m && len < IE_BUF_LEN; m = m->m_next) { bcopy(mtod(m, caddr_t), buffer, m->m_len); buffer += m->m_len; len += m->m_len; } m_freem(m0); len = max(len, ETHER_MIN_LEN); sc->xmit_buffs[sc->xmit_count]->ie_xmit_flags = IE_XMIT_LAST|len; sc->xmit_buffs[sc->xmit_count]->ie_xmit_next = 0xffff; sc->xmit_buffs[sc->xmit_count]->ie_xmit_buf = MK_24(sc->iomem, sc->xmit_cbuffs[sc->xmit_count]); sc->xmit_cmds[sc->xmit_count]->com.ie_cmd_cmd = IE_CMD_XMIT; sc->xmit_cmds[sc->xmit_count]->ie_xmit_status = 0; sc->xmit_cmds[sc->xmit_count]->ie_xmit_desc = MK_16(sc->iomem, sc->xmit_buffs[sc->xmit_count]); *bptr = MK_16(sc->iomem, sc->xmit_cmds[sc->xmit_count]); bptr = &sc->xmit_cmds[sc->xmit_count]->com.ie_cmd_link; sc->xmit_count++; } while (sc->xmit_count < sc->ntxbufs); /* * If we queued up anything for transmission, send it. */ if (sc->xmit_count) { sc->xmit_cmds[sc->xmit_count - 1]->com.ie_cmd_cmd |= IE_CMD_LAST | IE_CMD_INTR; /* * By passing the command pointer as a null, we tell * command_and_wait() to pretend that this isn't an action * command. I wish I understood what was happening here. */ command_and_wait(sc, IE_CU_START, 0, 0); ifp->if_drv_flags |= IFF_DRV_OACTIVE; } return; } /* * Check to see if there's an 82586 out there. */ int check_ie_present(struct ie_softc *sc) { volatile struct ie_sys_conf_ptr *scp; volatile struct ie_int_sys_conf_ptr *iscp; volatile struct ie_sys_ctl_block *scb; u_long realbase; realbase = (uintptr_t) sc->iomembot + sc->iosize - (1 << 24); scp = (volatile struct ie_sys_conf_ptr *) (uintptr_t) (realbase + IE_SCP_ADDR); bzero((volatile char *) scp, sizeof *scp); /* * First we put the ISCP at the bottom of memory; this tests to make * sure that our idea of the size of memory is the same as the * controller's. This is NOT where the ISCP will be in normal * operation. */ iscp = (volatile struct ie_int_sys_conf_ptr *) sc->iomembot; bzero((volatile char *)iscp, sizeof *iscp); scb = (volatile struct ie_sys_ctl_block *) sc->iomembot; bzero((volatile char *)scb, sizeof *scb); scp->ie_bus_use = sc->bus_use; /* 8-bit or 16-bit */ scp->ie_iscp_ptr = (caddr_t) (uintptr_t) ((volatile char *) iscp - (volatile char *) (uintptr_t) realbase); iscp->ie_busy = 1; iscp->ie_scb_offset = MK_16(realbase, scb) + 256; (*sc->ie_reset_586) (sc); (*sc->ie_chan_attn) (sc); DELAY(100); /* wait a while... */ if (iscp->ie_busy) { return (0); } /* * Now relocate the ISCP to its real home, and reset the controller * again. */ iscp = (void *) Align((caddr_t) (uintptr_t) (realbase + IE_SCP_ADDR - sizeof(struct ie_int_sys_conf_ptr))); bzero((volatile char *) iscp, sizeof *iscp); /* ignore cast-qual */ scp->ie_iscp_ptr = (caddr_t) (uintptr_t) ((volatile char *) iscp - (volatile char *) (uintptr_t) realbase); iscp->ie_busy = 1; iscp->ie_scb_offset = MK_16(realbase, scb); (*sc->ie_reset_586) (sc); (*sc->ie_chan_attn) (sc); DELAY(100); if (iscp->ie_busy) { return (0); } sc->iomem = (caddr_t) (uintptr_t) realbase; sc->iscp = iscp; sc->scb = scb; /* * Acknowledge any interrupts we may have caused... 
*/ ie_ack(sc, IE_ST_WHENCE); return (1); } void el_reset_586(struct ie_softc *sc) { outb(PORT(sc) + IE507_CTRL, EL_CTRL_RESET); DELAY(100); outb(PORT(sc) + IE507_CTRL, EL_CTRL_NORMAL); DELAY(100); } void sl_reset_586(struct ie_softc *sc) { outb(PORT(sc) + IEATT_RESET, 0); } void ee16_reset_586(struct ie_softc *sc) { outb(PORT(sc) + IEE16_ECTRL, IEE16_RESET_586); DELAY(100); outb(PORT(sc) + IEE16_ECTRL, 0); DELAY(100); } void el_chan_attn(struct ie_softc *sc) { outb(PORT(sc) + IE507_ATTN, 1); } void sl_chan_attn(struct ie_softc *sc) { outb(PORT(sc) + IEATT_ATTN, 0); } void ee16_chan_attn(struct ie_softc *sc) { outb(PORT(sc) + IEE16_ATTN, 0); } static __inline void ee16_interrupt_enable(struct ie_softc *sc) { DELAY(100); outb(sc->port + IEE16_IRQ, sc->irq_encoded | IEE16_IRQ_ENABLE); DELAY(100); } void sl_read_ether(struct ie_softc *sc, unsigned char *addr) { int i; for (i = 0; i < 6; i++) addr[i] = inb(PORT(sc) + i); } static void iereset(struct ie_softc *sc) { struct ifnet *ifp = sc->ifp; if_printf(ifp, "reset\n"); ie_stop(sc); /* * Stop i82586 dead in its tracks. */ if (command_and_wait(sc, IE_RU_ABORT | IE_CU_ABORT, 0, 0)) if_printf(ifp, "abort commands timed out\n"); if (command_and_wait(sc, IE_RU_DISABLE | IE_CU_STOP, 0, 0)) if_printf(ifp, "disable commands timed out\n"); #ifdef notdef if (!check_ie_present(sc)) panic("ie disappeared!"); #endif if (ifp->if_flags & IFF_UP) ieinit_locked(sc); return; } /* * Send a command to the controller and wait for it to either * complete or be accepted, depending on the command. If the * command pointer is null, then pretend that the command is * not an action command. If the command pointer is not null, * and the command is an action command, wait for * ((volatile struct ie_cmd_common *)pcmd)->ie_cmd_status & MASK * to become true. */ static int command_and_wait(struct ie_softc *sc, int cmd, volatile void *pcmd, int mask) { volatile struct ie_cmd_common *cc = pcmd; int i; sc->scb->ie_command = (u_short) cmd; if (IE_ACTION_COMMAND(cmd) && pcmd) { (*sc->ie_chan_attn) (sc); /* * Now spin-lock waiting for status. This is not a very * nice thing to do, but I haven't figured out how, or * indeed if, we can put the process waiting for action to * sleep. (We may be getting called through some other * timeout running in the kernel.) * * According to the packet driver, the minimum timeout * should be .369 seconds, which we round up to .37. */ for (i = 0; i < 370; i++) { if (cc->ie_cmd_status & mask) return (0); DELAY(1000); } return (1); } else { /* * Otherwise, just wait for the command to be accepted. */ (*sc->ie_chan_attn) (sc); while (sc->scb->ie_command); /* spin lock */ return (0); } } /* * Run the time-domain reflectometer... 
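 * The result word decodes as follows: IE_TDR_SUCCESS means the wire
 * looks good; otherwise IE_TDR_XCVR, IE_TDR_OPEN or IE_TDR_SHORT names
 * the fault, and the low-order IE_TDR_TIME bits give its distance in
 * transmit-clock ticks, which run_tdr() below simply reports.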
*/ static void run_tdr(struct ie_softc *sc, volatile struct ie_tdr_cmd *cmd) { int result; cmd->com.ie_cmd_status = 0; cmd->com.ie_cmd_cmd = IE_CMD_TDR | IE_CMD_LAST; cmd->com.ie_cmd_link = 0xffff; cmd->ie_tdr_time = 0; sc->scb->ie_command_list = MK_16(MEM(sc), cmd); cmd->ie_tdr_time = 0; if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL)) result = 0x2000; else result = cmd->ie_tdr_time; ie_ack(sc, IE_ST_WHENCE); if (result & IE_TDR_SUCCESS) return; if (result & IE_TDR_XCVR) { if_printf(sc->ifp, "transceiver problem\n"); } else if (result & IE_TDR_OPEN) { if_printf(sc->ifp, "TDR detected an open %d clocks away\n", result & IE_TDR_TIME); } else if (result & IE_TDR_SHORT) { if_printf(sc->ifp, "TDR detected a short %d clocks away\n", result & IE_TDR_TIME); } else { if_printf(sc->ifp, "TDR returned unknown status %x\n", result); } } static void start_receiver(struct ie_softc *sc) { sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]); command_and_wait(sc, IE_RU_START, 0, 0); ie_ack(sc, IE_ST_WHENCE); } /* * Here is a helper routine for iernr() and ieinit(). This sets up * the RFA. */ static v_caddr_t setup_rfa(struct ie_softc *sc, v_caddr_t ptr) { volatile struct ie_recv_frame_desc *rfd = (volatile void *)ptr; volatile struct ie_recv_buf_desc *rbd; int i; /* First lay them out */ for (i = 0; i < sc->nframes; i++) { sc->rframes[i] = rfd; bzero((volatile char *) rfd, sizeof *rfd); /* ignore cast-qual */ rfd++; } ptr = Alignvol(rfd); /* ignore cast-qual */ /* Now link them together */ for (i = 0; i < sc->nframes; i++) { sc->rframes[i]->ie_fd_next = MK_16(MEM(sc), sc->rframes[(i + 1) % sc->nframes]); } /* Finally, set the EOL bit on the last one. */ sc->rframes[sc->nframes - 1]->ie_fd_last |= IE_FD_LAST; /* * Now lay out some buffers for the incoming frames. Note that we * set aside a bit of slop in each buffer, to make sure that we have * enough space to hold a single frame in every buffer. */ rbd = (volatile void *) ptr; for (i = 0; i < sc->nrxbufs; i++) { sc->rbuffs[i] = rbd; bzero((volatile char *)rbd, sizeof *rbd); ptr = Alignvol(ptr + sizeof *rbd); rbd->ie_rbd_length = IE_RBUF_SIZE; rbd->ie_rbd_buffer = MK_24(MEM(sc), ptr); sc->cbuffs[i] = (volatile void *) ptr; ptr += IE_RBUF_SIZE; rbd = (volatile void *) ptr; } /* Now link them together */ for (i = 0; i < sc->nrxbufs; i++) { sc->rbuffs[i]->ie_rbd_next = MK_16(MEM(sc), sc->rbuffs[(i + 1) % sc->nrxbufs]); } /* Tag EOF on the last one */ sc->rbuffs[sc->nrxbufs - 1]->ie_rbd_length |= IE_RBD_LAST; /* * We use the head and tail pointers on receive to keep track of the * order in which RFDs and RBDs are used. */ sc->rfhead = 0; sc->rftail = sc->nframes - 1; sc->rbhead = 0; sc->rbtail = sc->nrxbufs - 1; sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]); sc->rframes[0]->ie_fd_buf_desc = MK_16(MEM(sc), sc->rbuffs[0]); ptr = Alignvol(ptr); return (ptr); } /* * Run the multicast setup command. */ static int mc_setup(struct ie_softc *sc) { volatile struct ie_mcast_cmd *cmd = (volatile void *)sc->xmit_cbuffs[0]; cmd->com.ie_cmd_status = 0; cmd->com.ie_cmd_cmd = IE_CMD_MCAST | IE_CMD_LAST; cmd->com.ie_cmd_link = 0xffff; /* ignore cast-qual */ bcopy((v_caddr_t) sc->mcast_addrs, (v_caddr_t) cmd->ie_mcast_addrs, sc->mcast_count * sizeof *sc->mcast_addrs); cmd->ie_mcast_bytes = sc->mcast_count * 6; /* grrr... 
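 * (The 6 above is really ETHER_ADDR_LEN: the chip wants a byte count,
 * not an address count.  A clearer spelling of the same assignment:
 *
 *	cmd->ie_mcast_bytes = sc->mcast_count * ETHER_ADDR_LEN;
 *
 * identical value, just self-documenting.)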
*/ sc->scb->ie_command_list = MK_16(MEM(sc), cmd); if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) || !(cmd->com.ie_cmd_status & IE_STAT_OK)) { if_printf(sc->ifp, "multicast address setup command failed\n"); return (0); } return (1); } /* * This routine takes the environment generated by check_ie_present() * and adds to it all the other structures we need to operate the adapter. * This includes executing the CONFIGURE, IA-SETUP, and MC-SETUP commands, * starting the receiver unit, and clearing interrupts. */ static void ieinit(xsc) void *xsc; { struct ie_softc *sc = xsc; IE_LOCK(sc); ieinit_locked(sc); IE_UNLOCK(sc); } static void ieinit_locked(struct ie_softc *sc) { struct ifnet *ifp = sc->ifp; volatile struct ie_sys_ctl_block *scb = sc->scb; caddr_t ptr; int i; ptr = Alignvol((volatile char *) scb + sizeof *scb); /* * Send the configure command first. */ { volatile struct ie_config_cmd *cmd = (volatile void *) ptr; ie_setup_config(cmd, sc->promisc, sc->hard_type == IE_STARLAN10); cmd->com.ie_cmd_status = 0; cmd->com.ie_cmd_cmd = IE_CMD_CONFIG | IE_CMD_LAST; cmd->com.ie_cmd_link = 0xffff; scb->ie_command_list = MK_16(MEM(sc), cmd); if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) || !(cmd->com.ie_cmd_status & IE_STAT_OK)) { if_printf(ifp, "configure command failed\n"); return; } } /* * Now send the Individual Address Setup command. */ { volatile struct ie_iasetup_cmd *cmd = (volatile void *) ptr; cmd->com.ie_cmd_status = 0; cmd->com.ie_cmd_cmd = IE_CMD_IASETUP | IE_CMD_LAST; cmd->com.ie_cmd_link = 0xffff; bcopy((volatile char *)IF_LLADDR(ifp), (volatile char *)&cmd->ie_address, sizeof cmd->ie_address); scb->ie_command_list = MK_16(MEM(sc), cmd); if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) || !(cmd->com.ie_cmd_status & IE_STAT_OK)) { if_printf(ifp, "individual address " "setup command failed\n"); return; } } /* * Now run the time-domain reflectometer. */ run_tdr(sc, (volatile void *) ptr); /* * Acknowledge any interrupts we have generated thus far. */ ie_ack(sc, IE_ST_WHENCE); /* * Set up the RFA. */ ptr = setup_rfa(sc, ptr); /* * Finally, the transmit command and buffer are the last little bit * of work. */ /* transmit command buffers */ for (i = 0; i < sc->ntxbufs; i++) { sc->xmit_cmds[i] = (volatile void *) ptr; ptr += sizeof *sc->xmit_cmds[i]; ptr = Alignvol(ptr); sc->xmit_buffs[i] = (volatile void *)ptr; ptr += sizeof *sc->xmit_buffs[i]; ptr = Alignvol(ptr); } /* transmit buffers */ for (i = 0; i < sc->ntxbufs - 1; i++) { sc->xmit_cbuffs[i] = (volatile void *)ptr; ptr += IE_BUF_LEN; ptr = Alignvol(ptr); } sc->xmit_cbuffs[sc->ntxbufs - 1] = (volatile void *) ptr; for (i = 1; i < sc->ntxbufs; i++) { bzero((v_caddr_t) sc->xmit_cmds[i], sizeof *sc->xmit_cmds[i]); bzero((v_caddr_t) sc->xmit_buffs[i], sizeof *sc->xmit_buffs[i]); } /* * This must be coordinated with iestart() and ietint(). */ sc->xmit_cmds[0]->ie_xmit_status = IE_STAT_COMPL; /* take the ee16 out of loopback */ if (sc->hard_type == IE_EE16) { u_int8_t bart_config; bart_config = inb(PORT(sc) + IEE16_CONFIG); bart_config &= ~IEE16_BART_LOOPBACK; /* inb doesn't get bit! 
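 * (We want loopback off here; since inb() does not return this bit's
 * real state, it is cleared unconditionally in the soft copy before
 * the register is written back.)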
*/ bart_config |= IEE16_BART_MCS16_TEST; outb(PORT(sc) + IEE16_CONFIG, bart_config); ee16_interrupt_enable(sc); ee16_chan_attn(sc); } ifp->if_drv_flags |= IFF_DRV_RUNNING; /* tell higher levels * we're here */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; start_receiver(sc); return; } static void ie_stop(struct ie_softc *sc) { struct ifnet *ifp = sc->ifp; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); command_and_wait(sc, IE_RU_DISABLE, 0, 0); } static int ieioctl(struct ifnet *ifp, u_long command, caddr_t data) { int error = 0; struct ie_softc *sc = ifp->if_softc; switch (command) { case SIOCSIFFLAGS: /* * Note that this device doesn't have an "all multicast" * mode, so we must turn on promiscuous mode and do the * filtering manually. */ IE_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { ie_stop(sc); } else if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { sc->promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI); ieinit_locked(sc); } else if (sc->promisc ^ (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))) { sc->promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI); ieinit_locked(sc); } IE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Update multicast listeners */ /* reset multicast filtering */ IE_LOCK(sc); ie_mc_reset(sc); IE_UNLOCK(sc); error = 0; break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void ie_mc_reset(struct ie_softc *sc) { struct ifmultiaddr *ifma; /* * Step through the list of addresses. */ sc->mcast_count = 0; if_maddr_rlock(sc->ifp); TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* XXX - this is broken... */ if (sc->mcast_count >= MAXMCAST) { sc->ifp->if_flags |= IFF_ALLMULTI; if (sc->ifp->if_flags & IFF_UP) ieinit_locked(sc); goto setflag; } bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), &(sc->mcast_addrs[sc->mcast_count]), 6); sc->mcast_count++; } if_maddr_runlock(sc->ifp); setflag: sc->want_mcsetup = 1; } #ifdef DEBUG static void print_rbd(volatile struct ie_recv_buf_desc * rbd) { printf("RBD at %p:\n" "actual %04x, next %04x, buffer %p\n" "length %04x, mbz %04x\n", (volatile void *) rbd, rbd->ie_rbd_actual, rbd->ie_rbd_next, (void *) rbd->ie_rbd_buffer, rbd->ie_rbd_length, rbd->mbz); } #endif /* DEBUG */ int ie_alloc_resources (device_t dev) { struct ie_softc * sc; int error; error = 0; sc = device_get_softc(dev); sc->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->io_rid, RF_ACTIVE); if (!sc->io_res) { device_printf(dev, "No I/O space?!\n"); error = ENOMEM; goto bad; } sc->io_bt = rman_get_bustag(sc->io_res); sc->io_bh = rman_get_bushandle(sc->io_res); sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (!sc->mem_res) { device_printf(dev, "No Memory!\n"); error = ENOMEM; goto bad; } sc->mem_bt = rman_get_bustag(sc->mem_res); sc->mem_bh = rman_get_bushandle(sc->mem_res); sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq_res) { device_printf(dev, "No IRQ!\n"); error = ENOMEM; goto bad; } sc->port = rman_get_start(sc->io_res); /* XXX hack */ sc->iomembot = rman_get_virtual(sc->mem_res); sc->iosize = rman_get_size(sc->mem_res); return (0); bad: return (error); } void ie_release_resources (device_t dev) { struct ie_softc * sc; sc = device_get_softc(dev); if (sc->irq_ih) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->rframes) free(sc->rframes, M_DEVBUF); if (sc->io_res) 
bus_release_resource(dev, SYS_RES_IOPORT, sc->io_rid, sc->io_res); if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); if (sc->mem_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); if (sc->ifp) if_free(sc->ifp); return; } int ie_detach (device_t dev) { struct ie_softc * sc; struct ifnet * ifp; sc = device_get_softc(dev); ifp = sc->ifp; IE_LOCK(sc); if (sc->hard_type == IE_EE16) ee16_shutdown(sc); ie_stop(sc); IE_UNLOCK(sc); ether_ifdetach(ifp); ie_release_resources(dev); mtx_destroy(&sc->lock); return (0); } Index: head/sys/dev/le/lance.c =================================================================== --- head/sys/dev/le/lance.c (revision 276749) +++ head/sys/dev/le/lance.c (revision 276750) @@ -1,817 +1,816 @@ /* $NetBSD: lance.c,v 1.34 2005/12/24 20:27:30 perry Exp $ */ /*- * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace * Simulation Facility, NASA Ames Research Center. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Ralph Campbell and Rick Macklem. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)if_le.c 8.2 (Berkeley) 11/16/93 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include devclass_t le_devclass; static void lance_start(struct ifnet *); static void lance_stop(struct lance_softc *); static void lance_init(void *); static void lance_watchdog(void *s); static int lance_mediachange(struct ifnet *); static void lance_mediastatus(struct ifnet *, struct ifmediareq *); static int lance_ioctl(struct ifnet *, u_long, caddr_t); int lance_config(struct lance_softc *sc, const char* name, int unit) { struct ifnet *ifp; int i, nbuf; if (LE_LOCK_INITIALIZED(sc) == 0) return (ENXIO); ifp = sc->sc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) return (ENOSPC); callout_init_mtx(&sc->sc_wdog_ch, &sc->sc_mtx, 0); /* Initialize ifnet structure. */ ifp->if_softc = sc; if_initname(ifp, name, unit); ifp->if_start = lance_start; ifp->if_ioctl = lance_ioctl; ifp->if_init = lance_init; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; #ifdef LANCE_REVC_BUG ifp->if_flags &= ~IFF_MULTICAST; #endif ifp->if_baudrate = IF_Mbps(10); IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); /* Initialize ifmedia structures. */ ifmedia_init(&sc->sc_media, 0, lance_mediachange, lance_mediastatus); if (sc->sc_supmedia != NULL) { for (i = 0; i < sc->sc_nsupmedia; i++) ifmedia_add(&sc->sc_media, sc->sc_supmedia[i], 0, NULL); ifmedia_set(&sc->sc_media, sc->sc_defaultmedia); } else { ifmedia_add(&sc->sc_media, IFM_MAKEWORD(IFM_ETHER, IFM_MANUAL, 0, 0), 0, NULL); ifmedia_set(&sc->sc_media, IFM_MAKEWORD(IFM_ETHER, IFM_MANUAL, 0, 0)); } switch (sc->sc_memsize) { case 8192: sc->sc_nrbuf = 4; sc->sc_ntbuf = 1; break; case 16384: sc->sc_nrbuf = 8; sc->sc_ntbuf = 2; break; case 32768: sc->sc_nrbuf = 16; sc->sc_ntbuf = 4; break; case 65536: sc->sc_nrbuf = 32; sc->sc_ntbuf = 8; break; case 131072: sc->sc_nrbuf = 64; sc->sc_ntbuf = 16; break; case 262144: sc->sc_nrbuf = 128; sc->sc_ntbuf = 32; break; default: /* weird memory size; cope with it */ nbuf = sc->sc_memsize / LEBLEN; sc->sc_ntbuf = nbuf / 5; sc->sc_nrbuf = nbuf - sc->sc_ntbuf; } if_printf(ifp, "%d receive buffers, %d transmit buffers\n", sc->sc_nrbuf, sc->sc_ntbuf); /* Make sure the chip is stopped. */ LE_LOCK(sc); lance_stop(sc); LE_UNLOCK(sc); return (0); } void lance_attach(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; /* Attach the interface. */ ether_ifattach(ifp, sc->sc_enaddr); /* Claim 802.1q capability. 
*/ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_MTU; } void lance_detach(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; LE_LOCK(sc); lance_stop(sc); LE_UNLOCK(sc); callout_drain(&sc->sc_wdog_ch); ether_ifdetach(ifp); if_free(ifp); } void lance_suspend(struct lance_softc *sc) { LE_LOCK(sc); lance_stop(sc); LE_UNLOCK(sc); } void lance_resume(struct lance_softc *sc) { LE_LOCK(sc); if (sc->sc_ifp->if_flags & IFF_UP) lance_init_locked(sc); LE_UNLOCK(sc); } static void lance_start(struct ifnet *ifp) { struct lance_softc *sc = ifp->if_softc; LE_LOCK(sc); (*sc->sc_start_locked)(sc); LE_UNLOCK(sc); } static void lance_stop(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; LE_LOCK_ASSERT(sc, MA_OWNED); /* * Mark the interface down and cancel the watchdog timer. */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->sc_wdog_ch); sc->sc_wdog_timer = 0; (*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_STOP); } static void lance_init(void *xsc) { struct lance_softc *sc = (struct lance_softc *)xsc; LE_LOCK(sc); lance_init_locked(sc); LE_UNLOCK(sc); } /* * Initialization of interface; set up initialization block * and transmit/receive descriptor rings. */ void lance_init_locked(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; u_long a; int timo; LE_LOCK_ASSERT(sc, MA_OWNED); (*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_STOP); DELAY(100); /* Newer LANCE chips have a reset register. */ if (sc->sc_hwreset) (*sc->sc_hwreset)(sc); /* Set the correct byte swapping mode, etc. */ (*sc->sc_wrcsr)(sc, LE_CSR3, sc->sc_conf3); /* Set the current media. This may require the chip to be stopped. */ if (sc->sc_mediachange) (void)(*sc->sc_mediachange)(sc); /* * Update our private copy of the Ethernet address. * We NEED the copy so we can ensure its alignment! */ memcpy(sc->sc_enaddr, IF_LLADDR(ifp), ETHER_ADDR_LEN); /* Set up LANCE init block. */ (*sc->sc_meminit)(sc); /* Give LANCE the physical address of its init block. */ a = sc->sc_addr + LE_INITADDR(sc); (*sc->sc_wrcsr)(sc, LE_CSR1, a & 0xffff); (*sc->sc_wrcsr)(sc, LE_CSR2, a >> 16); /* Try to initialize the LANCE. */ DELAY(100); (*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_INIT); /* Wait for initialization to finish. */ for (timo = 100000; timo; timo--) if ((*sc->sc_rdcsr)(sc, LE_CSR0) & LE_C0_IDON) break; if ((*sc->sc_rdcsr)(sc, LE_CSR0) & LE_C0_IDON) { /* Start the LANCE. */ (*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_INEA | LE_C0_STRT); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->sc_wdog_timer = 0; callout_reset(&sc->sc_wdog_ch, hz, lance_watchdog, sc); (*sc->sc_start_locked)(sc); } else if_printf(ifp, "controller failed to initialize\n"); if (sc->sc_hwinit) (*sc->sc_hwinit)(sc); } /* * Routine to copy from mbuf chain to transmit buffer in * network buffer memory. */ int lance_put(struct lance_softc *sc, int boff, struct mbuf *m) { struct mbuf *n; int len, tlen = 0; LE_LOCK_ASSERT(sc, MA_OWNED); for (; m; m = n) { len = m->m_len; if (len == 0) { n = m_free(m); m = NULL; continue; } (*sc->sc_copytobuf)(sc, mtod(m, caddr_t), boff, len); boff += len; tlen += len; n = m_free(m); m = NULL; } if (tlen < LEMINSIZE) { (*sc->sc_zerobuf)(sc, boff, LEMINSIZE - tlen); tlen = LEMINSIZE; } return (tlen); } /* * Pull data off an interface. * Len is length of data, with local net header stripped. * We copy the data into mbufs. When full cluster sized units are present * we copy into clusters. 
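 * Note the MCLGET() hunk in the body below: as of this change MCLGET()
 * returns an int instead of requiring the caller to re-test M_EXT, so
 * the cluster idiom becomes (a minimal sketch against mbuf(9); error
 * handling is whatever the caller needs):
 *
 *	if (totlen >= MINCLSIZE) {
 *		if (MCLGET(m, M_NOWAIT) == 0)
 *			goto bad;	(no cluster; free chain, drop)
 *		len = MCLBYTES;
 *	}
 *
 * The old form was "MCLGET(m, M_NOWAIT); if ((m->m_flags & M_EXT) == 0)".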
*/ struct mbuf * lance_get(struct lance_softc *sc, int boff, int totlen) { struct ifnet *ifp = sc->sc_ifp; struct mbuf *m, *m0, *newm; caddr_t newdata; int len; if (totlen <= ETHER_HDR_LEN || totlen > LEBLEN - ETHER_CRC_LEN) { #ifdef LEDEBUG if_printf(ifp, "invalid packet size %d; dropping\n", totlen); #endif return (NULL); } MGETHDR(m0, M_NOWAIT, MT_DATA); if (m0 == NULL) return (NULL); m0->m_pkthdr.rcvif = ifp; m0->m_pkthdr.len = totlen; len = MHLEN; m = m0; while (totlen > 0) { if (totlen >= MINCLSIZE) { - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) + if (!(MCLGET(m, M_NOWAIT))) goto bad; len = MCLBYTES; } if (m == m0) { newdata = (caddr_t) ALIGN(m->m_data + ETHER_HDR_LEN) - ETHER_HDR_LEN; len -= newdata - m->m_data; m->m_data = newdata; } m->m_len = len = min(totlen, len); (*sc->sc_copyfrombuf)(sc, mtod(m, caddr_t), boff, len); boff += len; totlen -= len; if (totlen > 0) { MGET(newm, M_NOWAIT, MT_DATA); if (newm == 0) goto bad; len = MLEN; m = m->m_next = newm; } } return (m0); bad: m_freem(m0); return (NULL); } static void lance_watchdog(void *xsc) { struct lance_softc *sc = (struct lance_softc *)xsc; struct ifnet *ifp = sc->sc_ifp; LE_LOCK_ASSERT(sc, MA_OWNED); if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) { callout_reset(&sc->sc_wdog_ch, hz, lance_watchdog, sc); return; } if_printf(ifp, "device timeout\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); lance_init_locked(sc); } static int lance_mediachange(struct ifnet *ifp) { struct lance_softc *sc = ifp->if_softc; if (sc->sc_mediachange) { /* * For setting the port in LE_CSR15 the PCnet chips must * be powered down or stopped and unlike documented may * not take effect without an initialization. So don't * invoke (*sc_mediachange) directly here but go through * lance_init_locked(). */ LE_LOCK(sc); lance_stop(sc); lance_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) (*sc->sc_start_locked)(sc); LE_UNLOCK(sc); } return (0); } static void lance_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct lance_softc *sc = ifp->if_softc; LE_LOCK(sc); if (!(ifp->if_flags & IFF_UP)) { LE_UNLOCK(sc); return; } ifmr->ifm_status = IFM_AVALID; if (sc->sc_flags & LE_CARRIER) ifmr->ifm_status |= IFM_ACTIVE; if (sc->sc_mediastatus) (*sc->sc_mediastatus)(sc, ifmr); LE_UNLOCK(sc); } /* * Process an ioctl request. */ static int lance_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct lance_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (cmd) { case SIOCSIFFLAGS: LE_LOCK(sc); if (ifp->if_flags & IFF_PROMISC) { if (!(sc->sc_flags & LE_PROMISC)) { sc->sc_flags |= LE_PROMISC; lance_init_locked(sc); } } else if (sc->sc_flags & LE_PROMISC) { sc->sc_flags &= ~LE_PROMISC; lance_init_locked(sc); } if ((ifp->if_flags & IFF_ALLMULTI) && !(sc->sc_flags & LE_ALLMULTI)) { sc->sc_flags |= LE_ALLMULTI; lance_init_locked(sc); } else if (!(ifp->if_flags & IFF_ALLMULTI) && (sc->sc_flags & LE_ALLMULTI)) { sc->sc_flags &= ~LE_ALLMULTI; lance_init_locked(sc); } if (!(ifp->if_flags & IFF_UP) && ifp->if_drv_flags & IFF_DRV_RUNNING) { /* * If interface is marked down and it is running, then * stop it. */ lance_stop(sc); } else if (ifp->if_flags & IFF_UP && !(ifp->if_drv_flags & IFF_DRV_RUNNING)) { /* * If interface is marked up and it is stopped, then * start it. 
*/ lance_init_locked(sc); } #ifdef LEDEBUG if (ifp->if_flags & IFF_DEBUG) sc->sc_flags |= LE_DEBUG; else sc->sc_flags &= ~LE_DEBUG; #endif LE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware filter * accordingly. */ LE_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) lance_init_locked(sc); LE_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } /* * Set up the logical address filter. */ void lance_setladrf(struct lance_softc *sc, uint16_t *af) { struct ifnet *ifp = sc->sc_ifp; struct ifmultiaddr *ifma; uint32_t crc; /* * Set up multicast address filter by passing all multicast addresses * through a crc generator, and then using the high order 6 bits as an * index into the 64 bit logical address filter. The high order bit * selects the word, while the rest of the bits select the bit within * the word. */ if (ifp->if_flags & IFF_PROMISC || sc->sc_flags & LE_ALLMULTI) { af[0] = af[1] = af[2] = af[3] = 0xffff; return; } af[0] = af[1] = af[2] = af[3] = 0x0000; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN); /* Just want the 6 most significant bits. */ crc >>= 26; /* Set the corresponding bit in the filter. */ af[crc >> 4] |= LE_HTOLE16(1 << (crc & 0xf)); } if_maddr_runlock(ifp); } /* * Routines for accessing the transmit and receive buffers. * The various CPU and adapter configurations supported by this * driver require three different access methods for buffers * and descriptors: * (1) contig (contiguous data; no padding), * (2) gap2 (two bytes of data followed by two bytes of padding), * (3) gap16 (16 bytes of data followed by 16 bytes of padding). */ /* * contig: contiguous data with no padding. * * Buffers may have any alignment. */ void lance_copytobuf_contig(struct lance_softc *sc, void *from, int boff, int len) { volatile caddr_t buf = sc->sc_mem; /* * Just call memcpy() to do the work. */ memcpy(buf + boff, from, len); } void lance_copyfrombuf_contig(struct lance_softc *sc, void *to, int boff, int len) { volatile caddr_t buf = sc->sc_mem; /* * Just call memcpy() to do the work. */ memcpy(to, buf + boff, len); } void lance_zerobuf_contig(struct lance_softc *sc, int boff, int len) { volatile caddr_t buf = sc->sc_mem; /* * Just let memset() do the work */ memset(buf + boff, 0, len); } #if 0 /* * Examples only; duplicate these and tweak (if necessary) in * machine-specific front-ends. */ /* * gap2: two bytes of data followed by two bytes of pad. * * Buffers must be 4-byte aligned. The code doesn't worry about * doing an extra byte. */ static void lance_copytobuf_gap2(struct lance_softc *sc, void *fromv, int boff, int len) { volatile caddr_t buf = sc->sc_mem; caddr_t from = fromv; volatile uint16_t *bptr; if (boff & 0x1) { /* Handle unaligned first byte. 
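 * The gap2 layout, for reference (d = data byte, . = pad byte):
 *
 *	|d0 d1|. .|d2 d3|. .|d4 d5| ...
 *
 * so logical data bytes 2k and 2k+1 share 16-bit word 2k and the odd
 * words are padding; an odd boff therefore starts in the high byte of
 * a word, which is the case patched up here.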
*/ bptr = ((volatile uint16_t *)buf) + (boff - 1); *bptr = (*from++ << 8) | (*bptr & 0xff); bptr += 2; len--; } else bptr = ((volatile uint16_t *)buf) + boff; while (len > 1) { *bptr = (from[1] << 8) | (from[0] & 0xff); bptr += 2; from += 2; len -= 2; } if (len == 1) *bptr = (uint16_t)*from; } static void lance_copyfrombuf_gap2(struct lance_softc *sc, void *tov, int boff, int len) { volatile caddr_t buf = sc->sc_mem; caddr_t to = tov; volatile uint16_t *bptr; uint16_t tmp; if (boff & 0x1) { /* Handle unaligned first byte. */ bptr = ((volatile uint16_t *)buf) + (boff - 1); *to++ = (*bptr >> 8) & 0xff; bptr += 2; len--; } else bptr = ((volatile uint16_t *)buf) + boff; while (len > 1) { tmp = *bptr; *to++ = tmp & 0xff; *to++ = (tmp >> 8) & 0xff; bptr += 2; len -= 2; } if (len == 1) *to = *bptr & 0xff; } static void lance_zerobuf_gap2(struct lance_softc *sc, int boff, int len) { volatile caddr_t buf = sc->sc_mem; volatile uint16_t *bptr; if ((unsigned)boff & 0x1) { bptr = ((volatile uint16_t *)buf) + (boff - 1); *bptr &= 0xff; bptr += 2; len--; } else bptr = ((volatile uint16_t *)buf) + boff; while (len > 0) { *bptr = 0; bptr += 2; len -= 2; } } /* * gap16: 16 bytes of data followed by 16 bytes of pad. * * Buffers must be 32-byte aligned. */ static void lance_copytobuf_gap16(struct lance_softc *sc, void *fromv, int boff, int len) { volatile caddr_t buf = sc->sc_mem; caddr_t bptr, from = fromv; int xfer; bptr = buf + ((boff << 1) & ~0x1f); boff &= 0xf; xfer = min(len, 16 - boff); while (len > 0) { memcpy(bptr + boff, from, xfer); from += xfer; bptr += 32; boff = 0; len -= xfer; xfer = min(len, 16); } } static void lance_copyfrombuf_gap16(struct lance_softc *sc, void *tov, int boff, int len) { volatile caddr_t buf = sc->sc_mem; caddr_t bptr, to = tov; int xfer; bptr = buf + ((boff << 1) & ~0x1f); boff &= 0xf; xfer = min(len, 16 - boff); while (len > 0) { memcpy(to, bptr + boff, xfer); to += xfer; bptr += 32; boff = 0; len -= xfer; xfer = min(len, 16); } } static void lance_zerobuf_gap16(struct lance_softc *sc, int boff, int len) { volatile caddr_t buf = sc->sc_mem; caddr_t bptr; int xfer; bptr = buf + ((boff << 1) & ~0x1f); boff &= 0xf; xfer = min(len, 16 - boff); while (len > 0) { memset(bptr + boff, 0, xfer); bptr += 32; boff = 0; len -= xfer; xfer = min(len, 16); } } #endif /* Example only */ Index: head/sys/dev/lmc/if_lmc.c =================================================================== --- head/sys/dev/lmc/if_lmc.c (revision 276749) +++ head/sys/dev/lmc/if_lmc.c (revision 276750) @@ -1,4589 +1,4588 @@ /* * $FreeBSD$ * * Copyright (c) 2002-2004 David Boggs. * All rights reserved. * * BSD License: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * GNU General Public License: * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Description: * * This is an open-source Unix device driver for PCI-bus WAN interface cards. * It sends and receives packets in HDLC frames over synchronous links. * A generic PC plus Unix plus some SBE/LMC cards makes an OPEN router. * This driver works with FreeBSD, NetBSD, OpenBSD, BSD/OS and Linux. * It has been tested on i386 (32-bit little-end), Sparc (64-bit big-end), * and Alpha (64-bit little-end) architectures. * * History and Authors: * * Ron Crane had the neat idea to use a Fast Ethernet chip as a PCI * interface and add an Ethernet-to-HDLC gate array to make a WAN card. * David Boggs designed the Ethernet-to-HDLC gate arrays and PC cards. * We did this at our company, LAN Media Corporation (LMC). * SBE Corp acquired LMC and continues to make the cards. * * Since the cards use Tulip Ethernet chips, we started with Matt Thomas' * ubiquitous "de" driver. Michael Graff stripped out the Ethernet stuff * and added HSSI stuff. Basil Gunn ported it to Solaris (lost) and * Rob Braun ported it to Linux. Andrew Stanley-Jones added support * for three more cards and wrote the first version of lmcconfig. * During 2002-5 David Boggs rewrote it and now feels responsible for it. * * Responsible Individual: * * Send bug reports and improvements to . */ # include /* OS version */ # define IFNET 1 # include "opt_inet.h" /* INET */ # include "opt_inet6.h" /* INET6 */ # include "opt_netgraph.h" /* NETGRAPH */ # ifdef HAVE_KERNEL_OPTION_HEADERS # include "opt_device_polling.h" /* DEVICE_POLLING */ # endif # ifndef INET # define INET 0 # endif # ifndef INET6 # define INET6 0 # endif # ifndef NETGRAPH # define NETGRAPH 0 # endif # define P2P 0 /* not in FreeBSD */ # define NSPPP 1 /* No count devices in FreeBSD 5 */ # include "opt_bpf.h" /* DEV_BPF */ # define NBPFILTER DEV_BPF # define GEN_HDLC 0 /* not in FreeBSD */ # # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # if NETGRAPH # include # include # endif # if (INET || INET6) # include # include # endif # if NSPPP # include # endif # if NBPFILTER # include # endif /* and finally... 
*/ # include /* The SROM is a generic 93C46 serial EEPROM (64 words by 16 bits). */ /* Data is set up before the RISING edge of CLK; CLK is parked low. */ static void shift_srom_bits(softc_t *sc, u_int32_t data, u_int32_t len) { u_int32_t csr = READ_CSR(TLP_SROM_MII); for (; len>0; len--) { /* MSB first */ if (data & (1<<(len-1))) csr |= TLP_SROM_DIN; /* DIN setup */ else csr &= ~TLP_SROM_DIN; /* DIN setup */ WRITE_CSR(TLP_SROM_MII, csr); csr |= TLP_SROM_CLK; /* CLK rising edge */ WRITE_CSR(TLP_SROM_MII, csr); csr &= ~TLP_SROM_CLK; /* CLK falling edge */ WRITE_CSR(TLP_SROM_MII, csr); } } /* Data is sampled on the RISING edge of CLK; CLK is parked low. */ static u_int16_t read_srom(softc_t *sc, u_int8_t addr) { int i; u_int32_t csr; u_int16_t data; /* Enable SROM access. */ csr = (TLP_SROM_SEL | TLP_SROM_RD | TLP_MII_MDOE); WRITE_CSR(TLP_SROM_MII, csr); /* CS rising edge prepares SROM for a new cycle. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ shift_srom_bits(sc, 6, 4); /* issue read cmd */ shift_srom_bits(sc, addr, 6); /* issue address */ for (data=0, i=16; i>=0; i--) /* read ->17<- bits of data */ { /* MSB first */ csr = READ_CSR(TLP_SROM_MII); /* DOUT sampled */ data = (data<<1) | ((csr & TLP_SROM_DOUT) ? 1:0); csr |= TLP_SROM_CLK; /* CLK rising edge */ WRITE_CSR(TLP_SROM_MII, csr); csr &= ~TLP_SROM_CLK; /* CLK falling edge */ WRITE_CSR(TLP_SROM_MII, csr); } /* Disable SROM access. */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); return data; } /* The SROM is formatted by the mfgr and should NOT be written! */ /* But lmcconfig can rewrite it in case it gets overwritten somehow. */ /* IOCTL SYSCALL: can sleep. */ static void write_srom(softc_t *sc, u_int8_t addr, u_int16_t data) { u_int32_t csr; int i; /* Enable SROM access. */ csr = (TLP_SROM_SEL | TLP_SROM_RD | TLP_MII_MDOE); WRITE_CSR(TLP_SROM_MII, csr); /* Issue write-enable command. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ shift_srom_bits(sc, 4, 4); /* issue write enable cmd */ shift_srom_bits(sc, 63, 6); /* issue address */ csr &= ~TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* deassert CS */ /* Issue erase command. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ shift_srom_bits(sc, 7, 4); /* issue erase cmd */ shift_srom_bits(sc, addr, 6); /* issue address */ csr &= ~TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* deassert CS */ /* Issue write command. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ for (i=0; i<10; i++) /* 100 ms max wait */ if ((READ_CSR(TLP_SROM_MII) & TLP_SROM_DOUT)==0) SLEEP(10000); shift_srom_bits(sc, 5, 4); /* issue write cmd */ shift_srom_bits(sc, addr, 6); /* issue address */ shift_srom_bits(sc, data, 16); /* issue data */ csr &= ~TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* deassert CS */ /* Issue write-disable command. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ for (i=0; i<10; i++) /* 100 ms max wait */ if ((READ_CSR(TLP_SROM_MII) & TLP_SROM_DOUT)==0) SLEEP(10000); shift_srom_bits(sc, 4, 4); /* issue write disable cmd */ shift_srom_bits(sc, 0, 6); /* issue address */ csr &= ~TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* deassert CS */ /* Disable SROM access. */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); } /* Not all boards have BIOS roms. */ /* The BIOS ROM is an AMD 29F010 1Mbit (128K by 8) EEPROM. */ static u_int8_t read_bios(softc_t *sc, u_int32_t addr) { u_int32_t srom_mii; /* Load the BIOS rom address register. */ WRITE_CSR(TLP_BIOS_ROM, addr); /* Enable the BIOS rom. 
*/ srom_mii = TLP_BIOS_SEL | TLP_BIOS_RD | TLP_MII_MDOE; WRITE_CSR(TLP_SROM_MII, srom_mii); /* Wait at least 20 PCI cycles. */ DELAY(20); /* Read the BIOS rom data. */ srom_mii = READ_CSR(TLP_SROM_MII); /* Disable the BIOS rom. */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); return (u_int8_t)srom_mii & 0xFF; } static void write_bios_phys(softc_t *sc, u_int32_t addr, u_int8_t data) { u_int32_t srom_mii; /* Load the BIOS rom address register. */ WRITE_CSR(TLP_BIOS_ROM, addr); /* Enable the BIOS rom. */ srom_mii = TLP_BIOS_SEL | TLP_BIOS_WR | TLP_MII_MDOE; /* Load the data into the data register. */ srom_mii = (srom_mii & 0xFFFFFF00) | (data & 0xFF); WRITE_CSR(TLP_SROM_MII, srom_mii); /* Wait at least 20 PCI cycles. */ DELAY(20); /* Disable the BIOS rom. */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); } /* IOCTL SYSCALL: can sleep. */ static void write_bios(softc_t *sc, u_int32_t addr, u_int8_t data) { u_int8_t read_data; /* this sequence enables writing */ write_bios_phys(sc, 0x5555, 0xAA); write_bios_phys(sc, 0x2AAA, 0x55); write_bios_phys(sc, 0x5555, 0xA0); write_bios_phys(sc, addr, data); /* Wait for the write operation to complete. */ for (;;) /* interruptible syscall */ { for (;;) { read_data = read_bios(sc, addr); if ((read_data & 0x80) == (data & 0x80)) break; if (read_data & 0x20) { /* Data sheet says read it again. */ read_data = read_bios(sc, addr); if ((read_data & 0x80) == (data & 0x80)) break; if (DRIVER_DEBUG) printf("%s: write_bios() failed; rom addr=0x%x\n", NAME_UNIT, addr); return; } } read_data = read_bios(sc, addr); if (read_data == data) break; } } /* IOCTL SYSCALL: can sleep. */ static void erase_bios(softc_t *sc) { unsigned char read_data; /* This sequence enables erasing: */ write_bios_phys(sc, 0x5555, 0xAA); write_bios_phys(sc, 0x2AAA, 0x55); write_bios_phys(sc, 0x5555, 0x80); write_bios_phys(sc, 0x5555, 0xAA); write_bios_phys(sc, 0x2AAA, 0x55); write_bios_phys(sc, 0x5555, 0x10); /* Wait for the erase operation to complete. */ for (;;) /* interruptible syscall */ { for (;;) { read_data = read_bios(sc, 0); if (read_data & 0x80) break; if (read_data & 0x20) { /* Data sheet says read it again. */ read_data = read_bios(sc, 0); if (read_data & 0x80) break; if (DRIVER_DEBUG) printf("%s: erase_bios() failed\n", NAME_UNIT); return; } } read_data = read_bios(sc, 0); if (read_data == 0xFF) break; } } /* MDIO is 3-stated between transactions. */ /* MDIO is set up before the RISING edge of MDC; MDC is parked low. */ static void shift_mii_bits(softc_t *sc, u_int32_t data, u_int32_t len) { u_int32_t csr = READ_CSR(TLP_SROM_MII); for (; len>0; len--) { /* MSB first */ if (data & (1<<(len-1))) csr |= TLP_MII_MDOUT; /* MDOUT setup */ else csr &= ~TLP_MII_MDOUT; /* MDOUT setup */ WRITE_CSR(TLP_SROM_MII, csr); csr |= TLP_MII_MDC; /* MDC rising edge */ WRITE_CSR(TLP_SROM_MII, csr); csr &= ~TLP_MII_MDC; /* MDC falling edge */ WRITE_CSR(TLP_SROM_MII, csr); } } /* The specification for the MII is IEEE Std 802.3 clause 22. */ /* MDIO is sampled on the RISING edge of MDC; MDC is parked low.
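 * For reference, the read frame clocked out by read_mii() below is:
 *
 *	preamble (40 one-bits here; the standard asks for 32 or more),
 *	start "01", read opcode "10", phyad (5 bits, always 0 here),
 *	regad (5 bits), a 2-bit turnaround with MDIO tri-stated,
 *	then 16 data bits sampled MSB first.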
*/ static u_int16_t read_mii(softc_t *sc, u_int8_t regad) { int i; u_int32_t csr; u_int16_t data = 0; WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOUT); shift_mii_bits(sc, 0xFFFFF, 20); /* preamble */ shift_mii_bits(sc, 0xFFFFF, 20); /* preamble */ shift_mii_bits(sc, 1, 2); /* start symbol */ shift_mii_bits(sc, 2, 2); /* read op */ shift_mii_bits(sc, 0, 5); /* phyad=0 */ shift_mii_bits(sc, regad, 5); /* regad */ csr = READ_CSR(TLP_SROM_MII); csr |= TLP_MII_MDOE; WRITE_CSR(TLP_SROM_MII, csr); shift_mii_bits(sc, 0, 2); /* turn-around */ for (i=15; i>=0; i--) /* data */ { /* MSB first */ csr = READ_CSR(TLP_SROM_MII); /* MDIN sampled */ data = (data<<1) | ((csr & TLP_MII_MDIN) ? 1:0); csr |= TLP_MII_MDC; /* MDC rising edge */ WRITE_CSR(TLP_SROM_MII, csr); csr &= ~TLP_MII_MDC; /* MDC falling edge */ WRITE_CSR(TLP_SROM_MII, csr); } return data; } static void write_mii(softc_t *sc, u_int8_t regad, u_int16_t data) { WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOUT); shift_mii_bits(sc, 0xFFFFF, 20); /* preamble */ shift_mii_bits(sc, 0xFFFFF, 20); /* preamble */ shift_mii_bits(sc, 1, 2); /* start symbol */ shift_mii_bits(sc, 1, 2); /* write op */ shift_mii_bits(sc, 0, 5); /* phyad=0 */ shift_mii_bits(sc, regad, 5); /* regad */ shift_mii_bits(sc, 2, 2); /* turn-around */ shift_mii_bits(sc, data, 16); /* data */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); if (regad == 16) sc->led_state = data; /* a small optimization */ } static void set_mii16_bits(softc_t *sc, u_int16_t bits) { u_int16_t mii16 = read_mii(sc, 16); mii16 |= bits; write_mii(sc, 16, mii16); } static void clr_mii16_bits(softc_t *sc, u_int16_t bits) { u_int16_t mii16 = read_mii(sc, 16); mii16 &= ~bits; write_mii(sc, 16, mii16); } static void set_mii17_bits(softc_t *sc, u_int16_t bits) { u_int16_t mii17 = read_mii(sc, 17); mii17 |= bits; write_mii(sc, 17, mii17); } static void clr_mii17_bits(softc_t *sc, u_int16_t bits) { u_int16_t mii17 = read_mii(sc, 17); mii17 &= ~bits; write_mii(sc, 17, mii17); } /* * Watchdog code is more readable if it refreshes LEDs * once a second whether they need it or not. * But MII refs take 150 uSecs each, so remember the last value * written to MII16 and avoid LED writes that do nothing. */ static void led_off(softc_t *sc, u_int16_t led) { if ((led & sc->led_state) == led) return; set_mii16_bits(sc, led); } static void led_on(softc_t *sc, u_int16_t led) { if ((led & sc->led_state) == 0) return; clr_mii16_bits(sc, led); } static void led_inv(softc_t *sc, u_int16_t led) { u_int16_t mii16 = read_mii(sc, 16); mii16 ^= led; write_mii(sc, 16, mii16); } /* * T1 & T3 framer registers are accessed through MII regs 17 & 18. * Write the address to MII reg 17 then R/W data through MII reg 18. * The hardware interface is an Intel-style 8-bit muxed A/D bus. */ static void write_framer(softc_t *sc, u_int16_t addr, u_int8_t data) { write_mii(sc, 17, addr); write_mii(sc, 18, data); } static u_int8_t read_framer(softc_t *sc, u_int16_t addr) { write_mii(sc, 17, addr); return (u_int8_t)read_mii(sc, 18); } /* Tulip's hardware implementation of General Purpose IO * (GPIO) pins makes life difficult for software. * Bits 7-0 in the Tulip GPIO CSR are used for two purposes * depending on the state of bit 8. * If bit 8 is 0 then bits 7-0 are "data" bits. * If bit 8 is 1 then bits 7-0 are "direction" bits. * If a direction bit is one, the data bit is an output. * The problem is that the direction bits are WRITE-ONLY. * Software must remember the direction bits in a shadow copy. * (sc->gpio_dir) in order to change some but not all of the bits. 
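 * The shadow idiom in miniature (sc->gpio_dir is the soft copy; the
 * hardware direction bits cannot be read back):
 *
 *	sc->gpio_dir |= bits;	(or &= ~bits to make inputs)
 *	WRITE_CSR(TLP_GPIO, TLP_GPIO_DIR | sc->gpio_dir);
 *
 * Every direction write therefore re-presents ALL of the direction
 * bits, not just the ones being changed.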
* All accesses to the Tulip GPIO register use these five procedures. */ static void make_gpio_input(softc_t *sc, u_int32_t bits) { sc->gpio_dir &= ~bits; WRITE_CSR(TLP_GPIO, TLP_GPIO_DIR | (sc->gpio_dir)); } static void make_gpio_output(softc_t *sc, u_int32_t bits) { sc->gpio_dir |= bits; WRITE_CSR(TLP_GPIO, TLP_GPIO_DIR | (sc->gpio_dir)); } static u_int32_t read_gpio(softc_t *sc) { return READ_CSR(TLP_GPIO); } static void set_gpio_bits(softc_t *sc, u_int32_t bits) { WRITE_CSR(TLP_GPIO, (read_gpio(sc) | bits) & 0xFF); } static void clr_gpio_bits(softc_t *sc, u_int32_t bits) { WRITE_CSR(TLP_GPIO, (read_gpio(sc) & ~bits) & 0xFF); } /* Reset ALL of the flip-flops in the gate array to zero. */ /* This does NOT change the gate array programming. */ /* Called during initialization so it must not sleep. */ static void reset_xilinx(softc_t *sc) { /* Drive RESET low to force initialization. */ clr_gpio_bits(sc, GPIO_RESET); make_gpio_output(sc, GPIO_RESET); /* Hold RESET low for more than 10 uSec. */ DELAY(50); /* Done with RESET; make it an input. */ make_gpio_input(sc, GPIO_RESET); } /* Load Xilinx gate array program from on-board rom. */ /* This changes the gate array programming. */ /* IOCTL SYSCALL: can sleep. */ static void load_xilinx_from_rom(softc_t *sc) { int i; /* Drive MODE low to load from ROM rather than GPIO. */ clr_gpio_bits(sc, GPIO_MODE); make_gpio_output(sc, GPIO_MODE); /* Drive DP & RESET low to force configuration. */ clr_gpio_bits(sc, GPIO_RESET | GPIO_DP); make_gpio_output(sc, GPIO_RESET | GPIO_DP); /* Hold RESET & DP low for more than 10 uSec. */ DELAY(50); /* Done with RESET & DP; make them inputs. */ make_gpio_input(sc, GPIO_DP | GPIO_RESET); /* BUSY-WAIT for Xilinx chip to configure itself from ROM bits. */ for (i=0; i<100; i++) /* 1 sec max delay */ if ((read_gpio(sc) & GPIO_DP) == 0) SLEEP(10000); /* Done with MODE; make it an input. */ make_gpio_input(sc, GPIO_MODE); } /* Load the Xilinx gate array program from userland bits. */ /* This changes the gate array programming. */ /* IOCTL SYSCALL: can sleep. */ static int load_xilinx_from_file(softc_t *sc, char *addr, u_int32_t len) { char *data; int i, j, error; /* Get some pages to hold the Xilinx bits; biggest file is < 6 KB. */ if (len > 8192) return EFBIG; /* too big */ data = malloc(len, M_DEVBUF, M_WAITOK); if (data == NULL) return ENOMEM; /* Copy the Xilinx bits from userland. */ if ((error = copyin(addr, data, len))) { free(data, M_DEVBUF); return error; } /* Drive MODE high to load from GPIO rather than ROM. */ set_gpio_bits(sc, GPIO_MODE); make_gpio_output(sc, GPIO_MODE); /* Drive DP & RESET low to force configuration. */ clr_gpio_bits(sc, GPIO_RESET | GPIO_DP); make_gpio_output(sc, GPIO_RESET | GPIO_DP); /* Hold RESET & DP low for more than 10 uSec. */ DELAY(50); /* Done with RESET & DP; make them inputs. */ make_gpio_input(sc, GPIO_RESET | GPIO_DP); /* BUSY-WAIT for Xilinx chip to clear its config memory. */ make_gpio_input(sc, GPIO_INIT); for (i=0; i<10000; i++) /* 1 sec max delay */ if ((read_gpio(sc) & GPIO_INIT)==0) SLEEP(10000); /* Configure CLK and DATA as outputs. */ set_gpio_bits(sc, GPIO_CLK); /* park CLK high */ make_gpio_output(sc, GPIO_CLK | GPIO_DATA); /* Write bits to Xilinx; CLK is parked HIGH. */ /* DATA is set up before the RISING edge of CLK. 
 */
  for (i=0; i<len; i++)
    for (j=0; j<8; j++)
      {
      if ((data[i] & (1<<j)) != 0)
        set_gpio_bits(sc, GPIO_DATA); /* DATA setup */
      else
        clr_gpio_bits(sc, GPIO_DATA); /* DATA setup */
      clr_gpio_bits(sc, GPIO_CLK);    /* CLK falling edge */
      set_gpio_bits(sc, GPIO_CLK);    /* CLK rising edge */
      }

  /* Stop driving serial-related signals; pullups/pulldowns take over. */
  make_gpio_input(sc, GPIO_DATA | GPIO_CLK);

  /* BUSY-WAIT for Xilinx chip to configure itself from the file bits. */
  for (i=0; i<100; i++) /* 1 sec max delay */
    if ((read_gpio(sc) & GPIO_DP) == 0) SLEEP(10000);

  /* Done with MODE; make it an input. */
  make_gpio_input(sc, GPIO_MODE);

  free(data, M_DEVBUF);

  return 0;
  }

/* Shift bits into the synthesized oscillator serially through GPIO pins. */
/* DATA is set up before the RISING edge of CLK; LSB first. */
static void
shift_synth_bits(softc_t *sc, u_int32_t data, u_int32_t len)
  {
  u_int32_t i;

  for (i=0; i<len; i++)
    { /* LSB first */
    if ((data & (1<<i)) != 0)
      set_gpio_bits(sc, GPIO_DATA); /* DATA setup */
    else
      clr_gpio_bits(sc, GPIO_DATA); /* DATA setup */
    set_gpio_bits(sc, GPIO_CLK);    /* CLK rising edge */
    clr_gpio_bits(sc, GPIO_CLK);    /* CLK falling edge */
    }
  }

/* Program the AV9110 frequency synthesizer found on SSI and HSSI cards. */
static void
write_synth(softc_t *sc, struct synth *synth)
  {
  /* Only SSI cards have a programmable prescaler. */
  if (sc->status.card_type == TLP_CSID_SSI)
    {
    if (synth->prescale == 9) /* divide by 512 */
      set_mii17_bits(sc, MII17_SSI_PRESCALE);
    else                      /* divide by 32 */
      clr_mii17_bits(sc, MII17_SSI_PRESCALE);
    }

  clr_gpio_bits(sc,    GPIO_DATA | GPIO_CLK);
  make_gpio_output(sc, GPIO_DATA | GPIO_CLK);

  /* SYNTH is a low-true chip enable for the AV9110 chip. */
  set_gpio_bits(sc,    GPIO_SSI_SYNTH);
  make_gpio_output(sc, GPIO_SSI_SYNTH);
  clr_gpio_bits(sc,    GPIO_SSI_SYNTH);

  /* Serially shift the command into the AV9110 chip. */
  shift_synth_bits(sc, synth->n, 7);
  shift_synth_bits(sc, synth->m, 7);
  shift_synth_bits(sc, synth->v, 1);
  shift_synth_bits(sc, synth->x, 2);
  shift_synth_bits(sc, synth->r, 2);
  shift_synth_bits(sc, 0x16, 5); /* enable clk/x output */

  /* SYNTH (chip enable) going high ends the command. */
  set_gpio_bits(sc,   GPIO_SSI_SYNTH);
  make_gpio_input(sc, GPIO_SSI_SYNTH);

  /* Stop driving serial-related signals; pullups/pulldowns take over. */
  make_gpio_input(sc, GPIO_DATA | GPIO_CLK);

  /* Remember the new synthesizer parameters. */
  if (&sc->config.synth != synth) sc->config.synth = *synth;
  }

/* Write a command to the DAC controlling the VCXO on some T3 adapters. */
/* The DAC is a TI-TLV5636: 12-bit resolution and a serial interface. */
/* DATA is set up before the FALLING edge of CLK.  CLK is parked HIGH. */
static void
write_dac(softc_t *sc, u_int16_t data)
  {
  int i;

  /* Prepare to use DATA and CLK. */
  set_gpio_bits(sc,    GPIO_DATA | GPIO_CLK);
  make_gpio_output(sc, GPIO_DATA | GPIO_CLK);

  /* High-to-low transition prepares DAC for new value. */
  set_gpio_bits(sc,    GPIO_T3_DAC);
  make_gpio_output(sc, GPIO_T3_DAC);
  clr_gpio_bits(sc,    GPIO_T3_DAC);

  /* Serially shift command bits into DAC. */
  for (i=0; i<16; i++)
    { /* MSB first */
    if ((data & (1<<(15-i))) != 0)
      set_gpio_bits(sc, GPIO_DATA); /* DATA setup */
    else
      clr_gpio_bits(sc, GPIO_DATA); /* DATA setup */
    clr_gpio_bits(sc, GPIO_CLK);    /* CLK falling edge */
    set_gpio_bits(sc, GPIO_CLK);    /* CLK rising edge */
    }

  /* Done with DAC; make it an input; loads new value into DAC. */
  set_gpio_bits(sc,   GPIO_T3_DAC);
  make_gpio_input(sc, GPIO_T3_DAC);

  /* Stop driving serial-related signals; pullups/pulldowns take over. */
  make_gpio_input(sc, GPIO_DATA | GPIO_CLK);
  }

/* begin HSSI card code */

/* Must not sleep. */
static void
hssi_config(softc_t *sc)
  {
  if (sc->status.card_type == 0)
    { /* defaults */
    sc->status.card_type = READ_PCI_CFG(sc, TLP_CSID);
    sc->config.crc_len = CFG_CRC_16;
    sc->config.loop_back = CFG_LOOP_NONE;
    sc->config.tx_clk_src = CFG_CLKMUX_ST;
    sc->config.dte_dce = CFG_DTE;
    sc->config.synth.n = 52; /* 52.000 Mbs */
    sc->config.synth.m = 5;
    sc->config.synth.v = 0;
    sc->config.synth.x = 0;
    sc->config.synth.r = 0;
    sc->config.synth.prescale = 2;
    }

  /* set CRC length */
  if (sc->config.crc_len == CFG_CRC_32)
    set_mii16_bits(sc, MII16_HSSI_CRC32);
  else
    clr_mii16_bits(sc, MII16_HSSI_CRC32);

  /* Assert pin LA in HSSI conn: ask modem for local loop. */
  if (sc->config.loop_back == CFG_LOOP_LL)
    set_mii16_bits(sc, MII16_HSSI_LA);
  else
    clr_mii16_bits(sc, MII16_HSSI_LA);

  /* Assert pin LB in HSSI conn: ask modem for remote loop. */
  if (sc->config.loop_back == CFG_LOOP_RL)
    set_mii16_bits(sc, MII16_HSSI_LB);
  else
    clr_mii16_bits(sc, MII16_HSSI_LB);

  if (sc->status.card_type == TLP_CSID_HSSI)
    { /* set TXCLK src */
    if (sc->config.tx_clk_src == CFG_CLKMUX_ST)
      set_gpio_bits(sc, GPIO_HSSI_TXCLK);
    else
      clr_gpio_bits(sc, GPIO_HSSI_TXCLK);
    make_gpio_output(sc, GPIO_HSSI_TXCLK);
    }
  else if (sc->status.card_type == TLP_CSID_HSSIc)
    { /* cPCI HSSI rev C has extra features */

    /* Set TXCLK source.
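     * (Rev C selects the transmit clock with MII16 bits 14:13,
     * MII16_HSSI_CLKMUX, rather than a GPIO pin.)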
*/ u_int16_t mii16 = read_mii(sc, 16); mii16 &= ~MII16_HSSI_CLKMUX; mii16 |= (sc->config.tx_clk_src&3)<<13; write_mii(sc, 16, mii16); /* cPCI HSSI implements loopback towards the net. */ if (sc->config.loop_back == CFG_LOOP_LINE) set_mii16_bits(sc, MII16_HSSI_LOOP); else clr_mii16_bits(sc, MII16_HSSI_LOOP); /* Set DTE/DCE mode. */ if (sc->config.dte_dce == CFG_DCE) set_gpio_bits(sc, GPIO_HSSI_DCE); else clr_gpio_bits(sc, GPIO_HSSI_DCE); make_gpio_output(sc, GPIO_HSSI_DCE); /* Program the synthesized oscillator. */ write_synth(sc, &sc->config.synth); } } static void hssi_ident(softc_t *sc) { } /* Called once a second; must not sleep. */ static int hssi_watchdog(softc_t *sc) { u_int16_t mii16 = read_mii(sc, 16) & MII16_HSSI_MODEM; int link_status = STATUS_UP; led_inv(sc, MII16_HSSI_LED_UL); /* Software is alive. */ led_on(sc, MII16_HSSI_LED_LL); /* always on (SSI cable) */ /* Check the transmit clock. */ if (sc->status.tx_speed == 0) { led_on(sc, MII16_HSSI_LED_UR); link_status = STATUS_DOWN; } else led_off(sc, MII16_HSSI_LED_UR); /* Is the modem ready? */ if ((mii16 & MII16_HSSI_CA) == 0) { led_off(sc, MII16_HSSI_LED_LR); link_status = STATUS_DOWN; } else led_on(sc, MII16_HSSI_LED_LR); /* Print the modem control signals if they changed. */ if ((DRIVER_DEBUG) && (mii16 != sc->last_mii16)) { char *on = "ON ", *off = "OFF"; printf("%s: TA=%s CA=%s LA=%s LB=%s LC=%s TM=%s\n", NAME_UNIT, (mii16 & MII16_HSSI_TA) ? on : off, (mii16 & MII16_HSSI_CA) ? on : off, (mii16 & MII16_HSSI_LA) ? on : off, (mii16 & MII16_HSSI_LB) ? on : off, (mii16 & MII16_HSSI_LC) ? on : off, (mii16 & MII16_HSSI_TM) ? on : off); } /* SNMP one-second-report */ sc->status.snmp.hssi.sigs = mii16 & MII16_HSSI_MODEM; /* Remember this state until next time. */ sc->last_mii16 = mii16; /* If a loop back is in effect, link status is UP */ if (sc->config.loop_back != CFG_LOOP_NONE) link_status = STATUS_UP; return link_status; } /* IOCTL SYSCALL: can sleep (but doesn't). */ static int hssi_ioctl(softc_t *sc, struct ioctl *ioctl) { int error = 0; if (ioctl->cmd == IOCTL_SNMP_SIGS) { u_int16_t mii16 = read_mii(sc, 16); mii16 &= ~MII16_HSSI_MODEM; mii16 |= (MII16_HSSI_MODEM & ioctl->data); write_mii(sc, 16, mii16); } else if (ioctl->cmd == IOCTL_SET_STATUS) { if (ioctl->data != 0) set_mii16_bits(sc, MII16_HSSI_TA); else clr_mii16_bits(sc, MII16_HSSI_TA); } else error = EINVAL; return error; } /* begin DS3 card code */ /* Must not sleep. */ static void t3_config(softc_t *sc) { int i; u_int8_t ctl1; if (sc->status.card_type == 0) { /* defaults */ sc->status.card_type = TLP_CSID_T3; sc->config.crc_len = CFG_CRC_16; sc->config.loop_back = CFG_LOOP_NONE; sc->config.format = CFG_FORMAT_T3CPAR; sc->config.cable_len = 10; /* meters */ sc->config.scrambler = CFG_SCRAM_DL_KEN; sc->config.tx_clk_src = CFG_CLKMUX_INT; /* Center the VCXO -- get within 20 PPM of 44736000. */ write_dac(sc, 0x9002); /* set Vref = 2.048 volts */ write_dac(sc, 2048); /* range is 0..4095 */ } /* Set cable length. */ if (sc->config.cable_len > 30) clr_mii16_bits(sc, MII16_DS3_ZERO); else set_mii16_bits(sc, MII16_DS3_ZERO); /* Set payload scrambler polynomial. */ if (sc->config.scrambler == CFG_SCRAM_LARS) set_mii16_bits(sc, MII16_DS3_POLY); else clr_mii16_bits(sc, MII16_DS3_POLY); /* Set payload scrambler on/off. */ if (sc->config.scrambler == CFG_SCRAM_OFF) clr_mii16_bits(sc, MII16_DS3_SCRAM); else set_mii16_bits(sc, MII16_DS3_SCRAM); /* Set CRC length. 
*/ if (sc->config.crc_len == CFG_CRC_32) set_mii16_bits(sc, MII16_DS3_CRC32); else clr_mii16_bits(sc, MII16_DS3_CRC32); /* Loopback towards host thru the line interface. */ if (sc->config.loop_back == CFG_LOOP_OTHER) set_mii16_bits(sc, MII16_DS3_TRLBK); else clr_mii16_bits(sc, MII16_DS3_TRLBK); /* Loopback towards network thru the line interface. */ if (sc->config.loop_back == CFG_LOOP_LINE) set_mii16_bits(sc, MII16_DS3_LNLBK); else if (sc->config.loop_back == CFG_LOOP_DUAL) set_mii16_bits(sc, MII16_DS3_LNLBK); else clr_mii16_bits(sc, MII16_DS3_LNLBK); /* Configure T3 framer chip; write EVERY writeable register. */ ctl1 = CTL1_SER | CTL1_XTX; if (sc->config.loop_back == CFG_LOOP_INWARD) ctl1 |= CTL1_3LOOP; if (sc->config.loop_back == CFG_LOOP_DUAL) ctl1 |= CTL1_3LOOP; if (sc->config.format == CFG_FORMAT_T3M13) ctl1 |= CTL1_M13MODE; write_framer(sc, T3CSR_CTL1, ctl1); write_framer(sc, T3CSR_TX_FEAC, CTL5_EMODE); write_framer(sc, T3CSR_CTL8, CTL8_FBEC); write_framer(sc, T3CSR_CTL12, CTL12_DLCB1 | CTL12_C21 | CTL12_MCB1); write_framer(sc, T3CSR_DBL_FEAC, 0); write_framer(sc, T3CSR_CTL14, CTL14_RGCEN | CTL14_TGCEN); write_framer(sc, T3CSR_INTEN, 0); write_framer(sc, T3CSR_CTL20, CTL20_CVEN); /* Clear error counters and latched error bits */ /* that may have happened while initializing. */ for (i=0; i<21; i++) read_framer(sc, i); } static void t3_ident(softc_t *sc) { printf(", TXC03401 rev B"); } /* Called once a second; must not sleep. */ static int t3_watchdog(softc_t *sc) { u_int16_t CV; u_int8_t CERR, PERR, MERR, FERR, FEBE; u_int8_t ctl1, stat16, feac; int link_status = STATUS_UP; u_int16_t mii16; /* Read the alarm registers. */ ctl1 = read_framer(sc, T3CSR_CTL1); stat16 = read_framer(sc, T3CSR_STAT16); mii16 = read_mii(sc, 16); /* Always ignore the RTLOC alarm bit. */ stat16 &= ~STAT16_RTLOC; /* Software is alive. */ led_inv(sc, MII16_DS3_LED_GRN); /* Receiving Alarm Indication Signal (AIS). */ if ((stat16 & STAT16_RAIS) != 0) /* receiving ais */ led_on(sc, MII16_DS3_LED_BLU); else if (ctl1 & CTL1_TXAIS) /* sending ais */ led_inv(sc, MII16_DS3_LED_BLU); else led_off(sc, MII16_DS3_LED_BLU); /* Receiving Remote Alarm Indication (RAI). */ if ((stat16 & STAT16_XERR) != 0) /* receiving rai */ led_on(sc, MII16_DS3_LED_YEL); else if ((ctl1 & CTL1_XTX) == 0) /* sending rai */ led_inv(sc, MII16_DS3_LED_YEL); else led_off(sc, MII16_DS3_LED_YEL); /* If certain status bits are set then the link is 'down'. */ /* The bad bits are: rxlos rxoof rxais rxidl xerr. */ if ((stat16 & ~(STAT16_FEAC | STAT16_SEF)) != 0) link_status = STATUS_DOWN; /* Declare local Red Alarm if the link is down. */ if (link_status == STATUS_DOWN) led_on(sc, MII16_DS3_LED_RED); else if (sc->loop_timer != 0) /* loopback is active */ led_inv(sc, MII16_DS3_LED_RED); else led_off(sc, MII16_DS3_LED_RED); /* Print latched error bits if they changed. */ if ((DRIVER_DEBUG) && ((stat16 & ~STAT16_FEAC) != sc->last_stat16)) { char *on = "ON ", *off = "OFF"; printf("%s: RLOS=%s ROOF=%s RAIS=%s RIDL=%s SEF=%s XERR=%s\n", NAME_UNIT, (stat16 & STAT16_RLOS) ? on : off, (stat16 & STAT16_ROOF) ? on : off, (stat16 & STAT16_RAIS) ? on : off, (stat16 & STAT16_RIDL) ? on : off, (stat16 & STAT16_SEF) ? on : off, (stat16 & STAT16_XERR) ? on : off); } /* Check and print error counters if non-zero. 
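 * Reading a counter also clears it (t3_config relies on this to flush
 * errors latched during initialization), so each read below yields a
 * one-second delta.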
*/ CV = read_framer(sc, T3CSR_CVHI)<<8; CV += read_framer(sc, T3CSR_CVLO); PERR = read_framer(sc, T3CSR_PERR); CERR = read_framer(sc, T3CSR_CERR); FERR = read_framer(sc, T3CSR_FERR); MERR = read_framer(sc, T3CSR_MERR); FEBE = read_framer(sc, T3CSR_FEBE); /* CV is invalid during LOS. */ if ((stat16 & STAT16_RLOS)!=0) CV = 0; /* CERR & FEBE are invalid in M13 mode */ if (sc->config.format == CFG_FORMAT_T3M13) CERR = FEBE = 0; /* FEBE is invalid during AIS. */ if ((stat16 & STAT16_RAIS)!=0) FEBE = 0; if (DRIVER_DEBUG && (CV || PERR || CERR || FERR || MERR || FEBE)) printf("%s: CV=%u PERR=%u CERR=%u FERR=%u MERR=%u FEBE=%u\n", NAME_UNIT, CV, PERR, CERR, FERR, MERR, FEBE); /* Driver keeps crude link-level error counters (SNMP is better). */ sc->status.cntrs.lcv_errs += CV; sc->status.cntrs.par_errs += PERR; sc->status.cntrs.cpar_errs += CERR; sc->status.cntrs.frm_errs += FERR; sc->status.cntrs.mfrm_errs += MERR; sc->status.cntrs.febe_errs += FEBE; /* Check for FEAC messages (FEAC not defined in M13 mode). */ if (FORMAT_T3CPAR && (stat16 & STAT16_FEAC)) do { feac = read_framer(sc, T3CSR_FEAC_STK); if ((feac & FEAC_STK_VALID)==0) break; /* Ignore RxFEACs while a far end loopback has been requested. */ if ((sc->status.snmp.t3.line & TLOOP_FAR_LINE)!=0) continue; switch (feac & FEAC_STK_FEAC) { case T3BOP_LINE_UP: break; case T3BOP_LINE_DOWN: break; case T3BOP_LOOP_DS3: { if (sc->last_FEAC == T3BOP_LINE_DOWN) { if (DRIVER_DEBUG) printf("%s: Received a 'line loopback deactivate' FEAC msg\n", NAME_UNIT); clr_mii16_bits(sc, MII16_DS3_LNLBK); sc->loop_timer = 0; } if (sc->last_FEAC == T3BOP_LINE_UP) { if (DRIVER_DEBUG) printf("%s: Received a 'line loopback activate' FEAC msg\n", NAME_UNIT); set_mii16_bits(sc, MII16_DS3_LNLBK); sc->loop_timer = 300; } break; } case T3BOP_OOF: { if (DRIVER_DEBUG) printf("%s: Received a 'far end LOF' FEAC msg\n", NAME_UNIT); break; } case T3BOP_IDLE: { if (DRIVER_DEBUG) printf("%s: Received a 'far end IDL' FEAC msg\n", NAME_UNIT); break; } case T3BOP_AIS: { if (DRIVER_DEBUG) printf("%s: Received a 'far end AIS' FEAC msg\n", NAME_UNIT); break; } case T3BOP_LOS: { if (DRIVER_DEBUG) printf("%s: Received a 'far end LOS' FEAC msg\n", NAME_UNIT); break; } default: { if (DRIVER_DEBUG) printf("%s: Received a 'type 0x%02X' FEAC msg\n", NAME_UNIT, feac & FEAC_STK_FEAC); break; } } sc->last_FEAC = feac & FEAC_STK_FEAC; } while ((feac & FEAC_STK_MORE) != 0); stat16 &= ~STAT16_FEAC; /* Send Service-Affecting priority FEAC messages */ if (((sc->last_stat16 ^ stat16) & 0xF0) && (FORMAT_T3CPAR)) { /* Transmit continuous FEACs */ write_framer(sc, T3CSR_CTL14, read_framer(sc, T3CSR_CTL14) & ~CTL14_FEAC10); if ((stat16 & STAT16_RLOS)!=0) write_framer(sc, T3CSR_TX_FEAC, 0xC0 + T3BOP_LOS); else if ((stat16 & STAT16_ROOF)!=0) write_framer(sc, T3CSR_TX_FEAC, 0xC0 + T3BOP_OOF); else if ((stat16 & STAT16_RAIS)!=0) write_framer(sc, T3CSR_TX_FEAC, 0xC0 + T3BOP_AIS); else if ((stat16 & STAT16_RIDL)!=0) write_framer(sc, T3CSR_TX_FEAC, 0xC0 + T3BOP_IDLE); else write_framer(sc, T3CSR_TX_FEAC, CTL5_EMODE); } /* Start sending RAI, Remote Alarm Indication. */ if (((stat16 & STAT16_ROOF)!=0) && ((stat16 & STAT16_RLOS)==0) && ((sc->last_stat16 & STAT16_ROOF)==0)) write_framer(sc, T3CSR_CTL1, ctl1 &= ~CTL1_XTX); /* Stop sending RAI, Remote Alarm Indication. 
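 * (Clearing CTL1_XTX starts the transmitted alarm and setting it stops
 * it: RAI starts on an out-of-frame onset and stops once framing
 * returns.)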
 */
  else if (((stat16 & STAT16_ROOF)==0) &&
   ((sc->last_stat16 & STAT16_ROOF)!=0))
    write_framer(sc, T3CSR_CTL1, ctl1 |= CTL1_XTX);

  /* Start sending AIS, Alarm Indication Signal */
  if (((stat16 & STAT16_RLOS)!=0) &&
   ((sc->last_stat16 & STAT16_RLOS)==0))
    {
    set_mii16_bits(sc, MII16_DS3_FRAME);
    write_framer(sc, T3CSR_CTL1, ctl1 | CTL1_TXAIS);
    }
  /* Stop sending AIS, Alarm Indication Signal */
  else if (((stat16 & STAT16_RLOS)==0) &&
   ((sc->last_stat16 & STAT16_RLOS)!=0))
    {
    clr_mii16_bits(sc, MII16_DS3_FRAME);
    write_framer(sc, T3CSR_CTL1, ctl1 & ~CTL1_TXAIS);
    }

  /* Time out loopback requests. */
  if (sc->loop_timer != 0)
    if (--sc->loop_timer == 0)
      if ((mii16 & MII16_DS3_LNLBK)!=0)
        {
        if (DRIVER_DEBUG)
          printf("%s: Timeout: Loop Down after 300 seconds\n", NAME_UNIT);
        clr_mii16_bits(sc, MII16_DS3_LNLBK); /* line loopback off */
        }

  /* SNMP error counters */
  sc->status.snmp.t3.lcv  = CV;
  sc->status.snmp.t3.pcv  = PERR;
  sc->status.snmp.t3.ccv  = CERR;
  sc->status.snmp.t3.febe = FEBE;

  /* SNMP Line Status */
  sc->status.snmp.t3.line = 0;
  if ((ctl1 & CTL1_XTX)==0)    sc->status.snmp.t3.line |= TLINE_TX_RAI;
  if (stat16 & STAT16_XERR)    sc->status.snmp.t3.line |= TLINE_RX_RAI;
  if (ctl1 & CTL1_TXAIS)       sc->status.snmp.t3.line |= TLINE_TX_AIS;
  if (stat16 & STAT16_RAIS)    sc->status.snmp.t3.line |= TLINE_RX_AIS;
  if (stat16 & STAT16_ROOF)    sc->status.snmp.t3.line |= TLINE_LOF;
  if (stat16 & STAT16_RLOS)    sc->status.snmp.t3.line |= TLINE_LOS;
  if (stat16 & STAT16_SEF)     sc->status.snmp.t3.line |= T3LINE_SEF;

  /* SNMP Loopback Status */
  sc->status.snmp.t3.loop &= ~TLOOP_FAR_LINE;
  if (sc->config.loop_back == CFG_LOOP_TULIP)
    sc->status.snmp.t3.loop |= TLOOP_NEAR_OTHER;
  if (ctl1 & CTL1_3LOOP)       sc->status.snmp.t3.loop |= TLOOP_NEAR_INWARD;
  if (mii16 & MII16_DS3_TRLBK) sc->status.snmp.t3.loop |= TLOOP_NEAR_OTHER;
  if (mii16 & MII16_DS3_LNLBK) sc->status.snmp.t3.loop |= TLOOP_NEAR_LINE;
/*if (ctl12 & CTL12_RTPLOOP)   sc->status.snmp.t3.loop |= TLOOP_NEAR_PAYLOAD; */

  /* Remember this state until next time. */
  sc->last_stat16 = stat16;

  /* If an INWARD loopback is in effect, link status is UP */
  if (sc->config.loop_back != CFG_LOOP_NONE) /* XXX INWARD ONLY */
    link_status = STATUS_UP;

  return link_status;
  }

/* IOCTL SYSCALL: can sleep. */
static void
t3_send_dbl_feac(softc_t *sc, int feac1, int feac2)
  {
  u_int8_t tx_feac;
  int i;

  /* The FEAC transmitter could be sending a continuous */
  /* FEAC msg when told to send a double FEAC message. */
  /* So save the current state of the FEAC transmitter. */
  tx_feac = read_framer(sc, T3CSR_TX_FEAC);

  /* Load second FEAC code and stop FEAC transmitter. */
  write_framer(sc, T3CSR_TX_FEAC, CTL5_EMODE + feac2);

  /* FEAC transmitter sends 10 more FEACs and then stops. */
  SLEEP(20000); /* sending one FEAC takes 1700 uSecs */

  /* Load first FEAC code and start FEAC transmitter. */
  write_framer(sc, T3CSR_DBL_FEAC, CTL13_DFEXEC + feac1);

  /* Wait for double FEAC sequence to complete -- about 70 ms. */
  for (i=0; i<10; i++) /* max delay 100 ms */
    if (read_framer(sc, T3CSR_DBL_FEAC) & CTL13_DFEXEC) SLEEP(10000);

  /* Flush received FEACs; don't respond to our own loop cmd! */
  while (read_framer(sc, T3CSR_FEAC_STK) & FEAC_STK_VALID) DELAY(1); /* XXX HANG */

  /* Restore previous state of the FEAC transmitter. */
  /* If it was sending a continuous FEAC, it will resume. */
  write_framer(sc, T3CSR_TX_FEAC, tx_feac);
  }

/* IOCTL SYSCALL: can sleep. */
static int
t3_ioctl(softc_t *sc, struct ioctl *ioctl)
  {
  int error = 0;

  switch (ioctl->cmd)
    {
    case IOCTL_SNMP_SEND: /* set opstatus?
*/ { if (sc->config.format != CFG_FORMAT_T3CPAR) error = EINVAL; else if (ioctl->data == TSEND_LINE) { sc->status.snmp.t3.loop |= TLOOP_FAR_LINE; t3_send_dbl_feac(sc, T3BOP_LINE_UP, T3BOP_LOOP_DS3); } else if (ioctl->data == TSEND_RESET) { t3_send_dbl_feac(sc, T3BOP_LINE_DOWN, T3BOP_LOOP_DS3); sc->status.snmp.t3.loop &= ~TLOOP_FAR_LINE; } else error = EINVAL; break; } case IOCTL_SNMP_LOOP: /* set opstatus = test? */ { if (ioctl->data == CFG_LOOP_NONE) { clr_mii16_bits(sc, MII16_DS3_FRAME); clr_mii16_bits(sc, MII16_DS3_TRLBK); clr_mii16_bits(sc, MII16_DS3_LNLBK); write_framer(sc, T3CSR_CTL1, read_framer(sc, T3CSR_CTL1) & ~CTL1_3LOOP); write_framer(sc, T3CSR_CTL12, read_framer(sc, T3CSR_CTL12) & ~(CTL12_RTPLOOP | CTL12_RTPLLEN)); } else if (ioctl->data == CFG_LOOP_LINE) set_mii16_bits(sc, MII16_DS3_LNLBK); else if (ioctl->data == CFG_LOOP_OTHER) set_mii16_bits(sc, MII16_DS3_TRLBK); else if (ioctl->data == CFG_LOOP_INWARD) write_framer(sc, T3CSR_CTL1, read_framer(sc, T3CSR_CTL1) | CTL1_3LOOP); else if (ioctl->data == CFG_LOOP_DUAL) { set_mii16_bits(sc, MII16_DS3_LNLBK); write_framer(sc, T3CSR_CTL1, read_framer(sc, T3CSR_CTL1) | CTL1_3LOOP); } else if (ioctl->data == CFG_LOOP_PAYLOAD) { set_mii16_bits(sc, MII16_DS3_FRAME); write_framer(sc, T3CSR_CTL12, read_framer(sc, T3CSR_CTL12) | CTL12_RTPLOOP); write_framer(sc, T3CSR_CTL12, read_framer(sc, T3CSR_CTL12) | CTL12_RTPLLEN); DELAY(25); /* at least two frames (22 uS) */ write_framer(sc, T3CSR_CTL12, read_framer(sc, T3CSR_CTL12) & ~CTL12_RTPLLEN); } else error = EINVAL; break; } default: error = EINVAL; break; } return error; } /* begin SSI card code */ /* Must not sleep. */ static void ssi_config(softc_t *sc) { if (sc->status.card_type == 0) { /* defaults */ sc->status.card_type = TLP_CSID_SSI; sc->config.crc_len = CFG_CRC_16; sc->config.loop_back = CFG_LOOP_NONE; sc->config.tx_clk_src = CFG_CLKMUX_ST; sc->config.dte_dce = CFG_DTE; sc->config.synth.n = 51; /* 1.536 MHz */ sc->config.synth.m = 83; sc->config.synth.v = 1; sc->config.synth.x = 1; sc->config.synth.r = 1; sc->config.synth.prescale = 4; } /* Disable the TX clock driver while programming the oscillator. */ clr_gpio_bits(sc, GPIO_SSI_DCE); make_gpio_output(sc, GPIO_SSI_DCE); /* Program the synthesized oscillator. */ write_synth(sc, &sc->config.synth); /* Set DTE/DCE mode. */ /* If DTE mode then DCD & TXC are received. */ /* If DCE mode then DCD & TXC are driven. */ /* Boards with MII rev=4.0 don't drive DCD. */ if (sc->config.dte_dce == CFG_DCE) set_gpio_bits(sc, GPIO_SSI_DCE); else clr_gpio_bits(sc, GPIO_SSI_DCE); make_gpio_output(sc, GPIO_SSI_DCE); /* Set CRC length. */ if (sc->config.crc_len == CFG_CRC_32) set_mii16_bits(sc, MII16_SSI_CRC32); else clr_mii16_bits(sc, MII16_SSI_CRC32); /* Loop towards host thru cable drivers and receivers. */ /* Asserts DCD at the far end of a null modem cable. */ if (sc->config.loop_back == CFG_LOOP_PINS) set_mii16_bits(sc, MII16_SSI_LOOP); else clr_mii16_bits(sc, MII16_SSI_LOOP); /* Assert pin LL in modem conn: ask modem for local loop. */ /* Asserts TM at the far end of a null modem cable. */ if (sc->config.loop_back == CFG_LOOP_LL) set_mii16_bits(sc, MII16_SSI_LL); else clr_mii16_bits(sc, MII16_SSI_LL); /* Assert pin RL in modem conn: ask modem for remote loop. */ if (sc->config.loop_back == CFG_LOOP_RL) set_mii16_bits(sc, MII16_SSI_RL); else clr_mii16_bits(sc, MII16_SSI_RL); } static void ssi_ident(softc_t *sc) { printf(", LTC1343/44"); } /* Called once a second; must not sleep. 
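 * (Each card's watchdog follows the same pattern: blink the "alive"
 * LED, sample the modem or framer status, derive the link state, and
 * record an SNMP one-second report.)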
*/ static int ssi_watchdog(softc_t *sc) { u_int16_t cable; u_int16_t mii16 = read_mii(sc, 16) & MII16_SSI_MODEM; int link_status = STATUS_UP; /* Software is alive. */ led_inv(sc, MII16_SSI_LED_UL); /* Check the transmit clock. */ if (sc->status.tx_speed == 0) { led_on(sc, MII16_SSI_LED_UR); link_status = STATUS_DOWN; } else led_off(sc, MII16_SSI_LED_UR); /* Check the external cable. */ cable = read_mii(sc, 17); cable = cable & MII17_SSI_CABLE_MASK; cable = cable >> MII17_SSI_CABLE_SHIFT; if (cable == 7) { led_off(sc, MII16_SSI_LED_LL); /* no cable */ link_status = STATUS_DOWN; } else led_on(sc, MII16_SSI_LED_LL); /* The unit at the other end of the cable is ready if: */ /* DTE mode and DCD pin is asserted */ /* DCE mode and DSR pin is asserted */ if (((sc->config.dte_dce == CFG_DTE) && ((mii16 & MII16_SSI_DCD)==0)) || ((sc->config.dte_dce == CFG_DCE) && ((mii16 & MII16_SSI_DSR)==0))) { led_off(sc, MII16_SSI_LED_LR); link_status = STATUS_DOWN; } else led_on(sc, MII16_SSI_LED_LR); if (DRIVER_DEBUG && (cable != sc->status.cable_type)) printf("%s: SSI cable type changed to '%s'\n", NAME_UNIT, ssi_cables[cable]); sc->status.cable_type = cable; /* Print the modem control signals if they changed. */ if ((DRIVER_DEBUG) && (mii16 != sc->last_mii16)) { char *on = "ON ", *off = "OFF"; printf("%s: DTR=%s DSR=%s RTS=%s CTS=%s DCD=%s RI=%s LL=%s RL=%s TM=%s\n", NAME_UNIT, (mii16 & MII16_SSI_DTR) ? on : off, (mii16 & MII16_SSI_DSR) ? on : off, (mii16 & MII16_SSI_RTS) ? on : off, (mii16 & MII16_SSI_CTS) ? on : off, (mii16 & MII16_SSI_DCD) ? on : off, (mii16 & MII16_SSI_RI) ? on : off, (mii16 & MII16_SSI_LL) ? on : off, (mii16 & MII16_SSI_RL) ? on : off, (mii16 & MII16_SSI_TM) ? on : off); } /* SNMP one-second report */ sc->status.snmp.ssi.sigs = mii16 & MII16_SSI_MODEM; /* Remember this state until next time. */ sc->last_mii16 = mii16; /* If a loop back is in effect, link status is UP */ if (sc->config.loop_back != CFG_LOOP_NONE) link_status = STATUS_UP; return link_status; } /* IOCTL SYSCALL: can sleep (but doesn't). */ static int ssi_ioctl(softc_t *sc, struct ioctl *ioctl) { int error = 0; if (ioctl->cmd == IOCTL_SNMP_SIGS) { u_int16_t mii16 = read_mii(sc, 16); mii16 &= ~MII16_SSI_MODEM; mii16 |= (MII16_SSI_MODEM & ioctl->data); write_mii(sc, 16, mii16); } else if (ioctl->cmd == IOCTL_SET_STATUS) { if (ioctl->data != 0) set_mii16_bits(sc, (MII16_SSI_DTR | MII16_SSI_RTS | MII16_SSI_DCD)); else clr_mii16_bits(sc, (MII16_SSI_DTR | MII16_SSI_RTS | MII16_SSI_DCD)); } else error = EINVAL; return error; } /* begin T1E1 card code */ /* Must not sleep. */ static void t1_config(softc_t *sc) { int i; u_int8_t pulse, lbo, gain; if (sc->status.card_type == 0) { /* defaults */ sc->status.card_type = TLP_CSID_T1E1; sc->config.crc_len = CFG_CRC_16; sc->config.loop_back = CFG_LOOP_NONE; sc->config.tx_clk_src = CFG_CLKMUX_INT; sc->config.format = CFG_FORMAT_T1ESF; sc->config.cable_len = 10; sc->config.time_slots = 0x01FFFFFE; sc->config.tx_pulse = CFG_PULSE_AUTO; sc->config.rx_gain = CFG_GAIN_AUTO; sc->config.tx_lbo = CFG_LBO_AUTO; /* Bt8370 occasionally powers up in a loopback mode. */ /* Data sheet says zero LOOP reg and do a s/w reset. */ write_framer(sc, Bt8370_LOOP, 0x00); /* no loopback */ write_framer(sc, Bt8370_CR0, 0x80); /* s/w reset */ for (i=0; i<10; i++) /* max delay 10 ms */ if (read_framer(sc, Bt8370_CR0) & 0x80) DELAY(1000); } /* Set CRC length. */ if (sc->config.crc_len == CFG_CRC_32) set_mii16_bits(sc, MII16_T1_CRC32); else clr_mii16_bits(sc, MII16_T1_CRC32); /* Invert HDLC payload data in SF/AMI mode. 
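 * Rationale: HDLC zero-stuffing limits runs of consecutive one-bits
 * to five, so inverting the payload limits runs of zeros, giving the
 * ones density that AMI (SF) lines require without B8ZS.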
 */
  /* HDLC stuff bits satisfy T1 pulse density. */
  if (FORMAT_T1SF)
    set_mii16_bits(sc, MII16_T1_INVERT);
  else
    clr_mii16_bits(sc, MII16_T1_INVERT);

  /* Set the transmitter output impedance. */
  if (FORMAT_E1ANY) set_mii16_bits(sc, MII16_T1_Z);

  /* 001:CR0 -- Control Register 0 - T1/E1 and frame format */
  write_framer(sc, Bt8370_CR0, sc->config.format);

  /* 002:JAT_CR -- Jitter Attenuator Control Register */
  if (sc->config.tx_clk_src == CFG_CLKMUX_RT) /* loop timing */
    write_framer(sc, Bt8370_JAT_CR, 0xA3); /* JAT in RX path */
  else
    { /* 64-bit elastic store; free-running JCLK and CLADO */
    write_framer(sc, Bt8370_JAT_CR, 0x4B); /* assert jcenter */
    write_framer(sc, Bt8370_JAT_CR, 0x43); /* release jcenter */
    }

  /* 00C-013:IERn -- Interrupt Enable Registers */
  for (i=Bt8370_IER7; i<=Bt8370_IER0; i++)
    write_framer(sc, i, 0); /* no interrupts; polled */

  /* 014:LOOP -- loopbacks */
  if (sc->config.loop_back == CFG_LOOP_PAYLOAD)
    write_framer(sc, Bt8370_LOOP, LOOP_PAYLOAD);
  else if (sc->config.loop_back == CFG_LOOP_LINE)
    write_framer(sc, Bt8370_LOOP, LOOP_LINE);
  else if (sc->config.loop_back == CFG_LOOP_OTHER)
    write_framer(sc, Bt8370_LOOP, LOOP_ANALOG);
  else if (sc->config.loop_back == CFG_LOOP_INWARD)
    write_framer(sc, Bt8370_LOOP, LOOP_FRAMER);
  else if (sc->config.loop_back == CFG_LOOP_DUAL)
    write_framer(sc, Bt8370_LOOP, LOOP_DUAL);
  else
    write_framer(sc, Bt8370_LOOP, 0x00); /* no loopback */

  /* 015:DL3_TS -- Data Link 3 */
  write_framer(sc, Bt8370_DL3_TS, 0x00); /* disabled */

  /* 018:PIO -- Programmable I/O */
  write_framer(sc, Bt8370_PIO, 0xFF); /* all pins are outputs */

  /* 019:POE -- Programmable Output Enable */
  write_framer(sc, Bt8370_POE, 0x00); /* all outputs are enabled */

  /* 01A:CMUX -- Clock Input Mux */
  if (sc->config.tx_clk_src == CFG_CLKMUX_EXT)
    write_framer(sc, Bt8370_CMUX, 0x0C); /* external timing */
  else
    write_framer(sc, Bt8370_CMUX, 0x0F); /* internal timing */

  /* 020:LIU_CR -- Line Interface Unit Config Register */
  write_framer(sc, Bt8370_LIU_CR, 0xC1); /* reset LIU, squelch */

  /* 022:RLIU_CR -- RX Line Interface Unit Config Reg */
  /* Errata sheet says don't use freeze-short, but we do anyway! */
  write_framer(sc, Bt8370_RLIU_CR, 0xB1); /* AGC=2048, Long Eye */

  /* Select Rx sensitivity based on cable length.
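 * (Longer loops attenuate the received signal, so the VGA gain
 * ceiling rises with the configured cable length; AUTO picks from the
 * ladder below.)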
*/ if ((gain = sc->config.rx_gain) == CFG_GAIN_AUTO) { if (sc->config.cable_len > 2000) gain = CFG_GAIN_EXTEND; else if (sc->config.cable_len > 1000) gain = CFG_GAIN_LONG; else if (sc->config.cable_len > 100) gain = CFG_GAIN_MEDIUM; else gain = CFG_GAIN_SHORT; } /* 024:VGA_MAX -- Variable Gain Amplifier Max gain */ write_framer(sc, Bt8370_VGA_MAX, gain); /* 028:PRE_EQ -- Pre Equalizer */ if (gain == CFG_GAIN_EXTEND) write_framer(sc, Bt8370_PRE_EQ, 0xE6); /* ON; thresh 6 */ else write_framer(sc, Bt8370_PRE_EQ, 0xA6); /* OFF; thresh 6 */ /* 038-03C:GAINn -- RX Equalizer gain thresholds */ write_framer(sc, Bt8370_GAIN0, 0x24); write_framer(sc, Bt8370_GAIN1, 0x28); write_framer(sc, Bt8370_GAIN2, 0x2C); write_framer(sc, Bt8370_GAIN3, 0x30); write_framer(sc, Bt8370_GAIN4, 0x34); /* 040:RCR0 -- Receiver Control Register 0 */ if (FORMAT_T1ESF) write_framer(sc, Bt8370_RCR0, 0x05); /* B8ZS, 2/5 FErrs */ else if (FORMAT_T1SF) write_framer(sc, Bt8370_RCR0, 0x84); /* AMI, 2/5 FErrs */ else if (FORMAT_E1NONE) write_framer(sc, Bt8370_RCR0, 0x41); /* HDB3, rabort */ else if (FORMAT_E1CRC) write_framer(sc, Bt8370_RCR0, 0x09); /* HDB3, 3 FErrs or 915 CErrs */ else /* E1 no CRC */ write_framer(sc, Bt8370_RCR0, 0x19); /* HDB3, 3 FErrs */ /* 041:RPATT -- Receive Test Pattern configuration */ write_framer(sc, Bt8370_RPATT, 0x3E); /* looking for framed QRSS */ /* 042:RLB -- Receive Loop Back code detector config */ write_framer(sc, Bt8370_RLB, 0x09); /* 6 bits down; 5 bits up */ /* 043:LBA -- Loop Back Activate code */ write_framer(sc, Bt8370_LBA, 0x08); /* 10000 10000 10000 ... */ /* 044:LBD -- Loop Back Deactivate code */ write_framer(sc, Bt8370_LBD, 0x24); /* 100100 100100 100100 ... */ /* 045:RALM -- Receive Alarm signal configuration */ write_framer(sc, Bt8370_RALM, 0x0C); /* yel_intg rlof_intg */ /* 046:LATCH -- Alarm/Error/Counter Latch register */ write_framer(sc, Bt8370_LATCH, 0x1F); /* stop_cnt latch_{cnt,err,alm} */ /* Select Pulse Shape based on cable length (T1 only). */ if ((pulse = sc->config.tx_pulse) == CFG_PULSE_AUTO) { if (FORMAT_T1ANY) { if (sc->config.cable_len > 200) pulse = CFG_PULSE_T1CSU; else if (sc->config.cable_len > 160) pulse = CFG_PULSE_T1DSX4; else if (sc->config.cable_len > 120) pulse = CFG_PULSE_T1DSX3; else if (sc->config.cable_len > 80) pulse = CFG_PULSE_T1DSX2; else if (sc->config.cable_len > 40) pulse = CFG_PULSE_T1DSX1; else pulse = CFG_PULSE_T1DSX0; } else pulse = CFG_PULSE_E1TWIST; } /* Select Line Build Out based on cable length (T1CSU only). 
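 * (LBO pads down the transmit pulse so a short loop does not
 * overdrive the far-end receiver; only the CSU pulse shape uses it.)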
 */
  if ((lbo = sc->config.tx_lbo) == CFG_LBO_AUTO)
    {
    if (pulse == CFG_PULSE_T1CSU)
      {
      if      (sc->config.cable_len > 1500) lbo = CFG_LBO_0DB;
      else if (sc->config.cable_len > 1000) lbo = CFG_LBO_7DB;
      else if (sc->config.cable_len >  500) lbo = CFG_LBO_15DB;
      else                                  lbo = CFG_LBO_22DB;
      }
    else
      lbo = 0;
    }

  /* 068:TLIU_CR -- Transmit LIU Control Register */
  write_framer(sc, Bt8370_TLIU_CR, (0x40 | (lbo & 0x30) | (pulse & 0x0E)));

  /* 070:TCR0 -- Transmit Framer Configuration */
  write_framer(sc, Bt8370_TCR0, sc->config.format>>1);

  /* 071:TCR1 -- Transmitter Configuration */
  if (FORMAT_T1SF)
    write_framer(sc, Bt8370_TCR1, 0x43); /* tabort, AMI PDV enforced */
  else
    write_framer(sc, Bt8370_TCR1, 0x41); /* tabort, B8ZS or HDB3 */

  /* 072:TFRM -- Transmit Frame format     MYEL YEL MF FE CRC FBIT */
  if      (sc->config.format == CFG_FORMAT_T1ESF)
    write_framer(sc, Bt8370_TFRM, 0x0B); /*    - YEL MF  - CRC FBIT */
  else if (sc->config.format == CFG_FORMAT_T1SF)
    write_framer(sc, Bt8370_TFRM, 0x19); /*    - YEL MF  -   - FBIT */
  else if (sc->config.format == CFG_FORMAT_E1FAS)
    write_framer(sc, Bt8370_TFRM, 0x11); /*    - YEL  -  -   - FBIT */
  else if (sc->config.format == CFG_FORMAT_E1FASCRC)
    write_framer(sc, Bt8370_TFRM, 0x1F); /*    - YEL MF FE CRC FBIT */
  else if (sc->config.format == CFG_FORMAT_E1FASCAS)
    write_framer(sc, Bt8370_TFRM, 0x31); /* MYEL YEL  -  -   - FBIT */
  else if (sc->config.format == CFG_FORMAT_E1FASCRCCAS)
    write_framer(sc, Bt8370_TFRM, 0x3F); /* MYEL YEL MF FE CRC FBIT */
  else if (sc->config.format == CFG_FORMAT_E1NONE)
    write_framer(sc, Bt8370_TFRM, 0x00); /* NO FRAMING BITS AT ALL! */

  /* 073:TERROR -- Transmit Error Insert */
  write_framer(sc, Bt8370_TERROR, 0x00); /* no errors, please! */

  /* 074:TMAN -- Transmit Manual Sa-byte/FEBE configuration */
  write_framer(sc, Bt8370_TMAN, 0x00); /* none */

  /* 075:TALM -- Transmit Alarm Signal Configuration */
  if (FORMAT_E1ANY)
    write_framer(sc, Bt8370_TALM, 0x38); /* auto_myel auto_yel auto_ais */
  else if (FORMAT_T1ANY)
    write_framer(sc, Bt8370_TALM, 0x18); /* auto_yel auto_ais */

  /* 076:TPATT -- Transmit Test Pattern Configuration */
  write_framer(sc, Bt8370_TPATT, 0x00); /* disabled */

  /* 077:TLB -- Transmit Inband Loopback Code Configuration */
  write_framer(sc, Bt8370_TLB, 0x00); /* disabled */

  /* 090:CLAD_CR -- Clock Rate Adapter Configuration */
  if (FORMAT_T1ANY)
    write_framer(sc, Bt8370_CLAD_CR, 0x06); /* loop filter gain 1/2^6 */
  else
    write_framer(sc, Bt8370_CLAD_CR, 0x08); /* loop filter gain 1/2^8 */

  /* 091:CSEL -- CLAD frequency Select */
  if (FORMAT_T1ANY)
    write_framer(sc, Bt8370_CSEL, 0x55); /* 1544 kHz */
  else
    write_framer(sc, Bt8370_CSEL, 0x11); /* 2048 kHz */

  /* 092:CPHASE -- CLAD Phase detector */
  if (FORMAT_T1ANY)
    write_framer(sc, Bt8370_CPHASE, 0x22); /* phase compare @ 386 kHz */
  else
    write_framer(sc, Bt8370_CPHASE, 0x00); /* phase compare @ 2048 kHz */

  if (FORMAT_T1ESF)
    /* BOP & PRM are enabled in T1ESF mode only.
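     * (They ride the ESF facility data link; SF framing has no FDL.)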
     */
    {
    /* 0A0:BOP -- Bit Oriented Protocol messages */
    write_framer(sc, Bt8370_BOP, RBOP_25 | TBOP_OFF);
    /* 0A4:DL1_TS -- Data Link 1 Time Slot Enable */
    write_framer(sc, Bt8370_DL1_TS, 0x40); /* FDL bits in odd frames */
    /* 0A6:DL1_CTL -- Data Link 1 Control */
    write_framer(sc, Bt8370_DL1_CTL, 0x03); /* FCS mode, TX on, RX on */
    /* 0A7:RDL1_FFC -- Rx Data Link 1 Fifo Fill Control */
    write_framer(sc, Bt8370_RDL1_FFC, 0x30); /* assert "near full" at 48 */
    /* 0AA:PRM -- Performance Report Messages */
    write_framer(sc, Bt8370_PRM, 0x80);
    }

  /* 0D0:SBI_CR -- System Bus Interface Configuration Register */
  if (FORMAT_T1ANY)
    write_framer(sc, Bt8370_SBI_CR, 0x47); /* 1.544 with 24 TS +Fbits */
  else
    write_framer(sc, Bt8370_SBI_CR, 0x46); /* 2.048 with 32 TS */

  /* 0D1:RSB_CR -- Receive System Bus Configuration Register */
  /* Change RINDO & RFSYNC on falling edge of RSBCLKI. */
  write_framer(sc, Bt8370_RSB_CR, 0x70);

  /* 0D2,0D3:RSYNC_{TS,BIT} -- Receive frame Sync offset */
  write_framer(sc, Bt8370_RSYNC_BIT, 0x00);
  write_framer(sc, Bt8370_RSYNC_TS,  0x00);

  /* 0D4:TSB_CR -- Transmit System Bus Configuration Register */
  /* Change TINDO & TFSYNC on falling edge of TSBCLKI. */
  write_framer(sc, Bt8370_TSB_CR, 0x30);

  /* 0D5,0D6:TSYNC_{TS,BIT} -- Transmit frame Sync offset */
  write_framer(sc, Bt8370_TSYNC_BIT, 0x00);
  write_framer(sc, Bt8370_TSYNC_TS,  0x00);

  /* 0D7:RSIG_CR -- Receive SIGnalling Configuration Register */
  write_framer(sc, Bt8370_RSIG_CR, 0x00);

  /* Assign and configure 64Kb TIME SLOTS. */
  /* TS24..TS1 must be assigned for T1, TS31..TS0 for E1. */
  /* Timeslots with no user data have RINDO and TINDO off. */
  for (i=0; i<32; i++)
    {
    /* 0E0-0FF:SBCn -- System Bus Per-Channel Control */
    if (FORMAT_T1ANY && (i==0 || i>24))
      write_framer(sc, Bt8370_SBCn +i, 0x00); /* not assigned in T1 mode */
    else if (FORMAT_E1ANY && (i==0) && !FORMAT_E1NONE)
      write_framer(sc, Bt8370_SBCn +i, 0x01); /* assigned, TS0 o/h bits */
    else if (FORMAT_E1CAS && (i==16) && !FORMAT_E1NONE)
      write_framer(sc, Bt8370_SBCn +i, 0x01); /* assigned, TS16 o/h bits */
    else if ((sc->config.time_slots & (1<<i)) != 0)
      write_framer(sc, Bt8370_SBCn +i, 0x0D); /* assigned; RINDO & TINDO on */
    else
      write_framer(sc, Bt8370_SBCn +i, 0x01); /* assigned; indicators off */
    /* 100-11F:TPCn -- Transmit Per-Channel Control */
    if ((sc->config.time_slots & (1<<i)) != 0)
      write_framer(sc, Bt8370_TPCn +i, 0x00); /* transmit user data */
    else
      write_framer(sc, Bt8370_TPCn +i, 0x20); /* transmit idle code */
    }

  /* Enable the transmitter output drivers. */
  set_mii16_bits(sc, MII16_T1_XOE);
  }

static void
t1_ident(softc_t *sc)
  {
  printf(", Bt837%x rev %x",
   read_framer(sc, Bt8370_DID)>>4, read_framer(sc, Bt8370_DID)&0x0F);
  }

/* Called once a second; must not sleep. */
static int
t1_watchdog(softc_t *sc)
  {
  u_int16_t LCV = 0, FERR = 0, CRC = 0, FEBE = 0;
  u_int8_t alm1, alm3, loop, isr0;
  int link_status = STATUS_UP;
  int i;

  /* Read the alarm registers. */
  alm1 = read_framer(sc, Bt8370_ALM1);
  alm3 = read_framer(sc, Bt8370_ALM3);
  loop = read_framer(sc, Bt8370_LOOP);
  isr0 = read_framer(sc, Bt8370_ISR0);

  /* Always ignore the SIGFRZ alarm bit. */
  alm1 &= ~ALM1_SIGFRZ;
  if (FORMAT_T1ANY)       /* ignore RYEL in T1 modes */
    alm1 &= ~ALM1_RYEL;
  else if (FORMAT_E1NONE) /* ignore all alarms except LOS */
    alm1 &= ALM1_RLOS;

  /* Software is alive. */
  led_inv(sc, MII16_T1_LED_GRN);

  /* Receiving Alarm Indication Signal (AIS). */
  if      ((alm1 & ALM1_RAIS)!=0) /* receiving ais */
    led_on(sc, MII16_T1_LED_BLU);
  else if ((alm1 & ALM1_RLOS)!=0) /* sending ais */
    led_inv(sc, MII16_T1_LED_BLU);
  else
    led_off(sc, MII16_T1_LED_BLU);

  /* Receiving Remote Alarm Indication (RAI). */
  if      ((alm1 & (ALM1_RMYEL | ALM1_RYEL))!=0) /* receiving rai */
    led_on(sc, MII16_T1_LED_YEL);
  else if ((alm1 & ALM1_RLOF)!=0) /* sending rai */
    led_inv(sc, MII16_T1_LED_YEL);
  else
    led_off(sc, MII16_T1_LED_YEL);

  /* If any alarm bits are set, then the link is 'down'. */
  /* The bad bits are: rmyel ryel rais ralos rlos rlof. */
  /* Some alarm bits have been masked by this point. */
  if (alm1 != 0) link_status = STATUS_DOWN;

  /* Declare local Red Alarm if the link is down.
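 * Red alarm means a local loss of frame or signal; the LED blinks
 * instead while a loopback test is counting down loop_timer.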
*/ if (link_status == STATUS_DOWN) led_on(sc, MII16_T1_LED_RED); else if (sc->loop_timer != 0) /* loopback is active */ led_inv(sc, MII16_T1_LED_RED); else led_off(sc, MII16_T1_LED_RED); /* Print latched error bits if they changed. */ if ((DRIVER_DEBUG) && (alm1 != sc->last_alm1)) { char *on = "ON ", *off = "OFF"; printf("%s: RLOF=%s RLOS=%s RALOS=%s RAIS=%s RYEL=%s RMYEL=%s\n", NAME_UNIT, (alm1 & ALM1_RLOF) ? on : off, (alm1 & ALM1_RLOS) ? on : off, (alm1 & ALM1_RALOS) ? on : off, (alm1 & ALM1_RAIS) ? on : off, (alm1 & ALM1_RYEL) ? on : off, (alm1 & ALM1_RMYEL) ? on : off); } /* Check and print error counters if non-zero. */ LCV = read_framer(sc, Bt8370_LCV_LO) + (read_framer(sc, Bt8370_LCV_HI)<<8); if (!FORMAT_E1NONE) FERR = read_framer(sc, Bt8370_FERR_LO) + (read_framer(sc, Bt8370_FERR_HI)<<8); if (FORMAT_E1CRC || FORMAT_T1ESF) CRC = read_framer(sc, Bt8370_CRC_LO) + (read_framer(sc, Bt8370_CRC_HI)<<8); if (FORMAT_E1CRC) FEBE = read_framer(sc, Bt8370_FEBE_LO) + (read_framer(sc, Bt8370_FEBE_HI)<<8); /* Only LCV is valid if Out-Of-Frame */ if (FORMAT_E1NONE) FERR = CRC = FEBE = 0; if ((DRIVER_DEBUG) && (LCV || FERR || CRC || FEBE)) printf("%s: LCV=%u FERR=%u CRC=%u FEBE=%u\n", NAME_UNIT, LCV, FERR, CRC, FEBE); /* Driver keeps crude link-level error counters (SNMP is better). */ sc->status.cntrs.lcv_errs += LCV; sc->status.cntrs.frm_errs += FERR; sc->status.cntrs.crc_errs += CRC; sc->status.cntrs.febe_errs += FEBE; /* Check for BOP messages in the ESF Facility Data Link. */ if ((FORMAT_T1ESF) && (read_framer(sc, Bt8370_ISR1) & 0x80)) { u_int8_t bop_code = read_framer(sc, Bt8370_RBOP) & 0x3F; switch (bop_code) { case T1BOP_OOF: { if ((DRIVER_DEBUG) && ((sc->last_alm1 & ALM1_RMYEL)==0)) printf("%s: Receiving a 'yellow alarm' BOP msg\n", NAME_UNIT); break; } case T1BOP_LINE_UP: { if (DRIVER_DEBUG) printf("%s: Received a 'line loopback activate' BOP msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, LOOP_LINE); sc->loop_timer = 305; break; } case T1BOP_LINE_DOWN: { if (DRIVER_DEBUG) printf("%s: Received a 'line loopback deactivate' BOP msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, read_framer(sc, Bt8370_LOOP) & ~LOOP_LINE); sc->loop_timer = 0; break; } case T1BOP_PAY_UP: { if (DRIVER_DEBUG) printf("%s: Received a 'payload loopback activate' BOP msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, LOOP_PAYLOAD); sc->loop_timer = 305; break; } case T1BOP_PAY_DOWN: { if (DRIVER_DEBUG) printf("%s: Received a 'payload loopback deactivate' BOP msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, read_framer(sc, Bt8370_LOOP) & ~LOOP_PAYLOAD); sc->loop_timer = 0; break; } default: { if (DRIVER_DEBUG) printf("%s: Received a type 0x%02X BOP msg\n", NAME_UNIT, bop_code); break; } } } /* Check for HDLC pkts in the ESF Facility Data Link. */ if ((FORMAT_T1ESF) && (read_framer(sc, Bt8370_ISR2) & 0x70)) { /* while (not fifo-empty && not start-of-msg) flush fifo */ while ((read_framer(sc, Bt8370_RDL1_STAT) & 0x0C) == 0) read_framer(sc, Bt8370_RDL1); /* If (not fifo-empty), then begin processing fifo contents. */ if ((read_framer(sc, Bt8370_RDL1_STAT) & 0x0C) == 0x08) { u_int8_t msg[64]; u_int8_t stat = read_framer(sc, Bt8370_RDL1); sc->status.cntrs.fdl_pkts++; for (i=0; i<(stat & 0x3F); i++) msg[i] = read_framer(sc, Bt8370_RDL1); /* Is this FDL message a T1.403 performance report? */ if (((stat & 0x3F)==11) && ((msg[0]==0x38) || (msg[0]==0x3A)) && (msg[1]==1) && (msg[2]==3)) /* Copy 4 PRs from FDL pkt to SNMP struct. */ memcpy(sc->status.snmp.t1.prm, msg+3, 8); } } /* Check for inband loop up/down commands. 
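 * (ESF lines signal loopback with BOP messages on the FDL, handled
 * above; SF lines have no FDL, so CSU loopback is requested with
 * repeating inband bit patterns instead.)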
*/ if (FORMAT_T1ANY) { u_int8_t isr6 = read_framer(sc, Bt8370_ISR6); u_int8_t alarm2 = read_framer(sc, Bt8370_ALM2); u_int8_t tlb = read_framer(sc, Bt8370_TLB); /* Inband Code == Loop Up && On Transition && Inband Tx Inactive */ if ((isr6 & 0x40) && (alarm2 & 0x40) && ((tlb & 1)==0)) { /* CSU loop up is 10000 10000 ... */ if (DRIVER_DEBUG) printf("%s: Received a 'CSU Loop Up' inband msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, LOOP_LINE); /* Loop up */ sc->loop_timer = 305; } /* Inband Code == Loop Down && On Transition && Inband Tx Inactive */ if ((isr6 & 0x80) && (alarm2 & 0x80) && ((tlb & 1)==0)) { /* CSU loop down is 100 100 100 ... */ if (DRIVER_DEBUG) printf("%s: Received a 'CSU Loop Down' inband msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, read_framer(sc, Bt8370_LOOP) & ~LOOP_LINE); /* loop down */ sc->loop_timer = 0; } } /* Manually send Yellow Alarm BOP msgs. */ if (FORMAT_T1ESF) { u_int8_t isr7 = read_framer(sc, Bt8370_ISR7); if ((isr7 & 0x02) && (alm1 & 0x02)) /* RLOF on-transition */ { /* Start sending continuous Yellow Alarm BOP messages. */ write_framer(sc, Bt8370_BOP, RBOP_25 | TBOP_CONT); write_framer(sc, Bt8370_TBOP, 0x00); /* send BOP; order matters */ } else if ((isr7 & 0x02) && ((alm1 & 0x02)==0)) /* RLOF off-transition */ { /* Stop sending continuous Yellow Alarm BOP messages. */ write_framer(sc, Bt8370_BOP, RBOP_25 | TBOP_OFF); } } /* Time out loopback requests. */ if (sc->loop_timer != 0) if (--sc->loop_timer == 0) if (loop != 0) { if (DRIVER_DEBUG) printf("%s: Timeout: Loop Down after 300 seconds\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, loop & ~(LOOP_PAYLOAD | LOOP_LINE)); } /* RX Test Pattern status */ if ((DRIVER_DEBUG) && (isr0 & 0x10)) printf("%s: RX Test Pattern Sync\n", NAME_UNIT); /* SNMP Error Counters */ sc->status.snmp.t1.lcv = LCV; sc->status.snmp.t1.fe = FERR; sc->status.snmp.t1.crc = CRC; sc->status.snmp.t1.febe = FEBE; /* SNMP Line Status */ sc->status.snmp.t1.line = 0; if (alm1 & ALM1_RMYEL) sc->status.snmp.t1.line |= TLINE_RX_RAI; if (alm1 & ALM1_RYEL) sc->status.snmp.t1.line |= TLINE_RX_RAI; if (alm1 & ALM1_RLOF) sc->status.snmp.t1.line |= TLINE_TX_RAI; if (alm1 & ALM1_RAIS) sc->status.snmp.t1.line |= TLINE_RX_AIS; if (alm1 & ALM1_RLOS) sc->status.snmp.t1.line |= TLINE_TX_AIS; if (alm1 & ALM1_RLOF) sc->status.snmp.t1.line |= TLINE_LOF; if (alm1 & ALM1_RLOS) sc->status.snmp.t1.line |= TLINE_LOS; if (alm3 & ALM3_RMAIS) sc->status.snmp.t1.line |= T1LINE_RX_TS16_AIS; if (alm3 & ALM3_SRED) sc->status.snmp.t1.line |= T1LINE_TX_TS16_LOMF; if (alm3 & ALM3_SEF) sc->status.snmp.t1.line |= T1LINE_SEF; if (isr0 & 0x10) sc->status.snmp.t1.line |= T1LINE_RX_TEST; if ((alm1 & ALM1_RMYEL) && (FORMAT_E1CAS)) sc->status.snmp.t1.line |= T1LINE_RX_TS16_LOMF; /* SNMP Loopback Status */ sc->status.snmp.t1.loop &= ~(TLOOP_FAR_LINE | TLOOP_FAR_PAYLOAD); if (sc->config.loop_back == CFG_LOOP_TULIP) sc->status.snmp.t1.loop |= TLOOP_NEAR_OTHER; if (loop & LOOP_PAYLOAD) sc->status.snmp.t1.loop |= TLOOP_NEAR_PAYLOAD; if (loop & LOOP_LINE) sc->status.snmp.t1.loop |= TLOOP_NEAR_LINE; if (loop & LOOP_ANALOG) sc->status.snmp.t1.loop |= TLOOP_NEAR_OTHER; if (loop & LOOP_FRAMER) sc->status.snmp.t1.loop |= TLOOP_NEAR_INWARD; /* Remember this state until next time. */ sc->last_alm1 = alm1; /* If an INWARD loopback is in effect, link status is UP */ if (sc->config.loop_back != CFG_LOOP_NONE) /* XXX INWARD ONLY */ link_status = STATUS_UP; return link_status; } /* IOCTL SYSCALL: can sleep. 
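 *
 * Before the ioctl helpers, an illustrative aside -- this helper is
 * not in the original driver; it just names the Bt8370 LOOP codes
 * used by t1_config, t1_watchdog and t1_ioctl, which is handy when
 * printf-debugging loopback transitions.
 */

static const char *
t1_loop_name(u_int8_t loop)
  {
  if (loop == 0)            return "none";
  if (loop == LOOP_PAYLOAD) return "payload";
  if (loop == LOOP_LINE)    return "line";
  if (loop == LOOP_ANALOG)  return "analog";
  if (loop == LOOP_FRAMER)  return "framer";
  if (loop == LOOP_DUAL)    return "dual";
  return "combination";
  }

/* IOCTL SYSCALL: can sleep.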
*/ static void t1_send_bop(softc_t *sc, int bop_code) { u_int8_t bop; int i; /* The BOP transmitter could be sending a continuous */ /* BOP msg when told to send this BOP_25 message. */ /* So save and restore the state of the BOP machine. */ bop = read_framer(sc, Bt8370_BOP); write_framer(sc, Bt8370_BOP, RBOP_OFF | TBOP_OFF); for (i=0; i<40; i++) /* max delay 400 ms. */ if (read_framer(sc, Bt8370_BOP_STAT) & 0x80) SLEEP(10000); /* send 25 repetitions of bop_code */ write_framer(sc, Bt8370_BOP, RBOP_OFF | TBOP_25); write_framer(sc, Bt8370_TBOP, bop_code); /* order matters */ /* wait for tx to stop */ for (i=0; i<40; i++) /* max delay 400 ms. */ if (read_framer(sc, Bt8370_BOP_STAT) & 0x80) SLEEP(10000); /* Restore previous state of the BOP machine. */ write_framer(sc, Bt8370_BOP, bop); } /* IOCTL SYSCALL: can sleep. */ static int t1_ioctl(softc_t *sc, struct ioctl *ioctl) { int error = 0; switch (ioctl->cmd) { case IOCTL_SNMP_SEND: /* set opstatus? */ { switch (ioctl->data) { case TSEND_NORMAL: { write_framer(sc, Bt8370_TPATT, 0x00); /* tx pattern generator off */ write_framer(sc, Bt8370_RPATT, 0x00); /* rx pattern detector off */ write_framer(sc, Bt8370_TLB, 0x00); /* tx inband generator off */ break; } case TSEND_LINE: { if (FORMAT_T1ESF) t1_send_bop(sc, T1BOP_LINE_UP); else if (FORMAT_T1SF) { write_framer(sc, Bt8370_LBP, 0x08); /* 10000 10000 ... */ write_framer(sc, Bt8370_TLB, 0x05); /* 5 bits, framed, start */ } sc->status.snmp.t1.loop |= TLOOP_FAR_LINE; break; } case TSEND_PAYLOAD: { t1_send_bop(sc, T1BOP_PAY_UP); sc->status.snmp.t1.loop |= TLOOP_FAR_PAYLOAD; break; } case TSEND_RESET: { if (sc->status.snmp.t1.loop == TLOOP_FAR_LINE) { if (FORMAT_T1ESF) t1_send_bop(sc, T1BOP_LINE_DOWN); else if (FORMAT_T1SF) { write_framer(sc, Bt8370_LBP, 0x24); /* 100100 100100 ... */ write_framer(sc, Bt8370_TLB, 0x09); /* 6 bits, framed, start */ } sc->status.snmp.t1.loop &= ~TLOOP_FAR_LINE; } if (sc->status.snmp.t1.loop == TLOOP_FAR_PAYLOAD) { t1_send_bop(sc, T1BOP_PAY_DOWN); sc->status.snmp.t1.loop &= ~TLOOP_FAR_PAYLOAD; } break; } case TSEND_QRS: { write_framer(sc, Bt8370_TPATT, 0x1E); /* framed QRSS */ break; } default: { error = EINVAL; break; } } break; } case IOCTL_SNMP_LOOP: /* set opstatus = test? */ { u_int8_t new_loop = 0; if (ioctl->data == CFG_LOOP_NONE) new_loop = 0; else if (ioctl->data == CFG_LOOP_PAYLOAD) new_loop = LOOP_PAYLOAD; else if (ioctl->data == CFG_LOOP_LINE) new_loop = LOOP_LINE; else if (ioctl->data == CFG_LOOP_OTHER) new_loop = LOOP_ANALOG; else if (ioctl->data == CFG_LOOP_INWARD) new_loop = LOOP_FRAMER; else if (ioctl->data == CFG_LOOP_DUAL) new_loop = LOOP_DUAL; else error = EINVAL; if (error == 0) { write_framer(sc, Bt8370_LOOP, new_loop); sc->config.loop_back = ioctl->data; } break; } default: error = EINVAL; break; } return error; } static struct card hssi_card = { .config = hssi_config, .ident = hssi_ident, .watchdog = hssi_watchdog, .ioctl = hssi_ioctl, }; static struct card t3_card = { .config = t3_config, .ident = t3_ident, .watchdog = t3_watchdog, .ioctl = t3_ioctl, }; static struct card ssi_card = { .config = ssi_config, .ident = ssi_ident, .watchdog = ssi_watchdog, .ioctl = ssi_ioctl, }; static struct card t1_card = { .config = t1_config, .ident = t1_ident, .watchdog = t1_watchdog, .ioctl = t1_ioctl, }; /* RAWIP is raw IP packets (v4 or v6) in HDLC frames with NO HEADERS. */ /* No HDLC Address/Control fields! No line control protocol at all! */ /* rxintr_cleanup calls this to give a newly arrived pkt to higher levels. 
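 * (That routine, lmc_raw_input, follows the brief aside below.)
 *
 * The aside is an illustrative sketch, not in the original driver: an
 * assumed attach-time selector mapping a CSID from PCI config space to
 * one of the four card personalities defined above.
 */

static struct card *
card_for_csid_sketch(u_int32_t csid)
  {
  if (csid == TLP_CSID_HSSI)  return &hssi_card;
  if (csid == TLP_CSID_HSSIc) return &hssi_card; /* cPCI rev C */
  if (csid == TLP_CSID_T3)    return &t3_card;
  if (csid == TLP_CSID_SSI)   return &ssi_card;
  if (csid == TLP_CSID_T1E1)  return &t1_card;
  return NULL;
  }

/* rxintr_cleanup calls this to give a newly arrived pkt to higher levels.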
*/ static void lmc_raw_input(struct ifnet *ifp, struct mbuf *mbuf) { softc_t *sc = IFP2SC(ifp); M_SETFIB(mbuf, ifp->if_fib); # if INET if (mbuf->m_data[0]>>4 == 4) netisr_dispatch(NETISR_IP, mbuf); else # endif # if INET6 if (mbuf->m_data[0]>>4 == 6) netisr_dispatch(NETISR_IPV6, mbuf); else # endif { m_freem(mbuf); sc->status.cntrs.idiscards++; if (DRIVER_DEBUG) printf("%s: lmc_raw_input: rx pkt discarded: not IPv4 or IPv6\n", NAME_UNIT); } } /* * We are "standing on the head of a pin" in these routines. * Tulip CSRs can be accessed, but nothing else is interrupt-safe! * Do NOT access: MII, GPIO, SROM, BIOSROM, XILINX, SYNTH, or DAC. */ /* Singly-linked tail-queues hold mbufs with active DMA. * For RX, single mbuf clusters; for TX, mbuf chains are queued. * NB: mbufs are linked through their m_nextpkt field. * Callers must hold sc->bottom_lock; not otherwise locked. */ /* Put an mbuf (chain) on the tail of the descriptor ring queue. */ static void /* BSD version */ mbuf_enqueue(struct desc_ring *ring, struct mbuf *m) { m->m_nextpkt = NULL; if (ring->tail == NULL) ring->head = m; else ring->tail->m_nextpkt = m; ring->tail = m; } /* Get an mbuf (chain) from the head of the descriptor ring queue. */ static struct mbuf* /* BSD version */ mbuf_dequeue(struct desc_ring *ring) { struct mbuf *m = ring->head; if (m != NULL) if ((ring->head = m->m_nextpkt) == NULL) ring->tail = NULL; return m; } static void /* *** FreeBSD ONLY *** Callout from bus_dmamap_load() */ fbsd_dmamap_load(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct desc_ring *ring = arg; ring->nsegs = error ? 0 : nsegs; ring->segs[0] = segs[0]; ring->segs[1] = segs[1]; } /* Initialize a DMA descriptor ring. */ static int /* BSD version */ create_ring(softc_t *sc, struct desc_ring *ring, int num_descs) { struct dma_desc *descs; int size_descs = sizeof(struct dma_desc)*num_descs; int i, error = 0; /* The DMA descriptor array must not cross a page boundary. */ if (size_descs > PAGE_SIZE) { printf("%s: DMA descriptor array > PAGE_SIZE (%d)\n", NAME_UNIT, (u_int)PAGE_SIZE); return EINVAL; } /* Create a DMA tag for descriptors and buffers. */ if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE, 2, PAGE_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &ring->tag))) { printf("%s: bus_dma_tag_create() failed: error %d\n", NAME_UNIT, error); return error; } /* Allocate wired physical memory for DMA descriptor array */ /* and map physical address to kernel virtual address. */ if ((error = bus_dmamem_alloc(ring->tag, (void**)&ring->first, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->map))) { printf("%s: bus_dmamem_alloc() failed; error %d\n", NAME_UNIT, error); return error; } descs = ring->first; /* Map kernel virtual address to PCI address for DMA descriptor array. */ if ((error = bus_dmamap_load(ring->tag, ring->map, descs, size_descs, fbsd_dmamap_load, ring, 0))) { printf("%s: bus_dmamap_load() failed; error %d\n", NAME_UNIT, error); return error; } ring->dma_addr = ring->segs[0].ds_addr; /* Allocate dmamaps for each DMA descriptor. 
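 * (One dmamap per descriptor lets rxintr_setup and txintr_setup_mbuf
 * load and unload each buffer independently.)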
 */
  for (i=0; i<num_descs; i++)
    if ((error = bus_dmamap_create(ring->tag, 0, &descs[i].map)))
      {
      printf("%s: bus_dmamap_create() failed; error %d\n", NAME_UNIT, error);
      return error;
      }

  ring->read  = descs;
  ring->write = descs;
  ring->first = descs;
  ring->last  = descs + num_descs -1;
  ring->last->control = TLP_DCTL_END_RING;
  ring->num_descs  = num_descs;
  ring->size_descs = size_descs;
  ring->head = NULL;
  ring->tail = NULL;

  return 0;
  }

/* Destroy a DMA descriptor ring */
static void /* BSD version */
destroy_ring(softc_t *sc, struct desc_ring *ring)
  {
  struct dma_desc *desc;
  struct mbuf *m;

  /* Free queued mbufs. */
  while ((m = mbuf_dequeue(ring)) != NULL)
    m_freem(m);

  /* TX may have one pkt that is not on any queue. */
  if (sc->tx_mbuf != NULL)
    {
    m_freem(sc->tx_mbuf);
    sc->tx_mbuf = NULL;
    }

  /* Unmap active DMA descriptors. */
  while (ring->read != ring->write)
    {
    bus_dmamap_unload(ring->tag, ring->read->map);
    if (ring->read++ == ring->last) ring->read = ring->first;
    }

  /* Free the dmamaps of all DMA descriptors. */
  for (desc=ring->first; desc!=ring->last+1; desc++)
    if (desc->map != NULL)
      bus_dmamap_destroy(ring->tag, desc->map);

  /* Unmap PCI address for DMA descriptor array. */
  if (ring->dma_addr != 0)
    bus_dmamap_unload(ring->tag, ring->map);

  /* Free kernel memory for DMA descriptor array. */
  if (ring->first != NULL)
    bus_dmamem_free(ring->tag, ring->first, ring->map);

  /* Free the DMA tag created for this ring. */
  if (ring->tag != NULL)
    bus_dma_tag_destroy(ring->tag);
  }

/* Clean up after a packet has been received. */
static int /* BSD version */
rxintr_cleanup(softc_t *sc)
  {
  struct desc_ring *ring = &sc->rxring;
  struct dma_desc *first_desc, *last_desc;
  struct mbuf *first_mbuf=NULL, *last_mbuf=NULL;
  struct mbuf *new_mbuf;
  int pkt_len, desc_len;

#if defined(DEVICE_POLLING)
  /* Input packet flow control (livelock prevention): */
  /* Give pkts to higher levels only if quota is > 0. */
  if (sc->quota <= 0) return 0;
#endif

  /* This looks complicated, but remember: typically packets up */
  /* to 2048 bytes long fit in one mbuf and use one descriptor. */
  first_desc = last_desc = ring->read;

  /* ASSERTION: If there is a descriptor in the ring and the hardware has */
  /* finished with it, then that descriptor will have RX_FIRST_DESC set. */
  if ((ring->read != ring->write) && /* descriptor ring not empty */
   ((ring->read->status & TLP_DSTS_OWNER) == 0) && /* hardware done */
   ((ring->read->status & TLP_DSTS_RX_FIRST_DESC) == 0)) /* should be set */
    panic("%s: rxintr_cleanup: rx-first-descriptor not set.\n", NAME_UNIT);

  /* First decide if a complete packet has arrived. */
  /* Run down DMA descriptors looking for one marked "last". */
  /* Bail out if an active descriptor is encountered. */
  /* Accumulate most significant bits of packet length. */
  pkt_len = 0;
  for (;;)
    {
    if (last_desc == ring->write) return 0;  /* no more descs */
    if (last_desc->status & TLP_DSTS_OWNER) return 0; /* still active */
    if (last_desc->status & TLP_DSTS_RX_LAST_DESC) break; /* end of packet */
    pkt_len += last_desc->length1 + last_desc->length2; /* entire desc filled */
    if (last_desc++->control & TLP_DCTL_END_RING)
      last_desc = ring->first; /* ring wrap */
    }

  /* A complete packet has arrived; how long is it? */
  /* H/w ref man shows RX pkt length as a 14-bit field. */
  /* An experiment found that only the 12 LSBs work. */
  if (((last_desc->status>>16)&0xFFF) == 0) pkt_len += 4096; /* carry-bit */
  pkt_len = (pkt_len & 0xF000) + ((last_desc->status>>16) & 0x0FFF);

  /* Subtract the CRC length unless doing so would underflow.
*/ if (pkt_len >= sc->config.crc_len) pkt_len -= sc->config.crc_len; /* Run down DMA descriptors again doing the following: * 1) put pkt info in pkthdr of first mbuf, * 2) link mbufs, * 3) set mbuf lengths. */ first_desc = ring->read; do { /* Read a DMA descriptor from the ring. */ last_desc = ring->read; /* Advance the ring read pointer. */ if (ring->read++ == ring->last) ring->read = ring->first; /* Dequeue the corresponding cluster mbuf. */ new_mbuf = mbuf_dequeue(ring); if (new_mbuf == NULL) panic("%s: rxintr_cleanup: expected an mbuf\n", NAME_UNIT); desc_len = last_desc->length1 + last_desc->length2; /* If bouncing, copy bounce buf to mbuf. */ DMA_SYNC(last_desc->map, desc_len, BUS_DMASYNC_POSTREAD); /* Unmap kernel virtual address to PCI address. */ bus_dmamap_unload(ring->tag, last_desc->map); /* 1) Put pkt info in pkthdr of first mbuf. */ if (last_desc == first_desc) { first_mbuf = new_mbuf; first_mbuf->m_pkthdr.len = pkt_len; /* total pkt length */ first_mbuf->m_pkthdr.rcvif = sc->ifp; /* how it got here */ } else /* 2) link mbufs. */ { last_mbuf->m_next = new_mbuf; /* M_PKTHDR should be set in the first mbuf only. */ new_mbuf->m_flags &= ~M_PKTHDR; } last_mbuf = new_mbuf; /* 3) Set mbuf lengths. */ new_mbuf->m_len = (pkt_len >= desc_len) ? desc_len : pkt_len; pkt_len -= new_mbuf->m_len; } while ((last_desc->status & TLP_DSTS_RX_LAST_DESC) == 0); /* Decide whether to accept or to discard this packet. */ /* RxHDLC sets MIIERR for bad CRC, abort and partial byte at pkt end. */ if (((last_desc->status & TLP_DSTS_RX_BAD) == 0) && (sc->status.oper_status == STATUS_UP) && (first_mbuf->m_pkthdr.len > 0)) { /* Optimization: copy a small pkt into a small mbuf. */ if (first_mbuf->m_pkthdr.len <= COPY_BREAK) { MGETHDR(new_mbuf, M_NOWAIT, MT_DATA); if (new_mbuf != NULL) { new_mbuf->m_pkthdr.rcvif = first_mbuf->m_pkthdr.rcvif; new_mbuf->m_pkthdr.len = first_mbuf->m_pkthdr.len; new_mbuf->m_len = first_mbuf->m_len; memcpy(new_mbuf->m_data, first_mbuf->m_data, first_mbuf->m_pkthdr.len); m_freem(first_mbuf); first_mbuf = new_mbuf; } } /* Include CRC and one flag byte in input byte count. */ sc->status.cntrs.ibytes += first_mbuf->m_pkthdr.len + sc->config.crc_len +1; sc->status.cntrs.ipackets++; if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1); LMC_BPF_MTAP(first_mbuf); #if defined(DEVICE_POLLING) sc->quota--; #endif /* Give this good packet to the network stacks. */ #if NETGRAPH if (sc->ng_hook != NULL) /* is hook connected? */ { int error; /* ignore error */ NG_SEND_DATA_ONLY(error, sc->ng_hook, first_mbuf); return 1; /* did something */ } #endif /* NETGRAPH */ if (sc->config.line_pkg == PKG_RAWIP) lmc_raw_input(sc->ifp, first_mbuf); else { #if NSPPP sppp_input(sc->ifp, first_mbuf); #elif P2P new_mbuf = first_mbuf; while (new_mbuf != NULL) { sc->p2p->p2p_hdrinput(sc->p2p, new_mbuf->m_data, new_mbuf->m_len); new_mbuf = new_mbuf->m_next; } sc->p2p->p2p_input(sc->p2p, NULL); m_freem(first_mbuf); #else m_freem(first_mbuf); sc->status.cntrs.idiscards++; #endif } } else if (sc->status.oper_status != STATUS_UP) { /* If the link is down, this packet is probably noise. */ m_freem(first_mbuf); sc->status.cntrs.idiscards++; if (DRIVER_DEBUG) printf("%s: rxintr_cleanup: rx pkt discarded: link down\n", NAME_UNIT); } else /* Log and discard this bad packet. */ { if (DRIVER_DEBUG) printf("%s: RX bad pkt; len=%d %s%s%s%s\n", NAME_UNIT, first_mbuf->m_pkthdr.len, (last_desc->status & TLP_DSTS_RX_MII_ERR) ? " miierr" : "", (last_desc->status & TLP_DSTS_RX_DRIBBLE) ? 
" dribble" : "", (last_desc->status & TLP_DSTS_RX_DESC_ERR) ? " descerr" : "", (last_desc->status & TLP_DSTS_RX_OVERRUN) ? " overrun" : ""); if (last_desc->status & TLP_DSTS_RX_OVERRUN) sc->status.cntrs.fifo_over++; else sc->status.cntrs.ierrors++; m_freem(first_mbuf); } return 1; /* did something */ } /* Setup (prepare) to receive a packet. */ /* Try to keep the RX descriptor ring full of empty buffers. */ static int /* BSD version */ rxintr_setup(softc_t *sc) { struct desc_ring *ring = &sc->rxring; struct dma_desc *desc; struct mbuf *m; int desc_len; int error; /* Ring is full if (wrap(write+1)==read) */ if (((ring->write == ring->last) ? ring->first : ring->write+1) == ring->read) return 0; /* ring is full; nothing to do */ /* Allocate a small mbuf and attach an mbuf cluster. */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { sc->status.cntrs.rxdma++; if (DRIVER_DEBUG) printf("%s: rxintr_setup: MGETHDR() failed\n", NAME_UNIT); return 0; } - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) + if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); sc->status.cntrs.rxdma++; if (DRIVER_DEBUG) printf("%s: rxintr_setup: MCLGET() failed\n", NAME_UNIT); return 0; } /* Queue the mbuf for later processing by rxintr_cleanup. */ mbuf_enqueue(ring, m); /* Write a DMA descriptor into the ring. */ /* Hardware won't see it until the OWNER bit is set. */ desc = ring->write; /* Advance the ring write pointer. */ if (ring->write++ == ring->last) ring->write = ring->first; desc_len = (MCLBYTES < MAX_DESC_LEN) ? MCLBYTES : MAX_DESC_LEN; /* Map kernel virtual address to PCI address. */ if ((error = DMA_LOAD(desc->map, m->m_data, desc_len))) printf("%s: bus_dmamap_load(rx) failed; error %d\n", NAME_UNIT, error); /* Invalidate the cache for this mbuf. */ DMA_SYNC(desc->map, desc_len, BUS_DMASYNC_PREREAD); /* Set up the DMA descriptor. */ desc->address1 = ring->segs[0].ds_addr; desc->length1 = desc_len>>1; desc->address2 = desc->address1 + desc->length1; desc->length2 = desc_len>>1; /* Before setting the OWNER bit, flush the cache (memory barrier). */ DMA_SYNC(ring->map, ring->size_descs, BUS_DMASYNC_PREWRITE); /* Commit the DMA descriptor to the hardware. */ desc->status = TLP_DSTS_OWNER; /* Notify the receiver that there is another buffer available. */ WRITE_CSR(TLP_RX_POLL, 1); return 1; /* did something */ } /* Clean up after a packet has been transmitted. */ /* Free the mbuf chain and update the DMA descriptor ring. */ static int /* BSD version */ txintr_cleanup(softc_t *sc) { struct desc_ring *ring = &sc->txring; struct dma_desc *desc; while ((ring->read != ring->write) && /* while ring is not empty */ ((ring->read->status & TLP_DSTS_OWNER) == 0)) { /* Read a DMA descriptor from the ring. */ desc = ring->read; /* Advance the ring read pointer. */ if (ring->read++ == ring->last) ring->read = ring->first; /* This is a no-op on most architectures. */ DMA_SYNC(desc->map, desc->length1 + desc->length2, BUS_DMASYNC_POSTWRITE); /* Unmap kernel virtual address to PCI address. */ bus_dmamap_unload(ring->tag, desc->map); /* If this descriptor is the last segment of a packet, */ /* then dequeue and free the corresponding mbuf chain. */ if ((desc->control & TLP_DCTL_TX_LAST_SEG) != 0) { struct mbuf *m; if ((m = mbuf_dequeue(ring)) == NULL) panic("%s: txintr_cleanup: expected an mbuf\n", NAME_UNIT); /* Include CRC and one flag byte in output byte count. 
*/ sc->status.cntrs.obytes += m->m_pkthdr.len + sc->config.crc_len +1; sc->status.cntrs.opackets++; if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); LMC_BPF_MTAP(m); /* The only bad TX status is fifo underrun. */ if ((desc->status & TLP_DSTS_TX_UNDERRUN) != 0) sc->status.cntrs.fifo_under++; m_freem(m); return 1; /* did something */ } } return 0; } /* Build DMA descriptors for a transmit packet mbuf chain. */ static int /* 0=success; 1=error */ /* BSD version */ txintr_setup_mbuf(softc_t *sc, struct mbuf *m) { struct desc_ring *ring = &sc->txring; struct dma_desc *desc; unsigned int desc_len; /* build DMA descriptors for a chain of mbufs. */ while (m != NULL) { char *data = m->m_data; int length = m->m_len; /* zero length mbufs happen! */ /* Build DMA descriptors for one mbuf. */ while (length > 0) { int error; /* Ring is full if (wrap(write+1)==read) */ if (((ring->temp==ring->last) ? ring->first : ring->temp+1) == ring->read) { /* Not enough DMA descriptors; try later. */ for (; ring->temp!=ring->write; ring->temp = (ring->temp==ring->first)? ring->last : ring->temp-1) bus_dmamap_unload(ring->tag, ring->temp->map); sc->status.cntrs.txdma++; return 1; } /* Provisionally, write a descriptor into the ring. */ /* But don't change the REAL ring write pointer. */ /* Hardware won't see it until the OWNER bit is set. */ desc = ring->temp; /* Advance the temporary ring write pointer. */ if (ring->temp++ == ring->last) ring->temp = ring->first; /* Clear all control bits except the END_RING bit. */ desc->control &= TLP_DCTL_END_RING; /* Don't pad short packets up to 64 bytes. */ desc->control |= TLP_DCTL_TX_NO_PAD; /* Use Tulip's CRC-32 generator, if appropriate. */ if (sc->config.crc_len != CFG_CRC_32) desc->control |= TLP_DCTL_TX_NO_CRC; /* Set the OWNER bit, except in the first descriptor. */ if (desc != ring->write) desc->status = TLP_DSTS_OWNER; desc_len = (length > MAX_CHUNK_LEN) ? MAX_CHUNK_LEN : length; /* Map kernel virtual address to PCI address. */ if ((error = DMA_LOAD(desc->map, data, desc_len))) printf("%s: bus_dmamap_load(tx) failed; error %d\n", NAME_UNIT, error); /* Flush the cache and if bouncing, copy mbuf to bounce buf. */ DMA_SYNC(desc->map, desc_len, BUS_DMASYNC_PREWRITE); /* Prevent wild fetches if mapping fails (nsegs==0). */ desc->length1 = desc->length2 = 0; desc->address1 = desc->address2 = 0; { bus_dma_segment_t *segs = ring->segs; int nsegs = ring->nsegs; if (nsegs >= 1) { desc->address1 = segs[0].ds_addr; desc->length1 = segs[0].ds_len; } if (nsegs == 2) { desc->address2 = segs[1].ds_addr; desc->length2 = segs[1].ds_len; } } data += desc_len; length -= desc_len; } /* while (length > 0) */ m = m->m_next; } /* while (m != NULL) */ return 0; /* success */ } /* Setup (prepare) to transmit a packet. */ /* Select a packet, build DMA descriptors and give packet to hardware. */ /* If DMA descriptors run out, abandon the attempt and return 0. */ static int /* BSD version */ txintr_setup(softc_t *sc) { struct desc_ring *ring = &sc->txring; struct dma_desc *first_desc, *last_desc; /* Protect against half-up links: Don't transmit */ /* if the receiver can't hear the far end. */ if (sc->status.oper_status != STATUS_UP) return 0; /* Pick a packet to transmit. 
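 * Sketch of the dequeue priority implemented just below:
 *   1) sc->ng_fastq, then sc->ng_sndq (netgraph, hook connected)
 *   2) sc->ifp->if_snd (RAWIP mode)
 *   3) sppp_dequeue(), or the p2p_isnd and if_snd pair (line protocols)
 * A leftover packet from an aborted attempt (sc->tx_mbuf != NULL) is
 * always retried before anything new is dequeued.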
*/ #if NETGRAPH if ((sc->ng_hook != NULL) && (sc->tx_mbuf == NULL)) { if (!IFQ_IS_EMPTY(&sc->ng_fastq)) IFQ_DEQUEUE(&sc->ng_fastq, sc->tx_mbuf); else IFQ_DEQUEUE(&sc->ng_sndq, sc->tx_mbuf); } else #endif if (sc->tx_mbuf == NULL) { if (sc->config.line_pkg == PKG_RAWIP) IFQ_DEQUEUE(&sc->ifp->if_snd, sc->tx_mbuf); else { #if NSPPP sc->tx_mbuf = sppp_dequeue(sc->ifp); #elif P2P if (!IFQ_IS_EMPTY(&sc->p2p->p2p_isnd)) IFQ_DEQUEUE(&sc->p2p->p2p_isnd, sc->tx_mbuf); else IFQ_DEQUEUE(&sc->ifp->if_snd, sc->tx_mbuf); #endif } } if (sc->tx_mbuf == NULL) return 0; /* no pkt to transmit */ /* Build DMA descriptors for an outgoing mbuf chain. */ ring->temp = ring->write; /* temporary ring write pointer */ if (txintr_setup_mbuf(sc, sc->tx_mbuf) != 0) return 0; /* Enqueue the mbuf; txintr_cleanup will free it. */ mbuf_enqueue(ring, sc->tx_mbuf); /* The transmitter has room for another packet. */ sc->tx_mbuf = NULL; /* Set first & last segment bits. */ /* last_desc is the desc BEFORE the one pointed to by ring->temp. */ first_desc = ring->write; first_desc->control |= TLP_DCTL_TX_FIRST_SEG; last_desc = (ring->temp==ring->first)? ring->last : ring->temp-1; last_desc->control |= TLP_DCTL_TX_LAST_SEG; /* Interrupt at end-of-transmission? Why bother the poor computer! */ /* last_desc->control |= TLP_DCTL_TX_INTERRUPT; */ /* Make sure the OWNER bit is not set in the next descriptor. */ /* The OWNER bit may have been set if a previous call aborted. */ ring->temp->status = 0; /* Commit the DMA descriptors to the software. */ ring->write = ring->temp; /* Before setting the OWNER bit, flush the cache (memory barrier). */ DMA_SYNC(ring->map, ring->size_descs, BUS_DMASYNC_PREWRITE); /* Commit the DMA descriptors to the hardware. */ first_desc->status = TLP_DSTS_OWNER; /* Notify the transmitter that there is another packet to send. */ WRITE_CSR(TLP_TX_POLL, 1); return 1; /* did something */ } static void check_intr_status(softc_t *sc) { u_int32_t status, cfcs, op_mode; u_int32_t missed, overruns; /* Check for four unusual events: * 1) fatal PCI bus errors - some are recoverable * 2) transmitter FIFO underruns - increase fifo threshold * 3) receiver FIFO overruns - clear potential hangup * 4) no receive descs or bufs - count missed packets */ /* 1) A fatal bus error causes a Tulip to stop initiating bus cycles. */ /* Module unload/load or boot are the only fixes for Parity Errors. */ /* Master and Target Aborts can be cleared and life may continue. */ status = READ_CSR(TLP_STATUS); if ((status & TLP_STAT_FATAL_ERROR) != 0) { u_int32_t fatal = (status & TLP_STAT_FATAL_BITS)>>TLP_STAT_FATAL_SHIFT; printf("%s: FATAL PCI BUS ERROR: %s%s%s%s\n", NAME_UNIT, (fatal == 0) ? "PARITY ERROR" : "", (fatal == 1) ? "MASTER ABORT" : "", (fatal == 2) ? "TARGET ABORT" : "", (fatal >= 3) ? "RESERVED (?)" : ""); cfcs = READ_PCI_CFG(sc, TLP_CFCS); /* try to clear it */ cfcs &= ~(TLP_CFCS_MSTR_ABORT | TLP_CFCS_TARG_ABORT); WRITE_PCI_CFG(sc, TLP_CFCS, cfcs); } /* 2) If the transmitter fifo underruns, increase the transmit fifo */ /* threshold: the number of bytes required to be in the fifo */ /* before starting the transmitter (cost: increased tx delay). */ /* The TX_FSM must be stopped to change this parameter. */ if ((status & TLP_STAT_TX_UNDERRUN) != 0) { op_mode = READ_CSR(TLP_OP_MODE); /* enable store-and-forward mode if tx_threshold tops out? 
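 * The arithmetic used below, for reference: the 2-bit TR field of the
 * opmode register selects a threshold of 128 << TR bytes, so
 * TR = 0..3 means 128, 256, 512 or 1024 bytes, and adding 0x4000 to
 * the register advances TR by exactly one step.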
*/ if ((op_mode & TLP_OP_TX_THRESH) < TLP_OP_TX_THRESH) { op_mode += 0x4000; /* increment TX_THRESH field; can't overflow */ WRITE_CSR(TLP_OP_MODE, op_mode & ~TLP_OP_TX_RUN); /* Wait for the TX FSM to stop; it might be processing a pkt. */ while (READ_CSR(TLP_STATUS) & TLP_STAT_TX_FSM); /* XXX HANG */ WRITE_CSR(TLP_OP_MODE, op_mode); /* restart tx */ if (DRIVER_DEBUG) printf("%s: tx underrun; tx fifo threshold now %d bytes\n", NAME_UNIT, 128<<((op_mode>>TLP_OP_TR_SHIFT)&3)); } } /* 3) Errata memo from Digital Equipment Corp warns that 21140A */ /* receivers through rev 2.2 can hang if the fifo overruns. */ /* Recommended fix: stop and start the RX FSM after an overrun. */ missed = READ_CSR(TLP_MISSED); if ((overruns = ((missed & TLP_MISS_OVERRUN)>>TLP_OVERRUN_SHIFT)) != 0) { if (DRIVER_DEBUG) printf("%s: rx overrun cntr=%d\n", NAME_UNIT, overruns); sc->status.cntrs.overruns += overruns; if ((READ_PCI_CFG(sc, TLP_CFRV) & 0xFF) <= 0x22) { op_mode = READ_CSR(TLP_OP_MODE); WRITE_CSR(TLP_OP_MODE, op_mode & ~TLP_OP_RX_RUN); /* Wait for the RX FSM to stop; it might be processing a pkt. */ while (READ_CSR(TLP_STATUS) & TLP_STAT_RX_FSM); /* XXX HANG */ WRITE_CSR(TLP_OP_MODE, op_mode); /* restart rx */ } } /* 4) When the receiver is enabled and a packet arrives, but no DMA */ /* descriptor is available, the packet is counted as 'missed'. */ /* The receiver should never miss packets; warn if it happens. */ if ((missed = (missed & TLP_MISS_MISSED)) != 0) { if (DRIVER_DEBUG) printf("%s: rx missed %d pkts\n", NAME_UNIT, missed); sc->status.cntrs.missed += missed; } } static void /* This is where the work gets done. */ core_interrupt(void *arg, int check_status) { softc_t *sc = arg; int activity; /* If any CPU is inside this critical section, then */ /* other CPUs should go away without doing anything. */ if (BOTTOM_TRYLOCK == 0) { sc->status.cntrs.lck_intr++; return; } /* Clear pending card interrupts. */ WRITE_CSR(TLP_STATUS, READ_CSR(TLP_STATUS)); /* In Linux, pci_alloc_consistent() means DMA descriptors */ /* don't need explicit syncing. */ { struct desc_ring *ring = &sc->txring; DMA_SYNC(sc->txring.map, sc->txring.size_descs, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); ring = &sc->rxring; DMA_SYNC(sc->rxring.map, sc->rxring.size_descs, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); } do /* This is the main loop for interrupt processing. */ { activity = txintr_cleanup(sc); activity += txintr_setup(sc); activity += rxintr_cleanup(sc); activity += rxintr_setup(sc); } while (activity); { struct desc_ring *ring = &sc->txring; DMA_SYNC(sc->txring.map, sc->txring.size_descs, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ring = &sc->rxring; DMA_SYNC(sc->rxring.map, sc->rxring.size_descs, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } /* As the interrupt is dismissed, check for four unusual events. */ if (check_status) check_intr_status(sc); BOTTOM_UNLOCK; } /* user_interrupt() may be called from a syscall or a softirq */ static void user_interrupt(softc_t *sc, int check_status) { DISABLE_INTR; /* noop on FreeBSD-5 and Linux */ core_interrupt(sc, check_status); ENABLE_INTR; /* noop on FreeBSD-5 and Linux */ } # if defined(DEVICE_POLLING) /* Service the card from the kernel idle loop without interrupts. */ static int fbsd_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { softc_t *sc = IFP2SC(ifp); sc->quota = count; core_interrupt(sc, (cmd==POLL_AND_CHECK_STATUS)); return 0; } # endif /* DEVICE_POLLING */ /* BSD kernels call this procedure when an interrupt happens. 
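 * In outline, for a shared PCI interrupt line: if none of the
 * TLP_INT_TXRX bits are set in TLP_STATUS, the interrupt belongs to
 * another device and IRQ_NONE lets the other handlers on the line
 * claim it; otherwise card interrupts are masked, core_interrupt()
 * does the work, and the mask is restored before IRQ_HANDLED.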
*/ static intr_return_t bsd_interrupt(void *arg) { softc_t *sc = arg; /* Cut losses early if this is not our interrupt. */ if ((READ_CSR(TLP_STATUS) & TLP_INT_TXRX) == 0) return IRQ_NONE; # if defined(DEVICE_POLLING) if (sc->ifp->if_capenable & IFCAP_POLLING) return IRQ_NONE; if ((sc->ifp->if_capabilities & IFCAP_POLLING) && (ether_poll_register(fbsd_poll, sc->ifp))) { WRITE_CSR(TLP_INT_ENBL, TLP_INT_DISABLE); return IRQ_NONE; } else sc->quota = sc->rxring.num_descs; /* input flow control */ # endif /* DEVICE_POLLING */ /* Disable card interrupts. */ WRITE_CSR(TLP_INT_ENBL, TLP_INT_DISABLE); core_interrupt(sc, 0); /* Enable card interrupts. */ WRITE_CSR(TLP_INT_ENBL, TLP_INT_TXRX); return IRQ_HANDLED; } /* Administrative status of the driver (UP or DOWN) has changed. */ /* A card-specific action may be required: T1 and T3 cards: no-op. */ /* HSSI and SSI cards change the state of modem ready signals. */ static void set_status(softc_t *sc, int status) { struct ioctl ioctl; ioctl.cmd = IOCTL_SET_STATUS; ioctl.data = status; sc->card->ioctl(sc, &ioctl); } #if P2P /* Callout from P2P: */ /* Get the state of DCD (Data Carrier Detect). */ static int p2p_getmdm(struct p2pcom *p2p, caddr_t result) { softc_t *sc = IFP2SC(&p2p->p2p_if); /* Non-zero isn't good enough; TIOCM_CAR is 0x40. */ *(int *)result = (sc->status.oper_status==STATUS_UP) ? TIOCM_CAR : 0; return 0; } /* Callout from P2P: */ /* Set the state of DTR (Data Terminal Ready). */ static int p2p_mdmctl(struct p2pcom *p2p, int flag) { softc_t *sc = IFP2SC(&p2p->p2p_if); set_status(sc, flag); return 0; } #endif /* P2P */ #if NSPPP # ifndef PP_FR # define PP_FR 0 # endif /* Callout from SPPP: */ static void sppp_tls(struct sppp *sppp) { if (!(sppp->pp_mode & IFF_LINK2) && !(sppp->pp_flags & PP_FR)) sppp->pp_up(sppp); } /* Callout from SPPP: */ static void sppp_tlf(struct sppp *sppp) { if (!(sppp->pp_mode & IFF_LINK2) && !(sppp->pp_flags & PP_FR)) sppp->pp_down(sppp); } #endif /* NSPPP */ /* Configure line protocol stuff. * Called by attach_card() during module init. * Called by core_ioctl() when lmcconfig writes sc->config. * Called by detach_card() during module shutdown. */ static void config_proto(softc_t *sc, struct config *config) { /* Use line protocol stack instead of RAWIP mode. */ if ((sc->config.line_pkg == PKG_RAWIP) && (config->line_pkg != PKG_RAWIP)) { #if NSPPP LMC_BPF_DETACH; sppp_attach(sc->ifp); LMC_BPF_ATTACH(DLT_PPP, 4); sc->sppp->pp_tls = sppp_tls; sc->sppp->pp_tlf = sppp_tlf; /* Force reconfiguration of SPPP params. */ sc->config.line_prot = 0; sc->config.keep_alive = config->keep_alive ? 0:1; #elif P2P int error = 0; sc->p2p->p2p_proto = 0; /* force p2p_attach */ if ((error = p2p_attach(sc->p2p))) /* calls bpfattach() */ { printf("%s: p2p_attach() failed; error %d\n", NAME_UNIT, error); config->line_pkg = PKG_RAWIP; /* still in RAWIP mode */ } else { sc->p2p->p2p_mdmctl = p2p_mdmctl; /* set DTR */ sc->p2p->p2p_getmdm = p2p_getmdm; /* get DCD */ } #elif GEN_HDLC int error = 0; sc->net_dev->mtu = HDLC_MAX_MTU; if ((error = hdlc_open(sc->net_dev))) { printf("%s: hdlc_open() failed; error %d\n", NAME_UNIT, error); printf("%s: Try 'sethdlc %s ppp'\n", NAME_UNIT, NAME_UNIT); config->line_pkg = PKG_RAWIP; /* still in RAWIP mode */ } #else /* no line protocol stack was configured */ config->line_pkg = PKG_RAWIP; /* still in RAWIP mode */ #endif } /* Bypass line protocol stack and return to RAWIP mode. 
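 * For example (the SPPP case below): when lmcconfig writes a config
 * with line_pkg == PKG_RAWIP while SPPP is attached, the sequence is
 *   LMC_BPF_DETACH; sppp_flush(); sppp_detach(); setup_ifnet();
 *   LMC_BPF_ATTACH(DLT_RAW, 0);
 * i.e. the exact inverse of the attach path above.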
*/ if ((sc->config.line_pkg != PKG_RAWIP) && (config->line_pkg == PKG_RAWIP)) { #if NSPPP LMC_BPF_DETACH; sppp_flush(sc->ifp); sppp_detach(sc->ifp); setup_ifnet(sc->ifp); LMC_BPF_ATTACH(DLT_RAW, 0); #elif P2P int error = 0; if_qflush(&sc->p2p->p2p_isnd); if ((error = p2p_detach(sc->p2p))) { printf("%s: p2p_detach() failed; error %d\n", NAME_UNIT, error); printf("%s: Try 'ifconfig %s down -remove'\n", NAME_UNIT, NAME_UNIT); config->line_pkg = PKG_P2P; /* not in RAWIP mode; still attached to P2P */ } else { setup_ifnet(sc->ifp); LMC_BPF_ATTACH(DLT_RAW, 0); } #elif GEN_HDLC hdlc_proto_detach(sc->hdlc_dev); hdlc_close(sc->net_dev); setup_netdev(sc->net_dev); #endif } #if NSPPP if (config->line_pkg != PKG_RAWIP) { /* Check for change to PPP protocol. */ if ((sc->config.line_prot != PROT_PPP) && (config->line_prot == PROT_PPP)) { LMC_BPF_DETACH; sc->ifp->if_flags &= ~IFF_LINK2; sc->sppp->pp_flags &= ~PP_FR; LMC_BPF_ATTACH(DLT_PPP, 4); sppp_ioctl(sc->ifp, SIOCSIFFLAGS, NULL); } # ifndef DLT_C_HDLC # define DLT_C_HDLC DLT_PPP # endif /* Check for change to C_HDLC protocol. */ if ((sc->config.line_prot != PROT_C_HDLC) && (config->line_prot == PROT_C_HDLC)) { LMC_BPF_DETACH; sc->ifp->if_flags |= IFF_LINK2; sc->sppp->pp_flags &= ~PP_FR; LMC_BPF_ATTACH(DLT_C_HDLC, 4); sppp_ioctl(sc->ifp, SIOCSIFFLAGS, NULL); } /* Check for change to Frame Relay protocol. */ if ((sc->config.line_prot != PROT_FRM_RLY) && (config->line_prot == PROT_FRM_RLY)) { LMC_BPF_DETACH; sc->ifp->if_flags &= ~IFF_LINK2; sc->sppp->pp_flags |= PP_FR; LMC_BPF_ATTACH(DLT_FRELAY, 4); sppp_ioctl(sc->ifp, SIOCSIFFLAGS, NULL); } /* Check for disabling keep-alives. */ if ((sc->config.keep_alive != 0) && (config->keep_alive == 0)) sc->sppp->pp_flags &= ~PP_KEEPALIVE; /* Check for enabling keep-alives. */ if ((sc->config.keep_alive == 0) && (config->keep_alive != 0)) sc->sppp->pp_flags |= PP_KEEPALIVE; } #endif /* NSPPP */ /* Loop back through the TULIP Ethernet chip; (no CRC). */ /* Data sheet says stop DMA before changing OPMODE register. */ /* But that's not as simple as it sounds; works anyway. */ /* Check for enabling loopback thru Tulip chip. */ if ((sc->config.loop_back != CFG_LOOP_TULIP) && (config->loop_back == CFG_LOOP_TULIP)) { u_int32_t op_mode = READ_CSR(TLP_OP_MODE); op_mode |= TLP_OP_INT_LOOP; WRITE_CSR(TLP_OP_MODE, op_mode); config->crc_len = CFG_CRC_0; } /* Check for disabling loopback thru Tulip chip. */ if ((sc->config.loop_back == CFG_LOOP_TULIP) && (config->loop_back != CFG_LOOP_TULIP)) { u_int32_t op_mode = READ_CSR(TLP_OP_MODE); op_mode &= ~TLP_OP_LOOP_MODE; WRITE_CSR(TLP_OP_MODE, op_mode); config->crc_len = CFG_CRC_16; } } /* This is the core ioctl procedure. */ /* It handles IOCTLs from lmcconfig(8). */ /* It must not run when card watchdogs run. */ /* Called from a syscall (user context; no spinlocks). */ /* This procedure can SLEEP. */ static int core_ioctl(softc_t *sc, u_long cmd, caddr_t data) { struct iohdr *iohdr = (struct iohdr *) data; struct ioctl *ioctl = (struct ioctl *) data; struct status *status = (struct status *) data; struct config *config = (struct config *) data; int error = 0; /* All structs start with a string and a cookie. */ if (((struct iohdr *)data)->cookie != NGM_LMC_COOKIE) return EINVAL; while (TOP_TRYLOCK == 0) { sc->status.cntrs.lck_ioctl++; SLEEP(10000); /* yield? 
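 * A sketch of what this spin-sleep achieves: TOP_TRYLOCK is retried
 * until core_watchdog() releases the top-half lock (assuming SLEEP()
 * takes microseconds, this retries every 10 ms), and each failed
 * attempt is counted in cntrs.lck_ioctl where lmcconfig can see it.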
*/ } switch (cmd) { case LMCIOCGSTAT: { *status = sc->status; iohdr->cookie = NGM_LMC_COOKIE; break; } case LMCIOCGCFG: { *config = sc->config; iohdr->cookie = NGM_LMC_COOKIE; break; } case LMCIOCSCFG: { if ((error = CHECK_CAP)) break; config_proto(sc, config); sc->config = *config; sc->card->config(sc); break; } case LMCIOCREAD: { if (ioctl->cmd == IOCTL_RW_PCI) { if (ioctl->address > 252) { error = EFAULT; break; } ioctl->data = READ_PCI_CFG(sc, ioctl->address); } else if (ioctl->cmd == IOCTL_RW_CSR) { if (ioctl->address > 15) { error = EFAULT; break; } ioctl->data = READ_CSR(ioctl->address*TLP_CSR_STRIDE); } else if (ioctl->cmd == IOCTL_RW_SROM) { if (ioctl->address > 63) { error = EFAULT; break; } ioctl->data = read_srom(sc, ioctl->address); } else if (ioctl->cmd == IOCTL_RW_BIOS) ioctl->data = read_bios(sc, ioctl->address); else if (ioctl->cmd == IOCTL_RW_MII) ioctl->data = read_mii(sc, ioctl->address); else if (ioctl->cmd == IOCTL_RW_FRAME) ioctl->data = read_framer(sc, ioctl->address); else error = EINVAL; break; } case LMCIOCWRITE: { if ((error = CHECK_CAP)) break; if (ioctl->cmd == IOCTL_RW_PCI) { if (ioctl->address > 252) { error = EFAULT; break; } WRITE_PCI_CFG(sc, ioctl->address, ioctl->data); } else if (ioctl->cmd == IOCTL_RW_CSR) { if (ioctl->address > 15) { error = EFAULT; break; } WRITE_CSR(ioctl->address*TLP_CSR_STRIDE, ioctl->data); } else if (ioctl->cmd == IOCTL_RW_SROM) { if (ioctl->address > 63) { error = EFAULT; break; } write_srom(sc, ioctl->address, ioctl->data); /* can sleep */ } else if (ioctl->cmd == IOCTL_RW_BIOS) { if (ioctl->address == 0) erase_bios(sc); write_bios(sc, ioctl->address, ioctl->data); /* can sleep */ } else if (ioctl->cmd == IOCTL_RW_MII) write_mii(sc, ioctl->address, ioctl->data); else if (ioctl->cmd == IOCTL_RW_FRAME) write_framer(sc, ioctl->address, ioctl->data); else if (ioctl->cmd == IOCTL_WO_SYNTH) write_synth(sc, (struct synth *)&ioctl->data); else if (ioctl->cmd == IOCTL_WO_DAC) { write_dac(sc, 0x9002); /* set Vref = 2.048 volts */ write_dac(sc, ioctl->data & 0xFFF); } else error = EINVAL; break; } case LMCIOCTL: { if ((error = CHECK_CAP)) break; if (ioctl->cmd == IOCTL_XILINX_RESET) { reset_xilinx(sc); sc->card->config(sc); } else if (ioctl->cmd == IOCTL_XILINX_ROM) { load_xilinx_from_rom(sc); /* can sleep */ sc->card->config(sc); } else if (ioctl->cmd == IOCTL_XILINX_FILE) { /* load_xilinx_from_file() can sleep. */ error = load_xilinx_from_file(sc, ioctl->ucode, ioctl->data); if (error != 0) load_xilinx_from_rom(sc); /* try the rom */ sc->card->config(sc); set_status(sc, (error==0)); /* XXX */ } else if (ioctl->cmd == IOCTL_RESET_CNTRS) { memset(&sc->status.cntrs, 0, sizeof(struct event_cntrs)); microtime(&sc->status.cntrs.reset_time); } else error = sc->card->ioctl(sc, ioctl); /* can sleep */ break; } default: error = EINVAL; break; } TOP_UNLOCK; return error; } /* This is the core watchdog procedure. */ /* It calculates link speed, and calls the card-specific watchdog code. */ /* Calls interrupt() in case one got lost; also kick-starts the device. */ /* ioctl syscalls and card watchdog routines must be interlocked. */ /* This procedure must not sleep. */ static void core_watchdog(softc_t *sc) { /* Read and restart the Tulip timer. */ u_int32_t tx_speed = READ_CSR(TLP_TIMER); WRITE_CSR(TLP_TIMER, 0xFFFF); /* Measure MII clock using a timer in the Tulip chip. * This timer counts transmitter bits divided by 4096. * Since this is called once a second the math is easy. * This is only correct when the link is NOT sending pkts. 
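 * Worked example for an idle E1 link (2.048 Mb/s): the timer ticks
 * once per 4096 transmitted bits, so over the one-second watchdog
 * interval it advances 2048000/4096 = 500 counts, and
 *   (0xFFFF - timer) << 12 = 500 * 4096 = 2048000 b/s
 * is reported as tx_speed.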
* On a fully-loaded link, answer will be HALF actual rate. * Clock rate during pkt is HALF clk rate between pkts. * Measuring clock rate really measures link utilization! */ sc->status.tx_speed = (0xFFFF - (tx_speed & 0xFFFF)) << 12; /* The first status reset time is when the calendar clock is set. */ if (sc->status.cntrs.reset_time.tv_sec < 1000) microtime(&sc->status.cntrs.reset_time); /* Update hardware (operational) status. */ /* Call the card-specific watchdog routines. */ if (TOP_TRYLOCK != 0) { sc->status.oper_status = sc->card->watchdog(sc); /* Increment a counter which tells user-land */ /* observers that SNMP state has been updated. */ sc->status.ticks++; TOP_UNLOCK; } else sc->status.cntrs.lck_watch++; /* In case an interrupt gets lost... */ user_interrupt(sc, 1); } /* Called from a syscall (user context; no spinlocks). */ static int lmc_raw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ifreq *ifr = (struct ifreq *) data; int error = 0; switch (cmd) { case SIOCAIFADDR: case SIOCSIFFLAGS: case SIOCSIFADDR: ifp->if_flags |= IFF_UP; /* a Unix tradition */ break; case SIOCSIFMTU: ifp->if_mtu = ifr->ifr_mtu; break; default: error = EINVAL; break; } return error; } /* Called from a syscall (user context; no spinlocks). */ static int lmc_ifnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { softc_t *sc = IFP2SC(ifp); int error = 0; switch (cmd) { /* Catch the IOCTLs used by lmcconfig. */ case LMCIOCGSTAT: case LMCIOCGCFG: case LMCIOCSCFG: case LMCIOCREAD: case LMCIOCWRITE: case LMCIOCTL: error = core_ioctl(sc, cmd, data); break; /* Pass the rest to the line protocol. */ default: if (sc->config.line_pkg == PKG_RAWIP) error = lmc_raw_ioctl(ifp, cmd, data); else # if NSPPP error = sppp_ioctl(ifp, cmd, data); # elif P2P error = p2p_ioctl(ifp, cmd, data); # else error = EINVAL; # endif break; } if (DRIVER_DEBUG && (error!=0)) printf("%s: lmc_ifnet_ioctl; cmd=0x%08lx error=%d\n", NAME_UNIT, cmd, error); return error; } /* Called from a syscall (user context; no spinlocks). */ static void lmc_ifnet_start(struct ifnet *ifp) { softc_t *sc = IFP2SC(ifp); /* Start the transmitter; incoming pkts are NOT processed. */ user_interrupt(sc, 0); } /* sppp and p2p replace this with their own proc. */ /* RAWIP mode is the only time this is used. */ /* Called from a syscall (user context; no spinlocks). */ static int lmc_raw_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { softc_t *sc = IFP2SC(ifp); int error = 0; /* Fail if the link is down. */ if (sc->status.oper_status != STATUS_UP) { m_freem(m); sc->status.cntrs.odiscards++; if (DRIVER_DEBUG) printf("%s: lmc_raw_output: tx pkt discarded: link down\n", NAME_UNIT); return ENETDOWN; } # if NETGRAPH /* Netgraph has priority over the ifnet kernel interface. */ if (sc->ng_hook != NULL) { m_freem(m); sc->status.cntrs.odiscards++; if (DRIVER_DEBUG) printf("%s: lmc_raw_output: tx pkt discarded: netgraph active\n", NAME_UNIT); return EBUSY; } # endif /* lmc_raw_output() ENQUEUEs in a syscall or softirq. */ /* txintr_setup() DEQUEUEs in a hard interrupt. */ /* Some BSD QUEUE routines are not interrupt-safe. */ { DISABLE_INTR; IFQ_ENQUEUE(&ifp->if_snd, m, error); ENABLE_INTR; } if (error==0) user_interrupt(sc, 0); /* start the transmitter */ else { m_freem(m); sc->status.cntrs.odiscards++; if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1); if (DRIVER_DEBUG) printf("%s: lmc_raw_output: IFQ_ENQUEUE() failed; error %d\n", NAME_UNIT, error); } return error; } /* Called from a softirq once a second. 
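 * The rearm at the bottom of this routine is the usual
 * self-scheduling callout idiom:
 *   callout_reset(&sc->callout, hz, lmc_watchdog, ifp);
 * i.e. hz ticks (one second) from now this procedure runs again.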
*/ static void lmc_watchdog(void *arg) { struct ifnet *ifp = arg; softc_t *sc = IFP2SC(ifp); u_int8_t old_oper_status = sc->status.oper_status; core_watchdog(sc); /* updates oper_status */ #if NETGRAPH if (sc->ng_hook != NULL) { sc->status.line_pkg = PKG_NG; sc->status.line_prot = 0; } else #endif if (sc->config.line_pkg == PKG_RAWIP) { sc->status.line_pkg = PKG_RAWIP; sc->status.line_prot = PROT_IP_HDLC; } else { # if P2P /* Notice change in link status. */ if ((old_oper_status != sc->status.oper_status) && (sc->p2p->p2p_modem)) (*sc->p2p->p2p_modem)(sc->p2p, sc->status.oper_status==STATUS_UP); /* Notice change in line protocol. */ sc->status.line_pkg = PKG_P2P; switch (sc->ifp->if_type) { case IFT_PPP: sc->status.line_prot = PROT_PPP; break; case IFT_PTPSERIAL: sc->status.line_prot = PROT_C_HDLC; break; case IFT_FRELAY: sc->status.line_prot = PROT_FRM_RLY; break; default: sc->status.line_prot = 0; break; } # elif NSPPP /* Notice change in link status. */ if ((old_oper_status != STATUS_UP) && (sc->status.oper_status == STATUS_UP)) /* link came up */ sppp_tls(sc->sppp); if ((old_oper_status == STATUS_UP) && (sc->status.oper_status != STATUS_UP)) /* link went down */ sppp_tlf(sc->sppp); /* Notice change in line protocol. */ sc->status.line_pkg = PKG_SPPP; if (sc->sppp->pp_flags & PP_FR) sc->status.line_prot = PROT_FRM_RLY; else if (sc->ifp->if_flags & IFF_LINK2) sc->status.line_prot = PROT_C_HDLC; else sc->status.line_prot = PROT_PPP; # else /* Suppress compiler warning. */ if (old_oper_status == STATUS_UP); # endif } ifp->if_baudrate = sc->status.tx_speed; if (sc->status.oper_status == STATUS_UP) ifp->if_link_state = LINK_STATE_UP; else ifp->if_link_state = LINK_STATE_DOWN; /* Call this procedure again after one second. */ callout_reset(&sc->callout, hz, lmc_watchdog, ifp); } static uint64_t lmc_get_counter(struct ifnet *ifp, ift_counter cnt) { softc_t *sc; struct event_cntrs *cntrs; sc = if_getsoftc(ifp); cntrs = &sc->status.cntrs; switch (cnt) { case IFCOUNTER_IPACKETS: return (cntrs->ipackets); case IFCOUNTER_OPACKETS: return (cntrs->opackets); case IFCOUNTER_IBYTES: return (cntrs->ibytes); case IFCOUNTER_OBYTES: return (cntrs->obytes); case IFCOUNTER_IERRORS: return (cntrs->ierrors); case IFCOUNTER_OERRORS: return (cntrs->oerrors); case IFCOUNTER_IQDROPS: return (cntrs->idiscards); default: return (if_get_counter_default(ifp, cnt)); } } static void setup_ifnet(struct ifnet *ifp) { softc_t *sc = ifp->if_softc; /* Initialize the generic network interface. */ ifp->if_flags = IFF_POINTOPOINT; ifp->if_flags |= IFF_RUNNING; ifp->if_ioctl = lmc_ifnet_ioctl; ifp->if_start = lmc_ifnet_start; /* sppp changes this */ ifp->if_output = lmc_raw_output; /* sppp & p2p change this */ ifp->if_input = lmc_raw_input; ifp->if_get_counter = lmc_get_counter; ifp->if_mtu = MAX_DESC_LEN; /* sppp & p2p change this */ ifp->if_type = IFT_PTPSERIAL; /* p2p changes this */ # if defined(DEVICE_POLLING) ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capenable |= IFCAP_POLLING_NOCOUNT; # endif if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); } static int lmc_ifnet_attach(softc_t *sc) { sc->ifp = if_alloc(NSPPP ? IFT_PPP : IFT_OTHER); if (sc->ifp == NULL) return ENOMEM; # if NSPPP sc->sppp = sc->ifp->if_l2com; # elif P2P sc->ifp = &sc->p2pcom.p2p_if; sc->p2p = &sc->p2pcom; # endif /* Initialize the network interface struct. */ sc->ifp->if_softc = sc; setup_ifnet(sc->ifp); /* ALTQ output queue initialization. 
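 * A minimal sketch of the queue setup below: SNDQ_MAXLEN is this
 * driver's own bound on the send queue, and IFQ_SET_READY marks the
 * queue usable by ALTQ packet schedulers as well as the classic
 * ifqueue code. */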
*/ IFQ_SET_MAXLEN(&sc->ifp->if_snd, SNDQ_MAXLEN); IFQ_SET_READY(&sc->ifp->if_snd); /* Attach to the ifnet kernel interface. */ if_attach(sc->ifp); /* Attach Berkeley Packet Filter. */ LMC_BPF_ATTACH(DLT_RAW, 0); callout_reset(&sc->callout, hz, lmc_watchdog, sc); return 0; } static void lmc_ifnet_detach(softc_t *sc) { # if defined(DEVICE_POLLING) if (sc->ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(sc->ifp); # endif /* Detach Berkeley Packet Filter. */ LMC_BPF_DETACH; /* Detach from the ifnet kernel interface. */ if_detach(sc->ifp); if_free(sc->ifp); } #if NETGRAPH /* These next two macros should be added to netgraph */ # define NG_TYPE_REF(type) atomic_add_int(&(type)->refs, 1) # define NG_TYPE_UNREF(type) \ do { \ if ((type)->refs == 1) \ ng_rmtype(type); \ else \ atomic_subtract_int(&(type)->refs, 1); \ } while (0) /* It is an error to construct new copies of this Netgraph node. */ /* All instances are constructed by ng_attach and are persistent. */ static int ng_constructor(node_p node) { return EINVAL; } /* Incoming Netgraph control message. */ static int ng_rcvmsg(node_p node, item_p item, hook_p lasthook) { struct ng_mesg *msg; struct ng_mesg *resp = NULL; softc_t *sc = NG_NODE_PRIVATE(node); int error = 0; NGI_GET_MSG(item, msg); if (msg->header.typecookie == NGM_LMC_COOKIE) { switch (msg->header.cmd) { case LMCIOCGSTAT: case LMCIOCGCFG: case LMCIOCSCFG: case LMCIOCREAD: case LMCIOCWRITE: case LMCIOCTL: { /* Call the core ioctl procedure. */ error = core_ioctl(sc, msg->header.cmd, msg->data); if ((msg->header.cmd & IOC_OUT) != 0) { /* synchronous response */ NG_MKRESPONSE(resp, msg, sizeof(struct ng_mesg) + IOCPARM_LEN(msg->header.cmd), M_NOWAIT); if (resp == NULL) error = ENOMEM; else memcpy(resp->data, msg->data, IOCPARM_LEN(msg->header.cmd)); } break; } default: error = EINVAL; break; } } else if ((msg->header.typecookie == NGM_GENERIC_COOKIE) && (msg->header.cmd == NGM_TEXT_STATUS)) { /* synchronous response */ NG_MKRESPONSE(resp, msg, sizeof(struct ng_mesg) + NG_TEXTRESPONSE, M_NOWAIT); if (resp == NULL) error = ENOMEM; else { char *s = resp->data; sprintf(s, "Card type = <%s>\n" "This driver considers the link to be %s.\n" "Use lmcconfig to configure this interface.\n", sc->dev_desc, (sc->status.oper_status==STATUS_UP) ? "UP" : "DOWN"); resp->header.arglen = strlen(s) +1; } } else /* Netgraph should be able to read and write these * parameters with text-format control messages: * SSI HSSI T1E1 T3 * crc crc crc crc * loop loop loop loop * clksrc clksrc * dte dte format format * synth synth cablen cablen * cable timeslot scram * gain * pulse * lbo * Someday I'll implement this... */ error = EINVAL; /* Handle synchronous response. */ NG_RESPOND_MSG(error, node, item, resp); NG_FREE_MSG(msg); return error; } /* This is a persistent netgraph node. */ static int ng_shutdown(node_p node) { /* unless told to really die, bounce back to life */ if ((node->nd_flags & NG_REALLY_DIE)==0) node->nd_flags &= ~NG_INVALID; /* bounce back to life */ return 0; } /* ng_disconnect is the opposite of this procedure. */ static int ng_newhook(node_p node, hook_p hook, const char *name) { softc_t *sc = NG_NODE_PRIVATE(node); /* Hook name must be 'rawdata'. */ if (strncmp(name, "rawdata", 7) != 0) return EINVAL; /* Is our hook connected? */ if (sc->ng_hook != NULL) return EBUSY; /* Accept the hook. */ sc->ng_hook = hook; return 0; } /* Both ends have accepted their hooks and the links have been made. */ /* This is the last chance to reject the connection request. 
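 * Returning a non-zero errno from this method is how a node would
 * reject the connection at this stage; a hypothetical guard could read
 *   if (some_condition) return (EBUSY);
 * but this node has already vetted the hook in ng_newhook(), so
 * ng_connect() always accepts.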
*/ static int ng_connect(hook_p hook) { /* Probably not at splnet, force outward queueing. (huh?) */ NG_HOOK_FORCE_QUEUE(NG_HOOK_PEER(hook)); return 0; /* always accept */ } /* Receive data in mbufs from another Netgraph node. */ /* Transmit an mbuf-chain on the communication link. */ /* This procedure is very similar to lmc_raw_output(). */ /* Called from a syscall (user context; no spinlocks). */ static int ng_rcvdata(hook_p hook, item_p item) { softc_t *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); int error = 0; struct mbuf *m; meta_p meta = NULL; NGI_GET_M(item, m); NGI_GET_META(item, meta); NG_FREE_ITEM(item); /* This macro must not store into meta! */ NG_FREE_META(meta); /* Fail if the link is down. */ if (sc->status.oper_status != STATUS_UP) { m_freem(m); sc->status.cntrs.odiscards++; if (DRIVER_DEBUG) printf("%s: ng_rcvdata: tx pkt discarded: link down\n", NAME_UNIT); return ENETDOWN; } /* ng_rcvdata() ENQUEUEs in a syscall or softirq. */ /* txintr_setup() DEQUEUEs in a hard interrupt. */ /* Some BSD QUEUE routines are not interrupt-safe. */ { DISABLE_INTR; if (meta==NULL) IFQ_ENQUEUE(&sc->ng_sndq, m, error); else IFQ_ENQUEUE(&sc->ng_fastq, m, error); ENABLE_INTR; } if (error==0) user_interrupt(sc, 0); /* start the transmitter */ else { m_freem(m); sc->status.cntrs.odiscards++; if (DRIVER_DEBUG) printf("%s: ng_rcvdata: IFQ_ENQUEUE() failed; error %d\n", NAME_UNIT, error); } return error; } /* ng_newhook is the opposite of this procedure, not */ /* ng_connect, as you might expect from the names. */ static int ng_disconnect(hook_p hook) { softc_t *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); /* Disconnect the hook. */ sc->ng_hook = NULL; return 0; } static struct ng_type ng_type = { .version = NG_ABI_VERSION, .name = NG_LMC_NODE_TYPE, .mod_event = NULL, .constructor = ng_constructor, .rcvmsg = ng_rcvmsg, .close = NULL, .shutdown = ng_shutdown, .newhook = ng_newhook, .findhook = NULL, .connect = ng_connect, .rcvdata = ng_rcvdata, .disconnect = ng_disconnect, }; /* Attach to the Netgraph kernel interface (/sys/netgraph). * It is called once for each physical card during device attach. * This is effectively ng_constructor. */ static int ng_attach(softc_t *sc) { int error; /* If this node type is not known to Netgraph then register it. */ if (ng_type.refs == 0) /* or: if (ng_findtype(&ng_type) == NULL) */ { if ((error = ng_newtype(&ng_type))) { printf("%s: ng_newtype() failed; error %d\n", NAME_UNIT, error); return error; } } else NG_TYPE_REF(&ng_type); /* Call the superclass node constructor. */ if ((error = ng_make_node_common(&ng_type, &sc->ng_node))) { NG_TYPE_UNREF(&ng_type); printf("%s: ng_make_node_common() failed; error %d\n", NAME_UNIT, error); return error; } /* Associate a name with this netgraph node. */ if ((error = ng_name_node(sc->ng_node, NAME_UNIT))) { NG_NODE_UNREF(sc->ng_node); NG_TYPE_UNREF(&ng_type); printf("%s: ng_name_node() failed; error %d\n", NAME_UNIT, error); return error; } /* Initialize the send queue mutexes. */ mtx_init(&sc->ng_sndq.ifq_mtx, NAME_UNIT, "sndq", MTX_DEF); mtx_init(&sc->ng_fastq.ifq_mtx, NAME_UNIT, "fastq", MTX_DEF); /* Put a backpointer to the softc in the netgraph node. */ NG_NODE_SET_PRIVATE(sc->ng_node, sc); /* ALTQ output queue initialization. 
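 * Both netgraph send queues get the same IFQ_SET_MAXLEN and
 * IFQ_SET_READY treatment as if_snd in lmc_ifnet_attach(); since
 * txintr_setup() drains ng_fastq ahead of ng_sndq, readying both
 * queues here is what makes the two-priority scheme work. */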
*/ IFQ_SET_MAXLEN(&sc->ng_fastq, SNDQ_MAXLEN); IFQ_SET_READY(&sc->ng_fastq); IFQ_SET_MAXLEN(&sc->ng_sndq, SNDQ_MAXLEN); IFQ_SET_READY(&sc->ng_sndq); return 0; } static void ng_detach(softc_t *sc) { callout_drain(&sc->callout); mtx_destroy(&sc->ng_sndq.ifq_mtx); mtx_destroy(&sc->ng_fastq.ifq_mtx); ng_rmnode_self(sc->ng_node); /* free hook */ NG_NODE_UNREF(sc->ng_node); /* free node */ NG_TYPE_UNREF(&ng_type); } #endif /* NETGRAPH */ /* The next few procedures initialize the card. */ /* Returns 0 on success; error code on failure. */ static int startup_card(softc_t *sc) { int num_rx_descs, error = 0; u_int32_t tlp_bus_pbl, tlp_bus_cal, tlp_op_tr; u_int32_t tlp_cfdd, tlp_cfcs; u_int32_t tlp_cflt, tlp_csid, tlp_cfit; /* Make sure the COMMAND bits are reasonable. */ tlp_cfcs = READ_PCI_CFG(sc, TLP_CFCS); tlp_cfcs &= ~TLP_CFCS_MWI_ENABLE; tlp_cfcs |= TLP_CFCS_BUS_MASTER; tlp_cfcs |= TLP_CFCS_MEM_ENABLE; tlp_cfcs |= TLP_CFCS_IO_ENABLE; tlp_cfcs |= TLP_CFCS_PAR_ERROR; tlp_cfcs |= TLP_CFCS_SYS_ERROR; WRITE_PCI_CFG(sc, TLP_CFCS, tlp_cfcs); /* Set the LATENCY TIMER to the recommended value, */ /* and make sure the CACHE LINE SIZE is reasonable. */ tlp_cfit = READ_PCI_CFG(sc, TLP_CFIT); tlp_cflt = READ_PCI_CFG(sc, TLP_CFLT); tlp_cflt &= ~TLP_CFLT_LATENCY; tlp_cflt |= (tlp_cfit & TLP_CFIT_MAX_LAT)>>16; /* "prgmbl burst length" and "cache alignment" used below. */ switch(tlp_cflt & TLP_CFLT_CACHE) { case 8: /* 8 bytes per cache line */ { tlp_bus_pbl = 32; tlp_bus_cal = 1; break; } case 16: { tlp_bus_pbl = 32; tlp_bus_cal = 2; break; } case 32: { tlp_bus_pbl = 32; tlp_bus_cal = 3; break; } default: { tlp_bus_pbl = 32; tlp_bus_cal = 1; tlp_cflt &= ~TLP_CFLT_CACHE; tlp_cflt |= 8; break; } } WRITE_PCI_CFG(sc, TLP_CFLT, tlp_cflt); /* Make sure SNOOZE and SLEEP modes are disabled. */ tlp_cfdd = READ_PCI_CFG(sc, TLP_CFDD); tlp_cfdd &= ~TLP_CFDD_SLEEP; tlp_cfdd &= ~TLP_CFDD_SNOOZE; WRITE_PCI_CFG(sc, TLP_CFDD, tlp_cfdd); DELAY(11*1000); /* Tulip wakes up in 10 ms max */ /* Software Reset the Tulip chip; stops DMA and Interrupts. */ /* This does not change the PCI config regs just set above. */ WRITE_CSR(TLP_BUS_MODE, TLP_BUS_RESET); /* self-clearing */ DELAY(5); /* Tulip is dead for 50 PCI cycles after reset. */ /* Reset the Xilinx Field Programmable Gate Array. */ reset_xilinx(sc); /* side effect: turns on all four LEDs */ /* Configure card-specific stuff (framers, line interfaces, etc.). */ sc->card->config(sc); /* Initializing cards can glitch clocks and upset fifos. */ /* Reset the FIFOs between the Tulip and Xilinx chips. */ set_mii16_bits(sc, MII16_FIFO); clr_mii16_bits(sc, MII16_FIFO); /* Initialize the PCI busmode register. */ /* The PCI bus cycle type "Memory Write and Invalidate" does NOT */ /* work cleanly in any version of the 21140A, so don't enable it! */ WRITE_CSR(TLP_BUS_MODE, (tlp_bus_cal ? TLP_BUS_READ_LINE : 0) | (tlp_bus_cal ? TLP_BUS_READ_MULT : 0) | (tlp_bus_pbl<txring, NUM_TX_DESCS))) return error; WRITE_CSR(TLP_TX_LIST, sc->txring.dma_addr); if ((error = create_ring(sc, &sc->rxring, num_rx_descs))) return error; WRITE_CSR(TLP_RX_LIST, sc->rxring.dma_addr); /* Initialize the operating mode register. */ WRITE_CSR(TLP_OP_MODE, TLP_OP_INIT | (tlp_op_tr<txring); destroy_ring(sc, &sc->rxring); } /* Start the card and attach a kernel interface and line protocol. */ static int attach_card(softc_t *sc, const char *intrstr) { struct config config; u_int32_t tlp_cfrv; u_int16_t mii3; u_int8_t *ieee; int i, error = 0; /* Start the card. 
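 * In outline, the attach sequence below: startup_card() brings up the
 * Tulip and the DMA rings, ng_attach() and lmc_ifnet_attach() publish
 * the kernel interfaces, config_proto() selects an external line
 * protocol (C-HDLC with keep-alives by default), and set_status(sc, 1)
 * finally asserts modem-ready toward the far end. */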
*/ if ((error = startup_card(sc))) return error; callout_init(&sc->callout, 0); /* Attach a kernel interface. */ #if NETGRAPH if ((error = ng_attach(sc))) return error; sc->flags |= FLAG_NETGRAPH; #endif if ((error = lmc_ifnet_attach(sc))) return error; sc->flags |= FLAG_IFNET; /* Attach a line protocol stack. */ sc->config.line_pkg = PKG_RAWIP; config = sc->config; /* get current config */ config.line_pkg = 0; /* select external stack */ config.line_prot = PROT_C_HDLC; config.keep_alive = 1; config_proto(sc, &config); /* reconfigure */ sc->config = config; /* save new configuration */ /* Print interesting hardware-related things. */ mii3 = read_mii(sc, 3); tlp_cfrv = READ_PCI_CFG(sc, TLP_CFRV); printf("%s: PCI rev %d.%d, MII rev %d.%d", NAME_UNIT, (tlp_cfrv>>4) & 0xF, tlp_cfrv & 0xF, (mii3>>4) & 0xF, mii3 & 0xF); ieee = (u_int8_t *)sc->status.ieee; for (i=0; i<3; i++) sc->status.ieee[i] = read_srom(sc, 10+i); printf(", IEEE addr %02x:%02x:%02x:%02x:%02x:%02x", ieee[0], ieee[1], ieee[2], ieee[3], ieee[4], ieee[5]); sc->card->ident(sc); printf(" %s\n", intrstr); /* Print interesting software-related things. */ printf("%s: Driver rev %d.%d.%d", NAME_UNIT, DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_SUB_VERSION); printf(", Options %s%s%s%s%s%s%s%s%s\n", NETGRAPH ? "NETGRAPH " : "", GEN_HDLC ? "GEN_HDLC " : "", NSPPP ? "SPPP " : "", P2P ? "P2P " : "", ALTQ_PRESENT ? "ALTQ " : "", NBPFILTER ? "BPF " : "", DEV_POLL ? "POLL " : "", IOREF_CSR ? "IO_CSR " : "MEM_CSR ", (BYTE_ORDER == BIG_ENDIAN) ? "BIG_END " : "LITTLE_END "); /* Make the local hardware ready. */ set_status(sc, 1); return 0; } /* Detach from the kernel in all ways. */ static void detach_card(softc_t *sc) { struct config config; /* Make the local hardware NOT ready. */ set_status(sc, 0); /* Detach external line protocol stack. */ if (sc->config.line_pkg != PKG_RAWIP) { config = sc->config; config.line_pkg = PKG_RAWIP; config_proto(sc, &config); sc->config = config; } /* Detach kernel interfaces. */ #if NETGRAPH if (sc->flags & FLAG_NETGRAPH) { IFQ_PURGE(&sc->ng_fastq); IFQ_PURGE(&sc->ng_sndq); ng_detach(sc); sc->flags &= ~FLAG_NETGRAPH; } #endif if (sc->flags & FLAG_IFNET) { IFQ_PURGE(&sc->ifp->if_snd); lmc_ifnet_detach(sc); sc->flags &= ~FLAG_IFNET; } /* Reset the Tulip chip; stops DMA and Interrupts. */ shutdown_card(sc); } /* This is the I/O configuration interface for FreeBSD */ static int fbsd_probe(device_t dev) { u_int32_t cfid = pci_read_config(dev, TLP_CFID, 4); u_int32_t csid = pci_read_config(dev, TLP_CSID, 4); /* Looking for a DEC 21140A chip on any Lan Media Corp card. */ if (cfid != TLP_CFID_TULIP) return ENXIO; switch (csid) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: device_set_desc(dev, HSSI_DESC); break; case TLP_CSID_T3: device_set_desc(dev, T3_DESC); break; case TLP_CSID_SSI: device_set_desc(dev, SSI_DESC); break; case TLP_CSID_T1E1: device_set_desc(dev, T1E1_DESC); break; default: return ENXIO; } return 0; } static int fbsd_detach(device_t dev) { softc_t *sc = device_get_softc(dev); /* Stop the card and detach from the kernel. */ detach_card(sc); /* Release resources. 
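 * Teardown runs in the reverse order of fbsd_attach(), and each handle
 * is set to NULL once released; that is what lets fbsd_attach() call
 * fbsd_detach() on its partial-failure paths without double-freeing
 * anything. */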
if (sc->irq_cookie != NULL) { bus_teardown_intr(dev, sc->irq_res, sc->irq_cookie); sc->irq_cookie = NULL; } if (sc->irq_res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_res_id, sc->irq_res); sc->irq_res = NULL; } if (sc->csr_res != NULL) { bus_release_resource(dev, sc->csr_res_type, sc->csr_res_id, sc->csr_res); sc->csr_res = NULL; } mtx_destroy(&sc->top_mtx); mtx_destroy(&sc->bottom_mtx); return 0; /* no error */ } static int fbsd_shutdown(device_t dev) { shutdown_card(device_get_softc(dev)); return 0; } static int fbsd_attach(device_t dev) { softc_t *sc = device_get_softc(dev); int error; /* READ/WRITE_PCI_CFG need this. */ sc->dev = dev; /* What kind of card are we driving? */ switch (READ_PCI_CFG(sc, TLP_CSID)) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: sc->card = &hssi_card; break; case TLP_CSID_T3: sc->card = &t3_card; break; case TLP_CSID_SSI: sc->card = &ssi_card; break; case TLP_CSID_T1E1: sc->card = &t1_card; break; default: return ENXIO; } sc->dev_desc = device_get_desc(dev); /* Allocate PCI memory or IO resources to access the Tulip chip CSRs. */ # if IOREF_CSR sc->csr_res_id = TLP_CBIO; sc->csr_res_type = SYS_RES_IOPORT; # else sc->csr_res_id = TLP_CBMA; sc->csr_res_type = SYS_RES_MEMORY; # endif sc->csr_res = bus_alloc_resource(dev, sc->csr_res_type, &sc->csr_res_id, 0, ~0, 1, RF_ACTIVE); if (sc->csr_res == NULL) { printf("%s: bus_alloc_resource(csr) failed.\n", NAME_UNIT); return ENXIO; } sc->csr_tag = rman_get_bustag(sc->csr_res); sc->csr_handle = rman_get_bushandle(sc->csr_res); /* Allocate PCI interrupt resources for the card. */ sc->irq_res_id = 0; sc->irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irq_res_id, 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); if (sc->irq_res == NULL) { printf("%s: bus_alloc_resource(irq) failed.\n", NAME_UNIT); fbsd_detach(dev); return ENXIO; } if ((error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, bsd_interrupt, sc, &sc->irq_cookie))) { printf("%s: bus_setup_intr() failed; error %d\n", NAME_UNIT, error); fbsd_detach(dev); return error; } /* Initialize the top-half and bottom-half locks. */ mtx_init(&sc->top_mtx, NAME_UNIT, "top half lock", MTX_DEF); mtx_init(&sc->bottom_mtx, NAME_UNIT, "bottom half lock", MTX_DEF); /* Start the card and attach a kernel interface and line protocol. */ if ((error = attach_card(sc, ""))) detach_card(sc); return error; } static device_method_t methods[] = { DEVMETHOD(device_probe, fbsd_probe), DEVMETHOD(device_attach, fbsd_attach), DEVMETHOD(device_detach, fbsd_detach), DEVMETHOD(device_shutdown, fbsd_shutdown), /* This driver does not suspend and resume. */ { 0, 0 } }; static driver_t driver = { .name = DEVICE_NAME, .methods = methods, .size = sizeof(softc_t), }; static devclass_t devclass; DRIVER_MODULE(lmc, pci, driver, devclass, 0, 0); MODULE_VERSION(lmc, 2); MODULE_DEPEND(lmc, pci, 1, 1, 1); # if NETGRAPH MODULE_DEPEND(lmc, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); # endif # if NSPPP MODULE_DEPEND(lmc, sppp, 1, 1, 1); # endif /* This is the I/O configuration interface for NetBSD. */ /* This is the I/O configuration interface for OpenBSD. */ /* This is the I/O configuration interface for BSD/OS. */ Index: head/sys/dev/mn/if_mn.c =================================================================== --- head/sys/dev/mn/if_mn.c (revision 276749) +++ head/sys/dev/mn/if_mn.c (revision 276750) @@ -1,1432 +1,1431 @@ /*- * ---------------------------------------------------------------------------- * "THE BEER-WARE LICENSE" (Revision 42): * <phk@FreeBSD.ORG> wrote this file.
As long as you retain this notice you * can do whatever you want with this stuff. If we meet some day, and you think * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp * ---------------------------------------------------------------------------- */ /* * Driver for Siemens reference design card "Easy321-R1". * * This card contains a FALC54 E1/T1 framer and a MUNICH32X 32-channel HDLC * controller. * * The driver supports E1 mode with up to 31 channels. We send CRC4 but don't * check it coming in. * * The FALC54 and MUNICH32X have far too many registers and weird modes for * comfort, so I have not bothered typing it all into a "fooreg.h" file, * you will (badly!) need the documentation anyway if you want to mess with * this gadget. */ #include __FBSDID("$FreeBSD$"); /* * Stuff to describe the MUNIC32X and FALC54 chips. */ #define M32_CHAN 32 /* We have 32 channels */ #define M32_TS 32 /* We have 32 timeslots */ #define NG_MN_NODE_TYPE "mn" #include #include #include #include #include #include #include #include #include #include "pci_if.h" #include #include #include #include #include #include #include static int mn_maxlatency = 1000; SYSCTL_INT(_debug, OID_AUTO, mn_maxlatency, CTLFLAG_RW, &mn_maxlatency, 0, "The number of milliseconds a packet is allowed to spend in the output queue. " "If the output queue is longer than this number of milliseconds when the packet " "arrives for output, the packet will be dropped." ); #ifndef NMN /* Most machines don't support more than 4 busmaster PCI slots, if even that many */ #define NMN 4 #endif /* From: PEB 20321 data sheet, p187, table 22 */ struct m32xreg { u_int32_t conf, cmd, stat, imask; u_int32_t fill10, piqba, piql, fill1c; u_int32_t mode1, mode2, ccba, txpoll; u_int32_t tiqba, tiql, riqba, riql; u_int32_t lconf, lccba, fill48, ltran; u_int32_t ltiqba, ltiql, lriqba, lriql; u_int32_t lreg0, lreg1, lreg2, lreg3; u_int32_t lreg4, lreg5, lre6, lstat; u_int32_t gpdir, gpdata, gpod, fill8c; u_int32_t ssccon, sscbr, ssctb, sscrb; u_int32_t ssccse, sscim, fillab, fillac; u_int32_t iomcon1, iomcon2, iomstat, fillbc; u_int32_t iomcit0, iomcit1, iomcir0, iomcir1; u_int32_t iomtmo, iomrmo, filld8, filldc; u_int32_t mbcmd, mbdata1, mbdata2, mbdata3; u_int32_t mbdata4, mbdata5, mbdata6, mbdata7; }; /* From: PEB 2254 data sheet, p80, table 10 */ struct f54wreg { u_int16_t xfifo; u_int8_t cmdr, mode, rah1, rah2, ral1, ral2; u_int8_t ipc, ccr1, ccr3, pre, rtr1, rtr2, rtr3, rtr4; u_int8_t ttr1, ttr2, ttr3, ttr4, imr0, imr1, imr2, imr3; u_int8_t imr4, fill19, fmr0, fmr1, fmr2, loop, xsw, xsp; u_int8_t xc0, xc1, rc0, rc1, xpm0, xpm1, xpm2, tswm; u_int8_t test1, idle, xsa4, xsa5, xsa6, xsa7, xsa8, fmr3; u_int8_t icb1, icb2, icb3, icb4, lim0, lim1, pcd, pcr; u_int8_t lim2, fill39[7]; u_int8_t fill40[8]; u_int8_t fill48[8]; u_int8_t fill50[8]; u_int8_t fill58[8]; u_int8_t dec, fill61, test2, fill63[5]; u_int8_t fill68[8]; u_int8_t xs[16]; }; /* From: PEB 2254 data sheet, p117, table 10 */ struct f54rreg { u_int16_t rfifo; u_int8_t fill2, mode, rah1, rah2, ral1, ral2; u_int8_t ipc, ccr1, ccr3, pre, rtr1, rtr2, rtr3, rtr4; u_int8_t ttr1, ttr2, ttr3, ttr4, imr0, imr1, imr2, imr3; u_int8_t imr4, fill19, fmr0, fmr1, fmr2, loop, xsw, xsp; u_int8_t xc0, xc1, rc0, rc1, xpm0, xpm1, xpm2, tswm; u_int8_t test, idle, xsa4, xsa5, xsa6, xsa7, xsa8, fmr13; u_int8_t icb1, icb2, icb3, icb4, lim0, lim1, pcd, pcr; u_int8_t lim2, fill39[7]; u_int8_t fill40[8]; u_int8_t fill48[4], frs0, frs1, rsw, rsp; u_int16_t fec, cvc, cec1, ebc; u_int16_t cec2, cec3; 
u_int8_t rsa4, rsa5, rsa6, rsa7; u_int8_t rsa8, rsa6s, tsr0, tsr1, sis, rsis; u_int16_t rbc; u_int8_t isr0, isr1, isr2, isr3, fill6c, fill6d, gis, vstr; u_int8_t rs[16]; }; /* Transmit & receive descriptors */ struct trxd { u_int32_t flags; vm_offset_t next; vm_offset_t data; u_int32_t status; /* only used for receive */ struct mbuf *m; /* software use only */ struct trxd *vnext; /* software use only */ }; /* Channel specification */ struct cspec { u_int32_t flags; vm_offset_t rdesc; vm_offset_t tdesc; u_int32_t itbs; }; struct m32_mem { vm_offset_t csa; u_int32_t ccb; u_int32_t reserve1[2]; u_int32_t ts[M32_TS]; struct cspec cs[M32_CHAN]; vm_offset_t crxd[M32_CHAN]; vm_offset_t ctxd[M32_CHAN]; }; struct mn_softc; struct sockaddr; struct rtentry; static int mn_probe(device_t self); static int mn_attach(device_t self); static void mn_create_channel(struct mn_softc *sc, int chan); static int mn_reset(struct mn_softc *sc); static struct trxd * mn_alloc_desc(void); static void mn_free_desc(struct trxd *dp); static void mn_intr(void *xsc); static u_int32_t mn_parse_ts(const char *s, int *nbit); #ifdef notyet static void m32_dump(struct mn_softc *sc); static void f54_dump(struct mn_softc *sc); static void mn_fmt_ts(char *p, u_int32_t ts); #endif /* notyet */ static void f54_init(struct mn_softc *sc); static ng_constructor_t ngmn_constructor; static ng_rcvmsg_t ngmn_rcvmsg; static ng_shutdown_t ngmn_shutdown; static ng_newhook_t ngmn_newhook; static ng_connect_t ngmn_connect; static ng_rcvdata_t ngmn_rcvdata; static ng_disconnect_t ngmn_disconnect; static struct ng_type mntypestruct = { .version = NG_ABI_VERSION, .name = NG_MN_NODE_TYPE, .constructor = ngmn_constructor, .rcvmsg = ngmn_rcvmsg, .shutdown = ngmn_shutdown, .newhook = ngmn_newhook, .connect = ngmn_connect, .rcvdata = ngmn_rcvdata, .disconnect = ngmn_disconnect, }; static MALLOC_DEFINE(M_MN, "mn", "Mx driver related"); #define NIQB 64 struct schan { enum {DOWN, UP} state; struct mn_softc *sc; int chan; u_int32_t ts; char name[8]; struct trxd *r1, *rl; struct trxd *x1, *xl; hook_p hook; time_t last_recv; time_t last_rxerr; time_t last_xmit; u_long rx_error; u_long short_error; u_long crc_error; u_long dribble_error; u_long long_error; u_long abort_error; u_long overflow_error; int last_error; int prev_error; u_long tx_pending; u_long tx_limit; }; enum framing {WHOKNOWS, E1, E1U, T1, T1U}; struct mn_softc { int unit; device_t dev; struct resource *irq; void *intrhand; enum framing framing; int nhooks; void *m0v, *m1v; vm_offset_t m0p, m1p; struct m32xreg *m32x; struct f54wreg *f54w; struct f54rreg *f54r; struct m32_mem m32_mem; u_int32_t tiqb[NIQB]; u_int32_t riqb[NIQB]; u_int32_t piqb[NIQB]; u_int32_t ltiqb[NIQB]; u_int32_t lriqb[NIQB]; char name[8]; u_int32_t falc_irq, falc_state, framer_state; struct schan *ch[M32_CHAN]; char nodename[NG_NODESIZ]; node_p node; u_long cnt_fec; u_long cnt_cvc; u_long cnt_cec1; u_long cnt_ebc; u_long cnt_cec2; u_long cnt_cec3; u_long cnt_rbc; }; static int ngmn_constructor(node_p node) { return (EINVAL); } static int ngmn_shutdown(node_p nodep) { return (EINVAL); } static void ngmn_config(node_p node, char *set, char *ret) { struct mn_softc *sc; enum framing wframing; sc = NG_NODE_PRIVATE(node); if (set != NULL) { if (!strncmp(set, "line ", 5)) { wframing = sc->framing; if (!strcmp(set, "line e1")) { wframing = E1; } else if (!strcmp(set, "line e1u")) { wframing = E1U; } else { strcat(ret, "ENOGROK\n"); return; } if (wframing == sc->framing) return; if (sc->nhooks > 0) { sprintf(ret, "Cannot change line 
when %d hooks open\n", sc->nhooks); return; } sc->framing = wframing; #if 1 f54_init(sc); #else mn_reset(sc); #endif } else { printf("%s CONFIG SET [%s]\n", sc->nodename, set); strcat(ret, "ENOGROK\n"); return; } } } static int ngmn_rcvmsg(node_p node, item_p item, hook_p lasthook) { struct mn_softc *sc; struct ng_mesg *resp = NULL; struct schan *sch; char *s, *r; int pos, i; struct ng_mesg *msg; NGI_GET_MSG(item, msg); sc = NG_NODE_PRIVATE(node); if (msg->header.typecookie != NGM_GENERIC_COOKIE) { NG_FREE_ITEM(item); NG_FREE_MSG(msg); return (EINVAL); } if (msg->header.cmd != NGM_TEXT_CONFIG && msg->header.cmd != NGM_TEXT_STATUS) { NG_FREE_ITEM(item); NG_FREE_MSG(msg); return (EINVAL); } NG_MKRESPONSE(resp, msg, sizeof(struct ng_mesg) + NG_TEXTRESPONSE, M_NOWAIT); if (resp == NULL) { NG_FREE_ITEM(item); NG_FREE_MSG(msg); return (ENOMEM); } if (msg->header.arglen) s = (char *)msg->data; else s = NULL; r = (char *)resp->data; *r = '\0'; if (msg->header.cmd == NGM_TEXT_CONFIG) { ngmn_config(node, s, r); resp->header.arglen = strlen(r) + 1; NG_RESPOND_MSG(i, node, item, resp); NG_FREE_MSG(msg); return (0); } pos = 0; pos += sprintf(pos + r,"Framer status %b;\n", sc->framer_state, "\20" "\40LOS\37AIS\36LFA\35RRA" "\34AUXP\33NMF\32LMFA\31frs0.0" "\30frs1.7\27TS16RA\26TS16LOS\25TS16AIS" "\24TS16LFA\23frs1.2\22XLS\21XLO" "\20RS1\17rsw.6\16RRA\15RY0" "\14RY1\13RY2\12RY3\11RY4" "\10SI1\7SI2\6rsp.5\5rsp.4" "\4rsp.3\3RSIF\2RS13\1RS15"); pos += sprintf(pos + r," Framing errors: %lu", sc->cnt_fec); pos += sprintf(pos + r," Code Violations: %lu\n", sc->cnt_cvc); pos += sprintf(pos + r," Falc State %b;\n", sc->falc_state, "\20" "\40LOS\37AIS\36LFA\35RRA" "\34AUXP\33NMF\32LMFA\31frs0.0" "\30frs1.7\27TS16RA\26TS16LOS\25TS16AIS" "\24TS16LFA\23frs1.2\22XLS\21XLO" "\20RS1\17rsw.6\16RRA\15RY0" "\14RY1\13RY2\12RY3\11RY4" "\10SI1\7SI2\6rsp.5\5rsp.4" "\4rsp.3\3RSIF\2RS13\1RS15"); pos += sprintf(pos + r, " Falc IRQ %b\n", sc->falc_irq, "\20" "\40RME\37RFS\36T8MS\35RMB\34CASC\33CRC4\32SA6SC\31RPF" "\30b27\27RDO\26ALLS\25XDU\24XMB\23b22\22XLSC\21XPR" "\20FAR\17LFA\16MFAR\15T400MS\14AIS\13LOS\12RAR\11RA" "\10ES\7SEC\6LMFA16\5AIS16\4RA16\3API\2SLN\1SLP"); for (i = 0; i < M32_CHAN; i++) { if (!sc->ch[i]) continue; sch = sc->ch[i]; pos += sprintf(r + pos, " Chan %d <%s> ", i, NG_HOOK_NAME(sch->hook)); pos += sprintf(r + pos, " Last Rx: "); if (sch->last_recv) pos += sprintf(r + pos, "%lu s", (unsigned long)(time_second - sch->last_recv)); else pos += sprintf(r + pos, "never"); pos += sprintf(r + pos, ", last RxErr: "); if (sch->last_rxerr) pos += sprintf(r + pos, "%lu s", (unsigned long)(time_second - sch->last_rxerr)); else pos += sprintf(r + pos, "never"); pos += sprintf(r + pos, ", last Tx: "); if (sch->last_xmit) pos += sprintf(r + pos, "%lu s\n", (unsigned long)(time_second - sch->last_xmit)); else pos += sprintf(r + pos, "never\n"); pos += sprintf(r + pos, " RX error(s) %lu", sch->rx_error); pos += sprintf(r + pos, " Short: %lu", sch->short_error); pos += sprintf(r + pos, " CRC: %lu", sch->crc_error); pos += sprintf(r + pos, " Mod8: %lu", sch->dribble_error); pos += sprintf(r + pos, " Long: %lu", sch->long_error); pos += sprintf(r + pos, " Abort: %lu", sch->abort_error); pos += sprintf(r + pos, " Overflow: %lu\n", sch->overflow_error); pos += sprintf(r + pos, " Last error: %b Prev error: %b\n", sch->last_error, "\20\7SHORT\5CRC\4MOD8\3LONG\2ABORT\1OVERRUN", sch->prev_error, "\20\7SHORT\5CRC\4MOD8\3LONG\2ABORT\1OVERRUN"); pos += sprintf(r + pos, " Xmit bytes pending %ld\n", sch->tx_pending); } resp->header.arglen = 
pos + 1; /* Take care of synchronous response, if any */ NG_RESPOND_MSG(i, node, item, resp); NG_FREE_MSG(msg); return (0); } static int ngmn_newhook(node_p node, hook_p hook, const char *name) { u_int32_t ts, chan; struct mn_softc *sc; int nbit; sc = NG_NODE_PRIVATE(node); if (name[0] != 't' || name[1] != 's') return (EINVAL); ts = mn_parse_ts(name + 2, &nbit); printf("%d bits %x\n", nbit, ts); if (sc->framing == E1 && (ts & 1)) return (EINVAL); if (sc->framing == E1U && nbit != 32) return (EINVAL); if (ts == 0) return (EINVAL); if (sc->framing == E1) chan = ffs(ts) - 1; else chan = 1; if (!sc->ch[chan]) mn_create_channel(sc, chan); else if (sc->ch[chan]->state == UP) return (EBUSY); sc->ch[chan]->ts = ts; sc->ch[chan]->hook = hook; sc->ch[chan]->tx_limit = nbit * 8; NG_HOOK_SET_PRIVATE(hook, sc->ch[chan]); sc->nhooks++; return(0); } static struct trxd *mn_desc_free; static struct trxd * mn_alloc_desc(void) { struct trxd *dp; dp = mn_desc_free; if (dp) mn_desc_free = dp->vnext; else dp = (struct trxd *)malloc(sizeof *dp, M_MN, M_NOWAIT); return (dp); } static void mn_free_desc(struct trxd *dp) { dp->vnext = mn_desc_free; mn_desc_free = dp; } static u_int32_t mn_parse_ts(const char *s, int *nbit) { unsigned r; int i, j; char *p; r = 0; j = -1; *nbit = 0; while(*s) { i = strtol(s, &p, 0); if (i < 0 || i > 31) return (0); while (j != -1 && j < i) { r |= 1 << j++; (*nbit)++; } j = -1; r |= 1 << i; (*nbit)++; if (*p == ',') { s = p + 1; continue; } else if (*p == '-') { j = i + 1; s = p + 1; continue; } else if (!*p) { break; } else { return (0); } } return (r); } #ifdef notyet static void mn_fmt_ts(char *p, u_int32_t ts) { char *s; int j; s = ""; ts &= 0xffffffff; for (j = 0; j < 32; j++) { if (!(ts & (1 << j))) continue; sprintf(p, "%s%d", s, j); p += strlen(p); s = ","; if (!(ts & (1 << (j+1)))) continue; for (; j < 32; j++) if (!(ts & (1 << (j+1)))) break; sprintf(p, "-%d", j); p += strlen(p); s = ","; } } #endif /* notyet */ /* * OUTPUT */ static int ngmn_rcvdata(hook_p hook, item_p item) { struct mbuf *m2; struct trxd *dp, *dp2; struct schan *sch; struct mn_softc *sc; int chan, pitch, len; struct mbuf *m; sch = NG_HOOK_PRIVATE(hook); sc = sch->sc; chan = sch->chan; if (sch->state != UP) { NG_FREE_ITEM(item); return (0); } NGI_GET_M(item, m); if (sch->tx_pending + m->m_pkthdr.len > sch->tx_limit * mn_maxlatency) { NG_FREE_M(m); NG_FREE_ITEM(item); return (0); } NG_FREE_ITEM(item); pitch = 0; m2 = m; dp2 = sc->ch[chan]->xl; len = m->m_pkthdr.len; while (len) { dp = mn_alloc_desc(); if (!dp) { pitch++; m_freem(m); sc->ch[chan]->xl = dp2; dp = dp2->vnext; while (dp) { dp2 = dp->vnext; mn_free_desc(dp); dp = dp2; } sc->ch[chan]->xl->vnext = 0; break; } dp->data = vtophys(m2->m_data); dp->flags = m2->m_len << 16; dp->flags += 1; len -= m2->m_len; dp->next = vtophys(dp); dp->vnext = 0; sc->ch[chan]->xl->next = vtophys(dp); sc->ch[chan]->xl->vnext = dp; sc->ch[chan]->xl = dp; if (!len) { dp->m = m; dp->flags |= 0xc0000000; dp2->flags &= ~0x40000000; } else { dp->m = 0; m2 = m2->m_next; } } if (pitch) printf("%s%d: Short on mem, pitched %d packets\n", sc->name, chan, pitch); else { #if 0 printf("%d = %d + %d (%p)\n", sch->tx_pending + m->m_pkthdr.len, sch->tx_pending , m->m_pkthdr.len, m); #endif sch->tx_pending += m->m_pkthdr.len; sc->m32x->txpoll &= ~(1 << chan); } return (0); } /* * OPEN */ static int ngmn_connect(hook_p hook) { int i, nts, chan; struct trxd *dp, *dp2; struct mbuf *m; struct mn_softc *sc; struct schan *sch; u_int32_t u; sch = NG_HOOK_PRIVATE(hook); chan = sch->chan; sc = 
sch->sc; if (sch->state == UP) return (0); sch->state = UP; /* Count and configure the timeslots for this channel */ for (nts = i = 0; i < 32; i++) if (sch->ts & (1 << i)) { sc->m32_mem.ts[i] = 0x00ff00ff | (chan << 24) | (chan << 8); nts++; } /* Init the receiver & xmitter to HDLC */ sc->m32_mem.cs[chan].flags = 0x80e90006; /* Allocate two buffers per timeslot */ if (nts == 32) sc->m32_mem.cs[chan].itbs = 63; else sc->m32_mem.cs[chan].itbs = nts * 2; /* Setup a transmit chain with one descriptor */ /* XXX: we actually send a 1 byte packet */ dp = mn_alloc_desc(); MGETHDR(m, M_WAITOK, MT_DATA); m->m_pkthdr.len = 0; dp->m = m; dp->flags = 0xc0000000 + (1 << 16); dp->next = vtophys(dp); dp->vnext = 0; dp->data = vtophys(sc->name); sc->m32_mem.cs[chan].tdesc = vtophys(dp); sc->ch[chan]->x1 = dp; sc->ch[chan]->xl = dp; /* Setup a receive chain with 5 + NTS descriptors */ dp = mn_alloc_desc(); m = NULL; MGETHDR(m, M_WAITOK, MT_DATA); MCLGET(m, M_WAITOK); dp->m = m; dp->data = vtophys(m->m_data); dp->flags = 0x40000000; dp->flags += 1600 << 16; dp->next = vtophys(dp); dp->vnext = 0; sc->ch[chan]->rl = dp; for (i = 0; i < (nts + 10); i++) { dp2 = dp; dp = mn_alloc_desc(); m = NULL; MGETHDR(m, M_WAITOK, MT_DATA); MCLGET(m, M_WAITOK); dp->m = m; dp->data = vtophys(m->m_data); dp->flags = 0x00000000; dp->flags += 1600 << 16; dp->next = vtophys(dp2); dp->vnext = dp2; } sc->m32_mem.cs[chan].rdesc = vtophys(dp); sc->ch[chan]->r1 = dp; /* Initialize this channel */ sc->m32_mem.ccb = 0x00008000 + (chan << 8); sc->m32x->cmd = 0x1; DELAY(1000); u = sc->m32x->stat; if (!(u & 1)) printf("%s: init chan %d stat %08x\n", sc->name, chan, u); sc->m32x->stat = 1; /* probably not at splnet, force outward queueing */ NG_HOOK_FORCE_QUEUE(NG_HOOK_PEER(hook)); return (0); } /* * CLOSE */ static int ngmn_disconnect(hook_p hook) { int chan, i; struct mn_softc *sc; struct schan *sch; struct trxd *dp, *dp2; u_int32_t u; sch = NG_HOOK_PRIVATE(hook); chan = sch->chan; sc = sch->sc; if (sch->state == DOWN) return (0); sch->state = DOWN; /* Set receiver & transmitter off */ sc->m32_mem.cs[chan].flags = 0x80920006; sc->m32_mem.cs[chan].itbs = 0; /* free the timeslots */ for (i = 0; i < 32; i++) if (sc->ch[chan]->ts & (1 << i)) sc->m32_mem.ts[i] = 0x20002000; /* Initialize this channel */ sc->m32_mem.ccb = 0x00008000 + (chan << 8); sc->m32x->cmd = 0x1; DELAY(30); u = sc->m32x->stat; if (!(u & 1)) printf("%s: zap chan %d stat %08x\n", sc->name, chan, u); sc->m32x->stat = 1; /* Free all receive descriptors and mbufs */ for (dp = sc->ch[chan]->r1; dp ; dp = dp2) { if (dp->m) m_freem(dp->m); sc->ch[chan]->r1 = dp2 = dp->vnext; mn_free_desc(dp); } /* Free all transmit descriptors and mbufs */ for (dp = sc->ch[chan]->x1; dp ; dp = dp2) { if (dp->m) { sc->ch[chan]->tx_pending -= dp->m->m_pkthdr.len; m_freem(dp->m); } sc->ch[chan]->x1 = dp2 = dp->vnext; mn_free_desc(dp); } sc->nhooks--; return(0); } /* * Create a new channel. 
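*/

/*
 * A minimal sketch of the cluster-allocation pattern used in
 * ngmn_connect() above and in the receive-replenish path: as of this
 * change MCLGET() evaluates to nonzero on success, so the old separate
 * (m->m_flags & M_EXT) test is no longer needed.  The helper name
 * mn_example_get_cl() is hypothetical and not part of this driver.
 */
static struct mbuf *
mn_example_get_cl(int how)
{
	struct mbuf *m;

	/* Grab a packet header mbuf; may fail with M_NOWAIT. */
	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return (NULL);
	/* Attach a cluster; MCLGET() now reports success directly. */
	if (!(MCLGET(m, how))) {
		m_freem(m);
		return (NULL);
	}
	return (m);
}

/*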
*/ static void mn_create_channel(struct mn_softc *sc, int chan) { struct schan *sch; sch = sc->ch[chan] = (struct schan *)malloc(sizeof *sc->ch[chan], M_MN, M_WAITOK | M_ZERO); sch->sc = sc; sch->state = DOWN; sch->chan = chan; sprintf(sch->name, "%s%d", sc->name, chan); return; } #ifdef notyet /* * Dump Munich32x state */ static void m32_dump(struct mn_softc *sc) { u_int32_t *tp4; int i, j; printf("mn%d: MUNICH32X dump\n", sc->unit); tp4 = (u_int32_t *)sc->m0v; for(j = 0; j < 64; j += 8) { printf("%02x", j * sizeof *tp4); for(i = 0; i < 8; i++) printf(" %08x", tp4[i+j]); printf("\n"); } for(j = 0; j < M32_CHAN; j++) { if (!sc->ch[j]) continue; printf("CH%d: state %d ts %08x", j, sc->ch[j]->state, sc->ch[j]->ts); printf(" %08x %08x %08x %08x %08x %08x\n", sc->m32_mem.cs[j].flags, sc->m32_mem.cs[j].rdesc, sc->m32_mem.cs[j].tdesc, sc->m32_mem.cs[j].itbs, sc->m32_mem.crxd[j], sc->m32_mem.ctxd[j] ); } } /* * Dump Falch54 state */ static void f54_dump(struct mn_softc *sc) { u_int8_t *tp1; int i, j; printf("%s: FALC54 dump\n", sc->name); tp1 = (u_int8_t *)sc->m1v; for(j = 0; j < 128; j += 16) { printf("%s: %02x |", sc->name, j * sizeof *tp1); for(i = 0; i < 16; i++) printf(" %02x", tp1[i+j]); printf("\n"); } } #endif /* notyet */ /* * Init Munich32x */ static void m32_init(struct mn_softc *sc) { sc->m32x->conf = 0x00000000; sc->m32x->mode1 = 0x81048000 + 1600; /* XXX: temp */ #if 1 sc->m32x->mode2 = 0x00000081; sc->m32x->txpoll = 0xffffffff; #elif 1 sc->m32x->mode2 = 0x00000081; sc->m32x->txpoll = 0xffffffff; #else sc->m32x->mode2 = 0x00000101; #endif sc->m32x->lconf = 0x6060009B; sc->m32x->imask = 0x00000000; } /* * Init the Falc54 */ static void f54_init(struct mn_softc *sc) { sc->f54w->ipc = 0x07; sc->f54w->xpm0 = 0xbd; sc->f54w->xpm1 = 0x03; sc->f54w->xpm2 = 0x00; sc->f54w->imr0 = 0x18; /* RMB, CASC */ sc->f54w->imr1 = 0x08; /* XMB */ sc->f54w->imr2 = 0x00; sc->f54w->imr3 = 0x38; /* LMFA16, AIS16, RA16 */ sc->f54w->imr4 = 0x00; sc->f54w->fmr0 = 0xf0; /* X: HDB3, R: HDB3 */ sc->f54w->fmr1 = 0x0e; /* Send CRC4, 2Mbit, ECM */ if (sc->framing == E1) sc->f54w->fmr2 = 0x03; /* Auto Rem-Alarm, Auto resync */ else if (sc->framing == E1U) sc->f54w->fmr2 = 0x33; /* dais, rtm, Auto Rem-Alarm, Auto resync */ sc->f54w->lim1 = 0xb0; /* XCLK=8kHz, .62V threshold */ sc->f54w->pcd = 0x0a; sc->f54w->pcr = 0x15; sc->f54w->xsw = 0x9f; /* fmr4 */ if (sc->framing == E1) sc->f54w->xsp = 0x1c; /* fmr5 */ else if (sc->framing == E1U) sc->f54w->xsp = 0x3c; /* tt0, fmr5 */ sc->f54w->xc0 = 0x07; sc->f54w->xc1 = 0x3d; sc->f54w->rc0 = 0x05; sc->f54w->rc1 = 0x00; sc->f54w->cmdr = 0x51; } static int mn_reset(struct mn_softc *sc) { u_int32_t u; int i; sc->m32x->ccba = vtophys(&sc->m32_mem.csa); sc->m32_mem.csa = vtophys(&sc->m32_mem.ccb); bzero(sc->tiqb, sizeof sc->tiqb); sc->m32x->tiqba = vtophys(&sc->tiqb); sc->m32x->tiql = NIQB / 16 - 1; bzero(sc->riqb, sizeof sc->riqb); sc->m32x->riqba = vtophys(&sc->riqb); sc->m32x->riql = NIQB / 16 - 1; bzero(sc->ltiqb, sizeof sc->ltiqb); sc->m32x->ltiqba = vtophys(&sc->ltiqb); sc->m32x->ltiql = NIQB / 16 - 1; bzero(sc->lriqb, sizeof sc->lriqb); sc->m32x->lriqba = vtophys(&sc->lriqb); sc->m32x->lriql = NIQB / 16 - 1; bzero(sc->piqb, sizeof sc->piqb); sc->m32x->piqba = vtophys(&sc->piqb); sc->m32x->piql = NIQB / 16 - 1; m32_init(sc); f54_init(sc); u = sc->m32x->stat; sc->m32x->stat = u; sc->m32_mem.ccb = 0x4; sc->m32x->cmd = 0x1; DELAY(1000); u = sc->m32x->stat; sc->m32x->stat = u; /* set all timeslots to known state */ for (i = 0; i < 32; i++) sc->m32_mem.ts[i] = 0x20002000; if (!(u & 
1)) { printf( "mn%d: WARNING: Controller failed the PCI bus-master test.\n" "mn%d: WARNING: Use a PCI slot which can support bus-master cards.\n", sc->unit, sc->unit); return (0); } return (1); } /* * FALC54 interrupt handling */ static void f54_intr(struct mn_softc *sc) { unsigned g, u, s; g = sc->f54r->gis; u = sc->f54r->isr0 << 24; u |= sc->f54r->isr1 << 16; u |= sc->f54r->isr2 << 8; u |= sc->f54r->isr3; sc->falc_irq = u; /* don't chat about the 1 sec heart beat */ if (u & ~0x40) { #if 0 printf("%s*: FALC54 IRQ GIS:%02x %b\n", sc->name, g, u, "\20" "\40RME\37RFS\36T8MS\35RMB\34CASC\33CRC4\32SA6SC\31RPF" "\30b27\27RDO\26ALLS\25XDU\24XMB\23b22\22XLSC\21XPR" "\20FAR\17LFA\16MFAR\15T400MS\14AIS\13LOS\12RAR\11RA" "\10ES\7SEC\6LMFA16\5AIS16\4RA16\3API\2SLN\1SLP"); #endif s = sc->f54r->frs0 << 24; s |= sc->f54r->frs1 << 16; s |= sc->f54r->rsw << 8; s |= sc->f54r->rsp; sc->falc_state = s; s &= ~0x01844038; /* undefined or static bits */ s &= ~0x00009fc7; /* bits we don't care about */ s &= ~0x00780000; /* XXX: TS16 related */ s &= ~0x06000000; /* XXX: Multiframe related */ #if 0 printf("%s*: FALC54 Status %b\n", sc->name, s, "\20" "\40LOS\37AIS\36LFA\35RRA\34AUXP\33NMF\32LMFA\31frs0.0" "\30frs1.7\27TS16RA\26TS16LOS\25TS16AIS\24TS16LFA\23frs1.2\22XLS\21XLO" "\20RS1\17rsw.6\16RRA\15RY0\14RY1\13RY2\12RY3\11RY4" "\10SI1\7SI2\6rsp.5\5rsp.4\4rsp.3\3RSIF\2RS13\1RS15"); #endif if (s != sc->framer_state) { #if 0 for (i = 0; i < M32_CHAN; i++) { if (!sc->ch[i]) continue; sp = &sc->ch[i]->ifsppp; if (!(SP2IFP(sp)->if_flags & IFF_UP)) continue; if (s) timeout((timeout_t *)sp->pp_down, sp, 1 * hz); else timeout((timeout_t *)sp->pp_up, sp, 1 * hz); } #endif sc->framer_state = s; } } /* Once per second check error counters */ /* XXX: not clear if this is actually ok */ if (!(u & 0x40)) return; sc->cnt_fec += sc->f54r->fec; sc->cnt_cvc += sc->f54r->cvc; sc->cnt_cec1 += sc->f54r->cec1; sc->cnt_ebc += sc->f54r->ebc; sc->cnt_cec2 += sc->f54r->cec2; sc->cnt_cec3 += sc->f54r->cec3; sc->cnt_rbc += sc->f54r->rbc; } /* * Transmit interrupt for one channel */ static void mn_tx_intr(struct mn_softc *sc, u_int32_t vector) { u_int32_t chan; struct trxd *dp; struct mbuf *m; chan = vector & 0x1f; if (!sc->ch[chan]) return; if (sc->ch[chan]->state != UP) { printf("%s: tx_intr when not UP\n", sc->name); return; } for (;;) { dp = sc->ch[chan]->x1; if (vtophys(dp) == sc->m32_mem.ctxd[chan]) return; m = dp->m; if (m) { #if 0 printf("%d = %d - %d (%p)\n", sc->ch[chan]->tx_pending - m->m_pkthdr.len, sc->ch[chan]->tx_pending , m->m_pkthdr.len, m); #endif sc->ch[chan]->tx_pending -= m->m_pkthdr.len; m_freem(m); } sc->ch[chan]->last_xmit = time_second; sc->ch[chan]->x1 = dp->vnext; mn_free_desc(dp); } } /* * Receive interrupt for one channel */ static void mn_rx_intr(struct mn_softc *sc, u_int32_t vector) { u_int32_t chan, err; struct trxd *dp; struct mbuf *m; struct schan *sch; chan = vector & 0x1f; if (!sc->ch[chan]) return; sch = sc->ch[chan]; if (sch->state != UP) { printf("%s: rx_intr when not UP\n", sc->name); return; } vector &= ~0x1f; if (vector == 0x30000b00) sch->rx_error++; for (;;) { dp = sch->r1; if (vtophys(dp) == sc->m32_mem.crxd[chan]) return; m = dp->m; dp->m = 0; m->m_pkthdr.len = m->m_len = (dp->status >> 16) & 0x1fff; err = (dp->status >> 8) & 0xff; if (!err) { int error; NG_SEND_DATA_ONLY(error, sch->hook, m); sch->last_recv = time_second; /* we could be down by now... 
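 * NG_SEND_DATA_ONLY() may deliver the mbuf synchronously and reenter
 * this node, so the channel can have been disconnected underneath us;
 * hence the sch->state recheck below before the ring is touched again.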
*/ if (sch->state != UP) return; } else if (err & 0x40) { sch->short_error++; } else if (err & 0x10) { sch->crc_error++; } else if (err & 0x08) { sch->dribble_error++; } else if (err & 0x04) { sch->long_error++; } else if (err & 0x02) { sch->abort_error++; } else if (err & 0x01) { sch->overflow_error++; } if (err) { sch->last_rxerr = time_second; sch->prev_error = sch->last_error; sch->last_error = err; } sc->ch[chan]->r1 = dp->vnext; /* Replenish desc + mbuf supplies */ if (!m) { MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { mn_free_desc(dp); return; /* ENOBUFS */ } - MCLGET(m, M_NOWAIT); - if((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { mn_free_desc(dp); m_freem(m); return; /* ENOBUFS */ } } dp->m = m; dp->data = vtophys(m->m_data); dp->flags = 0x40000000; dp->flags += 1600 << 16; dp->next = vtophys(dp); dp->vnext = 0; sc->ch[chan]->rl->next = vtophys(dp); sc->ch[chan]->rl->vnext = dp; sc->ch[chan]->rl->flags &= ~0x40000000; sc->ch[chan]->rl = dp; } } /* * Interrupt handler */ static void mn_intr(void *xsc) { struct mn_softc *sc; u_int32_t stat, lstat, u; int i, j; sc = xsc; stat = sc->m32x->stat; lstat = sc->m32x->lstat; #if 0 if (!stat && !(lstat & 2)) return; #endif if (stat & ~0xc200) { printf("%s: I stat=%08x lstat=%08x\n", sc->name, stat, lstat); } if ((stat & 0x200) || (lstat & 2)) f54_intr(sc); for (j = i = 0; i < 64; i ++) { u = sc->riqb[i]; if (u) { sc->riqb[i] = 0; mn_rx_intr(sc, u); if ((u & ~0x1f) == 0x30000800 || (u & ~0x1f) == 0x30000b00) continue; u &= ~0x30000400; /* bits we don't care about */ if ((u & ~0x1f) == 0x00000900) continue; if (!(u & ~0x1f)) continue; if (!j) printf("%s*: RIQB:", sc->name); printf(" [%d]=%08x", i, u); j++; } } if (j) printf("\n"); for (j = i = 0; i < 64; i ++) { u = sc->tiqb[i]; if (u) { sc->tiqb[i] = 0; mn_tx_intr(sc, u); if ((u & ~0x1f) == 0x20000800) continue; u &= ~0x20000000; /* bits we don't care about */ if (!u) continue; if (!j) printf("%s*: TIQB:", sc->name); printf(" [%d]=%08x", i, u); j++; } } if (j) printf("\n"); sc->m32x->stat = stat; } /* * PCI initialization stuff */ static int mn_probe (device_t self) { u_int id = pci_get_devid(self); if (sizeof (struct m32xreg) != 256) { printf("MN: sizeof(struct m32xreg) = %zd, should have been 256\n", sizeof (struct m32xreg)); return (ENXIO); } if (sizeof (struct f54rreg) != 128) { printf("MN: sizeof(struct f54rreg) = %zd, should have been 128\n", sizeof (struct f54rreg)); return (ENXIO); } if (sizeof (struct f54wreg) != 128) { printf("MN: sizeof(struct f54wreg) = %zd, should have been 128\n", sizeof (struct f54wreg)); return (ENXIO); } if (id != 0x2101110a) return (ENXIO); device_set_desc_copy(self, "Munich32X E1/T1 HDLC Controller"); return (BUS_PROBE_DEFAULT); } static int mn_attach (device_t self) { struct mn_softc *sc; u_int32_t u; u_int32_t ver; static int once; int rid, error; struct resource *res; if (!once) { if (ng_newtype(&mntypestruct)) printf("ng_newtype failed\n"); once++; } sc = (struct mn_softc *)malloc(sizeof *sc, M_MN, M_WAITOK | M_ZERO); device_set_softc(self, sc); sc->dev = self; sc->unit = device_get_unit(self); sc->framing = E1; sprintf(sc->name, "mn%d", sc->unit); rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) { device_printf(self, "Could not map memory\n"); free(sc, M_MN); return ENXIO; } sc->m0v = rman_get_virtual(res); sc->m0p = rman_get_start(res); rid = PCIR_BAR(1); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) { device_printf(self, "Could not map
memory\n"); free(sc, M_MN); return ENXIO; } sc->m1v = rman_get_virtual(res); sc->m1p = rman_get_start(res); /* Allocate interrupt */ rid = 0; sc->irq = bus_alloc_resource_any(self, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq == NULL) { printf("couldn't map interrupt\n"); free(sc, M_MN); return(ENXIO); } error = bus_setup_intr(self, sc->irq, INTR_TYPE_NET, NULL, mn_intr, sc, &sc->intrhand); if (error) { printf("couldn't set up irq\n"); free(sc, M_MN); return(ENXIO); } u = pci_read_config(self, PCIR_COMMAND, 2); printf("%x\n", u); pci_write_config(self, PCIR_COMMAND, u | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN, 2); #if 0 pci_write_config(self, PCIR_COMMAND, 0x02800046, 4); #endif u = pci_read_config(self, PCIR_COMMAND, 1); printf("%x\n", u); ver = pci_get_revid(self); sc->m32x = (struct m32xreg *) sc->m0v; sc->f54w = (struct f54wreg *) sc->m1v; sc->f54r = (struct f54rreg *) sc->m1v; /* We must reset before poking at FALC54 registers */ u = mn_reset(sc); if (!u) return (0); printf("mn%d: Munich32X", sc->unit); switch (ver) { case 0x13: printf(" Rev 2.2"); break; default: printf(" Rev 0x%x\n", ver); } printf(", Falc54"); switch (sc->f54r->vstr) { case 0: printf(" Rev < 1.3\n"); break; case 1: printf(" Rev 1.3\n"); break; case 2: printf(" Rev 1.4\n"); break; case 0x10: printf("-LH Rev 1.1\n"); break; case 0x13: printf("-LH Rev 1.3\n"); break; default: printf(" Rev 0x%x\n", sc->f54r->vstr); } if (ng_make_node_common(&mntypestruct, &sc->node) != 0) { printf("ng_make_node_common failed\n"); return (0); } NG_NODE_SET_PRIVATE(sc->node, sc); sprintf(sc->nodename, "%s%d", NG_MN_NODE_TYPE, sc->unit); if (ng_name_node(sc->node, sc->nodename)) { NG_NODE_UNREF(sc->node); return (0); } return (0); } static device_method_t mn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mn_probe), DEVMETHOD(device_attach, mn_attach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD_END }; static driver_t mn_driver = { "mn", mn_methods, 0 }; static devclass_t mn_devclass; DRIVER_MODULE(mn, pci, mn_driver, mn_devclass, 0, 0); Index: head/sys/dev/my/if_my.c =================================================================== --- head/sys/dev/my/if_my.c (revision 276749) +++ head/sys/dev/my/if_my.c (revision 276750) @@ -1,1776 +1,1774 @@ /*- * Written by: yen_cw@myson.com.tw * Copyright (c) 2002 Myson Technology Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #define NBPFILTER 1 #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include /* * #define MY_USEIOSPACE */ static int MY_USEIOSPACE = 1; #ifdef MY_USEIOSPACE #define MY_RES SYS_RES_IOPORT #define MY_RID MY_PCI_LOIO #else #define MY_RES SYS_RES_MEMORY #define MY_RID MY_PCI_LOMEM #endif #include /* * Various supported device vendors/types and their names. */ struct my_type *my_info_tmp; static struct my_type my_devs[] = { {MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"}, {MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"}, {MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"}, {0, 0, NULL} }; /* * Various supported PHY vendors/types and their names. Note that this driver * will work with pretty much any MII-compliant PHY, so failure to positively * identify the chip is not a fatal error. */ static struct my_type my_phys[] = { {MysonPHYID0, MysonPHYID0, ""}, {SeeqPHYID0, SeeqPHYID0, ""}, {AhdocPHYID0, AhdocPHYID0, ""}, {MarvellPHYID0, MarvellPHYID0, ""}, {LevelOnePHYID0, LevelOnePHYID0, ""}, {0, 0, ""} }; static int my_probe(device_t); static int my_attach(device_t); static int my_detach(device_t); static int my_newbuf(struct my_softc *, struct my_chain_onefrag *); static int my_encap(struct my_softc *, struct my_chain *, struct mbuf *); static void my_rxeof(struct my_softc *); static void my_txeof(struct my_softc *); static void my_txeoc(struct my_softc *); static void my_intr(void *); static void my_start(struct ifnet *); static void my_start_locked(struct ifnet *); static int my_ioctl(struct ifnet *, u_long, caddr_t); static void my_init(void *); static void my_init_locked(struct my_softc *); static void my_stop(struct my_softc *); static void my_autoneg_timeout(void *); static void my_watchdog(void *); static int my_shutdown(device_t); static int my_ifmedia_upd(struct ifnet *); static void my_ifmedia_sts(struct ifnet *, struct ifmediareq *); static u_int16_t my_phy_readreg(struct my_softc *, int); static void my_phy_writereg(struct my_softc *, int, int); static void my_autoneg_xmit(struct my_softc *); static void my_autoneg_mii(struct my_softc *, int, int); static void my_setmode_mii(struct my_softc *, int); static void my_getmode_mii(struct my_softc *); static void my_setcfg(struct my_softc *, int); static void my_setmulti(struct my_softc *); static void my_reset(struct my_softc *); static int my_list_rx_init(struct my_softc *); static int my_list_tx_init(struct my_softc *); static long my_send_cmd_to_phy(struct my_softc *, int, int); #define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) #define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) static 
device_method_t my_methods[] = { /* Device interface */ DEVMETHOD(device_probe, my_probe), DEVMETHOD(device_attach, my_attach), DEVMETHOD(device_detach, my_detach), DEVMETHOD(device_shutdown, my_shutdown), DEVMETHOD_END }; static driver_t my_driver = { "my", my_methods, sizeof(struct my_softc) }; static devclass_t my_devclass; DRIVER_MODULE(my, pci, my_driver, my_devclass, 0, 0); MODULE_DEPEND(my, pci, 1, 1, 1); MODULE_DEPEND(my, ether, 1, 1, 1); static long my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad) { long miir; int i; int mask, data; MY_LOCK_ASSERT(sc); /* enable MII output */ miir = CSR_READ_4(sc, MY_MANAGEMENT); miir &= 0xfffffff0; miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO; /* send 32 1's preamble */ for (i = 0; i < 32; i++) { /* low MDC; MDO is already high (miir) */ miir &= ~MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); /* high MDC */ miir |= MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); } /* calculate ST+OP+PHYAD+REGAD+TA */ data = opcode | (sc->my_phy_addr << 7) | (regad << 2); /* sent out */ mask = 0x8000; while (mask) { /* low MDC, prepare MDO */ miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO); if (mask & data) miir |= MY_MASK_MIIR_MII_MDO; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); /* high MDC */ miir |= MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); DELAY(30); /* next */ mask >>= 1; if (mask == 0x2 && opcode == MY_OP_READ) miir &= ~MY_MASK_MIIR_MII_WRITE; } return miir; } static u_int16_t my_phy_readreg(struct my_softc * sc, int reg) { long miir; int mask, data; MY_LOCK_ASSERT(sc); if (sc->my_info->my_did == MTD803ID) data = CSR_READ_2(sc, MY_PHYBASE + reg * 2); else { miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg); /* read data */ mask = 0x8000; data = 0; while (mask) { /* low MDC */ miir &= ~MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); /* read MDI */ miir = CSR_READ_4(sc, MY_MANAGEMENT); if (miir & MY_MASK_MIIR_MII_MDI) data |= mask; /* high MDC, and wait */ miir |= MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); DELAY(30); /* next */ mask >>= 1; } /* low MDC */ miir &= ~MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); } return (u_int16_t) data; } static void my_phy_writereg(struct my_softc * sc, int reg, int data) { long miir; int mask; MY_LOCK_ASSERT(sc); if (sc->my_info->my_did == MTD803ID) CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data); else { miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg); /* write data */ mask = 0x8000; while (mask) { /* low MDC, prepare MDO */ miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO); if (mask & data) miir |= MY_MASK_MIIR_MII_MDO; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); DELAY(1); /* high MDC */ miir |= MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); DELAY(1); /* next */ mask >>= 1; } /* low MDC */ miir &= ~MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); } return; } /* * Program the 64-bit multicast hash filter. 
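*/

/*
 * A minimal sketch of the hash step my_setmulti() performs below for
 * each multicast address: the top six bits of the inverted big-endian
 * CRC32 of the address select one of the 64 bits spread across the two
 * MAR registers.  The helper name my_example_hash_maddr() is
 * hypothetical and not part of this driver.
 */
static void
my_example_hash_maddr(const u_char *maddr, u_int32_t hashes[2])
{
	int h;

	h = ~ether_crc32_be(maddr, ETHER_ADDR_LEN) >> 26;	/* 0..63 */
	if (h < 32)
		hashes[0] |= 1 << h;		/* lands in MY_MAR0 */
	else
		hashes[1] |= 1 << (h - 32);	/* lands in MY_MAR1 */
}

/*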
*/ static void my_setmulti(struct my_softc * sc) { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = {0, 0}; struct ifmultiaddr *ifma; u_int32_t rxfilt; int mcnt = 0; MY_LOCK_ASSERT(sc); ifp = sc->my_ifp; rxfilt = CSR_READ_4(sc, MY_TCRRCR); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= MY_AM; CSR_WRITE_4(sc, MY_TCRRCR, rxfilt); CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF); return; } /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, MY_MAR0, 0); CSR_WRITE_4(sc, MY_MAR1, 0); /* now program new ones */ if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } if_maddr_runlock(ifp); if (mcnt) rxfilt |= MY_AM; else rxfilt &= ~MY_AM; CSR_WRITE_4(sc, MY_MAR0, hashes[0]); CSR_WRITE_4(sc, MY_MAR1, hashes[1]); CSR_WRITE_4(sc, MY_TCRRCR, rxfilt); return; } /* * Initiate an autonegotiation session. */ static void my_autoneg_xmit(struct my_softc * sc) { u_int16_t phy_sts = 0; MY_LOCK_ASSERT(sc); my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET); DELAY(500); while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET); phy_sts = my_phy_readreg(sc, PHY_BMCR); phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR; my_phy_writereg(sc, PHY_BMCR, phy_sts); return; } static void my_autoneg_timeout(void *arg) { struct my_softc *sc; sc = arg; MY_LOCK_ASSERT(sc); my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1); } /* * Invoke autonegotiation on a PHY. */ static void my_autoneg_mii(struct my_softc * sc, int flag, int verbose) { u_int16_t phy_sts = 0, media, advert, ability; u_int16_t ability2 = 0; struct ifnet *ifp; struct ifmedia *ifm; MY_LOCK_ASSERT(sc); ifm = &sc->ifmedia; ifp = sc->my_ifp; ifm->ifm_media = IFM_ETHER | IFM_AUTO; #ifndef FORCE_AUTONEG_TFOUR /* * First, see if autoneg is supported. If not, there's no point in * continuing. */ phy_sts = my_phy_readreg(sc, PHY_BMSR); if (!(phy_sts & PHY_BMSR_CANAUTONEG)) { if (verbose) device_printf(sc->my_dev, "autonegotiation not supported\n"); ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; return; } #endif switch (flag) { case MY_FLAG_FORCEDELAY: /* * XXX Never use this option anywhere but in the probe * routine: making the kernel stop dead in its tracks for * three whole seconds after we've gone multi-user is really * bad manners. */ my_autoneg_xmit(sc); DELAY(5000000); break; case MY_FLAG_SCHEDDELAY: /* * Wait for the transmitter to go idle before starting an * autoneg session, otherwise my_start() may clobber our * timeout, and we don't want to allow transmission during an * autoneg session since that can screw it up. */ if (sc->my_cdata.my_tx_head != NULL) { sc->my_want_auto = 1; MY_UNLOCK(sc); return; } my_autoneg_xmit(sc); callout_reset(&sc->my_autoneg_timer, hz * 5, my_autoneg_timeout, sc); sc->my_autoneg = 1; sc->my_want_auto = 0; return; case MY_FLAG_DELAYTIMEO: callout_stop(&sc->my_autoneg_timer); sc->my_autoneg = 0; break; default: device_printf(sc->my_dev, "invalid autoneg flag: %d\n", flag); return; } if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) { if (verbose) device_printf(sc->my_dev, "autoneg complete, "); phy_sts = my_phy_readreg(sc, PHY_BMSR); } else { if (verbose) device_printf(sc->my_dev, "autoneg not complete, "); } media = my_phy_readreg(sc, PHY_BMCR); /* Link is good. Report modes and set duplex mode. 
*/ if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) { if (verbose) device_printf(sc->my_dev, "link status good. "); advert = my_phy_readreg(sc, PHY_ANAR); ability = my_phy_readreg(sc, PHY_LPAR); if ((sc->my_pinfo->my_vid == MarvellPHYID0) || (sc->my_pinfo->my_vid == LevelOnePHYID0)) { ability2 = my_phy_readreg(sc, PHY_1000SR); if (ability2 & PHY_1000SR_1000BTXFULL) { advert = 0; ability = 0; /* * this version did not support 1000M, * ifm->ifm_media = * IFM_ETHER|IFM_1000_T|IFM_FDX; */ ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; media &= ~PHY_BMCR_SPEEDSEL; media |= PHY_BMCR_1000; media |= PHY_BMCR_DUPLEX; printf("(full-duplex, 1000Mbps)\n"); } else if (ability2 & PHY_1000SR_1000BTXHALF) { advert = 0; ability = 0; /* * this version did not support 1000M, * ifm->ifm_media = IFM_ETHER|IFM_1000_T; */ ifm->ifm_media = IFM_ETHER | IFM_100_TX; media &= ~PHY_BMCR_SPEEDSEL; media &= ~PHY_BMCR_DUPLEX; media |= PHY_BMCR_1000; printf("(half-duplex, 1000Mbps)\n"); } } if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) { ifm->ifm_media = IFM_ETHER | IFM_100_T4; media |= PHY_BMCR_SPEEDSEL; media &= ~PHY_BMCR_DUPLEX; printf("(100baseT4)\n"); } else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL) { ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; media |= PHY_BMCR_SPEEDSEL; media |= PHY_BMCR_DUPLEX; printf("(full-duplex, 100Mbps)\n"); } else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF) { ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX; media |= PHY_BMCR_SPEEDSEL; media &= ~PHY_BMCR_DUPLEX; printf("(half-duplex, 100Mbps)\n"); } else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL) { ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX; media &= ~PHY_BMCR_SPEEDSEL; media |= PHY_BMCR_DUPLEX; printf("(full-duplex, 10Mbps)\n"); } else if (advert) { ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; media &= ~PHY_BMCR_SPEEDSEL; media &= ~PHY_BMCR_DUPLEX; printf("(half-duplex, 10Mbps)\n"); } media &= ~PHY_BMCR_AUTONEGENBL; /* Set ASIC's duplex mode to match the PHY. */ my_phy_writereg(sc, PHY_BMCR, media); my_setcfg(sc, media); } else { if (verbose) device_printf(sc->my_dev, "no carrier\n"); } my_init_locked(sc); if (sc->my_tx_pend) { sc->my_autoneg = 0; sc->my_tx_pend = 0; my_start_locked(ifp); } return; } /* * To get PHY ability. 
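*/

/*
 * A minimal sketch of the media selection my_autoneg_mii() performs
 * above: the best mode advertised by both ends (our ANAR and the link
 * partner's LPAR) wins, scanned from fastest/full-duplex downward.
 * The helper name my_example_best_media() is hypothetical and not part
 * of this driver.
 */
static int
my_example_best_media(u_int16_t advert, u_int16_t ability)
{
	u_int16_t common = advert & ability;

	if (common & PHY_ANAR_100BT4)
		return (IFM_ETHER | IFM_100_T4);
	if (common & PHY_ANAR_100BTXFULL)
		return (IFM_ETHER | IFM_100_TX | IFM_FDX);
	if (common & PHY_ANAR_100BTXHALF)
		return (IFM_ETHER | IFM_100_TX | IFM_HDX);
	if (common & PHY_ANAR_10BTFULL)
		return (IFM_ETHER | IFM_10_T | IFM_FDX);
	return (IFM_ETHER | IFM_10_T | IFM_HDX);	/* safe fallback */
}

/*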
*/ static void my_getmode_mii(struct my_softc * sc) { u_int16_t bmsr; struct ifnet *ifp; MY_LOCK_ASSERT(sc); ifp = sc->my_ifp; bmsr = my_phy_readreg(sc, PHY_BMSR); if (bootverbose) device_printf(sc->my_dev, "PHY status word: %x\n", bmsr); /* fallback */ sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; if (bmsr & PHY_BMSR_10BTHALF) { if (bootverbose) device_printf(sc->my_dev, "10Mbps half-duplex mode supported\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL); } if (bmsr & PHY_BMSR_10BTFULL) { if (bootverbose) device_printf(sc->my_dev, "10Mbps full-duplex mode supported\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX; } if (bmsr & PHY_BMSR_100BTXHALF) { if (bootverbose) device_printf(sc->my_dev, "100Mbps half-duplex mode supported\n"); ifp->if_baudrate = 100000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX; } if (bmsr & PHY_BMSR_100BTXFULL) { if (bootverbose) device_printf(sc->my_dev, "100Mbps full-duplex mode supported\n"); ifp->if_baudrate = 100000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; } /* Some also support 100BaseT4. */ if (bmsr & PHY_BMSR_100BT4) { if (bootverbose) device_printf(sc->my_dev, "100baseT4 mode supported\n"); ifp->if_baudrate = 100000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4; #ifdef FORCE_AUTONEG_TFOUR if (bootverbose) device_printf(sc->my_dev, "forcing on autoneg support for BT4\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO; #endif } #if 0 /* this version did not support 1000M, */ if (sc->my_pinfo->my_vid == MarvellPHYID0) { if (bootverbose) device_printf(sc->my_dev, "1000Mbps half-duplex mode supported\n"); ifp->if_baudrate = 1000000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX, 0, NULL); if (bootverbose) device_printf(sc->my_dev, "1000Mbps full-duplex mode supported\n"); ifp->if_baudrate = 1000000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX; } #endif if (bmsr & PHY_BMSR_CANAUTONEG) { if (bootverbose) device_printf(sc->my_dev, "autoneg supported\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO; } return; } /* * Set speed and duplex mode. */ static void my_setmode_mii(struct my_softc * sc, int media) { u_int16_t bmcr; MY_LOCK_ASSERT(sc); /* * If an autoneg session is in progress, stop it.
*/ if (sc->my_autoneg) { device_printf(sc->my_dev, "canceling autoneg session\n"); callout_stop(&sc->my_autoneg_timer); sc->my_autoneg = sc->my_want_auto = 0; bmcr = my_phy_readreg(sc, PHY_BMCR); bmcr &= ~PHY_BMCR_AUTONEGENBL; my_phy_writereg(sc, PHY_BMCR, bmcr); } device_printf(sc->my_dev, "selecting MII, "); bmcr = my_phy_readreg(sc, PHY_BMCR); bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 | PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK); #if 0 /* this version did not support 1000M, */ if (IFM_SUBTYPE(media) == IFM_1000_T) { printf("1000Mbps/T4, half-duplex\n"); bmcr &= ~PHY_BMCR_SPEEDSEL; bmcr &= ~PHY_BMCR_DUPLEX; bmcr |= PHY_BMCR_1000; } #endif if (IFM_SUBTYPE(media) == IFM_100_T4) { printf("100Mbps/T4, half-duplex\n"); bmcr |= PHY_BMCR_SPEEDSEL; bmcr &= ~PHY_BMCR_DUPLEX; } if (IFM_SUBTYPE(media) == IFM_100_TX) { printf("100Mbps, "); bmcr |= PHY_BMCR_SPEEDSEL; } if (IFM_SUBTYPE(media) == IFM_10_T) { printf("10Mbps, "); bmcr &= ~PHY_BMCR_SPEEDSEL; } if ((media & IFM_GMASK) == IFM_FDX) { printf("full duplex\n"); bmcr |= PHY_BMCR_DUPLEX; } else { printf("half duplex\n"); bmcr &= ~PHY_BMCR_DUPLEX; } my_phy_writereg(sc, PHY_BMCR, bmcr); my_setcfg(sc, bmcr); return; } /* * The Myson manual states that in order to fiddle with the 'full-duplex' and * '100Mbps' bits in the netconfig register, we first have to put the * transmit and/or receive logic in the idle state. */ static void my_setcfg(struct my_softc * sc, int bmcr) { int i, restart = 0; MY_LOCK_ASSERT(sc); if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) { restart = 1; MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE)); for (i = 0; i < MY_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, MY_TCRRCR) & (MY_TXRUN | MY_RXRUN))) break; } if (i == MY_TIMEOUT) device_printf(sc->my_dev, "failed to force tx and rx to idle \n"); } MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000); MY_CLRBIT(sc, MY_TCRRCR, MY_PS10); if (bmcr & PHY_BMCR_1000) MY_SETBIT(sc, MY_TCRRCR, MY_PS1000); else if (!(bmcr & PHY_BMCR_SPEEDSEL)) MY_SETBIT(sc, MY_TCRRCR, MY_PS10); if (bmcr & PHY_BMCR_DUPLEX) MY_SETBIT(sc, MY_TCRRCR, MY_FD); else MY_CLRBIT(sc, MY_TCRRCR, MY_FD); if (restart) MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE); return; } static void my_reset(struct my_softc * sc) { register int i; MY_LOCK_ASSERT(sc); MY_SETBIT(sc, MY_BCR, MY_SWR); for (i = 0; i < MY_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR)) break; } if (i == MY_TIMEOUT) device_printf(sc->my_dev, "reset never completed!\n"); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); return; } /* * Probe for a Myson chip. Check the PCI vendor and device IDs against our * list and return a device name if we find a match. */ static int my_probe(device_t dev) { struct my_type *t; t = my_devs; while (t->my_name != NULL) { if ((pci_get_vendor(dev) == t->my_vid) && (pci_get_device(dev) == t->my_did)) { device_set_desc(dev, t->my_name); my_info_tmp = t; return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia setup and * ethernet/BPF attach. 
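*/

/*
 * A minimal sketch of the "poke a bit, poll until the chip clears it"
 * idiom that my_reset() and my_setcfg() above depend on.  The helper
 * name my_example_wait_clear() is hypothetical and not part of this
 * driver.
 */
static int
my_example_wait_clear(struct my_softc *sc, int reg, u_int32_t bit)
{
	int i;

	for (i = 0; i < MY_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, reg) & bit))
			return (0);		/* chip acknowledged */
	}
	return (ETIMEDOUT);	/* caller decides how loudly to complain */
}

/*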
*/ static int my_attach(device_t dev) { int i; u_char eaddr[ETHER_ADDR_LEN]; u_int32_t iobase; struct my_softc *sc; struct ifnet *ifp; int media = IFM_ETHER | IFM_100_TX | IFM_FDX; unsigned int round; caddr_t roundptr; struct my_type *p; u_int16_t phy_vid, phy_did, phy_sts = 0; int rid, error = 0; sc = device_get_softc(dev); sc->my_dev = dev; mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->my_autoneg_timer, &sc->my_mtx, 0); callout_init_mtx(&sc->my_watchdog, &sc->my_mtx, 0); /* * Map control/status registers. */ pci_enable_busmaster(dev); if (my_info_tmp->my_did == MTD800ID) { iobase = pci_read_config(dev, MY_PCI_LOIO, 4); if (iobase & 0x300) MY_USEIOSPACE = 0; } rid = MY_RID; sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE); if (sc->my_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto destroy_mutex; } sc->my_btag = rman_get_bustag(sc->my_res); sc->my_bhandle = rman_get_bushandle(sc->my_res); rid = 0; sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->my_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto release_io; } sc->my_info = my_info_tmp; /* Reset the adapter. */ MY_LOCK(sc); my_reset(sc); MY_UNLOCK(sc); /* * Get station address */ for (i = 0; i < ETHER_ADDR_LEN; ++i) eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i); sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8, M_DEVBUF, M_NOWAIT); if (sc->my_ldata_ptr == NULL) { device_printf(dev, "no memory for list buffers!\n"); error = ENXIO; goto release_irq; } sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr; round = (uintptr_t)sc->my_ldata_ptr & 0xF; roundptr = sc->my_ldata_ptr; for (i = 0; i < 8; i++) { if (round % 8) { round++; roundptr++; } else break; } sc->my_ldata = (struct my_list_data *) roundptr; bzero(sc->my_ldata, sizeof(struct my_list_data)); ifp = sc->my_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto free_ldata; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = my_ioctl; ifp->if_start = my_start; ifp->if_init = my_init; ifp->if_baudrate = 10000000; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); if (sc->my_info->my_did == MTD803ID) sc->my_pinfo = my_phys; else { if (bootverbose) device_printf(dev, "probing for a PHY\n"); MY_LOCK(sc); for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) { if (bootverbose) device_printf(dev, "checking address: %d\n", i); sc->my_phy_addr = i; phy_sts = my_phy_readreg(sc, PHY_BMSR); if ((phy_sts != 0) && (phy_sts != 0xffff)) break; else phy_sts = 0; } if (phy_sts) { phy_vid = my_phy_readreg(sc, PHY_VENID); phy_did = my_phy_readreg(sc, PHY_DEVID); if (bootverbose) { device_printf(dev, "found PHY at address %d, ", sc->my_phy_addr); printf("vendor id: %x device id: %x\n", phy_vid, phy_did); } p = my_phys; while (p->my_vid) { if (phy_vid == p->my_vid) { sc->my_pinfo = p; break; } p++; } if (sc->my_pinfo == NULL) sc->my_pinfo = &my_phys[PHY_UNKNOWN]; if (bootverbose) device_printf(dev, "PHY type: %s\n", sc->my_pinfo->my_name); } else { MY_UNLOCK(sc); device_printf(dev, "MII without any phy!\n"); error = ENXIO; goto free_if; } MY_UNLOCK(sc); } /* Do ifmedia setup. 
*/ ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts); MY_LOCK(sc); my_getmode_mii(sc); my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1); media = sc->ifmedia.ifm_media; my_stop(sc); MY_UNLOCK(sc); ifmedia_set(&sc->ifmedia, media); ether_ifattach(ifp, eaddr); error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, my_intr, sc, &sc->my_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); goto detach_if; } return (0); detach_if: ether_ifdetach(ifp); free_if: if_free(ifp); free_ldata: free(sc->my_ldata_ptr, M_DEVBUF); release_irq: bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq); release_io: bus_release_resource(dev, MY_RES, MY_RID, sc->my_res); destroy_mutex: mtx_destroy(&sc->my_mtx); return (error); } static int my_detach(device_t dev) { struct my_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->my_ifp; ether_ifdetach(ifp); MY_LOCK(sc); my_stop(sc); MY_UNLOCK(sc); bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand); callout_drain(&sc->my_watchdog); callout_drain(&sc->my_autoneg_timer); if_free(ifp); free(sc->my_ldata_ptr, M_DEVBUF); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq); bus_release_resource(dev, MY_RES, MY_RID, sc->my_res); mtx_destroy(&sc->my_mtx); return (0); } /* * Initialize the transmit descriptors. */ static int my_list_tx_init(struct my_softc * sc) { struct my_chain_data *cd; struct my_list_data *ld; int i; MY_LOCK_ASSERT(sc); cd = &sc->my_cdata; ld = sc->my_ldata; for (i = 0; i < MY_TX_LIST_CNT; i++) { cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i]; if (i == (MY_TX_LIST_CNT - 1)) cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0]; else cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[i + 1]; } cd->my_tx_free = &cd->my_tx_chain[0]; cd->my_tx_tail = cd->my_tx_head = NULL; return (0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that we * arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int my_list_rx_init(struct my_softc * sc) { struct my_chain_data *cd; struct my_list_data *ld; int i; MY_LOCK_ASSERT(sc); cd = &sc->my_cdata; ld = sc->my_ldata; for (i = 0; i < MY_RX_LIST_CNT; i++) { cd->my_rx_chain[i].my_ptr = (struct my_desc *) & ld->my_rx_list[i]; if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) { MY_UNLOCK(sc); return (ENOBUFS); } if (i == (MY_RX_LIST_CNT - 1)) { cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0]; ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]); } else { cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[i + 1]; ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[i + 1]); } } cd->my_rx_head = &cd->my_rx_chain[0]; return (0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c) { struct mbuf *m_new = NULL; MY_LOCK_ASSERT(sc); MGETHDR(m_new, M_NOWAIT, MT_DATA); if (m_new == NULL) { device_printf(sc->my_dev, "no memory for rx list -- packet dropped!\n"); return (ENOBUFS); } - MCLGET(m_new, M_NOWAIT); - if (!(m_new->m_flags & M_EXT)) { + if (!(MCLGET(m_new, M_NOWAIT))) { device_printf(sc->my_dev, "no memory for rx list -- packet dropped!\n"); m_freem(m_new); return (ENOBUFS); } c->my_mbuf = m_new; c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t)); c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift; c->my_ptr->my_status = MY_OWNByNIC; return (0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to the higher * level protocols. 
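*/

/*
 * A minimal sketch of the receive strategy my_rxeof() uses below: short
 * frames are copied out with m_devget() so the descriptor keeps its
 * cluster, while long frames hand the cluster itself upstream and
 * my_newbuf() attaches a fresh one.  The helper name
 * my_example_rx_claim() is hypothetical and not part of this driver.
 */
static struct mbuf *
my_example_rx_claim(struct my_softc *sc, struct my_chain_onefrag *c,
    int total_len)
{
	struct mbuf *m;

	if (total_len < MINCLSIZE) {
		/* Copy the small frame; the old cluster is recycled. */
		m = m_devget(mtod(c->my_mbuf, char *), total_len, 0,
		    sc->my_ifp, NULL);
		c->my_ptr->my_status = MY_OWNByNIC;
		return (m);
	}
	/* Pass the cluster up; on ENOBUFS the frame is simply dropped. */
	m = c->my_mbuf;
	if (my_newbuf(sc, c) == ENOBUFS)
		return (NULL);		/* old cluster stays in place */
	m->m_pkthdr.rcvif = sc->my_ifp;
	m->m_pkthdr.len = m->m_len = total_len;
	return (m);
}

/*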
*/ static void my_rxeof(struct my_softc * sc) { struct ether_header *eh; struct mbuf *m; struct ifnet *ifp; struct my_chain_onefrag *cur_rx; int total_len = 0; u_int32_t rxstat; MY_LOCK_ASSERT(sc); ifp = sc->my_ifp; while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status) & MY_OWNByNIC)) { cur_rx = sc->my_cdata.my_rx_head; sc->my_cdata.my_rx_head = cur_rx->my_nextdesc; if (rxstat & MY_ES) { /* error summary: give up this rx pkt */ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); cur_rx->my_ptr->my_status = MY_OWNByNIC; continue; } /* No errors; receive the packet. */ total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift; total_len -= ETHER_CRC_LEN; if (total_len < MINCLSIZE) { m = m_devget(mtod(cur_rx->my_mbuf, char *), total_len, 0, ifp, NULL); cur_rx->my_ptr->my_status = MY_OWNByNIC; if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); continue; } } else { m = cur_rx->my_mbuf; /* * Try to conjure up a new mbuf cluster. If that * fails, it means we have an out of memory condition * and should leave the buffer in place and continue. * This will result in a lost packet, but there's * little else we can do in this situation. */ if (my_newbuf(sc, cur_rx) == ENOBUFS) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); cur_rx->my_ptr->my_status = MY_OWNByNIC; continue; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); eh = mtod(m, struct ether_header *); #if NBPFILTER > 0 /* * Handle BPF listeners. Let the BPF user see the packet, but * don't pass it up to the ether_input() layer unless it's a * broadcast packet, multicast packet, matches our ethernet * address or the interface is in promiscuous mode. */ if (bpf_peers_present(ifp->if_bpf)) { bpf_mtap(ifp->if_bpf, m); if (ifp->if_flags & IFF_PROMISC && (bcmp(eh->ether_dhost, IF_LLADDR(sc->my_ifp), ETHER_ADDR_LEN) && (eh->ether_dhost[0] & 1) == 0)) { m_freem(m); continue; } } #endif MY_UNLOCK(sc); (*ifp->if_input)(ifp, m); MY_LOCK(sc); } return; } /* * A frame was downloaded to the chip. It's safe for us to clean up the list * buffers. */ static void my_txeof(struct my_softc * sc) { struct my_chain *cur_tx; struct ifnet *ifp; MY_LOCK_ASSERT(sc); ifp = sc->my_ifp; /* Clear the timeout timer. */ sc->my_timer = 0; if (sc->my_cdata.my_tx_head == NULL) { return; } /* * Go through our tx list and free mbufs for those frames that have * been transmitted. */ while (sc->my_cdata.my_tx_head->my_mbuf != NULL) { u_int32_t txstat; cur_tx = sc->my_cdata.my_tx_head; txstat = MY_TXSTATUS(cur_tx); if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT) break; if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) { if (txstat & MY_TXERR) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if (txstat & MY_EC) /* excessive collision */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); if (txstat & MY_LC) /* late collision */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); } if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & MY_NCRMASK) >> MY_NCRShift); } if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); m_freem(cur_tx->my_mbuf); cur_tx->my_mbuf = NULL; if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) { sc->my_cdata.my_tx_head = NULL; sc->my_cdata.my_tx_tail = NULL; break; } sc->my_cdata.my_tx_head = cur_tx->my_nextdesc; } if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) { if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (CSR_READ_4(sc, MY_TSR) & MY_NCRMask)); } return; } /* * TX 'end of channel' interrupt handler. 
*/ static void my_txeoc(struct my_softc * sc) { struct ifnet *ifp; MY_LOCK_ASSERT(sc); ifp = sc->my_ifp; sc->my_timer = 0; if (sc->my_cdata.my_tx_head == NULL) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->my_cdata.my_tx_tail = NULL; if (sc->my_want_auto) my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1); } else { if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) { MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC; sc->my_timer = 5; CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); } } return; } static void my_intr(void *arg) { struct my_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; MY_LOCK(sc); ifp = sc->my_ifp; if (!(ifp->if_flags & IFF_UP)) { MY_UNLOCK(sc); return; } /* Disable interrupts. */ CSR_WRITE_4(sc, MY_IMR, 0x00000000); for (;;) { status = CSR_READ_4(sc, MY_ISR); status &= MY_INTRS; if (status) CSR_WRITE_4(sc, MY_ISR, status); else break; if (status & MY_RI) /* receive interrupt */ my_rxeof(sc); if ((status & MY_RBU) || (status & MY_RxErr)) { /* rx buffer unavailable or rx error */ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); #ifdef foo my_stop(sc); my_reset(sc); my_init_locked(sc); #endif } if (status & MY_TI) /* tx interrupt */ my_txeof(sc); if (status & MY_ETI) /* tx early interrupt */ my_txeof(sc); if (status & MY_TBU) /* tx buffer unavailable */ my_txeoc(sc); #if 0 /* 90/1/18 delete */ if (status & MY_FBE) { my_reset(sc); my_init_locked(sc); } #endif } /* Re-enable interrupts. */ CSR_WRITE_4(sc, MY_IMR, MY_INTRS); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) my_start_locked(ifp); MY_UNLOCK(sc); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head) { struct my_desc *f = NULL; int total_len; struct mbuf *m, *m_new = NULL; MY_LOCK_ASSERT(sc); /* calculate the total tx pkt length */ total_len = 0; for (m = m_head; m != NULL; m = m->m_next) total_len += m->m_len; /* * Start packing the mbufs in this chain into the fragment pointers. * Stop when we run out of fragments or hit the end of the mbuf * chain. */ m = m_head; MGETHDR(m_new, M_NOWAIT, MT_DATA); if (m_new == NULL) { device_printf(sc->my_dev, "no memory for tx list"); return (1); } if (m_head->m_pkthdr.len > MHLEN) { - MCLGET(m_new, M_NOWAIT); - if (!(m_new->m_flags & M_EXT)) { + if (!(MCLGET(m_new, M_NOWAIT))) { m_freem(m_new); device_printf(sc->my_dev, "no memory for tx list"); return (1); } } m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; f = &c->my_ptr->my_frag[0]; f->my_status = 0; f->my_data = vtophys(mtod(m_new, caddr_t)); total_len = m_new->m_len; f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable; f->my_ctl |= total_len << MY_PKTShift; /* pkt size */ f->my_ctl |= total_len; /* buffer size */ /* 89/12/29 add, for mtd891 *//* [ 89? ] */ if (sc->my_info->my_did == MTD891ID) f->my_ctl |= MY_ETIControl | MY_RetryTxLC; c->my_mbuf = m_head; c->my_lastdesc = 0; MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]); return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. 
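*/

/*
 * A minimal sketch of the defragment-by-copy step my_encap() performs
 * above: the whole outgoing chain is flattened into a single mbuf (with
 * a cluster when it will not fit in MHLEN) because each transmit
 * descriptor carries exactly one physical fragment.  The helper name
 * my_example_defrag() is hypothetical and not part of this driver.
 */
static struct mbuf *
my_example_defrag(struct mbuf *m_head, int how)
{
	struct mbuf *m_new;

	MGETHDR(m_new, how, MT_DATA);
	if (m_new == NULL)
		return (NULL);
	/* New MCLGET() idiom: nonzero return means the cluster is there. */
	if (m_head->m_pkthdr.len > MHLEN && !(MCLGET(m_new, how))) {
		m_freem(m_new);
		return (NULL);
	}
	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
	m_freem(m_head);	/* the original chain is consumed */
	return (m_new);
}

/*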
*/ static void my_start(struct ifnet * ifp) { struct my_softc *sc; sc = ifp->if_softc; MY_LOCK(sc); my_start_locked(ifp); MY_UNLOCK(sc); } static void my_start_locked(struct ifnet * ifp) { struct my_softc *sc; struct mbuf *m_head = NULL; struct my_chain *cur_tx = NULL, *start_tx; sc = ifp->if_softc; MY_LOCK_ASSERT(sc); if (sc->my_autoneg) { sc->my_tx_pend = 1; return; } /* * Check for an available queue slot. If there are none, punt. */ if (sc->my_cdata.my_tx_free->my_mbuf != NULL) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } start_tx = sc->my_cdata.my_tx_free; while (sc->my_cdata.my_tx_free->my_mbuf == NULL) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a descriptor off the free list. */ cur_tx = sc->my_cdata.my_tx_free; sc->my_cdata.my_tx_free = cur_tx->my_nextdesc; /* Pack the data into the descriptor. */ my_encap(sc, cur_tx, m_head); if (cur_tx != start_tx) MY_TXOWN(cur_tx) = MY_OWNByNIC; #if NBPFILTER > 0 /* * If there's a BPF listener, bounce a copy of this frame to * him. */ BPF_MTAP(ifp, cur_tx->my_mbuf); #endif } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) { return; } /* * Place the request for the upload interrupt in the last descriptor * in the chain. This way, if we're chaining several packets at once, * we'll only get an interrupt once for the whole chain rather than * once for each packet. */ MY_TXCTL(cur_tx) |= MY_TXIC; cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC; sc->my_cdata.my_tx_tail = cur_tx; if (sc->my_cdata.my_tx_head == NULL) sc->my_cdata.my_tx_head = start_tx; MY_TXOWN(start_tx) = MY_OWNByNIC; CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); /* tx polling demand */ /* * Set a timeout in case the chip goes out to lunch. */ sc->my_timer = 5; return; } static void my_init(void *xsc) { struct my_softc *sc = xsc; MY_LOCK(sc); my_init_locked(sc); MY_UNLOCK(sc); } static void my_init_locked(struct my_softc *sc) { struct ifnet *ifp = sc->my_ifp; u_int16_t phy_bmcr = 0; MY_LOCK_ASSERT(sc); if (sc->my_autoneg) { return; } if (sc->my_pinfo != NULL) phy_bmcr = my_phy_readreg(sc, PHY_BMCR); /* * Cancel pending I/O and free all RX/TX buffers. */ my_stop(sc); my_reset(sc); /* * Set cache alignment and burst length. */ #if 0 /* 89/9/1 modify, */ CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512); CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF); #endif CSR_WRITE_4(sc, MY_BCR, MY_PBL8); CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512); /* * 89/12/29 add, for mtd891, */ if (sc->my_info->my_did == MTD891ID) { MY_SETBIT(sc, MY_BCR, MY_PROG); MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced); } my_setcfg(sc, phy_bmcr); /* Init circular RX list. */ if (my_list_rx_init(sc) == ENOBUFS) { device_printf(sc->my_dev, "init failed: no memory for rx buffers\n"); my_stop(sc); return; } /* Init TX descriptors. */ my_list_tx_init(sc); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) MY_SETBIT(sc, MY_TCRRCR, MY_PROM); else MY_CLRBIT(sc, MY_TCRRCR, MY_PROM); /* * Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) MY_SETBIT(sc, MY_TCRRCR, MY_AB); else MY_CLRBIT(sc, MY_TCRRCR, MY_AB); /* * Program the multicast filter, if necessary. */ my_setmulti(sc); /* * Load the address of the RX list. */ MY_CLRBIT(sc, MY_TCRRCR, MY_RE); CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0])); /* * Enable interrupts. */ CSR_WRITE_4(sc, MY_IMR, MY_INTRS); CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF); /* Enable receiver and transmitter. 
*/ MY_SETBIT(sc, MY_TCRRCR, MY_RE); MY_CLRBIT(sc, MY_TCRRCR, MY_TE); CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0])); MY_SETBIT(sc, MY_TCRRCR, MY_TE); /* Restore state of BMCR */ if (sc->my_pinfo != NULL) my_phy_writereg(sc, PHY_BMCR, phy_bmcr); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->my_watchdog, hz, my_watchdog, sc); return; } /* * Set media options. */ static int my_ifmedia_upd(struct ifnet * ifp) { struct my_softc *sc; struct ifmedia *ifm; sc = ifp->if_softc; MY_LOCK(sc); ifm = &sc->ifmedia; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { MY_UNLOCK(sc); return (EINVAL); } if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1); else my_setmode_mii(sc, ifm->ifm_media); MY_UNLOCK(sc); return (0); } /* * Report current media status. */ static void my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr) { struct my_softc *sc; u_int16_t advert = 0, ability = 0; sc = ifp->if_softc; MY_LOCK(sc); ifmr->ifm_active = IFM_ETHER; if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) { #if 0 /* this version did not support 1000M, */ if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000) ifmr->ifm_active = IFM_ETHER | IFM_1000TX; #endif if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL) ifmr->ifm_active = IFM_ETHER | IFM_100_TX; else ifmr->ifm_active = IFM_ETHER | IFM_10_T; if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; MY_UNLOCK(sc); return; } ability = my_phy_readreg(sc, PHY_LPAR); advert = my_phy_readreg(sc, PHY_ANAR); #if 0 /* this version did not support 1000M, */ if (sc->my_pinfo->my_vid == MarvellPHYID0) { ability2 = my_phy_readreg(sc, PHY_1000SR); if (ability2 & PHY_1000SR_1000BTXFULL) { advert = 0; ability = 0; ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX; } else if (ability2 & PHY_1000SR_1000BTXHALF) { advert = 0; ability = 0; ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX; } } #endif if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) ifmr->ifm_active = IFM_ETHER | IFM_100_T4; else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL) ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF) ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX; else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL) ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX; else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF) ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX; MY_UNLOCK(sc); return; } static int my_ioctl(struct ifnet * ifp, u_long command, caddr_t data) { struct my_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int error; switch (command) { case SIOCSIFFLAGS: MY_LOCK(sc); if (ifp->if_flags & IFF_UP) my_init_locked(sc); else if (ifp->if_drv_flags & IFF_DRV_RUNNING) my_stop(sc); MY_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: MY_LOCK(sc); my_setmulti(sc); MY_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void my_watchdog(void *arg) { struct my_softc *sc; struct ifnet *ifp; sc = arg; MY_LOCK_ASSERT(sc); callout_reset(&sc->my_watchdog, hz, my_watchdog, sc); if (sc->my_timer == 0 || --sc->my_timer > 0) return; ifp = sc->my_ifp; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if_printf(ifp, "watchdog
timeout\n"); if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT)) if_printf(ifp, "no carrier - transceiver cable problem?\n"); my_stop(sc); my_reset(sc); my_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) my_start_locked(ifp); } /* * Stop the adapter and free any mbufs allocated to the RX and TX lists. */ static void my_stop(struct my_softc * sc) { register int i; struct ifnet *ifp; MY_LOCK_ASSERT(sc); ifp = sc->my_ifp; callout_stop(&sc->my_autoneg_timer); callout_stop(&sc->my_watchdog); MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE)); CSR_WRITE_4(sc, MY_IMR, 0x00000000); CSR_WRITE_4(sc, MY_TXLBA, 0x00000000); CSR_WRITE_4(sc, MY_RXLBA, 0x00000000); /* * Free data in the RX lists. */ for (i = 0; i < MY_RX_LIST_CNT; i++) { if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) { m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf); sc->my_cdata.my_rx_chain[i].my_mbuf = NULL; } } bzero((char *)&sc->my_ldata->my_rx_list, sizeof(sc->my_ldata->my_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < MY_TX_LIST_CNT; i++) { if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) { m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf); sc->my_cdata.my_tx_chain[i].my_mbuf = NULL; } } bzero((char *)&sc->my_ldata->my_tx_list, sizeof(sc->my_ldata->my_tx_list)); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); return; } /* * Stop all chip I/O so that the kernel's probe routines don't get confused * by errant DMAs when rebooting. */ static int my_shutdown(device_t dev) { struct my_softc *sc; sc = device_get_softc(dev); MY_LOCK(sc); my_stop(sc); MY_UNLOCK(sc); return 0; } Index: head/sys/dev/pcn/if_pcn.c =================================================================== --- head/sys/dev/pcn/if_pcn.c (revision 276749) +++ head/sys/dev/pcn/if_pcn.c (revision 276750) @@ -1,1519 +1,1518 @@ /*- * Copyright (c) 2000 Berkeley Software Design, Inc. * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * AMD Am79c972 fast ethernet PCI NIC driver. Datasheets are available * from http://www.amd.com. * * The AMD PCnet/PCI controllers are more advanced and functional * versions of the venerable 7990 LANCE. The PCnet/PCI chips retain * backwards compatibility with the LANCE and thus can be made * to work with older LANCE drivers. This is in fact how the * PCnet/PCI chips were supported in FreeBSD originally. The trouble * is that the PCnet/PCI devices offer several performance enhancements * which can't be exploited in LANCE compatibility mode. Chief among * these enhancements is the ability to perform PCI DMA operations * using 32-bit addressing (which eliminates the need for ISA * bounce-buffering), and special receive buffer alignment (which * allows the receive handler to pass packets to the upper protocol * layers without copying on both the x86 and alpha platforms). */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #define PCN_USEIOSPACE #include MODULE_DEPEND(pcn, pci, 1, 1, 1); MODULE_DEPEND(pcn, ether, 1, 1, 1); MODULE_DEPEND(pcn, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. */ static const struct pcn_type pcn_devs[] = { { PCN_VENDORID, PCN_DEVICEID_PCNET, "AMD PCnet/PCI 10/100BaseTX" }, { PCN_VENDORID, PCN_DEVICEID_HOME, "AMD PCnet/Home HomePNA" }, { 0, 0, NULL } }; static const struct pcn_chipid { u_int32_t id; const char *name; } pcn_chipid[] = { { Am79C971, "Am79C971" }, { Am79C972, "Am79C972" }, { Am79C973, "Am79C973" }, { Am79C978, "Am79C978" }, { Am79C975, "Am79C975" }, { Am79C976, "Am79C976" }, { 0, NULL }, }; static const char *pcn_chipid_name(u_int32_t); static u_int32_t pcn_chip_id(device_t); static const struct pcn_type *pcn_match(u_int16_t, u_int16_t); static u_int32_t pcn_csr_read(struct pcn_softc *, int); static u_int16_t pcn_csr_read16(struct pcn_softc *, int); static u_int16_t pcn_bcr_read16(struct pcn_softc *, int); static void pcn_csr_write(struct pcn_softc *, int, int); static u_int32_t pcn_bcr_read(struct pcn_softc *, int); static void pcn_bcr_write(struct pcn_softc *, int, int); static int pcn_probe(device_t); static int pcn_attach(device_t); static int pcn_detach(device_t); static int pcn_newbuf(struct pcn_softc *, int, struct mbuf *); static int pcn_encap(struct pcn_softc *, struct mbuf *, u_int32_t *); static void pcn_rxeof(struct pcn_softc *); static void pcn_txeof(struct pcn_softc *); static void pcn_intr(void *); static void pcn_tick(void *); static void pcn_start(struct ifnet *); static void pcn_start_locked(struct ifnet *); static int pcn_ioctl(struct ifnet *, u_long, caddr_t); static void pcn_init(void *); static void pcn_init_locked(struct pcn_softc *); static void pcn_stop(struct pcn_softc *); static void pcn_watchdog(struct pcn_softc *); static int pcn_shutdown(device_t); static int pcn_ifmedia_upd(struct ifnet *); static void pcn_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int pcn_miibus_readreg(device_t, int, int); static int pcn_miibus_writereg(device_t, int, int, int); static void pcn_miibus_statchg(device_t); static void pcn_setfilt(struct ifnet *); static void pcn_setmulti(struct pcn_softc *); static void pcn_reset(struct pcn_softc *); static int 
pcn_list_rx_init(struct pcn_softc *); static int pcn_list_tx_init(struct pcn_softc *); #ifdef PCN_USEIOSPACE #define PCN_RES SYS_RES_IOPORT #define PCN_RID PCN_PCI_LOIO #else #define PCN_RES SYS_RES_MEMORY #define PCN_RID PCN_PCI_LOMEM #endif static device_method_t pcn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pcn_probe), DEVMETHOD(device_attach, pcn_attach), DEVMETHOD(device_detach, pcn_detach), DEVMETHOD(device_shutdown, pcn_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, pcn_miibus_readreg), DEVMETHOD(miibus_writereg, pcn_miibus_writereg), DEVMETHOD(miibus_statchg, pcn_miibus_statchg), DEVMETHOD_END }; static driver_t pcn_driver = { "pcn", pcn_methods, sizeof(struct pcn_softc) }; static devclass_t pcn_devclass; DRIVER_MODULE(pcn, pci, pcn_driver, pcn_devclass, 0, 0); DRIVER_MODULE(miibus, pcn, miibus_driver, miibus_devclass, 0, 0); #define PCN_CSR_SETBIT(sc, reg, x) \ pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) | (x)) #define PCN_CSR_CLRBIT(sc, reg, x) \ pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) & ~(x)) #define PCN_BCR_SETBIT(sc, reg, x) \ pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) | (x)) #define PCN_BCR_CLRBIT(sc, reg, x) \ pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) & ~(x)) static u_int32_t pcn_csr_read(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); return(CSR_READ_4(sc, PCN_IO32_RDP)); } static u_int16_t pcn_csr_read16(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_2(sc, PCN_IO16_RAP, reg); return(CSR_READ_2(sc, PCN_IO16_RDP)); } static void pcn_csr_write(sc, reg, val) struct pcn_softc *sc; int reg; int val; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); CSR_WRITE_4(sc, PCN_IO32_RDP, val); return; } static u_int32_t pcn_bcr_read(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); return(CSR_READ_4(sc, PCN_IO32_BDP)); } static u_int16_t pcn_bcr_read16(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_2(sc, PCN_IO16_RAP, reg); return(CSR_READ_2(sc, PCN_IO16_BDP)); } static void pcn_bcr_write(sc, reg, val) struct pcn_softc *sc; int reg; int val; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); CSR_WRITE_4(sc, PCN_IO32_BDP, val); return; } static int pcn_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct pcn_softc *sc; int val; sc = device_get_softc(dev); /* * At least Am79C971 with DP83840A can wedge when isolating the * external PHY, so we can't allow multiple external PHYs. * There are cards that use Am79C971 with both the internal * and an external PHY though. * For internal PHYs it doesn't really matter whether we can * isolate the remaining internal and the external ones in * the PHY drivers as the internal PHYs have to be enabled * individually in PCN_BCR_PHYSEL, PCN_CSR_MODE, etc. * With Am79C97{3,5,8} we don't support switching between * the internal and external PHYs yet, so we can't allow * multiple PHYs with these either. * Am79C97{2,6} actually only support external PHYs (not * connectable internal ones respond at the usual addresses, * which does no harm if we let them show up on the bus) and * isolating them works.
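 * * In short (clarifying summary, not part of the original comment): at most one external PHY is allowed per chip; once pcn_extphyaddr is latched below, reads of any other candidate external PHY address return 0, so no second external instance ever shows up on the MII bus.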
*/ if (((sc->pcn_type == Am79C971 && phy != PCN_PHYAD_10BT) || sc->pcn_type == Am79C973 || sc->pcn_type == Am79C975 || sc->pcn_type == Am79C978) && sc->pcn_extphyaddr != -1 && phy != sc->pcn_extphyaddr) return(0); pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5)); val = pcn_bcr_read(sc, PCN_BCR_MIIDATA) & 0xFFFF; if (val == 0xFFFF) return(0); if (((sc->pcn_type == Am79C971 && phy != PCN_PHYAD_10BT) || sc->pcn_type == Am79C973 || sc->pcn_type == Am79C975 || sc->pcn_type == Am79C978) && sc->pcn_extphyaddr == -1) sc->pcn_extphyaddr = phy; return(val); } static int pcn_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct pcn_softc *sc; sc = device_get_softc(dev); pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5)); pcn_bcr_write(sc, PCN_BCR_MIIDATA, data); return(0); } static void pcn_miibus_statchg(dev) device_t dev; { struct pcn_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->pcn_miibus); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { PCN_BCR_SETBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN); } else { PCN_BCR_CLRBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN); } return; } static void pcn_setmulti(sc) struct pcn_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h, i; u_int16_t hashes[4] = { 0, 0, 0, 0 }; ifp = sc->pcn_ifp; PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { for (i = 0; i < 4; i++) pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0xFFFF); PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); return; } /* first, zot all the existing hash bits */ for (i = 0; i < 4; i++) pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0); /* now program new ones */ if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_le(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; hashes[h >> 4] |= 1 << (h & 0xF); } if_maddr_runlock(ifp); for (i = 0; i < 4; i++) pcn_csr_write(sc, PCN_CSR_MAR0 + i, hashes[i]); PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); return; } static void pcn_reset(sc) struct pcn_softc *sc; { /* * Issue a reset by reading from the RESET register. * Note that we don't know if the chip is operating in * 16-bit or 32-bit mode at this point, so we attempt * to reset the chip both ways. If one fails, the other * will succeed. */ CSR_READ_2(sc, PCN_IO16_RESET); CSR_READ_4(sc, PCN_IO32_RESET); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); /* Select 32-bit (DWIO) mode */ CSR_WRITE_4(sc, PCN_IO32_RDP, 0); /* Select software style 3. */ pcn_bcr_write(sc, PCN_BCR_SSTYLE, PCN_SWSTYLE_PCNETPCI_BURST); return; } static const char * pcn_chipid_name(u_int32_t id) { const struct pcn_chipid *p; p = pcn_chipid; while (p->name) { if (id == p->id) return (p->name); p++; } return ("Unknown"); } static u_int32_t pcn_chip_id(device_t dev) { struct pcn_softc *sc; u_int32_t chip_id; sc = device_get_softc(dev); /* * Note: we can *NOT* put the chip into * 32-bit mode yet. The le(4) driver will only * work in 16-bit mode, and once the chip * goes into 32-bit mode, the only way to * get it out again is with a hardware reset. * So if pcn_probe() is called before the * le(4) driver's probe routine, the chip will * be locked into 32-bit operation and the * le(4) driver will be unable to attach to it. 
* Note II: if the chip happens to already * be in 32-bit mode, we still need to check * the chip ID, but first we have to detect * 32-bit mode using only 16-bit operations. * The safest way to do this is to read the * PCI subsystem ID from BCR23/24 and compare * that with the value read from PCI config * space. */ chip_id = pcn_bcr_read16(sc, PCN_BCR_PCISUBSYSID); chip_id <<= 16; chip_id |= pcn_bcr_read16(sc, PCN_BCR_PCISUBVENID); /* * Note III: the test for 0x10001000 is a hack to * pacify VMware, whose pseudo-PCnet interface is * broken. Reading the subsystem register from PCI * config space yields 0x00000000 while reading the * same value from I/O space yields 0x10001000. It's * not supposed to be that way. */ if (chip_id == pci_read_config(dev, PCIR_SUBVEND_0, 4) || chip_id == 0x10001000) { /* We're in 16-bit mode. */ chip_id = pcn_csr_read16(sc, PCN_CSR_CHIPID1); chip_id <<= 16; chip_id |= pcn_csr_read16(sc, PCN_CSR_CHIPID0); } else { /* We're in 32-bit mode. */ chip_id = pcn_csr_read(sc, PCN_CSR_CHIPID1); chip_id <<= 16; chip_id |= pcn_csr_read(sc, PCN_CSR_CHIPID0); } return (chip_id); } static const struct pcn_type * pcn_match(u_int16_t vid, u_int16_t did) { const struct pcn_type *t; t = pcn_devs; while (t->pcn_name != NULL) { if ((vid == t->pcn_vid) && (did == t->pcn_did)) return (t); t++; } return (NULL); } /* * Probe for an AMD chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int pcn_probe(dev) device_t dev; { const struct pcn_type *t; struct pcn_softc *sc; int rid; u_int32_t chip_id; t = pcn_match(pci_get_vendor(dev), pci_get_device(dev)); if (t == NULL) return (ENXIO); sc = device_get_softc(dev); /* * Temporarily map the I/O space so we can read the chip ID register. */ rid = PCN_RID; sc->pcn_res = bus_alloc_resource_any(dev, PCN_RES, &rid, RF_ACTIVE); if (sc->pcn_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); return(ENXIO); } sc->pcn_btag = rman_get_bustag(sc->pcn_res); sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res); chip_id = pcn_chip_id(dev); bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res); switch((chip_id >> 12) & PART_MASK) { case Am79C971: case Am79C972: case Am79C973: case Am79C975: case Am79C976: case Am79C978: break; default: return(ENXIO); } device_set_desc(dev, t->pcn_name); return(BUS_PROBE_DEFAULT); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int pcn_attach(dev) device_t dev; { u_int32_t eaddr[2]; struct pcn_softc *sc; struct mii_data *mii; struct mii_softc *miisc; struct ifnet *ifp; int error = 0, rid; sc = device_get_softc(dev); /* Initialize our mutex. */ mtx_init(&sc->pcn_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); /* * Map control/status registers. */ pci_enable_busmaster(dev); /* Retrieve the chip ID */ sc->pcn_type = (pcn_chip_id(dev) >> 12) & PART_MASK; device_printf(dev, "Chip ID %04x (%s)\n", sc->pcn_type, pcn_chipid_name(sc->pcn_type)); rid = PCN_RID; sc->pcn_res = bus_alloc_resource_any(dev, PCN_RES, &rid, RF_ACTIVE); if (sc->pcn_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->pcn_btag = rman_get_bustag(sc->pcn_res); sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res); /* Allocate interrupt */ rid = 0; sc->pcn_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->pcn_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Reset the adapter.
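 * (pcn_reset() copes with the chip being in either 16-bit or 32-bit mode at this point by reading both RESET registers and then forcing 32-bit DWIO mode; see the comment in pcn_reset() above.)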
*/ pcn_reset(sc); /* * Get station address from the EEPROM. */ eaddr[0] = CSR_READ_4(sc, PCN_IO32_APROM00); eaddr[1] = CSR_READ_4(sc, PCN_IO32_APROM01); callout_init_mtx(&sc->pcn_stat_callout, &sc->pcn_mtx, 0); sc->pcn_ldata = contigmalloc(sizeof(struct pcn_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->pcn_ldata == NULL) { device_printf(dev, "no memory for list buffers!\n"); error = ENXIO; goto fail; } bzero(sc->pcn_ldata, sizeof(struct pcn_list_data)); ifp = sc->pcn_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = pcn_ioctl; ifp->if_start = pcn_start; ifp->if_init = pcn_init; ifp->if_snd.ifq_maxlen = PCN_TX_LIST_CNT - 1; /* * Do MII setup. * See the comment in pcn_miibus_readreg() for why we can't * universally pass MIIF_NOISOLATE here. */ sc->pcn_extphyaddr = -1; error = mii_attach(dev, &sc->pcn_miibus, ifp, pcn_ifmedia_upd, pcn_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } /* * Record the media instances of internal PHYs, which map the * built-in interfaces to the MII, so we can set the active * PHY/port based on the currently selected media. */ sc->pcn_inst_10bt = -1; mii = device_get_softc(sc->pcn_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { switch (miisc->mii_phy) { case PCN_PHYAD_10BT: sc->pcn_inst_10bt = miisc->mii_inst; break; /* * XXX deal with the Am79C97{3,5} internal 100baseT * and the Am79C978 internal HomePNA PHYs. */ } } /* * Call MI attach routine. */ ether_ifattach(ifp, (u_int8_t *) eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->pcn_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, pcn_intr, sc, &sc->pcn_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) pcn_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int pcn_detach(dev) device_t dev; { struct pcn_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->pcn_ifp; KASSERT(mtx_initialized(&sc->pcn_mtx), ("pcn mutex not initialized")); /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { PCN_LOCK(sc); pcn_reset(sc); pcn_stop(sc); PCN_UNLOCK(sc); callout_drain(&sc->pcn_stat_callout); ether_ifdetach(ifp); } if (sc->pcn_miibus) device_delete_child(dev, sc->pcn_miibus); bus_generic_detach(dev); if (sc->pcn_intrhand) bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand); if (sc->pcn_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq); if (sc->pcn_res) bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res); if (ifp) if_free(ifp); if (sc->pcn_ldata) { contigfree(sc->pcn_ldata, sizeof(struct pcn_list_data), M_DEVBUF); } mtx_destroy(&sc->pcn_mtx); return(0); } /* * Initialize the transmit descriptors. 
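 * All descriptors start out owned by the host with no mbuf attached, and the producer/consumer/count indices are reset to zero, so the ring is empty until pcn_encap() fills it.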
*/ static int pcn_list_tx_init(sc) struct pcn_softc *sc; { struct pcn_list_data *ld; struct pcn_ring_data *cd; int i; cd = &sc->pcn_cdata; ld = sc->pcn_ldata; for (i = 0; i < PCN_TX_LIST_CNT; i++) { cd->pcn_tx_chain[i] = NULL; ld->pcn_tx_list[i].pcn_tbaddr = 0; ld->pcn_tx_list[i].pcn_txctl = 0; ld->pcn_tx_list[i].pcn_txstat = 0; } cd->pcn_tx_prod = cd->pcn_tx_cons = cd->pcn_tx_cnt = 0; return(0); } /* * Initialize the RX descriptors and allocate mbufs for them. */ static int pcn_list_rx_init(sc) struct pcn_softc *sc; { struct pcn_ring_data *cd; int i; cd = &sc->pcn_cdata; for (i = 0; i < PCN_RX_LIST_CNT; i++) { if (pcn_newbuf(sc, i, NULL) == ENOBUFS) return(ENOBUFS); } cd->pcn_rx_prod = 0; return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int pcn_newbuf(sc, idx, m) struct pcn_softc *sc; int idx; struct mbuf *m; { struct mbuf *m_new = NULL; struct pcn_rx_desc *c; c = &sc->pcn_ldata->pcn_rx_list[idx]; if (m == NULL) { MGETHDR(m_new, M_NOWAIT, MT_DATA); if (m_new == NULL) return(ENOBUFS); - MCLGET(m_new, M_NOWAIT); - if (!(m_new->m_flags & M_EXT)) { + if (!(MCLGET(m_new, M_NOWAIT))) { m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, ETHER_ALIGN); sc->pcn_cdata.pcn_rx_chain[idx] = m_new; c->pcn_rbaddr = vtophys(mtod(m_new, caddr_t)); c->pcn_bufsz = (~(PCN_RXLEN) + 1) & PCN_RXLEN_BUFSZ; c->pcn_bufsz |= PCN_RXLEN_MBO; c->pcn_rxstat = PCN_RXSTAT_STP|PCN_RXSTAT_ENP|PCN_RXSTAT_OWN; return(0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ static void pcn_rxeof(sc) struct pcn_softc *sc; { struct mbuf *m; struct ifnet *ifp; struct pcn_rx_desc *cur_rx; int i; PCN_LOCK_ASSERT(sc); ifp = sc->pcn_ifp; i = sc->pcn_cdata.pcn_rx_prod; while(PCN_OWN_RXDESC(&sc->pcn_ldata->pcn_rx_list[i])) { cur_rx = &sc->pcn_ldata->pcn_rx_list[i]; m = sc->pcn_cdata.pcn_rx_chain[i]; sc->pcn_cdata.pcn_rx_chain[i] = NULL; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (cur_rx->pcn_rxstat & PCN_RXSTAT_ERR) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); pcn_newbuf(sc, i, m); PCN_INC(i, PCN_RX_LIST_CNT); continue; } if (pcn_newbuf(sc, i, NULL)) { /* Ran out of mbufs; recycle this one. */ pcn_newbuf(sc, i, m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); PCN_INC(i, PCN_RX_LIST_CNT); continue; } PCN_INC(i, PCN_RX_LIST_CNT); /* No errors; receive the packet. */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_len = m->m_pkthdr.len = cur_rx->pcn_rxlen - ETHER_CRC_LEN; m->m_pkthdr.rcvif = ifp; PCN_UNLOCK(sc); (*ifp->if_input)(ifp, m); PCN_LOCK(sc); } sc->pcn_cdata.pcn_rx_prod = i; return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void pcn_txeof(sc) struct pcn_softc *sc; { struct pcn_tx_desc *cur_tx = NULL; struct ifnet *ifp; u_int32_t idx; ifp = sc->pcn_ifp; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. 
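 * The walk below stops at the first descriptor the chip still owns; for multi-fragment frames only the final (ENP) descriptor carries the mbuf chain pointer, so earlier fragments are just accounted for and skipped.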
*/ idx = sc->pcn_cdata.pcn_tx_cons; while (idx != sc->pcn_cdata.pcn_tx_prod) { cur_tx = &sc->pcn_ldata->pcn_tx_list[idx]; if (!PCN_OWN_TXDESC(cur_tx)) break; if (!(cur_tx->pcn_txctl & PCN_TXCTL_ENP)) { sc->pcn_cdata.pcn_tx_cnt--; PCN_INC(idx, PCN_TX_LIST_CNT); continue; } if (cur_tx->pcn_txctl & PCN_TXCTL_ERR) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if (cur_tx->pcn_txstat & PCN_TXSTAT_EXDEF) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); if (cur_tx->pcn_txstat & PCN_TXSTAT_RTRY) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); } if_inc_counter(ifp, IFCOUNTER_COLLISIONS, cur_tx->pcn_txstat & PCN_TXSTAT_TRC); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if (sc->pcn_cdata.pcn_tx_chain[idx] != NULL) { m_freem(sc->pcn_cdata.pcn_tx_chain[idx]); sc->pcn_cdata.pcn_tx_chain[idx] = NULL; } sc->pcn_cdata.pcn_tx_cnt--; PCN_INC(idx, PCN_TX_LIST_CNT); } if (idx != sc->pcn_cdata.pcn_tx_cons) { /* Some buffers have been freed. */ sc->pcn_cdata.pcn_tx_cons = idx; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } sc->pcn_timer = (sc->pcn_cdata.pcn_tx_cnt == 0) ? 0 : 5; return; } static void pcn_tick(xsc) void *xsc; { struct pcn_softc *sc; struct mii_data *mii; struct ifnet *ifp; sc = xsc; ifp = sc->pcn_ifp; PCN_LOCK_ASSERT(sc); mii = device_get_softc(sc->pcn_miibus); mii_tick(mii); /* link just died */ if (sc->pcn_link && !(mii->mii_media_status & IFM_ACTIVE)) sc->pcn_link = 0; /* link just came up, restart */ if (!sc->pcn_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->pcn_link++; if (ifp->if_snd.ifq_head != NULL) pcn_start_locked(ifp); } if (sc->pcn_timer > 0 && --sc->pcn_timer == 0) pcn_watchdog(sc); callout_reset(&sc->pcn_stat_callout, hz, pcn_tick, sc); return; } static void pcn_intr(arg) void *arg; { struct pcn_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; ifp = sc->pcn_ifp; PCN_LOCK(sc); /* Suppress unwanted interrupts */ if (!(ifp->if_flags & IFF_UP)) { pcn_stop(sc); PCN_UNLOCK(sc); return; } CSR_WRITE_4(sc, PCN_IO32_RAP, PCN_CSR_CSR); while ((status = CSR_READ_4(sc, PCN_IO32_RDP)) & PCN_CSR_INTR) { CSR_WRITE_4(sc, PCN_IO32_RDP, status); if (status & PCN_CSR_RINT) pcn_rxeof(sc); if (status & PCN_CSR_TINT) pcn_txeof(sc); if (status & PCN_CSR_ERR) { pcn_init_locked(sc); break; } } if (ifp->if_snd.ifq_head != NULL) pcn_start_locked(ifp); PCN_UNLOCK(sc); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int pcn_encap(sc, m_head, txidx) struct pcn_softc *sc; struct mbuf *m_head; u_int32_t *txidx; { struct pcn_tx_desc *f = NULL; struct mbuf *m; int frag, cur, cnt = 0; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. 
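 * * Ownership handoff (clarifying note, not in the original comment): every fragment except the first is handed to the chip (PCN_TXCTL_OWN) as it is filled, while the first fragment gets PCN_TXCTL_STP but not OWN. Only once the whole chain is in place does the code below publish the frame with * * sc->pcn_ldata->pcn_tx_list[*txidx].pcn_txctl |= PCN_TXCTL_OWN; * * so the chip can never start DMA on a half-built frame.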
*/ m = m_head; cur = frag = *txidx; for (m = m_head; m != NULL; m = m->m_next) { if (m->m_len == 0) continue; if ((PCN_TX_LIST_CNT - (sc->pcn_cdata.pcn_tx_cnt + cnt)) < 2) return(ENOBUFS); f = &sc->pcn_ldata->pcn_tx_list[frag]; f->pcn_txctl = (~(m->m_len) + 1) & PCN_TXCTL_BUFSZ; f->pcn_txctl |= PCN_TXCTL_MBO; f->pcn_tbaddr = vtophys(mtod(m, vm_offset_t)); if (cnt == 0) f->pcn_txctl |= PCN_TXCTL_STP; else f->pcn_txctl |= PCN_TXCTL_OWN; cur = frag; PCN_INC(frag, PCN_TX_LIST_CNT); cnt++; } if (m != NULL) return(ENOBUFS); sc->pcn_cdata.pcn_tx_chain[cur] = m_head; sc->pcn_ldata->pcn_tx_list[cur].pcn_txctl |= PCN_TXCTL_ENP|PCN_TXCTL_ADD_FCS|PCN_TXCTL_MORE_LTINT; sc->pcn_ldata->pcn_tx_list[*txidx].pcn_txctl |= PCN_TXCTL_OWN; sc->pcn_cdata.pcn_tx_cnt += cnt; *txidx = frag; return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void pcn_start(ifp) struct ifnet *ifp; { struct pcn_softc *sc; sc = ifp->if_softc; PCN_LOCK(sc); pcn_start_locked(ifp); PCN_UNLOCK(sc); } static void pcn_start_locked(ifp) struct ifnet *ifp; { struct pcn_softc *sc; struct mbuf *m_head = NULL; u_int32_t idx; sc = ifp->if_softc; PCN_LOCK_ASSERT(sc); if (!sc->pcn_link) return; idx = sc->pcn_cdata.pcn_tx_prod; if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; while(sc->pcn_cdata.pcn_tx_chain[idx] == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (pcn_encap(sc, m_head, &idx)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); } /* Transmit */ sc->pcn_cdata.pcn_tx_prod = idx; pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN); /* * Set a timeout in case the chip goes out to lunch. */ sc->pcn_timer = 5; return; } static void pcn_setfilt(ifp) struct ifnet *ifp; { struct pcn_softc *sc; sc = ifp->if_softc; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC); } else { PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC); } /* Set the capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD); } else { PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD); } return; } static void pcn_init(xsc) void *xsc; { struct pcn_softc *sc = xsc; PCN_LOCK(sc); pcn_init_locked(sc); PCN_UNLOCK(sc); } static void pcn_init_locked(sc) struct pcn_softc *sc; { struct ifnet *ifp = sc->pcn_ifp; struct mii_data *mii = NULL; struct ifmedia_entry *ife; PCN_LOCK_ASSERT(sc); /* * Cancel pending I/O and free all RX/TX buffers. */ pcn_stop(sc); pcn_reset(sc); mii = device_get_softc(sc->pcn_miibus); ife = mii->mii_media.ifm_cur; /* Set MAC address */ pcn_csr_write(sc, PCN_CSR_PAR0, ((u_int16_t *)IF_LLADDR(sc->pcn_ifp))[0]); pcn_csr_write(sc, PCN_CSR_PAR1, ((u_int16_t *)IF_LLADDR(sc->pcn_ifp))[1]); pcn_csr_write(sc, PCN_CSR_PAR2, ((u_int16_t *)IF_LLADDR(sc->pcn_ifp))[2]); /* Init circular RX list. */ if (pcn_list_rx_init(sc) == ENOBUFS) { if_printf(ifp, "initialization failed: no " "memory for rx buffers\n"); pcn_stop(sc); return; } /* * Init tx descriptors. */ pcn_list_tx_init(sc); /* Clear PCN_MISC_ASEL so we can set the port via PCN_CSR_MODE. 
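 * (With auto-select left enabled the chip would pick the port on its own and the PCN_PORT_* selection written below would not take effect.)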
*/ PCN_BCR_CLRBIT(sc, PCN_BCR_MISCCFG, PCN_MISC_ASEL); /* * Set up the port based on the currently selected media. * For Am79C978 we have to unconditionally set PCN_PORT_MII and * set the PHY in PCN_BCR_PHYSEL instead. */ if (sc->pcn_type != Am79C978 && IFM_INST(ife->ifm_media) == sc->pcn_inst_10bt) pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_10BASET); else pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_MII); /* Set up RX filter. */ pcn_setfilt(ifp); /* * Load the multicast filter. */ pcn_setmulti(sc); /* * Load the addresses of the RX and TX lists. */ pcn_csr_write(sc, PCN_CSR_RXADDR0, vtophys(&sc->pcn_ldata->pcn_rx_list[0]) & 0xFFFF); pcn_csr_write(sc, PCN_CSR_RXADDR1, (vtophys(&sc->pcn_ldata->pcn_rx_list[0]) >> 16) & 0xFFFF); pcn_csr_write(sc, PCN_CSR_TXADDR0, vtophys(&sc->pcn_ldata->pcn_tx_list[0]) & 0xFFFF); pcn_csr_write(sc, PCN_CSR_TXADDR1, (vtophys(&sc->pcn_ldata->pcn_tx_list[0]) >> 16) & 0xFFFF); /* Set the RX and TX ring sizes. */ pcn_csr_write(sc, PCN_CSR_RXRINGLEN, (~PCN_RX_LIST_CNT) + 1); pcn_csr_write(sc, PCN_CSR_TXRINGLEN, (~PCN_TX_LIST_CNT) + 1); /* We're not using the initialization block. */ pcn_csr_write(sc, PCN_CSR_IAB1, 0); /* Enable fast suspend mode. */ PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL2, PCN_EXTCTL2_FASTSPNDE); /* * Enable burst read and write. Also set the no underflow * bit. This will avoid transmit underruns in certain * conditions while still providing decent performance. */ PCN_BCR_SETBIT(sc, PCN_BCR_BUSCTL, PCN_BUSCTL_NOUFLOW| PCN_BUSCTL_BREAD|PCN_BUSCTL_BWRITE); /* Enable graceful recovery from underflow. */ PCN_CSR_SETBIT(sc, PCN_CSR_IMR, PCN_IMR_DXSUFLO); /* Enable auto-padding of short TX frames. */ PCN_CSR_SETBIT(sc, PCN_CSR_TFEAT, PCN_TFEAT_PAD_TX); /* Disable MII autoneg (we handle this ourselves). */ PCN_BCR_SETBIT(sc, PCN_BCR_MIICTL, PCN_MIICTL_DANAS); if (sc->pcn_type == Am79C978) /* XXX support other PHYs? */ pcn_bcr_write(sc, PCN_BCR_PHYSEL, PCN_PHYSEL_PCNET|PCN_PHY_HOMEPNA); /* Enable interrupts and start the controller running. */ pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START); mii_mediachg(mii); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->pcn_stat_callout, hz, pcn_tick, sc); return; } /* * Set media options. */ static int pcn_ifmedia_upd(ifp) struct ifnet *ifp; { struct pcn_softc *sc; sc = ifp->if_softc; PCN_LOCK(sc); /* * At least Am79C971 with DP83840A can wedge when switching * from the internal 10baseT PHY to the external PHY without * issuing pcn_reset(). For setting the port in PCN_CSR_MODE * the PCnet chip has to be powered down or stopped anyway, * and although documented otherwise it doesn't take effect * until the next initialization. */ sc->pcn_link = 0; pcn_stop(sc); pcn_reset(sc); pcn_init_locked(sc); if (ifp->if_snd.ifq_head != NULL) pcn_start_locked(ifp); PCN_UNLOCK(sc); return(0); } /* * Report current media status.
*/ static void pcn_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct pcn_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->pcn_miibus); PCN_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; PCN_UNLOCK(sc); return; } static int pcn_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct pcn_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii = NULL; int error = 0; switch(command) { case SIOCSIFFLAGS: PCN_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->pcn_if_flags & IFF_PROMISC)) { PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); pcn_setfilt(ifp); PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->pcn_if_flags & IFF_PROMISC) { PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); pcn_setfilt(ifp); PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START); } else if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) pcn_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) pcn_stop(sc); } sc->pcn_if_flags = ifp->if_flags; PCN_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: PCN_LOCK(sc); pcn_setmulti(sc); PCN_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->pcn_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); break; } return(error); } static void pcn_watchdog(struct pcn_softc *sc) { struct ifnet *ifp; PCN_LOCK_ASSERT(sc); ifp = sc->pcn_ifp; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if_printf(ifp, "watchdog timeout\n"); pcn_stop(sc); pcn_reset(sc); pcn_init_locked(sc); if (ifp->if_snd.ifq_head != NULL) pcn_start_locked(ifp); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void pcn_stop(struct pcn_softc *sc) { register int i; struct ifnet *ifp; PCN_LOCK_ASSERT(sc); ifp = sc->pcn_ifp; sc->pcn_timer = 0; callout_stop(&sc->pcn_stat_callout); /* Turn off interrupts */ PCN_CSR_CLRBIT(sc, PCN_CSR_CSR, PCN_CSR_INTEN); /* Stop adapter */ PCN_CSR_SETBIT(sc, PCN_CSR_CSR, PCN_CSR_STOP); sc->pcn_link = 0; /* * Free data in the RX lists. */ for (i = 0; i < PCN_RX_LIST_CNT; i++) { if (sc->pcn_cdata.pcn_rx_chain[i] != NULL) { m_freem(sc->pcn_cdata.pcn_rx_chain[i]); sc->pcn_cdata.pcn_rx_chain[i] = NULL; } } bzero((char *)&sc->pcn_ldata->pcn_rx_list, sizeof(sc->pcn_ldata->pcn_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < PCN_TX_LIST_CNT; i++) { if (sc->pcn_cdata.pcn_tx_chain[i] != NULL) { m_freem(sc->pcn_cdata.pcn_tx_chain[i]); sc->pcn_cdata.pcn_tx_chain[i] = NULL; } } bzero((char *)&sc->pcn_ldata->pcn_tx_list, sizeof(sc->pcn_ldata->pcn_tx_list)); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ static int pcn_shutdown(device_t dev) { struct pcn_softc *sc; sc = device_get_softc(dev); PCN_LOCK(sc); pcn_reset(sc); pcn_stop(sc); PCN_UNLOCK(sc); return 0; } Index: head/sys/dev/pdq/pdq_freebsd.h =================================================================== --- head/sys/dev/pdq/pdq_freebsd.h (revision 276749) +++ head/sys/dev/pdq/pdq_freebsd.h (revision 276750) @@ -1,274 +1,273 @@ /* $NetBSD: pdqvar.h,v 1.27 2000/05/03 19:17:54 thorpej Exp $ */ /*- * Copyright (c) 1995, 1996 Matt Thomas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Id: pdqvar.h,v 1.21 1997/03/21 21:16:04 thomas Exp * $FreeBSD$ * */ /* * DEC PDQ FDDI Controller; PDQ O/S dependent definitions * * Written by Matt Thomas * */ #if defined(PDQ_HWSUPPORT) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #endif /* PDQ_HWSUPPORT */ typedef struct _pdq_t pdq_t; typedef struct _pdq_csrs_t pdq_csrs_t; typedef struct _pdq_pci_csrs_t pdq_pci_csrs_t; typedef struct _pdq_lanaddr_t pdq_lanaddr_t; typedef unsigned int pdq_uint32_t; typedef unsigned short pdq_uint16_t; typedef unsigned char pdq_uint8_t; typedef enum _pdq_boolean_t pdq_boolean_t; typedef enum _pdq_type_t pdq_type_t; typedef enum _pdq_state_t pdq_state_t; typedef struct mbuf PDQ_OS_DATABUF_T; typedef bus_space_tag_t pdq_bus_t; typedef bus_space_handle_t pdq_bus_memaddr_t; typedef pdq_bus_memaddr_t pdq_bus_memoffset_t; extern devclass_t pdq_devclass; enum _pdq_type_t { PDQ_DEFPA, /* PCI-bus */ PDQ_DEFEA, /* EISA-bus */ PDQ_DEFTA, /* TurboChannel */ PDQ_DEFAA, /* FutureBus+ */ PDQ_DEFQA /* Q-bus */ }; #define sc_ifmedia ifmedia #if 0 /* ALTQ */ #define IFQ_DEQUEUE IF_DEQUEUE #define IFQ_IS_EMPTY(q) ((q)->ifq_len == 0) #endif typedef struct _pdq_os_ctx_t { struct ifnet *ifp; struct ifmedia ifmedia; device_t dev; int debug; pdq_t * sc_pdq; int sc_flags; #define PDQIF_DOWNCALL 0x0001 /* active calling from if to pdq */ struct resource * io; int io_rid; int io_type; bus_space_handle_t io_bsh; bus_space_tag_t io_bst; struct resource * mem; int mem_rid; int mem_type; bus_space_handle_t mem_bsh; bus_space_tag_t mem_bst; struct resource * irq; int irq_rid; void * irq_ih; struct mtx mtx; struct callout watchdog; int timer; } pdq_softc_t; #define PDQ_LOCK(_sc) mtx_lock(&(_sc)->mtx) 
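/* * Locking sketch (editor's illustration, not part of the original header): the per-softc mutex taken by these macros serializes all chip and ring access, e.g. PDQ_LOCK(sc); pdq_run(sc->sc_pdq); PDQ_UNLOCK(sc); while PDQ_LOCK_ASSERT() documents functions that require the caller to hold the mutex already. */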
#define PDQ_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define PDQ_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED) #define PDQ_OS_HDR_OFFSET PDQ_RX_FC_OFFSET #define PDQ_OS_PAGESIZE PAGE_SIZE #define PDQ_OS_TX_TRANSMIT 5 #define PDQ_OS_IORD_32(bt, bh, off) bus_space_read_4(bt, bh, off) #define PDQ_OS_IOWR_32(bt, bh, off, data) bus_space_write_4(bt, bh, off, data) #define PDQ_OS_IORD_8(bt, bh, off) bus_space_read_1(bt, bh, off) #define PDQ_OS_IOWR_8(bt, bh, off, data) bus_space_write_1(bt, bh, off, data) #define PDQ_CSR_OFFSET(base, offset) (0 + (offset)*sizeof(pdq_uint32_t)) #define PDQ_CSR_WRITE(csr, name, data) PDQ_OS_IOWR_32((csr)->csr_bus, (csr)->csr_base, (csr)->name, data) #define PDQ_CSR_READ(csr, name) PDQ_OS_IORD_32((csr)->csr_bus, (csr)->csr_base, (csr)->name) #define PDQ_OS_DATABUF_FREE(pdq, b) (m_freem(b)) #if defined(PDQ_OSSUPPORT) #define PDQ_OS_TX_TIMEOUT 5 /* seconds */ #define PDQ_OS_IFP_TO_SOFTC(ifp) ((pdq_softc_t *) (ifp)->if_softc) #define PDQ_BPF_MTAP(sc, m) BPF_MTAP((sc)->ifp, m) #define PDQ_IFNET(sc) ((sc)->ifp) #endif /* PDQ_OSSUPPORT */ #if defined(PDQ_HWSUPPORT) #define PDQ_OS_PREFIX "%s: " #define PDQ_OS_PREFIX_ARGS pdq->pdq_os_name #define PDQ_OS_PTR_FMT "%p" #define PDQ_OS_CSR_FMT "0x%x" #define PDQ_OS_USEC_DELAY(n) DELAY(n) #define PDQ_OS_VA_TO_BUSPA(pdq, p) vtophys(p) #define PDQ_OS_MEMALLOC(n) malloc(n, M_DEVBUF, M_NOWAIT) #define PDQ_OS_MEMFREE(p, n) free((void *) p, M_DEVBUF) #define PDQ_OS_MEMZERO(p, n) bzero((caddr_t)(p), (n)) #define PDQ_OS_MEMALLOC_CONTIG(n) contigmalloc(n, M_DEVBUF, M_NOWAIT, 0x800000, ~0, PAGE_SIZE, 0) #define PDQ_OS_MEMFREE_CONTIG(p, n) contigfree(p, n, M_DEVBUF) #define PDQ_OS_DATABUF_SIZE (MCLBYTES) #define PDQ_OS_DATABUF_NEXT(b) ((b)->m_next) #define PDQ_OS_DATABUF_NEXT_SET(b, b1) ((b)->m_next = (b1)) #define PDQ_OS_DATABUF_NEXTPKT(b) ((b)->m_nextpkt) #define PDQ_OS_DATABUF_NEXTPKT_SET(b, b1) ((b)->m_nextpkt = (b1)) #define PDQ_OS_DATABUF_LEN(b) ((b)->m_len) #define PDQ_OS_DATABUF_LEN_SET(b, n) ((b)->m_len = (n)) /* #define PDQ_OS_DATABUF_LEN_ADJ(b, n) ((b)->m_len += (n)) */ #define PDQ_OS_DATABUF_PTR(b) (mtod((b), pdq_uint8_t *)) #define PDQ_OS_DATABUF_ADJ(b, n) ((b)->m_data += (n), (b)->m_len -= (n)) #define PDQ_OS_DATABUF_ALLOC(pdq, b) do { \ PDQ_OS_DATABUF_T *x_m0; \ MGETHDR(x_m0, M_NOWAIT, MT_DATA); \ if (x_m0 != NULL) { \ - MCLGET(x_m0, M_NOWAIT); \ - if ((x_m0->m_flags & M_EXT) == 0) { \ + if (!(MCLGET(x_m0, M_NOWAIT))) { \ m_free(x_m0); \ (b) = NULL; \ } else { \ (b) = x_m0; \ x_m0->m_len = PDQ_OS_DATABUF_SIZE; \ } \ } else { \ (b) = NULL; \ } \ } while (0) #define PDQ_OS_DATABUF_RESET(b) ((b)->m_data = (b)->m_ext.ext_buf, (b)->m_len = MCLBYTES) #define PDQ_OS_DATABUF_ENQUEUE(q, b) do { \ PDQ_OS_DATABUF_NEXTPKT_SET(b, NULL); \ if ((q)->q_tail == NULL) \ (q)->q_head = (b); \ else \ PDQ_OS_DATABUF_NEXTPKT_SET(((PDQ_OS_DATABUF_T *)(q)->q_tail), b); \ (q)->q_tail = (b); \ } while (0) #define PDQ_OS_DATABUF_DEQUEUE(q, b) do { \ if (((b) = (PDQ_OS_DATABUF_T *) (q)->q_head) != NULL) { \ if (((q)->q_head = PDQ_OS_DATABUF_NEXTPKT(b)) == NULL) \ (q)->q_tail = NULL; \ PDQ_OS_DATABUF_NEXTPKT_SET(b, NULL); \ } \ } while (0) #define PDQ_OS_DATABUF_BUSPA(pdq, b) PDQ_OS_VA_TO_BUSPA(pdq, PDQ_OS_DATABUF_PTR(b)) #define PDQ_OS_CONSUMER_PRESYNC(pdq) do { } while(0) #define PDQ_OS_CONSUMER_POSTSYNC(pdq) do { } while(0) #define PDQ_OS_DESC_PRESYNC(pdq, d, s) do { } while(0) #define PDQ_OS_DESC_POSTSYNC(pdq, d, s) do { } while(0) #define PDQ_OS_CMDRQST_PRESYNC(pdq, s) do { } while(0) #define PDQ_OS_CMDRQST_POSTSYNC(pdq, s) do { } while(0) #define 
PDQ_OS_CMDRSP_PRESYNC(pdq, s) do { } while(0) #define PDQ_OS_CMDRSP_POSTSYNC(pdq, s) do { } while(0) #define PDQ_OS_RXPDU_PRESYNC(pdq, b, o, l) do { } while(0) #define PDQ_OS_RXPDU_POSTSYNC(pdq, b, o, l) do { } while(0) #define PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, e) do { } while(0) #define PDQ_OS_UNSOL_EVENT_POSTSYNC(pdq, e) do { } while(0) #endif /* PDQ_HWSUPPORT */ /* * OS dependent functions provided by pdq_ifsubr.c to pdq.c */ void pdq_os_addr_fill (pdq_t *pdq, pdq_lanaddr_t *addrs, size_t numaddrs); void pdq_os_receive_pdu (pdq_t *, PDQ_OS_DATABUF_T *, size_t, int); void pdq_os_restart_transmitter (pdq_t *pdq); void pdq_os_transmit_done (pdq_t *, PDQ_OS_DATABUF_T *); void pdq_os_update_status (pdq_t *, const void *); /* * Driver interface functions provided by pdq.c to pdq_ifsubr.c */ pdq_boolean_t pdq_queue_transmit_data (pdq_t *pdq, PDQ_OS_DATABUF_T *pdu); void pdq_run (pdq_t *pdq); pdq_state_t pdq_stop (pdq_t *pdq); /* * OS dependent functions provided by * pdq_ifsubr.c or pdq.c to the bus front ends */ int pdq_ifattach (pdq_softc_t *, const pdq_uint8_t *, pdq_type_t type); void pdq_ifdetach (pdq_softc_t *); void pdq_free (device_t); int pdq_interrupt (pdq_t *pdq); void pdq_hwreset (pdq_t *pdq); pdq_t * pdq_initialize (pdq_bus_t bus, pdq_bus_memaddr_t csr_va, const char *name, int unit, void *ctx, pdq_type_t type); /* * Misc prototypes. */ void pdq_flush_transmitter(pdq_t *pdq); Index: head/sys/dev/pdq/pdq_ifsubr.c =================================================================== --- head/sys/dev/pdq/pdq_ifsubr.c (revision 276749) +++ head/sys/dev/pdq/pdq_ifsubr.c (revision 276750) @@ -1,776 +1,775 @@ /* $NetBSD: pdq_ifsubr.c,v 1.38 2001/12/21 23:21:47 matt Exp $ */ /*- * Copyright (c) 1995, 1996 Matt Thomas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: pdq_ifsubr.c,v 1.12 1997/06/05 01:56:35 thomas Exp$ */ #include __FBSDID("$FreeBSD$"); /* * DEC PDQ FDDI Controller; code for BSD-derived operating systems * * This module provides bus-independent, BSD-specific O/S functions. * (ie.
it provides an ifnet interface to the rest of the system) */ #define PDQ_OSSUPPORT #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include devclass_t pdq_devclass; static void pdq_watchdog(void *); static void pdq_ifstop(pdq_softc_t *sc) { PDQ_IFNET(sc)->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->sc_pdq->pdq_flags &= ~PDQ_RUNNING; pdq_stop(sc->sc_pdq); callout_stop(&sc->watchdog); } static void pdq_ifinit_locked(pdq_softc_t *sc) { PDQ_LOCK_ASSERT(sc); if (PDQ_IFNET(sc)->if_flags & IFF_UP) { PDQ_IFNET(sc)->if_drv_flags |= IFF_DRV_RUNNING; if (PDQ_IFNET(sc)->if_flags & IFF_PROMISC) { sc->sc_pdq->pdq_flags |= PDQ_PROMISC; } else { sc->sc_pdq->pdq_flags &= ~PDQ_PROMISC; } if (PDQ_IFNET(sc)->if_flags & IFF_LINK1) { sc->sc_pdq->pdq_flags |= PDQ_PASS_SMT; } else { sc->sc_pdq->pdq_flags &= ~PDQ_PASS_SMT; } sc->sc_pdq->pdq_flags |= PDQ_RUNNING; pdq_run(sc->sc_pdq); callout_reset(&sc->watchdog, hz, pdq_watchdog, sc); } else pdq_ifstop(sc); } static void pdq_ifinit(void *arg) { pdq_softc_t *sc; sc = arg; PDQ_LOCK(sc); pdq_ifinit_locked(sc); PDQ_UNLOCK(sc); } static void pdq_watchdog(void *arg) { pdq_softc_t *sc; struct ifnet *ifp; sc = arg; PDQ_LOCK_ASSERT(sc); callout_reset(&sc->watchdog, hz, pdq_watchdog, sc); if (sc->timer == 0 || --sc->timer > 0) return; /* * No progress was made on the transmit queue for PDQ_OS_TX_TRANSMIT * seconds. Remove all queued packets. */ ifp = PDQ_IFNET(sc); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; for (;;) { struct mbuf *m; IFQ_DEQUEUE(&ifp->if_snd, m); if (m == NULL) return; PDQ_OS_DATABUF_FREE(PDQ_OS_IFP_TO_SOFTC(ifp)->sc_pdq, m); } } static void pdq_ifstart_locked(struct ifnet *ifp) { pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp); struct mbuf *m; int tx = 0; PDQ_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; if (sc->timer == 0) sc->timer = PDQ_OS_TX_TIMEOUT; if ((sc->sc_pdq->pdq_flags & PDQ_TXOK) == 0) { PDQ_IFNET(sc)->if_drv_flags |= IFF_DRV_OACTIVE; return; } sc->sc_flags |= PDQIF_DOWNCALL; for (;; tx = 1) { IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; #if defined(PDQ_BUS_DMA) && !defined(PDQ_BUS_DMA_NOTX) if ((m->m_flags & M_HASTXDMAMAP) == 0) { bus_dmamap_t map; if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) { m->m_data[0] = PDQ_FDDI_PH0; m->m_data[1] = PDQ_FDDI_PH1; m->m_data[2] = PDQ_FDDI_PH2; } if (!bus_dmamap_create(sc->sc_dmatag, m->m_pkthdr.len, 255, m->m_pkthdr.len, 0, BUS_DMA_NOWAIT, &map)) { if (!bus_dmamap_load_mbuf(sc->sc_dmatag, map, m, BUS_DMA_WRITE|BUS_DMA_NOWAIT)) { bus_dmamap_sync(sc->sc_dmatag, map, 0, m->m_pkthdr.len, BUS_DMASYNC_PREWRITE); M_SETCTX(m, map); m->m_flags |= M_HASTXDMAMAP; } } if ((m->m_flags & M_HASTXDMAMAP) == 0) break; } #else if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) { m->m_data[0] = PDQ_FDDI_PH0; m->m_data[1] = PDQ_FDDI_PH1; m->m_data[2] = PDQ_FDDI_PH2; } #endif if (pdq_queue_transmit_data(sc->sc_pdq, m) == PDQ_FALSE) break; } if (m != NULL) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; IF_PREPEND(&ifp->if_snd, m); } if (tx) PDQ_DO_TYPE2_PRODUCER(sc->sc_pdq); sc->sc_flags &= ~PDQIF_DOWNCALL; } static void pdq_ifstart(struct ifnet *ifp) { pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp); PDQ_LOCK(sc); pdq_ifstart_locked(ifp); PDQ_UNLOCK(sc); } void pdq_os_receive_pdu( pdq_t *pdq, struct mbuf *m, size_t pktlen, int drop) { pdq_softc_t *sc = pdq->pdq_os_ctx; struct ifnet *ifp = PDQ_IFNET(sc); struct fddi_header *fh; if_inc_counter(ifp, 
IFCOUNTER_IPACKETS, 1); #if defined(PDQ_BUS_DMA) { /* * Even though the first mbuf starts at the first fddi header octet, * the dmamap starts PDQ_OS_HDR_OFFSET octets earlier. Any additional * mbufs will start normally. */ int offset = PDQ_OS_HDR_OFFSET; struct mbuf *m0; for (m0 = m; m0 != NULL; m0 = m0->m_next, offset = 0) { pdq_os_databuf_sync(sc, m0, offset, m0->m_len, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t)); bus_dmamap_destroy(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t)); m0->m_flags &= ~M_HASRXDMAMAP; M_SETCTX(m0, NULL); } } #endif m->m_pkthdr.len = pktlen; fh = mtod(m, struct fddi_header *); if (drop || (fh->fddi_fc & (FDDIFC_L|FDDIFC_F)) != FDDIFC_LLC_ASYNC) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); PDQ_OS_DATABUF_FREE(pdq, m); return; } m->m_pkthdr.rcvif = ifp; PDQ_UNLOCK(sc); (*ifp->if_input)(ifp, m); PDQ_LOCK(sc); } void pdq_os_restart_transmitter( pdq_t *pdq) { pdq_softc_t *sc = pdq->pdq_os_ctx; PDQ_IFNET(sc)->if_drv_flags &= ~IFF_DRV_OACTIVE; if (IFQ_IS_EMPTY(&PDQ_IFNET(sc)->if_snd) == 0) { sc->timer = PDQ_OS_TX_TIMEOUT; if ((sc->sc_flags & PDQIF_DOWNCALL) == 0) pdq_ifstart_locked(PDQ_IFNET(sc)); } else { sc->timer = 0; } } void pdq_os_transmit_done( pdq_t *pdq, struct mbuf *m) { pdq_softc_t *sc = pdq->pdq_os_ctx; #if defined(NBPFILTER) && NBPFILTER > 0 if (PDQ_IFNET(sc)->if_bpf != NULL) PDQ_BPF_MTAP(sc, m); #endif PDQ_OS_DATABUF_FREE(pdq, m); if_inc_counter(PDQ_IFNET(sc), IFCOUNTER_OPACKETS, 1); } void pdq_os_addr_fill( pdq_t *pdq, pdq_lanaddr_t *addr, size_t num_addrs) { pdq_softc_t *sc = pdq->pdq_os_ctx; struct ifnet *ifp; struct ifmultiaddr *ifma; ifp = sc->ifp; /* * ADDR_FILTER_SET is always issued before FILTER_SET so * we can play with PDQ_ALLMULTI and not worry about * queueing a FILTER_SET ourselves. */ pdq->pdq_flags &= ~PDQ_ALLMULTI; #if defined(IFF_ALLMULTI) PDQ_IFNET(sc)->if_flags &= ~IFF_ALLMULTI; #endif if_maddr_rlock(PDQ_IFNET(sc)); for (ifma = TAILQ_FIRST(&PDQ_IFNET(sc)->if_multiaddrs); ifma && num_addrs > 0; ifma = TAILQ_NEXT(ifma, ifma_link)) { char *mcaddr; if (ifma->ifma_addr->sa_family != AF_LINK) continue; mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); ((u_short *) addr->lanaddr_bytes)[0] = ((u_short *) mcaddr)[0]; ((u_short *) addr->lanaddr_bytes)[1] = ((u_short *) mcaddr)[1]; ((u_short *) addr->lanaddr_bytes)[2] = ((u_short *) mcaddr)[2]; addr++; num_addrs--; } if_maddr_runlock(PDQ_IFNET(sc)); /* * If not all the addresses fit into the CAM, turn on all-multicast mode.
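 * (num_addrs counts the remaining CAM slots; if the loop above ends with ifma still non-NULL, the CAM filled up before the multicast list was exhausted.)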
*/ if (ifma != NULL) { pdq->pdq_flags |= PDQ_ALLMULTI; #if defined(IFF_ALLMULTI) PDQ_IFNET(sc)->if_flags |= IFF_ALLMULTI; #endif } } #if defined(IFM_FDDI) static int pdq_ifmedia_change( struct ifnet *ifp) { pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp); PDQ_LOCK(sc); if (sc->sc_ifmedia.ifm_media & IFM_FDX) { if ((sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) == 0) { sc->sc_pdq->pdq_flags |= PDQ_WANT_FDX; if (sc->sc_pdq->pdq_flags & PDQ_RUNNING) pdq_run(sc->sc_pdq); } } else if (sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) { sc->sc_pdq->pdq_flags &= ~PDQ_WANT_FDX; if (sc->sc_pdq->pdq_flags & PDQ_RUNNING) pdq_run(sc->sc_pdq); } PDQ_UNLOCK(sc); return 0; } static void pdq_ifmedia_status( struct ifnet *ifp, struct ifmediareq *ifmr) { pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp); PDQ_LOCK(sc); ifmr->ifm_status = IFM_AVALID; if (sc->sc_pdq->pdq_flags & PDQ_IS_ONRING) ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active = (ifmr->ifm_current & ~IFM_FDX); if (sc->sc_pdq->pdq_flags & PDQ_IS_FDX) ifmr->ifm_active |= IFM_FDX; PDQ_UNLOCK(sc); } void pdq_os_update_status( pdq_t *pdq, const void *arg) { pdq_softc_t * const sc = pdq->pdq_os_ctx; const pdq_response_status_chars_get_t *rsp = arg; int media = 0; switch (rsp->status_chars_get.pmd_type[0]) { case PDQ_PMD_TYPE_ANSI_MUTLI_MODE: media = IFM_FDDI_MMF; break; case PDQ_PMD_TYPE_ANSI_SINGLE_MODE_TYPE_1: media = IFM_FDDI_SMF; break; case PDQ_PMD_TYPE_ANSI_SIGNLE_MODE_TYPE_2: media = IFM_FDDI_SMF; break; case PDQ_PMD_TYPE_UNSHIELDED_TWISTED_PAIR: media = IFM_FDDI_UTP; break; default: media |= IFM_MANUAL; } if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS) media |= IFM_FDDI_DA; sc->sc_ifmedia.ifm_media = media | IFM_FDDI; } #endif /* defined(IFM_FDDI) */ static int pdq_ifioctl( struct ifnet *ifp, u_long cmd, caddr_t data) { pdq_softc_t *sc = PDQ_OS_IFP_TO_SOFTC(ifp); int error = 0; switch (cmd) { case SIOCSIFFLAGS: { pdq_ifinit(sc); break; } case SIOCADDMULTI: case SIOCDELMULTI: { PDQ_LOCK(sc); if (PDQ_IFNET(sc)->if_drv_flags & IFF_DRV_RUNNING) { pdq_run(sc->sc_pdq); error = 0; } PDQ_UNLOCK(sc); break; } #if defined(IFM_FDDI) && defined(SIOCSIFMEDIA) case SIOCSIFMEDIA: case SIOCGIFMEDIA: { struct ifreq *ifr = (struct ifreq *)data; error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd); break; } #endif default: { error = fddi_ioctl(ifp, cmd, data); break; } } return error; } #ifndef IFF_NOTRAILERS #define IFF_NOTRAILERS 0 #endif int pdq_ifattach(pdq_softc_t *sc, const pdq_uint8_t *llc, pdq_type_t type) { struct ifnet *ifp; ifp = PDQ_IFNET(sc) = if_alloc(IFT_FDDI); if (ifp == NULL) { device_printf(sc->dev, "can not if_alloc()\n"); return (ENOSPC); } mtx_init(&sc->mtx, device_get_nameunit(sc->dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->watchdog, &sc->mtx, 0); if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); ifp->if_softc = sc; ifp->if_init = pdq_ifinit; ifp->if_snd.ifq_maxlen = ifqmaxlen; ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST; ifp->if_ioctl = pdq_ifioctl; ifp->if_start = pdq_ifstart; #if defined(IFM_FDDI) { const int media = sc->sc_ifmedia.ifm_media; ifmedia_init(&sc->sc_ifmedia, IFM_FDX, pdq_ifmedia_change, pdq_ifmedia_status); ifmedia_add(&sc->sc_ifmedia, media, 0, 0); ifmedia_set(&sc->sc_ifmedia, media); } #endif sc->sc_pdq = pdq_initialize(sc->mem_bst, sc->mem_bsh, ifp->if_xname, -1, sc, type); if (sc->sc_pdq == NULL) { device_printf(sc->dev, "Initialization failed.\n"); return (ENXIO); } fddi_ifattach(ifp, llc, FDDI_BPF_SUPPORTED); return (0); } void pdq_ifdetach (pdq_softc_t *sc) { 
struct ifnet *ifp; ifp = sc->ifp; fddi_ifdetach(ifp, FDDI_BPF_SUPPORTED); PDQ_LOCK(sc); pdq_ifstop(sc); PDQ_UNLOCK(sc); callout_drain(&sc->watchdog); pdq_free(sc->dev); return; } void pdq_free (device_t dev) { pdq_softc_t *sc; sc = device_get_softc(dev); if (sc->io) bus_release_resource(dev, sc->io_type, sc->io_rid, sc->io); if (sc->mem) bus_release_resource(dev, sc->mem_type, sc->mem_rid, sc->mem); if (sc->irq_ih) bus_teardown_intr(dev, sc->irq, sc->irq_ih); if (sc->irq) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); if (sc->ifp) if_free(sc->ifp); /* * Destroy the mutex. */ if (mtx_initialized(&sc->mtx) != 0) { mtx_destroy(&sc->mtx); } return; } #if defined(PDQ_BUS_DMA) int pdq_os_memalloc_contig( pdq_t *pdq) { pdq_softc_t * const sc = pdq->pdq_os_ctx; bus_dma_segment_t db_segs[1], ui_segs[1], cb_segs[1]; int db_nsegs = 0, ui_nsegs = 0; int steps = 0; int not_ok; not_ok = bus_dmamem_alloc(sc->sc_dmatag, sizeof(*pdq->pdq_dbp), sizeof(*pdq->pdq_dbp), sizeof(*pdq->pdq_dbp), db_segs, 1, &db_nsegs, BUS_DMA_NOWAIT); if (!not_ok) { steps = 1; not_ok = bus_dmamem_map(sc->sc_dmatag, db_segs, db_nsegs, sizeof(*pdq->pdq_dbp), (caddr_t *) &pdq->pdq_dbp, BUS_DMA_NOWAIT); } if (!not_ok) { steps = 2; not_ok = bus_dmamap_create(sc->sc_dmatag, db_segs[0].ds_len, 1, 0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_dbmap); } if (!not_ok) { steps = 3; not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_dbmap, pdq->pdq_dbp, sizeof(*pdq->pdq_dbp), NULL, BUS_DMA_NOWAIT); } if (!not_ok) { steps = 4; pdq->pdq_pa_descriptor_block = sc->sc_dbmap->dm_segs[0].ds_addr; not_ok = bus_dmamem_alloc(sc->sc_dmatag, PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE, ui_segs, 1, &ui_nsegs, BUS_DMA_NOWAIT); } if (!not_ok) { steps = 5; not_ok = bus_dmamem_map(sc->sc_dmatag, ui_segs, ui_nsegs, PDQ_OS_PAGESIZE, (caddr_t *) &pdq->pdq_unsolicited_info.ui_events, BUS_DMA_NOWAIT); } if (!not_ok) { steps = 6; not_ok = bus_dmamap_create(sc->sc_dmatag, ui_segs[0].ds_len, 1, PDQ_OS_PAGESIZE, 0, BUS_DMA_NOWAIT, &sc->sc_uimap); } if (!not_ok) { steps = 7; not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_uimap, pdq->pdq_unsolicited_info.ui_events, PDQ_OS_PAGESIZE, NULL, BUS_DMA_NOWAIT); } if (!not_ok) { steps = 8; pdq->pdq_unsolicited_info.ui_pa_bufstart = sc->sc_uimap->dm_segs[0].ds_addr; cb_segs[0] = db_segs[0]; cb_segs[0].ds_addr += offsetof(pdq_descriptor_block_t, pdqdb_consumer); cb_segs[0].ds_len = sizeof(pdq_consumer_block_t); not_ok = bus_dmamem_map(sc->sc_dmatag, cb_segs, 1, sizeof(*pdq->pdq_cbp), (caddr_t *) &pdq->pdq_cbp, BUS_DMA_NOWAIT|BUS_DMA_COHERENT); } if (!not_ok) { steps = 9; not_ok = bus_dmamap_create(sc->sc_dmatag, cb_segs[0].ds_len, 1, 0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_cbmap); } if (!not_ok) { steps = 10; not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_cbmap, (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp), NULL, BUS_DMA_NOWAIT); } if (!not_ok) { pdq->pdq_pa_consumer_block = sc->sc_cbmap->dm_segs[0].ds_addr; return not_ok; } switch (steps) { case 11: { bus_dmamap_unload(sc->sc_dmatag, sc->sc_cbmap); /* FALL THROUGH */ } case 10: { bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cbmap); /* FALL THROUGH */ } case 9: { bus_dmamem_unmap(sc->sc_dmatag, (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp)); /* FALL THROUGH */ } case 8: { bus_dmamap_unload(sc->sc_dmatag, sc->sc_uimap); /* FALL THROUGH */ } case 7: { bus_dmamap_destroy(sc->sc_dmatag, sc->sc_uimap); /* FALL THROUGH */ } case 6: { bus_dmamem_unmap(sc->sc_dmatag, (caddr_t) pdq->pdq_unsolicited_info.ui_events, PDQ_OS_PAGESIZE); /* FALL THROUGH */ } case 5: { 
bus_dmamem_free(sc->sc_dmatag, ui_segs, ui_nsegs); /* FALL THROUGH */ } case 4: { bus_dmamap_unload(sc->sc_dmatag, sc->sc_dbmap); /* FALL THROUGH */ } case 3: { bus_dmamap_destroy(sc->sc_dmatag, sc->sc_dbmap); /* FALL THROUGH */ } case 2: { bus_dmamem_unmap(sc->sc_dmatag, (caddr_t) pdq->pdq_dbp, sizeof(*pdq->pdq_dbp)); /* FALL THROUGH */ } case 1: { bus_dmamem_free(sc->sc_dmatag, db_segs, db_nsegs); /* FALL THROUGH */ } } return not_ok; } extern void pdq_os_descriptor_block_sync( pdq_os_ctx_t *sc, size_t offset, size_t length, int ops) { bus_dmamap_sync(sc->sc_dmatag, sc->sc_dbmap, offset, length, ops); } extern void pdq_os_consumer_block_sync( pdq_os_ctx_t *sc, int ops) { bus_dmamap_sync(sc->sc_dmatag, sc->sc_cbmap, 0, sizeof(pdq_consumer_block_t), ops); } extern void pdq_os_unsolicited_event_sync( pdq_os_ctx_t *sc, size_t offset, size_t length, int ops) { bus_dmamap_sync(sc->sc_dmatag, sc->sc_uimap, offset, length, ops); } extern void pdq_os_databuf_sync( pdq_os_ctx_t *sc, struct mbuf *m, size_t offset, size_t length, int ops) { bus_dmamap_sync(sc->sc_dmatag, M_GETCTX(m, bus_dmamap_t), offset, length, ops); } extern void pdq_os_databuf_free( pdq_os_ctx_t *sc, struct mbuf *m) { if (m->m_flags & (M_HASRXDMAMAP|M_HASTXDMAMAP)) { bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); bus_dmamap_unload(sc->sc_dmatag, map); bus_dmamap_destroy(sc->sc_dmatag, map); m->m_flags &= ~(M_HASRXDMAMAP|M_HASTXDMAMAP); } m_freem(m); } extern struct mbuf * pdq_os_databuf_alloc( pdq_os_ctx_t *sc) { struct mbuf *m; bus_dmamap_t map; MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { printf("%s: can't alloc small buf\n", sc->sc_dev.dv_xname); return NULL; } - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { printf("%s: can't alloc cluster\n", sc->sc_dev.dv_xname); m_free(m); return NULL; } m->m_pkthdr.len = m->m_len = PDQ_OS_DATABUF_SIZE; if (bus_dmamap_create(sc->sc_dmatag, PDQ_OS_DATABUF_SIZE, 1, PDQ_OS_DATABUF_SIZE, 0, BUS_DMA_NOWAIT, &map)) { printf("%s: can't create dmamap\n", sc->sc_dev.dv_xname); m_free(m); return NULL; } if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m, BUS_DMA_READ|BUS_DMA_NOWAIT)) { printf("%s: can't load dmamap\n", sc->sc_dev.dv_xname); bus_dmamap_destroy(sc->sc_dmatag, map); m_free(m); return NULL; } m->m_flags |= M_HASRXDMAMAP; M_SETCTX(m, map); return m; } #endif Index: head/sys/dev/pdq/pdqvar.h =================================================================== --- head/sys/dev/pdq/pdqvar.h (revision 276749) +++ head/sys/dev/pdq/pdqvar.h (revision 276750) @@ -1,297 +1,296 @@ /* $NetBSD: pdqvar.h,v 1.27 2000/05/03 19:17:54 thorpej Exp $ */ /*- * Copyright (c) 1995, 1996 Matt Thomas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Id: pdqvar.h,v 1.21 1997/03/21 21:16:04 thomas Exp * $FreeBSD$ * */ /* * DEC PDQ FDDI Controller; PDQ O/S dependent definitions * * Written by Matt Thomas * */ #ifndef _PDQ_OS_H #define _PDQ_OS_H #define PDQ_OS_TX_TIMEOUT 5 /* seconds */ typedef struct _pdq_t pdq_t; typedef struct _pdq_csrs_t pdq_csrs_t; typedef struct _pdq_pci_csrs_t pdq_pci_csrs_t; typedef struct _pdq_lanaddr_t pdq_lanaddr_t; typedef unsigned int pdq_uint32_t; typedef unsigned short pdq_uint16_t; typedef unsigned char pdq_uint8_t; typedef enum _pdq_boolean_t pdq_boolean_t; typedef enum _pdq_type_t pdq_type_t; typedef enum _pdq_state_t pdq_state_t; enum _pdq_type_t { PDQ_DEFPA, /* PCI-bus */ PDQ_DEFEA, /* EISA-bus */ PDQ_DEFTA, /* TurboChannel */ PDQ_DEFAA, /* FutureBus+ */ PDQ_DEFQA /* Q-bus */ }; #if defined(PDQTEST) #include #else #include #include #ifndef M_MCAST #include #endif /* M_CAST */ #include #include #include #define PDQ_OS_PREFIX "%s: " #define PDQ_OS_PREFIX_ARGS pdq->pdq_os_name #define PDQ_OS_PAGESIZE PAGE_SIZE #define PDQ_OS_USEC_DELAY(n) DELAY(n) #define PDQ_OS_MEMZERO(p, n) bzero((caddr_t)(p), (n)) #if !defined(PDQ_BUS_DMA) #define PDQ_OS_VA_TO_BUSPA(pdq, p) vtophys(p) #endif #define PDQ_OS_MEMALLOC(n) malloc(n, M_DEVBUF, M_NOWAIT) #define PDQ_OS_MEMFREE(p, n) free((void *) p, M_DEVBUF) #define PDQ_OS_MEMALLOC_CONTIG(n) contigmalloc(n, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0) #define PDQ_OS_MEMFREE_CONTIG(p, n) contigfree((void *) p, n, M_DEVBUF) #include #include #include #define ifnet_ret_t void typedef int ioctl_cmd_t; typedef enum { PDQ_BUS_EISA, PDQ_BUS_PCI } pdq_bus_t; typedef u_int16_t pdq_bus_ioport_t; typedef volatile pdq_uint32_t *pdq_bus_memaddr_t; typedef pdq_bus_memaddr_t pdq_bus_memoffset_t; #define pdq_os_update_status(a, b) ((void) 0) #if !defined(PDQ_OS_SPL_RAISE) #define PDQ_OS_SPL_RAISE() splimp() #endif #if !defined(PDQ_OS_SPL_LOWER) #define PDQ_OS_SPL_LOWER(s) splx(s) #endif #if !defined(PDQ_FDDICOM) #define PDQ_FDDICOM(sc) (&(sc)->sc_ac) #endif #if !defined(PDQ_IFNET) #define PDQ_IFNET(sc) (PDQ_FDDICOM((sc))->ac_ifp) #endif #define PDQ_BPF_MTAP(sc, m) bpf_mtap(PDQ_IFNET(sc), m) #define PDQ_BPFATTACH(sc, t, s) bpfattach(PDQ_IFNET(sc), t, s) #if !defined(PDQ_ARP_IFINIT) #define PDQ_ARP_IFINIT(sc, ifa) arp_ifinit(&(sc)->sc_ac, (ifa)) #endif #if !defined(PDQ_OS_PTR_FMT) #define PDQ_OS_PTR_FMT "0x%x" #endif #if !defined(PDQ_OS_CSR_FMT) #define PDQ_OS_CSR_FMT "0x%x" #endif #if !defined(PDQ_LANADDR) #define PDQ_LANADDR(sc) ((sc)->sc_ac.ac_enaddr) #define PDQ_LANADDR_SIZE(sc) (sizeof((sc)->sc_ac.ac_enaddr)) #endif #if !defined(PDQ_OS_IOMEM) #define PDQ_OS_IORD_32(t, base, offset) inl((base) + (offset)) #define PDQ_OS_IOWR_32(t, base, offset, data) outl((base) + (offset), data) #define PDQ_OS_IORD_8(t, base, offset) inb((base) + (offset)) #define PDQ_OS_IOWR_8(t, base, offset, data) outb((base) + (offset), data) #define PDQ_OS_MEMRD_32(t, base, offset) (0 + *((base) + (offset))) #define PDQ_OS_MEMWR_32(t, base, offset, data) do *((base) + (offset)) = (data); while (0) #endif #ifndef 
PDQ_CSR_OFFSET #define PDQ_CSR_OFFSET(base, offset) (0 + (base) + (offset)) #endif #ifndef PDQ_CSR_WRITE #define PDQ_CSR_WRITE(csr, name, data) PDQ_OS_MEMWR_32((csr)->csr_bus, (csr)->name, 0, data) #define PDQ_CSR_READ(csr, name) PDQ_OS_MEMRD_32((csr)->csr_bus, (csr)->name, 0) #endif #ifndef PDQ_OS_IFP_TO_SOFTC #define PDQ_OS_IFP_TO_SOFTC(ifp) ((pdq_softc_t *) ((caddr_t) ifp - offsetof(pdq_softc_t, sc_ac.ac_if))) #endif #if !defined(PDQ_HWSUPPORT) typedef struct _pdq_os_ctx_t { struct kern_devconf *sc_kdc; /* freebsd cruft */ struct arpcom sc_ac; #if defined(IFM_FDDI) struct ifmedia sc_ifmedia; #endif pdq_t *sc_pdq; #if defined(__i386__) pdq_bus_ioport_t sc_iobase; #endif #if defined(PDQ_IOMAPPED) #define sc_membase sc_iobase #else pdq_bus_memaddr_t sc_membase; #endif pdq_bus_t sc_iotag; pdq_bus_t sc_csrtag; caddr_t sc_bpf; #if defined(PDQ_BUS_DMA) bus_dma_tag_t sc_dmatag; bus_dmamap_t sc_dbmap; /* DMA map for descriptor block */ bus_dmamap_t sc_uimap; /* DMA map for unsolicited events */ bus_dmamap_t sc_cbmap; /* DMA map for consumer block */ #endif } pdq_softc_t; extern void pdq_ifreset(pdq_softc_t *sc); extern void pdq_ifinit(pdq_softc_t *sc); extern void pdq_ifwatchdog(struct ifnet *ifp); extern ifnet_ret_t pdq_ifstart(struct ifnet *ifp); extern int pdq_ifioctl(struct ifnet *ifp, ioctl_cmd_t cmd, caddr_t data); extern void pdq_ifattach(pdq_softc_t *sc, ifnet_ret_t (*ifwatchdog)(int unit)); #endif /* !PDQ_HWSUPPORT */ #endif #define PDQ_OS_DATABUF_SIZE (MCLBYTES) #ifndef PDQ_OS_DATABUF_FREE #define PDQ_OS_DATABUF_FREE(pdq, b) (m_freem(b)) #endif #define PDQ_OS_DATABUF_NEXT(b) ((b)->m_next) #define PDQ_OS_DATABUF_NEXT_SET(b, b1) ((b)->m_next = (b1)) #define PDQ_OS_DATABUF_NEXTPKT(b) ((b)->m_nextpkt) #define PDQ_OS_DATABUF_NEXTPKT_SET(b, b1) ((b)->m_nextpkt = (b1)) #define PDQ_OS_DATABUF_LEN(b) ((b)->m_len) #define PDQ_OS_DATABUF_LEN_SET(b, n) ((b)->m_len = (n)) /* #define PDQ_OS_DATABUF_LEN_ADJ(b, n) ((b)->m_len += (n)) */ #define PDQ_OS_DATABUF_PTR(b) (mtod((b), pdq_uint8_t *)) #define PDQ_OS_DATABUF_ADJ(b, n) ((b)->m_data += (n), (b)->m_len -= (n)) typedef struct mbuf PDQ_OS_DATABUF_T; #ifndef PDQ_OS_DATABUF_ALLOC #define PDQ_OS_DATABUF_ALLOC(pdq, b) do { \ PDQ_OS_DATABUF_T *x_m0; \ MGETHDR(x_m0, M_NOWAIT, MT_DATA); \ if (x_m0 != NULL) { \ - MCLGET(x_m0, M_NOWAIT); \ - if ((x_m0->m_flags & M_EXT) == 0) { \ + if (!(MCLGET(x_m0, M_NOWAIT))) { \ m_free(x_m0); \ (b) = NULL; \ } else { \ (b) = x_m0; \ x_m0->m_len = PDQ_OS_DATABUF_SIZE; \ } \ } else { \ (b) = NULL; \ } \ } while (0) #endif #define PDQ_OS_DATABUF_RESET(b) ((b)->m_data = (b)->m_ext.ext_buf, (b)->m_len = MCLBYTES) #define PDQ_OS_TX_TRANSMIT 5 #define PDQ_OS_DATABUF_ENQUEUE(q, b) do { \ PDQ_OS_DATABUF_NEXTPKT_SET(b, NULL); \ if ((q)->q_tail == NULL) \ (q)->q_head = (b); \ else \ PDQ_OS_DATABUF_NEXTPKT_SET(((PDQ_OS_DATABUF_T *)(q)->q_tail), b); \ (q)->q_tail = (b); \ } while (0) #define PDQ_OS_DATABUF_DEQUEUE(q, b) do { \ if (((b) = (PDQ_OS_DATABUF_T *) (q)->q_head) != NULL) { \ if (((q)->q_head = PDQ_OS_DATABUF_NEXTPKT(b)) == NULL) \ (q)->q_tail = NULL; \ PDQ_OS_DATABUF_NEXTPKT_SET(b, NULL); \ } \ } while (0) #if !defined(PDQ_OS_CONSUMER_PRESYNC) #define PDQ_OS_CONSUMER_PRESYNC(pdq) do { } while(0) #define PDQ_OS_CONSUMER_POSTSYNC(pdq) do { } while(0) #define PDQ_OS_DESC_PRESYNC(pdq, d, s) do { } while(0) #define PDQ_OS_DESC_POSTSYNC(pdq, d, s) do { } while(0) #define PDQ_OS_CMDRQST_PRESYNC(pdq, s) do { } while(0) #define PDQ_OS_CMDRQST_POSTSYNC(pdq, s) do { } while(0) #define PDQ_OS_CMDRSP_PRESYNC(pdq, s) do { } while(0) 
#define PDQ_OS_CMDRSP_POSTSYNC(pdq, s) do { } while(0) #define PDQ_OS_RXPDU_PRESYNC(pdq, b, o, l) do { } while(0) #define PDQ_OS_RXPDU_POSTSYNC(pdq, b, o, l) do { } while(0) #define PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, e) do { } while(0) #define PDQ_OS_UNSOL_EVENT_POSTSYNC(pdq, e) do { } while(0) #endif #ifndef PDQ_OS_DATABUF_BUSPA #define PDQ_OS_DATABUF_BUSPA(pdq, b) PDQ_OS_VA_TO_BUSPA(pdq, PDQ_OS_DATABUF_PTR(b)) #endif #ifndef PDQ_OS_HDR_OFFSET #define PDQ_OS_HDR_OFFSET PDQ_RX_FC_OFFSET #endif extern void pdq_os_addr_fill(pdq_t *pdq, pdq_lanaddr_t *addrs, size_t numaddrs); extern void pdq_os_receive_pdu(pdq_t *, PDQ_OS_DATABUF_T *pdu, size_t pdulen, int drop); extern void pdq_os_restart_transmitter(pdq_t *pdq); extern void pdq_os_transmit_done(pdq_t *pdq, PDQ_OS_DATABUF_T *pdu); #if !defined(pdq_os_update_status) extern void pdq_os_update_status(pdq_t *pdq, const void *rsp); #endif #if !defined(PDQ_OS_MEMALLOC_CONTIG) extern int pdq_os_memalloc_contig(pdq_t *pdq); #endif extern pdq_boolean_t pdq_queue_transmit_data(pdq_t *pdq, PDQ_OS_DATABUF_T *pdu); extern void pdq_flush_transmitter(pdq_t *pdq); extern void pdq_run(pdq_t *pdq); extern pdq_state_t pdq_stop(pdq_t *pdq); extern void pdq_hwreset(pdq_t *pdq); extern int pdq_interrupt(pdq_t *pdq); extern pdq_t *pdq_initialize(pdq_bus_t bus, pdq_bus_memaddr_t csr_va, const char *name, int unit, void *ctx, pdq_type_t type); #endif /* _PDQ_OS_H */ Index: head/sys/dev/safe/safe.c =================================================================== --- head/sys/dev/safe/safe.c (revision 276749) +++ head/sys/dev/safe/safe.c (revision 276750) @@ -1,2232 +1,2230 @@ /*- * Copyright (c) 2003 Sam Leffler, Errno Consulting * Copyright (c) 2003 Global Technology Associates, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * SafeNet SafeXcel-1141 hardware crypto accelerator */ #include "opt_safe.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #ifdef SAFE_RNDTEST #include #endif #include #include #ifndef bswap32 #define bswap32 NTOHL #endif /* * Prototypes and count for the pci_device structure */ static int safe_probe(device_t); static int safe_attach(device_t); static int safe_detach(device_t); static int safe_suspend(device_t); static int safe_resume(device_t); static int safe_shutdown(device_t); static int safe_newsession(device_t, u_int32_t *, struct cryptoini *); static int safe_freesession(device_t, u_int64_t); static int safe_process(device_t, struct cryptop *, int); static device_method_t safe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, safe_probe), DEVMETHOD(device_attach, safe_attach), DEVMETHOD(device_detach, safe_detach), DEVMETHOD(device_suspend, safe_suspend), DEVMETHOD(device_resume, safe_resume), DEVMETHOD(device_shutdown, safe_shutdown), /* crypto device methods */ DEVMETHOD(cryptodev_newsession, safe_newsession), DEVMETHOD(cryptodev_freesession,safe_freesession), DEVMETHOD(cryptodev_process, safe_process), DEVMETHOD_END }; static driver_t safe_driver = { "safe", safe_methods, sizeof (struct safe_softc) }; static devclass_t safe_devclass; DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0); MODULE_DEPEND(safe, crypto, 1, 1, 1); #ifdef SAFE_RNDTEST MODULE_DEPEND(safe, rndtest, 1, 1, 1); #endif static void safe_intr(void *); static void safe_callback(struct safe_softc *, struct safe_ringentry *); static void safe_feed(struct safe_softc *, struct safe_ringentry *); static void safe_mcopy(struct mbuf *, struct mbuf *, u_int); #ifndef SAFE_NO_RNG static void safe_rng_init(struct safe_softc *); static void safe_rng(void *); #endif /* SAFE_NO_RNG */ static int safe_dma_malloc(struct safe_softc *, bus_size_t, struct safe_dma_alloc *, int); #define safe_dma_sync(_dma, _flags) \ bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags)) static void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *); static int safe_dmamap_aligned(const struct safe_operand *); static int safe_dmamap_uniform(const struct safe_operand *); static void safe_reset_board(struct safe_softc *); static void safe_init_board(struct safe_softc *); static void safe_init_pciregs(device_t dev); static void safe_cleanchip(struct safe_softc *); static void safe_totalreset(struct safe_softc *); static int safe_free_entry(struct safe_softc *, struct safe_ringentry *); static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD, 0, "SafeNet driver parameters"); #ifdef SAFE_DEBUG static void safe_dump_dmastatus(struct safe_softc *, const char *); static void safe_dump_ringstate(struct safe_softc *, const char *); static void safe_dump_intrstate(struct safe_softc *, const char *); static void safe_dump_request(struct safe_softc *, const char *, struct safe_ringentry *); static struct safe_softc *safec; /* for use by hw.safe.dump */ static int safe_debug = 0; SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug, 0, "control debugging msgs"); #define DPRINTF(_x) if (safe_debug) printf _x #else #define DPRINTF(_x) #endif #define READ_REG(sc,r) \ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r)) #define WRITE_REG(sc,reg,val) \ bus_space_write_4((sc)->sc_st, 
(sc)->sc_sh, reg, val) struct safe_stats safestats; SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats, safe_stats, "driver statistics"); #ifndef SAFE_NO_RNG static int safe_rnginterval = 1; /* poll once a second */ SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval, 0, "RNG polling interval (secs)"); static int safe_rngbufsize = 16; /* 64 bytes each poll */ SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize, 0, "RNG polling buffer size (32-bit words)"); static int safe_rngmaxalarm = 8; /* max alarms before reset */ SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm, 0, "RNG max alarms before reset"); #endif /* SAFE_NO_RNG */ static int safe_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET && pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL) return (BUS_PROBE_DEFAULT); return (ENXIO); } static const char* safe_partname(struct safe_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_SAFENET: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141"; } return "SafeNet unknown-part"; } return "Unknown-vendor unknown-part"; } #ifndef SAFE_NO_RNG static void default_harvest(struct rndtest_state *rsp, void *buf, u_int count) { random_harvest(buf, count, count*NBBY/2, RANDOM_PURE_SAFE); } #endif /* SAFE_NO_RNG */ static int safe_attach(device_t dev) { struct safe_softc *sc = device_get_softc(dev); u_int32_t raddr; u_int32_t i, devinfo; int rid; bzero(sc, sizeof (*sc)); sc->sc_dev = dev; /* XXX handle power management */ pci_enable_busmaster(dev); /* * Setup memory-mapping of PCI registers. */ rid = BS_BAR; sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_sr == NULL) { device_printf(dev, "cannot map register space\n"); goto bad; } sc->sc_st = rman_get_bustag(sc->sc_sr); sc->sc_sh = rman_get_bushandle(sc->sc_sr); /* * Arrange interrupt line. */ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto bad1; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is mapped appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, safe_intr, sc, &sc->sc_ih)) { device_printf(dev, "could not establish interrupt\n"); goto bad2; } sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto bad3; } sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) & (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN); /* * Setup DMA descriptor area. 
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, /* alignment */ SAFE_DMA_BOUNDARY, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SAFE_MAX_DMA, /* maxsize */ SAFE_MAX_PART, /* nsegments */ SAFE_MAX_SSIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &sc->sc_srcdmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto bad4; } if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, /* alignment */ SAFE_MAX_DSIZE, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SAFE_MAX_DMA, /* maxsize */ SAFE_MAX_PART, /* nsegments */ SAFE_MAX_DSIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &sc->sc_dstdmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto bad4; } /* * Allocate packet engine descriptors. */ if (safe_dma_malloc(sc, SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry), &sc->sc_ringalloc, 0)) { device_printf(dev, "cannot allocate PE descriptor ring\n"); bus_dma_tag_destroy(sc->sc_srcdmat); goto bad4; } /* * Hookup the static portion of all our data structures. */ sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr; sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE; sc->sc_front = sc->sc_ring; sc->sc_back = sc->sc_ring; raddr = sc->sc_ringalloc.dma_paddr; bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry)); for (i = 0; i < SAFE_MAX_NQUEUE; i++) { struct safe_ringentry *re = &sc->sc_ring[i]; re->re_desc.d_sa = raddr + offsetof(struct safe_ringentry, re_sa); re->re_sa.sa_staterec = raddr + offsetof(struct safe_ringentry, re_sastate); raddr += sizeof (struct safe_ringentry); } mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev), "packet engine ring", MTX_DEF); /* * Allocate scatter and gather particle descriptors. 
*/ if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc), &sc->sc_spalloc, 0)) { device_printf(dev, "cannot allocate source particle " "descriptor ring\n"); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_ringalloc); bus_dma_tag_destroy(sc->sc_srcdmat); goto bad4; } sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr; sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART; sc->sc_spfree = sc->sc_spring; bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc)); if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc), &sc->sc_dpalloc, 0)) { device_printf(dev, "cannot allocate destination particle " "descriptor ring\n"); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_spalloc); safe_dma_free(sc, &sc->sc_ringalloc); bus_dma_tag_destroy(sc->sc_dstdmat); goto bad4; } sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr; sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART; sc->sc_dpfree = sc->sc_dpring; bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc)); device_printf(sc->sc_dev, "%s", safe_partname(sc)); devinfo = READ_REG(sc, SAFE_DEVINFO); if (devinfo & SAFE_DEVINFO_RNG) { sc->sc_flags |= SAFE_FLAGS_RNG; printf(" rng"); } if (devinfo & SAFE_DEVINFO_PKEY) { #if 0 printf(" key"); sc->sc_flags |= SAFE_FLAGS_KEY; crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0); crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0); #endif } if (devinfo & SAFE_DEVINFO_DES) { printf(" des/3des"); crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); } if (devinfo & SAFE_DEVINFO_AES) { printf(" aes"); crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); } if (devinfo & SAFE_DEVINFO_MD5) { printf(" md5"); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); } if (devinfo & SAFE_DEVINFO_SHA1) { printf(" sha1"); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); } printf(" null"); crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0); /* XXX other supported algorithms */ printf("\n"); safe_reset_board(sc); /* reset h/w */ safe_init_pciregs(dev); /* init pci settings */ safe_init_board(sc); /* init h/w */ #ifndef SAFE_NO_RNG if (sc->sc_flags & SAFE_FLAGS_RNG) { #ifdef SAFE_RNDTEST sc->sc_rndtest = rndtest_attach(dev); if (sc->sc_rndtest) sc->sc_harvest = rndtest_harvest; else sc->sc_harvest = default_harvest; #else sc->sc_harvest = default_harvest; #endif safe_rng_init(sc); callout_init(&sc->sc_rngto, CALLOUT_MPSAFE); callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc); } #endif /* SAFE_NO_RNG */ #ifdef SAFE_DEBUG safec = sc; /* for use by hw.safe.dump */ #endif return (0); bad4: crypto_unregister_all(sc->sc_cid); bad3: bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bad2: bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bad1: bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); bad: return (ENXIO); } /* * Detach a device that successfully probed. 
*/ static int safe_detach(device_t dev) { struct safe_softc *sc = device_get_softc(dev); /* XXX wait/abort active ops */ WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */ callout_stop(&sc->sc_rngto); crypto_unregister_all(sc->sc_cid); #ifdef SAFE_RNDTEST if (sc->sc_rndtest) rndtest_detach(sc->sc_rndtest); #endif safe_cleanchip(sc); safe_dma_free(sc, &sc->sc_dpalloc); safe_dma_free(sc, &sc->sc_spalloc); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_ringalloc); bus_generic_detach(dev); bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bus_dma_tag_destroy(sc->sc_srcdmat); bus_dma_tag_destroy(sc->sc_dstdmat); bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); return (0); } /* * Stop all chip i/o so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int safe_shutdown(device_t dev) { #ifdef notyet safe_stop(device_get_softc(dev)); #endif return (0); } /* * Device suspend routine. */ static int safe_suspend(device_t dev) { struct safe_softc *sc = device_get_softc(dev); #ifdef notyet /* XXX stop the device and save PCI settings */ #endif sc->sc_suspended = 1; return (0); } static int safe_resume(device_t dev) { struct safe_softc *sc = device_get_softc(dev); #ifdef notyet /* XXX restore PCI settings and start the device */ #endif sc->sc_suspended = 0; return (0); } /* * SafeXcel Interrupt routine */ static void safe_intr(void *arg) { struct safe_softc *sc = arg; volatile u_int32_t stat; stat = READ_REG(sc, SAFE_HM_STAT); if (stat == 0) /* shared irq, not for us */ return; WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */ if ((stat & SAFE_INT_PE_DDONE)) { /* * Descriptor(s) done; scan the ring and * process completed operations. */ mtx_lock(&sc->sc_ringmtx); while (sc->sc_back != sc->sc_front) { struct safe_ringentry *re = sc->sc_back; #ifdef SAFE_DEBUG if (safe_debug) { safe_dump_ringstate(sc, __func__); safe_dump_request(sc, __func__, re); } #endif /* * safe_process marks ring entries that were allocated * but not used with a csr of zero. This ensures the * ring front pointer never needs to be set backwards * in the event that an entry is allocated but not used * because of a setup error. */
if (re->re_desc.d_csr != 0) { if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) break; if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) break; sc->sc_nqchip--; safe_callback(sc, re); } if (++(sc->sc_back) == sc->sc_ringtop) sc->sc_back = sc->sc_ring; } mtx_unlock(&sc->sc_ringmtx); } /* * Check to see if we got any DMA error */ if (stat & SAFE_INT_PE_ERROR) { DPRINTF(("dmaerr dmastat %08x\n", READ_REG(sc, SAFE_PE_DMASTAT))); safestats.st_dmaerr++; safe_totalreset(sc); #if 0 safe_feed(sc); #endif } if (sc->sc_needwakeup) { /* XXX check high watermark */ int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); DPRINTF(("%s: wakeup crypto %x\n", __func__, sc->sc_needwakeup)); sc->sc_needwakeup &= ~wakeup; crypto_unblock(sc->sc_cid, wakeup); } } /* * safe_feed() - post a request to chip */ static void safe_feed(struct safe_softc *sc, struct safe_ringentry *re) { bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE); if (re->re_dst_map != NULL) bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map, BUS_DMASYNC_PREREAD); /* XXX have no smaller granularity */ safe_dma_sync(&sc->sc_ringalloc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE); safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE); #ifdef SAFE_DEBUG if (safe_debug) { safe_dump_ringstate(sc, __func__); safe_dump_request(sc, __func__, re); } #endif sc->sc_nqchip++; if (sc->sc_nqchip > safestats.st_maxqchip) safestats.st_maxqchip = sc->sc_nqchip; /* poke h/w to check descriptor ring, any value can be written */ WRITE_REG(sc, SAFE_HI_RD_DESCR, 0); } #define N(a) (sizeof(a) / sizeof (a[0])) static void safe_setup_enckey(struct safe_session *ses, caddr_t key) { int i; bcopy(key, ses->ses_key, ses->ses_klen / 8); /* PE is little-endian, ensure proper byte order */ for (i = 0; i < N(ses->ses_key); i++) ses->ses_key[i] = htole32(ses->ses_key[i]); } static void safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen) { MD5_CTX md5ctx; SHA1_CTX sha1ctx; int i; for (i = 0; i < klen; i++) key[i] ^= HMAC_IPAD_VAL; if (algo == CRYPTO_MD5_HMAC) { MD5Init(&md5ctx); MD5Update(&md5ctx, key, klen); MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen); bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state)); } else { SHA1Init(&sha1ctx); SHA1Update(&sha1ctx, key, klen); SHA1Update(&sha1ctx, hmac_ipad_buffer, SHA1_HMAC_BLOCK_LEN - klen); bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32)); } for (i = 0; i < klen; i++) key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL); if (algo == CRYPTO_MD5_HMAC) { MD5Init(&md5ctx); MD5Update(&md5ctx, key, klen); MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen); bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state)); } else { SHA1Init(&sha1ctx); SHA1Update(&sha1ctx, key, klen); SHA1Update(&sha1ctx, hmac_opad_buffer, SHA1_HMAC_BLOCK_LEN - klen); bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32)); } for (i = 0; i < klen; i++) key[i] ^= HMAC_OPAD_VAL; /* PE is little-endian, ensure proper byte order */ for (i = 0; i < N(ses->ses_hminner); i++) { ses->ses_hminner[i] = htole32(ses->ses_hminner[i]); ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]); } } #undef N /* * Allocate a new 'session' and return an encoded session id. 'sidp' * contains our registration id, and should contain an encoded session * id on successful allocation. */
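/*
 * [Editor's aside -- illustration only, not driver code.]  A minimal
 * sketch of the contract described above, as seen from a caller; in
 * practice only the opencrypto framework reaches this routine, via the
 * cryptodev_newsession entry in safe_methods, and "dev" and "key" below
 * are placeholders:
 *
 *	u_int32_t sid;
 *	struct cryptoini cri;
 *
 *	bzero(&cri, sizeof(cri));
 *	cri.cri_alg = CRYPTO_AES_CBC;	// must be registered at attach
 *	cri.cri_klen = 128;		// key length in bits
 *	cri.cri_key = key;		// placeholder key material
 *	if (safe_newsession(dev, &sid, &cri) == 0) {
 *		// sid now encodes unit and session index via SAFE_SID()
 *	}
 */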
*/ static int safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri) { struct safe_softc *sc = device_get_softc(dev); struct cryptoini *c, *encini = NULL, *macini = NULL; struct safe_session *ses = NULL; int sesn; if (sidp == NULL || cri == NULL || sc == NULL) return (EINVAL); for (c = cri; c != NULL; c = c->cri_next) { if (c->cri_alg == CRYPTO_MD5_HMAC || c->cri_alg == CRYPTO_SHA1_HMAC || c->cri_alg == CRYPTO_NULL_HMAC) { if (macini) return (EINVAL); macini = c; } else if (c->cri_alg == CRYPTO_DES_CBC || c->cri_alg == CRYPTO_3DES_CBC || c->cri_alg == CRYPTO_AES_CBC || c->cri_alg == CRYPTO_NULL_CBC) { if (encini) return (EINVAL); encini = c; } else return (EINVAL); } if (encini == NULL && macini == NULL) return (EINVAL); if (encini) { /* validate key length */ switch (encini->cri_alg) { case CRYPTO_DES_CBC: if (encini->cri_klen != 64) return (EINVAL); break; case CRYPTO_3DES_CBC: if (encini->cri_klen != 192) return (EINVAL); break; case CRYPTO_AES_CBC: if (encini->cri_klen != 128 && encini->cri_klen != 192 && encini->cri_klen != 256) return (EINVAL); break; } } if (sc->sc_sessions == NULL) { ses = sc->sc_sessions = (struct safe_session *)malloc( sizeof(struct safe_session), M_DEVBUF, M_NOWAIT); if (ses == NULL) return (ENOMEM); sesn = 0; sc->sc_nsessions = 1; } else { for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { if (sc->sc_sessions[sesn].ses_used == 0) { ses = &sc->sc_sessions[sesn]; break; } } if (ses == NULL) { sesn = sc->sc_nsessions; ses = (struct safe_session *)malloc((sesn + 1) * sizeof(struct safe_session), M_DEVBUF, M_NOWAIT); if (ses == NULL) return (ENOMEM); bcopy(sc->sc_sessions, ses, sesn * sizeof(struct safe_session)); bzero(sc->sc_sessions, sesn * sizeof(struct safe_session)); free(sc->sc_sessions, M_DEVBUF); sc->sc_sessions = ses; ses = &sc->sc_sessions[sesn]; sc->sc_nsessions++; } } bzero(ses, sizeof(struct safe_session)); ses->ses_used = 1; if (encini) { /* get an IV */ /* XXX may read fewer than requested */ read_random(ses->ses_iv, sizeof(ses->ses_iv)); ses->ses_klen = encini->cri_klen; if (encini->cri_key != NULL) safe_setup_enckey(ses, encini->cri_key); } if (macini) { ses->ses_mlen = macini->cri_mlen; if (ses->ses_mlen == 0) { if (macini->cri_alg == CRYPTO_MD5_HMAC) ses->ses_mlen = MD5_HASH_LEN; else ses->ses_mlen = SHA1_HASH_LEN; } if (macini->cri_key != NULL) { safe_setup_mackey(ses, macini->cri_alg, macini->cri_key, macini->cri_klen / 8); } } *sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn); return (0); } /* * Deallocate a session. 
*/ static int safe_freesession(device_t dev, u_int64_t tid) { struct safe_softc *sc = device_get_softc(dev); int session, ret; u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; if (sc == NULL) return (EINVAL); session = SAFE_SESSION(sid); if (session < sc->sc_nsessions) { bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session])); ret = 0; } else ret = EINVAL; return (ret); } static void safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error) { struct safe_operand *op = arg; DPRINTF(("%s: mapsize %u nsegs %d error %d\n", __func__, (u_int) mapsize, nsegs, error)); if (error != 0) return; op->mapsize = mapsize; op->nsegs = nsegs; bcopy(seg, op->segs, nsegs * sizeof (seg[0])); } static int safe_process(device_t dev, struct cryptop *crp, int hint) { struct safe_softc *sc = device_get_softc(dev); int err = 0, i, nicealign, uniform; struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; int bypass, oplen, ivsize; caddr_t iv; int16_t coffset; struct safe_session *ses; struct safe_ringentry *re; struct safe_sarec *sa; struct safe_pdesc *pd; u_int32_t cmd0, cmd1, staterec; if (crp == NULL || crp->crp_callback == NULL || sc == NULL) { safestats.st_invalid++; return (EINVAL); } if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) { safestats.st_badsession++; return (EINVAL); } mtx_lock(&sc->sc_ringmtx); if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) { safestats.st_ringfull++; sc->sc_needwakeup |= CRYPTO_SYMQ; mtx_unlock(&sc->sc_ringmtx); return (ERESTART); } re = sc->sc_front; staterec = re->re_sa.sa_staterec; /* save */ /* NB: zero everything but the PE descriptor */ bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc)); re->re_sa.sa_staterec = staterec; /* restore */ re->re_crp = crp; re->re_sesn = SAFE_SESSION(crp->crp_sid); if (crp->crp_flags & CRYPTO_F_IMBUF) { re->re_src_m = (struct mbuf *)crp->crp_buf; re->re_dst_m = (struct mbuf *)crp->crp_buf; } else if (crp->crp_flags & CRYPTO_F_IOV) { re->re_src_io = (struct uio *)crp->crp_buf; re->re_dst_io = (struct uio *)crp->crp_buf; } else { safestats.st_badflags++; err = EINVAL; goto errout; /* XXX we don't handle contiguous blocks! 
*/ } sa = &re->re_sa; ses = &sc->sc_sessions[re->re_sesn]; crd1 = crp->crp_desc; if (crd1 == NULL) { safestats.st_nodesc++; err = EINVAL; goto errout; } crd2 = crd1->crd_next; cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */ cmd1 = 0; if (crd2 == NULL) { if (crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_NULL_HMAC) { maccrd = crd1; enccrd = NULL; cmd0 |= SAFE_SA_CMD0_OP_HASH; } else if (crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC || crd1->crd_alg == CRYPTO_NULL_CBC) { maccrd = NULL; enccrd = crd1; cmd0 |= SAFE_SA_CMD0_OP_CRYPT; } else { safestats.st_badalg++; err = EINVAL; goto errout; } } else { if ((crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_NULL_HMAC) && (crd2->crd_alg == CRYPTO_DES_CBC || crd2->crd_alg == CRYPTO_3DES_CBC || crd2->crd_alg == CRYPTO_AES_CBC || crd2->crd_alg == CRYPTO_NULL_CBC) && ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { maccrd = crd1; enccrd = crd2; } else if ((crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC || crd1->crd_alg == CRYPTO_NULL_CBC) && (crd2->crd_alg == CRYPTO_MD5_HMAC || crd2->crd_alg == CRYPTO_SHA1_HMAC || crd2->crd_alg == CRYPTO_NULL_HMAC) && (crd1->crd_flags & CRD_F_ENCRYPT)) { enccrd = crd1; maccrd = crd2; } else { safestats.st_badalg++; err = EINVAL; goto errout; } cmd0 |= SAFE_SA_CMD0_OP_BOTH; } if (enccrd) { if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) safe_setup_enckey(ses, enccrd->crd_key); if (enccrd->crd_alg == CRYPTO_DES_CBC) { cmd0 |= SAFE_SA_CMD0_DES; cmd1 |= SAFE_SA_CMD1_CBC; ivsize = 2*sizeof(u_int32_t); } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) { cmd0 |= SAFE_SA_CMD0_3DES; cmd1 |= SAFE_SA_CMD1_CBC; ivsize = 2*sizeof(u_int32_t); } else if (enccrd->crd_alg == CRYPTO_AES_CBC) { cmd0 |= SAFE_SA_CMD0_AES; cmd1 |= SAFE_SA_CMD1_CBC; if (ses->ses_klen == 128) cmd1 |= SAFE_SA_CMD1_AES128; else if (ses->ses_klen == 192) cmd1 |= SAFE_SA_CMD1_AES192; else cmd1 |= SAFE_SA_CMD1_AES256; ivsize = 4*sizeof(u_int32_t); } else { cmd0 |= SAFE_SA_CMD0_CRYPT_NULL; ivsize = 0; } /* * Setup encrypt/decrypt state. When using basic ops * we can't use an inline IV because hash/crypt offset * must be from the end of the IV to the start of the * crypt data and this leaves out the preceding header * from the hash calculation. Instead we place the IV * in the state record and set the hash/crypt offset to * copy both the header+IV. */ if (enccrd->crd_flags & CRD_F_ENCRYPT) { cmd0 |= SAFE_SA_CMD0_OUTBOUND; if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) iv = enccrd->crd_iv; else iv = (caddr_t) ses->ses_iv; if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivsize, iv); } bcopy(iv, re->re_sastate.sa_saved_iv, ivsize); cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV; re->re_flags |= SAFE_QFLAGS_COPYOUTIV; } else { cmd0 |= SAFE_SA_CMD0_INBOUND; if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) { bcopy(enccrd->crd_iv, re->re_sastate.sa_saved_iv, ivsize); } else { crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivsize, (caddr_t)re->re_sastate.sa_saved_iv); } cmd0 |= SAFE_SA_CMD0_IVLD_STATE; } /* * For basic encryption use the zero pad algorithm. * This pads results to an 8-byte boundary and * suppresses padding verification for inbound (i.e. * decrypt) operations. * * NB: Not sure if the 8-byte pad boundary is a problem. 
*/ cmd0 |= SAFE_SA_CMD0_PAD_ZERO; /* XXX assert key bufs have the same size */ bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key)); } if (maccrd) { if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) { safe_setup_mackey(ses, maccrd->crd_alg, maccrd->crd_key, maccrd->crd_klen / 8); } if (maccrd->crd_alg == CRYPTO_MD5_HMAC) { cmd0 |= SAFE_SA_CMD0_MD5; cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */ } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) { cmd0 |= SAFE_SA_CMD0_SHA1; cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */ } else { cmd0 |= SAFE_SA_CMD0_HASH_NULL; } /* * Digest data is loaded from the SA and the hash * result is saved to the state block where we * retrieve it for return to the caller. */ /* XXX assert digest bufs have the same size */ bcopy(ses->ses_hminner, sa->sa_indigest, sizeof(sa->sa_indigest)); bcopy(ses->ses_hmouter, sa->sa_outdigest, sizeof(sa->sa_outdigest)); cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH; re->re_flags |= SAFE_QFLAGS_COPYOUTICV; } if (enccrd && maccrd) { /* * The offset from hash data to the start of * crypt data is the difference in the skips. */ bypass = maccrd->crd_skip; coffset = enccrd->crd_skip - maccrd->crd_skip; if (coffset < 0) { DPRINTF(("%s: hash does not precede crypt; " "mac skip %u enc skip %u\n", __func__, maccrd->crd_skip, enccrd->crd_skip)); safestats.st_skipmismatch++; err = EINVAL; goto errout; } oplen = enccrd->crd_skip + enccrd->crd_len; if (maccrd->crd_skip + maccrd->crd_len != oplen) { DPRINTF(("%s: hash amount %u != crypt amount %u\n", __func__, maccrd->crd_skip + maccrd->crd_len, oplen)); safestats.st_lenmismatch++; err = EINVAL; goto errout; } #ifdef SAFE_DEBUG if (safe_debug) { printf("mac: skip %d, len %d, inject %d\n", maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject); printf("enc: skip %d, len %d, inject %d\n", enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject); printf("bypass %d coffset %d oplen %d\n", bypass, coffset, oplen); } #endif if (coffset & 3) { /* offset must be 32-bit aligned */ DPRINTF(("%s: coffset %u misaligned\n", __func__, coffset)); safestats.st_coffmisaligned++; err = EINVAL; goto errout; } coffset >>= 2; if (coffset > 255) { /* offset must be <256 dwords */ DPRINTF(("%s: coffset %u too big\n", __func__, coffset)); safestats.st_cofftoobig++; err = EINVAL; goto errout; } /* * Tell the hardware to copy the header to the output. * The header is defined as the data from the end of * the bypass to the start of data to be encrypted. * Typically this is the inline IV. Note that you need * to do this even if src+dst are the same; it appears * that w/o this bit the crypted data is written * immediately after the bypass data. */ cmd1 |= SAFE_SA_CMD1_HDRCOPY; /* * Disable IP header mutable bit handling. This is * needed to get correct HMAC calculations.
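 *
 * [Editor's worked example -- not from the original source.]  With
 * made-up ESP-like numbers -- mac skip 0, mac len 64, enc skip 16,
 * enc len 48 -- the skip/offset checks earlier in this block give
 * bypass = 0 and coffset = 16 - 0 = 16 bytes; the >>= 2 turns that
 * into 4 dwords, well under the 255-dword limit, and both descriptors
 * end at oplen = 64, so the length-consistency test passes.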
*/ cmd1 |= SAFE_SA_CMD1_MUTABLE; } else { if (enccrd) { bypass = enccrd->crd_skip; oplen = bypass + enccrd->crd_len; } else { bypass = maccrd->crd_skip; oplen = bypass + maccrd->crd_len; } coffset = 0; } /* XXX verify multiple of 4 when using s/g */ if (bypass > 96) { /* bypass offset must be <= 96 bytes */ DPRINTF(("%s: bypass %u too big\n", __func__, bypass)); safestats.st_bypasstoobig++; err = EINVAL; goto errout; } if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (bus_dmamap_load_mbuf(sc->sc_srcdmat, re->re_src_map, re->re_src_m, safe_op_cb, &re->re_src, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); re->re_src_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } } else if (crp->crp_flags & CRYPTO_F_IOV) { if (bus_dmamap_load_uio(sc->sc_srcdmat, re->re_src_map, re->re_src_io, safe_op_cb, &re->re_src, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); re->re_src_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } } nicealign = safe_dmamap_aligned(&re->re_src); uniform = safe_dmamap_uniform(&re->re_src); DPRINTF(("src nicealign %u uniform %u nsegs %u\n", nicealign, uniform, re->re_src.nsegs)); if (re->re_src.nsegs > 1) { re->re_desc.d_src = sc->sc_spalloc.dma_paddr + ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring); for (i = 0; i < re->re_src_nsegs; i++) { /* NB: no need to check if there's space */ pd = sc->sc_spfree; if (++(sc->sc_spfree) == sc->sc_springtop) sc->sc_spfree = sc->sc_spring; KASSERT((pd->pd_flags&3) == 0 || (pd->pd_flags&3) == SAFE_PD_DONE, ("bogus source particle descriptor; flags %x", pd->pd_flags)); pd->pd_addr = re->re_src_segs[i].ds_addr; pd->pd_size = re->re_src_segs[i].ds_len; pd->pd_flags = SAFE_PD_READY; } cmd0 |= SAFE_SA_CMD0_IGATHER; } else { /* * No need for gather, reference the operand directly. */ re->re_desc.d_src = re->re_src_segs[0].ds_addr; } if (enccrd == NULL && maccrd != NULL) { /* * Hash op; no destination needed. */ } else { if (crp->crp_flags & CRYPTO_F_IOV) { if (!nicealign) { safestats.st_iovmisaligned++; err = EINVAL; goto errout; } if (uniform != 1) { /* * Source is not suitable for direct use as * the destination. Create a new scatter/gather * list based on the destination requirements * and check if that's ok. */ if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, &re->re_dst_map)) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_uio(sc->sc_dstdmat, re->re_dst_map, re->re_dst_io, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); re->re_dst_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } uniform = safe_dmamap_uniform(&re->re_dst); if (!uniform) { /* * There's no way to handle the DMA * requirements with this uio. We * could create a separate DMA area for * the result and then copy it back, * but for now we just bail and return * an error. Note that uio requests * > SAFE_MAX_DSIZE are handled because * the DMA map and segment list for the * destination will result in a * destination particle list that does * the necessary scatter DMA. */ safestats.st_iovnotuniform++; err = EINVAL; goto errout; } } else re->re_dst = re->re_src; } else if (crp->crp_flags & CRYPTO_F_IMBUF) { if (nicealign && uniform == 1) { /* * Source layout is suitable for direct * sharing of the DMA map and segment list.
*/ re->re_dst = re->re_src; } else if (nicealign && uniform == 2) { /* * The source is properly aligned but requires a * different particle list to handle DMA of the * result. Create a new map and do the load to * create the segment list. The particle * descriptor setup code below will handle the * rest. */ if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, &re->re_dst_map)) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_mbuf(sc->sc_dstdmat, re->re_dst_map, re->re_dst_m, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); re->re_dst_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } } else { /* !(aligned and/or uniform) */ int totlen, len; struct mbuf *m, *top, **mp; /* * DMA constraints require that we allocate a * new mbuf chain for the destination. We * allocate an entire new set of mbufs of * optimal/required size and then tell the * hardware to copy any bits that are not * created as a byproduct of the operation. */ if (!nicealign) safestats.st_unaligned++; if (!uniform) safestats.st_notuniform++; totlen = re->re_src_mapsize; if (re->re_src_m->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m, M_NOWAIT, MT_DATA); if (m && !m_dup_pkthdr(m, re->re_src_m, M_NOWAIT)) { m_free(m); m = NULL; } } else { len = MLEN; MGET(m, M_NOWAIT, MT_DATA); } if (m == NULL) { safestats.st_nombuf++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } if (totlen >= MINCLSIZE) { - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { m_free(m); safestats.st_nomcl++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } len = MCLBYTES; } m->m_len = len; top = NULL; mp = ⊤ while (totlen > 0) { if (top) { MGET(m, M_NOWAIT, MT_DATA); if (m == NULL) { m_freem(top); safestats.st_nombuf++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } len = MLEN; } if (top && totlen >= MINCLSIZE) { - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { *mp = m; m_freem(top); safestats.st_nomcl++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } len = MCLBYTES; } m->m_len = len = min(totlen, len); totlen -= len; *mp = m; mp = &m->m_next; } re->re_dst_m = top; if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, &re->re_dst_map) != 0) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_mbuf(sc->sc_dstdmat, re->re_dst_map, re->re_dst_m, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); re->re_dst_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } if (re->re_src.mapsize > oplen) { /* * There's data following what the * hardware will copy for us. 
If this * isn't just the ICV (that's going to * be written on completion), copy it * to the new mbufs. */ if (!(maccrd && (re->re_src.mapsize-oplen) == 12 && maccrd->crd_inject == oplen)) safe_mcopy(re->re_src_m, re->re_dst_m, oplen); else safestats.st_noicvcopy++; } } } else { safestats.st_badflags++; err = EINVAL; goto errout; } if (re->re_dst.nsegs > 1) { re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr + ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring); for (i = 0; i < re->re_dst_nsegs; i++) { pd = sc->sc_dpfree; KASSERT((pd->pd_flags&3) == 0 || (pd->pd_flags&3) == SAFE_PD_DONE, ("bogus dest particle descriptor; flags %x", pd->pd_flags)); if (++(sc->sc_dpfree) == sc->sc_dpringtop) sc->sc_dpfree = sc->sc_dpring; pd->pd_addr = re->re_dst_segs[i].ds_addr; pd->pd_flags = SAFE_PD_READY; } cmd0 |= SAFE_SA_CMD0_OSCATTER; } else { /* * No need for scatter, reference the operand directly. */ re->re_desc.d_dst = re->re_dst_segs[0].ds_addr; } } /* * All done with setup; fill in the SA command words * and the packet engine descriptor. The operation * is now ready for submission to the hardware. */ sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI; sa->sa_cmd1 = cmd1 | (coffset << SAFE_SA_CMD1_OFFSET_S) | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */ | SAFE_SA_CMD1_SRPCI ; /* * NB: the order of writes is important here. In case the * chip is scanning the ring because of an outstanding request * it might nab this one too. In that case we need to make * sure the setup is complete before we write the length * field of the descriptor as it signals the descriptor is * ready for processing. */ re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI; if (maccrd) re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL; re->re_desc.d_len = oplen | SAFE_PE_LEN_READY | (bypass << SAFE_PE_LEN_BYPASS_S) ; safestats.st_ipackets++; safestats.st_ibytes += oplen; if (++(sc->sc_front) == sc->sc_ringtop) sc->sc_front = sc->sc_ring; /* XXX honor batching */ safe_feed(sc, re); mtx_unlock(&sc->sc_ringmtx); return (0); errout: if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m)) m_freem(re->re_dst_m); if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map); bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); } if (re->re_src_map != NULL) { bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map); bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); } mtx_unlock(&sc->sc_ringmtx); if (err != ERESTART) { crp->crp_etype = err; crypto_done(crp); } else { sc->sc_needwakeup |= CRYPTO_SYMQ; } return (err); } static void safe_callback(struct safe_softc *sc, struct safe_ringentry *re) { struct cryptop *crp = (struct cryptop *)re->re_crp; struct cryptodesc *crd; safestats.st_opackets++; safestats.st_obytes += re->re_dst.mapsize; safe_dma_sync(&sc->sc_ringalloc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) { device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n", re->re_desc.d_csr, re->re_sa.sa_cmd0, re->re_sa.sa_cmd1); safestats.st_peoperr++; crp->crp_etype = EIO; /* something more meaningful?
*/ } if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map); bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); } bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map); bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); /* * If result was written to a different mbuf chain, swap * it in as the return value and reclaim the original. */ if ((crp->crp_flags & CRYPTO_F_IMBUF) && re->re_src_m != re->re_dst_m) { m_freem(re->re_src_m); crp->crp_buf = (caddr_t)re->re_dst_m; } if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) { /* copy out IV for future use */ for (crd = crp->crp_desc; crd; crd = crd->crd_next) { int ivsize; if (crd->crd_alg == CRYPTO_DES_CBC || crd->crd_alg == CRYPTO_3DES_CBC) { ivsize = 2*sizeof(u_int32_t); } else if (crd->crd_alg == CRYPTO_AES_CBC) { ivsize = 4*sizeof(u_int32_t); } else continue; crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip + crd->crd_len - ivsize, ivsize, (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv); break; } } if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) { /* copy out ICV result */ for (crd = crp->crp_desc; crd; crd = crd->crd_next) { if (!(crd->crd_alg == CRYPTO_MD5_HMAC || crd->crd_alg == CRYPTO_SHA1_HMAC || crd->crd_alg == CRYPTO_NULL_HMAC)) continue; if (crd->crd_alg == CRYPTO_SHA1_HMAC) { /* * SHA-1 ICV's are byte-swapped; fix 'em up * before copying them to their destination. */ re->re_sastate.sa_saved_indigest[0] = bswap32(re->re_sastate.sa_saved_indigest[0]); re->re_sastate.sa_saved_indigest[1] = bswap32(re->re_sastate.sa_saved_indigest[1]); re->re_sastate.sa_saved_indigest[2] = bswap32(re->re_sastate.sa_saved_indigest[2]); } crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, sc->sc_sessions[re->re_sesn].ses_mlen, (caddr_t)re->re_sastate.sa_saved_indigest); break; } } crypto_done(crp); } /* * Copy all data past offset from srcm to dstm. */ static void safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset) { u_int j, dlen, slen; caddr_t dptr, sptr; /* * Advance src and dst to offset. */ j = offset; while (j >= 0) { if (srcm->m_len > j) break; j -= srcm->m_len; srcm = srcm->m_next; if (srcm == NULL) return; } sptr = mtod(srcm, caddr_t) + j; slen = srcm->m_len - j; j = offset; while (j >= 0) { if (dstm->m_len > j) break; j -= dstm->m_len; dstm = dstm->m_next; if (dstm == NULL) return; } dptr = mtod(dstm, caddr_t) + j; dlen = dstm->m_len - j; /* * Copy everything that remains. */ for (;;) { j = min(slen, dlen); bcopy(sptr, dptr, j); if (slen == j) { srcm = srcm->m_next; if (srcm == NULL) return; sptr = srcm->m_data; slen = srcm->m_len; } else sptr += j, slen -= j; if (dlen == j) { dstm = dstm->m_next; if (dstm == NULL) return; dptr = dstm->m_data; dlen = dstm->m_len; } else dptr += j, dlen -= j; } } #ifndef SAFE_NO_RNG #define SAFE_RNG_MAXWAIT 1000 static void safe_rng_init(struct safe_softc *sc) { u_int32_t w, v; int i; WRITE_REG(sc, SAFE_RNG_CTRL, 0); /* use default value according to the manual */ WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */ WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); /* * There is a bug in rev 1.0 of the 1140: when the RNG * is brought out of reset, the ready status flag does not * work until the RNG has finished its internal initialization. * * So in order to determine that the device is through its * initialization we must read the data register, using the * status reg in the read in case it is initialized. Then read
Then read * the data register until it changes from the first read. * Once it changes read the data register until it changes * again. At this time the RNG is considered initialized. * This could take between 750ms - 1000ms in time. */ i = 0; w = READ_REG(sc, SAFE_RNG_OUT); do { v = READ_REG(sc, SAFE_RNG_OUT); if (v != w) { w = v; break; } DELAY(10); } while (++i < SAFE_RNG_MAXWAIT); /* Wait Until data changes again */ i = 0; do { v = READ_REG(sc, SAFE_RNG_OUT); if (v != w) break; DELAY(10); } while (++i < SAFE_RNG_MAXWAIT); } static __inline void safe_rng_disable_short_cycle(struct safe_softc *sc) { WRITE_REG(sc, SAFE_RNG_CTRL, READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN); } static __inline void safe_rng_enable_short_cycle(struct safe_softc *sc) { WRITE_REG(sc, SAFE_RNG_CTRL, READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN); } static __inline u_int32_t safe_rng_read(struct safe_softc *sc) { int i; i = 0; while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT) ; return READ_REG(sc, SAFE_RNG_OUT); } static void safe_rng(void *arg) { struct safe_softc *sc = arg; u_int32_t buf[SAFE_RNG_MAXBUFSIZ]; /* NB: maybe move to softc */ u_int maxwords; int i; safestats.st_rng++; /* * Fetch the next block of data. */ maxwords = safe_rngbufsize; if (maxwords > SAFE_RNG_MAXBUFSIZ) maxwords = SAFE_RNG_MAXBUFSIZ; retry: for (i = 0; i < maxwords; i++) buf[i] = safe_rng_read(sc); /* * Check the comparator alarm count and reset the h/w if * it exceeds our threshold. This guards against the * hardware oscillators resonating with external signals. */ if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) { u_int32_t freq_inc, w; DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__, READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm)); safestats.st_rngalarm++; safe_rng_enable_short_cycle(sc); freq_inc = 18; for (i = 0; i < 64; i++) { w = READ_REG(sc, SAFE_RNG_CNFG); freq_inc = ((w + freq_inc) & 0x3fL); w = ((w & ~0x3fL) | freq_inc); WRITE_REG(sc, SAFE_RNG_CNFG, w); WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); (void) safe_rng_read(sc); DELAY(25); if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) { safe_rng_disable_short_cycle(sc); goto retry; } freq_inc = 1; } safe_rng_disable_short_cycle(sc); } else WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); (*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t)); callout_reset(&sc->sc_rngto, hz * (safe_rnginterval ? 
safe_rnginterval : 1), safe_rng, sc); } #endif /* SAFE_NO_RNG */ static void safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static int safe_dma_malloc( struct safe_softc *sc, bus_size_t size, struct safe_dma_alloc *dma, int mapflags ) { int r; r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ sizeof(u_int32_t), 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &dma->dma_tag); if (r != 0) { device_printf(sc->sc_dev, "safe_dma_malloc: " "bus_dma_tag_create failed; error %u\n", r); goto fail_0; } r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { device_printf(sc->sc_dev, "safe_dma_malloc: " "bus_dmamem_alloc failed; size %ju, error %u\n", (uintmax_t)size, r); goto fail_1; } r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, safe_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) { device_printf(sc->sc_dev, "safe_dma_malloc: " "bus_dmamap_load failed; error %u\n", r); goto fail_2; } dma->dma_size = size; return (0); bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); fail_1: bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_tag = NULL; return (r); } static void safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma) { bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); } /* * Resets the board. Values in the registers are left as is * from the reset (i.e. initial values are assigned elsewhere). */ static void safe_reset_board(struct safe_softc *sc) { u_int32_t v; /* * Reset the device. The manual says no delay * is needed between marking and clearing reset. */ v = READ_REG(sc, SAFE_PE_DMACFG) &~ (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET | SAFE_PE_DMACFG_SGRESET); WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET | SAFE_PE_DMACFG_SGRESET); WRITE_REG(sc, SAFE_PE_DMACFG, v); } /* * Initialize registers we need to touch only once. */ static void safe_init_board(struct safe_softc *sc) { u_int32_t v, dwords; v = READ_REG(sc, SAFE_PE_DMACFG); v &=~ SAFE_PE_DMACFG_PEMODE; v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */ | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */ | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */ | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */ | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */ | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */ ; WRITE_REG(sc, SAFE_PE_DMACFG, v); #if 0 /* XXX select byte swap based on host byte order */ WRITE_REG(sc, SAFE_ENDIAN, 0x1b); #endif if (sc->sc_chiprev == SAFE_REV(1,0)) { /* * Avoid large PCI DMA transfers. Rev 1.0 has a bug where * "target mode transfers" done while the chip is DMA'ing * >1020 bytes cause the hardware to lock up. To avoid this * we reduce the max PCI transfer size and use small source * particle descriptors (<= 256 bytes). */
WRITE_REG(sc, SAFE_DMA_CFG, 256); device_printf(sc->sc_dev, "Reduce max DMA size to %u words for rev %u.%u WAR\n", (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff, SAFE_REV_MAJ(sc->sc_chiprev), SAFE_REV_MIN(sc->sc_chiprev)); } /* NB: operands+results are overlaid */ WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr); WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr); /* * Configure ring entry size and number of items in the ring. */ KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0, ("PE ring entry not 32-bit aligned!")); dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t); WRITE_REG(sc, SAFE_PE_RINGCFG, (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE); WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */ WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr); WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr); WRITE_REG(sc, SAFE_PE_PARTSIZE, (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART); /* * NB: destination particles are fixed size. We use * an mbuf cluster and require all results go to * clusters or smaller. */ WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE); /* it's now safe to enable PE mode, do it */ WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE); /* * Configure hardware to use level-triggered interrupts and * to interrupt after each descriptor is processed. */ WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL); WRITE_REG(sc, SAFE_HI_DESC_CNT, 1); WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR); } /* * Init PCI registers */ static void safe_init_pciregs(device_t dev) { } /* * Clean up after a chip crash. * It is assumed that the caller is in splimp() */ static void safe_cleanchip(struct safe_softc *sc) { if (sc->sc_nqchip != 0) { struct safe_ringentry *re = sc->sc_back; while (re != sc->sc_front) { if (re->re_desc.d_csr != 0) safe_free_entry(sc, re); if (++re == sc->sc_ringtop) re = sc->sc_ring; } sc->sc_back = re; sc->sc_nqchip = 0; } } /* * free a safe_q * It is assumed that the caller is within splimp(). */ static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re) { struct cryptop *crp; /* * Free header MCR */ if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m)) m_freem(re->re_dst_m); crp = (struct cryptop *)re->re_crp; re->re_desc.d_csr = 0; crp->crp_etype = EFAULT; crypto_done(crp); return(0); } /* * Routine to reset the chip and clean up. * It is assumed that the caller is in splimp() */ static void safe_totalreset(struct safe_softc *sc) { safe_reset_board(sc); safe_init_board(sc); safe_cleanchip(sc); } /* * Is the operand suitably aligned for direct DMA. Each * segment must be aligned on a 32-bit boundary and all * but the last segment must be a multiple of 4 bytes. */ static int safe_dmamap_aligned(const struct safe_operand *op) { int i; for (i = 0; i < op->nsegs; i++) { if (op->segs[i].ds_addr & 3) return (0); if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3)) return (0); } return (1); } /* * Is the operand suitable for direct DMA as the destination * of an operation. The hardware requires that each ``particle'' * but the last in an operation result have the same size. We * fix that size at SAFE_MAX_DSIZE bytes. This routine returns * 0 if some segment is not a multiple of this size, 1 if all * segments are exactly this size, or 2 if segments are at worst * a multiple of this size.
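 *
 * [Editor's worked example -- not from the original source.]  Taking
 * SAFE_MAX_DSIZE = 2048 purely for illustration: segment lengths
 * {2048, 2048, 500} return 1 (every segment but the last is exactly
 * the particle size); {4096, 300} returns 2 (a multiple, but not
 * equal); {1000, 300} returns 0, and the operand cannot be used
 * directly as a destination.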
*/ static int safe_dmamap_uniform(const struct safe_operand *op) { int result = 1; if (op->nsegs > 0) { int i; for (i = 0; i < op->nsegs-1; i++) { if (op->segs[i].ds_len % SAFE_MAX_DSIZE) return (0); if (op->segs[i].ds_len != SAFE_MAX_DSIZE) result = 2; } } return (result); } #ifdef SAFE_DEBUG static void safe_dump_dmastatus(struct safe_softc *sc, const char *tag) { printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n" , tag , READ_REG(sc, SAFE_DMA_ENDIAN) , READ_REG(sc, SAFE_DMA_SRCADDR) , READ_REG(sc, SAFE_DMA_DSTADDR) , READ_REG(sc, SAFE_DMA_STAT) ); } static void safe_dump_intrstate(struct safe_softc *sc, const char *tag) { printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n" , tag , READ_REG(sc, SAFE_HI_CFG) , READ_REG(sc, SAFE_HI_MASK) , READ_REG(sc, SAFE_HI_DESC_CNT) , READ_REG(sc, SAFE_HU_STAT) , READ_REG(sc, SAFE_HM_STAT) ); } static void safe_dump_ringstate(struct safe_softc *sc, const char *tag) { u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT); /* NB: assume caller has lock on ring */ printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n", tag, estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S), (unsigned long)(sc->sc_back - sc->sc_ring), (unsigned long)(sc->sc_front - sc->sc_ring)); } static void safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re) { int ix, nsegs; ix = re - sc->sc_ring; printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n" , tag , re, ix , re->re_desc.d_csr , re->re_desc.d_src , re->re_desc.d_dst , re->re_desc.d_sa , re->re_desc.d_len ); if (re->re_src.nsegs > 1) { ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) / sizeof(struct safe_pdesc); for (nsegs = re->re_src.nsegs; nsegs; nsegs--) { printf(" spd[%u] %p: %p size %u flags %x" , ix, &sc->sc_spring[ix] , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr , sc->sc_spring[ix].pd_size , sc->sc_spring[ix].pd_flags ); if (sc->sc_spring[ix].pd_size == 0) printf(" (zero!)"); printf("\n"); if (++ix == SAFE_TOTAL_SPART) ix = 0; } } if (re->re_dst.nsegs > 1) { ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) / sizeof(struct safe_pdesc); for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) { printf(" dpd[%u] %p: %p flags %x\n" , ix, &sc->sc_dpring[ix] , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr , sc->sc_dpring[ix].pd_flags ); if (++ix == SAFE_TOTAL_DPART) ix = 0; } } printf("sa: cmd0 %08x cmd1 %08x staterec %x\n", re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec); printf("sa: key %x %x %x %x %x %x %x %x\n" , re->re_sa.sa_key[0] , re->re_sa.sa_key[1] , re->re_sa.sa_key[2] , re->re_sa.sa_key[3] , re->re_sa.sa_key[4] , re->re_sa.sa_key[5] , re->re_sa.sa_key[6] , re->re_sa.sa_key[7] ); printf("sa: indigest %x %x %x %x %x\n" , re->re_sa.sa_indigest[0] , re->re_sa.sa_indigest[1] , re->re_sa.sa_indigest[2] , re->re_sa.sa_indigest[3] , re->re_sa.sa_indigest[4] ); printf("sa: outdigest %x %x %x %x %x\n" , re->re_sa.sa_outdigest[0] , re->re_sa.sa_outdigest[1] , re->re_sa.sa_outdigest[2] , re->re_sa.sa_outdigest[3] , re->re_sa.sa_outdigest[4] ); printf("sr: iv %x %x %x %x\n" , re->re_sastate.sa_saved_iv[0] , re->re_sastate.sa_saved_iv[1] , re->re_sastate.sa_saved_iv[2] , re->re_sastate.sa_saved_iv[3] ); printf("sr: hashbc %u indigest %x %x %x %x %x\n" , re->re_sastate.sa_saved_hashbc , re->re_sastate.sa_saved_indigest[0] , re->re_sastate.sa_saved_indigest[1] , re->re_sastate.sa_saved_indigest[2] , re->re_sastate.sa_saved_indigest[3] , re->re_sastate.sa_saved_indigest[4] ); } static void safe_dump_ring(struct safe_softc *sc, const char *tag) { 
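	/*
	 * NB: takes sc_ringmtx and dumps every request queued between
	 * sc_back and sc_front.
	 */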
mtx_lock(&sc->sc_ringmtx); printf("\nSafeNet Ring State:\n"); safe_dump_intrstate(sc, tag); safe_dump_dmastatus(sc, tag); safe_dump_ringstate(sc, tag); if (sc->sc_nqchip) { struct safe_ringentry *re = sc->sc_back; do { safe_dump_request(sc, tag, re); if (++re == sc->sc_ringtop) re = sc->sc_ring; } while (re != sc->sc_front); } mtx_unlock(&sc->sc_ringmtx); } static int sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS) { char dmode[64]; int error; strncpy(dmode, "", sizeof(dmode) - 1); dmode[sizeof(dmode) - 1] = '\0'; error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req); if (error == 0 && req->newptr != NULL) { struct safe_softc *sc = safec; if (!sc) return EINVAL; if (strncmp(dmode, "dma", 3) == 0) safe_dump_dmastatus(sc, "safe0"); else if (strncmp(dmode, "int", 3) == 0) safe_dump_intrstate(sc, "safe0"); else if (strncmp(dmode, "ring", 4) == 0) safe_dump_ring(sc, "safe0"); else return EINVAL; } return error; } SYSCTL_PROC(_hw_safe, OID_AUTO, dump, CTLTYPE_STRING | CTLFLAG_RW, 0, 0, sysctl_hw_safe_dump, "A", "Dump driver state"); #endif /* SAFE_DEBUG */ Index: head/sys/dev/sbni/if_sbni.c =================================================================== --- head/sys/dev/sbni/if_sbni.c (revision 276749) +++ head/sys/dev/sbni/if_sbni.c (revision 276750) @@ -1,1278 +1,1277 @@ /*- * Copyright (c) 1997-2001 Granch, Ltd. All rights reserved. * Author: Denis I.Timofeev * * Redistributon and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * Device driver for Granch SBNI12 leased line adapters * * Revision 2.0.0 1997/08/06 * Initial revision by Alexey Zverev * * Revision 2.0.1 1997/08/11 * Additional internal statistics support (tx statistics) * * Revision 2.0.2 1997/11/05 * if_bpf bug has been fixed * * Revision 2.0.3 1998/12/20 * Memory leakage has been eliminated in * the sbni_st and sbni_timeout routines. * * Revision 3.0 2000/08/10 by Yaroslav Polyakov * Support for PCI cards. 4.1 modification. * * Revision 3.1 2000/09/12 * Removed extra #defines around bpf functions * * Revision 4.0 2000/11/23 by Denis Timofeev * Completely redesigned the buffer management * * Revision 4.1 2001/01/21 * Support for PCI Dual cards and new SBNI12D-10, -11 Dual/ISA cards * * Written with reference to NE2000 driver developed by David Greenman. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void sbni_init(void *); static void sbni_init_locked(struct sbni_softc *); static void sbni_start(struct ifnet *); static void sbni_start_locked(struct ifnet *); static int sbni_ioctl(struct ifnet *, u_long, caddr_t); static void sbni_stop(struct sbni_softc *); static void handle_channel(struct sbni_softc *); static void card_start(struct sbni_softc *); static int recv_frame(struct sbni_softc *); static void send_frame(struct sbni_softc *); static int upload_data(struct sbni_softc *, u_int, u_int, u_int, u_int32_t); static int skip_tail(struct sbni_softc *, u_int, u_int32_t); static void interpret_ack(struct sbni_softc *, u_int); static void download_data(struct sbni_softc *, u_int32_t *); static void prepare_to_send(struct sbni_softc *); static void drop_xmit_queue(struct sbni_softc *); static int get_rx_buf(struct sbni_softc *); static void indicate_pkt(struct sbni_softc *); static void change_level(struct sbni_softc *); static int check_fhdr(struct sbni_softc *, u_int *, u_int *, u_int *, u_int *, u_int32_t *); static int append_frame_to_pkt(struct sbni_softc *, u_int, u_int32_t); static void timeout_change_level(struct sbni_softc *); static void send_frame_header(struct sbni_softc *, u_int32_t *); static void set_initial_values(struct sbni_softc *, struct sbni_flags); static u_int32_t calc_crc32(u_int32_t, caddr_t, u_int); static timeout_t sbni_timeout; static __inline u_char sbni_inb(struct sbni_softc *, enum sbni_reg); static __inline void sbni_outb(struct sbni_softc *, enum sbni_reg, u_char); static __inline void sbni_insb(struct sbni_softc *, u_char *, u_int); static __inline void sbni_outsb(struct sbni_softc *, u_char *, u_int); static u_int32_t crc32tab[]; #ifdef SBNI_DUAL_COMPOUND static struct mtx headlist_lock; MTX_SYSINIT(headlist_lock, &headlist_lock, "sbni headlist", MTX_DEF); static struct sbni_softc *sbni_headlist; #endif /* -------------------------------------------------------------------------- */ static __inline u_char sbni_inb(struct sbni_softc *sc, enum sbni_reg reg) { return bus_space_read_1( rman_get_bustag(sc->io_res), rman_get_bushandle(sc->io_res), sc->io_off + reg); } static __inline void sbni_outb(struct sbni_softc *sc, enum sbni_reg reg, u_char value) { bus_space_write_1( rman_get_bustag(sc->io_res), rman_get_bushandle(sc->io_res), sc->io_off + reg, value); } static __inline void sbni_insb(struct sbni_softc *sc, u_char *to, u_int len) { bus_space_read_multi_1( rman_get_bustag(sc->io_res), rman_get_bushandle(sc->io_res), sc->io_off + DAT, to, len); } static __inline void sbni_outsb(struct sbni_softc *sc, u_char *from, u_int len) { bus_space_write_multi_1( rman_get_bustag(sc->io_res), rman_get_bushandle(sc->io_res), sc->io_off + DAT, from, len); } /* Valid combinations in CSR0 (for probing): VALID_DECODER 0000,0011,1011,1010 ; 0 ; - TR_REQ ; 1 ; + TR_RDY ; 2 ; - TR_RDY TR_REQ ; 3 ; + BU_EMP ; 4 ; + BU_EMP TR_REQ ; 5 ; + BU_EMP TR_RDY ; 6 ; - BU_EMP TR_RDY TR_REQ ; 7 ; + RC_RDY ; 8 ; + RC_RDY TR_REQ ; 9 ; + RC_RDY TR_RDY ; 10 ; - RC_RDY TR_RDY TR_REQ ; 11 ; - RC_RDY BU_EMP ; 12 ; - RC_RDY BU_EMP TR_REQ ; 13 ; - RC_RDY BU_EMP TR_RDY ; 14 ; - RC_RDY BU_EMP TR_RDY TR_REQ ; 15 ; - */ #define VALID_DECODER (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200) int sbni_probe(struct sbni_softc *sc) { u_char csr0; csr0 = sbni_inb(sc, CSR0); if (csr0 != 0xff && 
csr0 != 0x00) { csr0 &= ~EN_INT; if (csr0 & BU_EMP) csr0 |= EN_INT; if (VALID_DECODER & (1 << (csr0 >> 4))) return (0); } return (ENXIO); } /* * Install interface into kernel networking data structures */ int sbni_attach(struct sbni_softc *sc, int unit, struct sbni_flags flags) { struct ifnet *ifp; u_char csr0; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) return (ENOMEM); sbni_outb(sc, CSR0, 0); set_initial_values(sc, flags); /* Initialize ifnet structure */ ifp->if_softc = sc; if_initname(ifp, "sbni", unit); ifp->if_init = sbni_init; ifp->if_start = sbni_start; ifp->if_ioctl = sbni_ioctl; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); /* report real baud rate */ csr0 = sbni_inb(sc, CSR0); ifp->if_baudrate = (csr0 & 0x01 ? 500000 : 2000000) / (1 << flags.rate); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; mtx_init(&sc->lock, ifp->if_xname, MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->wch, &sc->lock, 0); ether_ifattach(ifp, sc->enaddr); /* device attach does transition from UNCONFIGURED to IDLE state */ if_printf(ifp, "speed %ju, rxl ", (uintmax_t)ifp->if_baudrate); if (sc->delta_rxl) printf("auto\n"); else printf("%d (fixed)\n", sc->cur_rxl_index); return (0); } void sbni_detach(struct sbni_softc *sc) { SBNI_LOCK(sc); sbni_stop(sc); SBNI_UNLOCK(sc); callout_drain(&sc->wch); ether_ifdetach(sc->ifp); if (sc->irq_handle) bus_teardown_intr(sc->dev, sc->irq_res, sc->irq_handle); mtx_destroy(&sc->lock); if_free(sc->ifp); } void sbni_release_resources(struct sbni_softc *sc) { if (sc->irq_res) bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); if (sc->io_res && sc->io_off == 0) bus_release_resource(sc->dev, SYS_RES_IOPORT, sc->io_rid, sc->io_res); } /* -------------------------------------------------------------------------- */ static void sbni_init(void *xsc) { struct sbni_softc *sc; sc = (struct sbni_softc *)xsc; SBNI_LOCK(sc); sbni_init_locked(sc); SBNI_UNLOCK(sc); } static void sbni_init_locked(struct sbni_softc *sc) { struct ifnet *ifp; ifp = sc->ifp; /* * kludge to avoid multiple initializations when more than one * protocol is configured */ if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; card_start(sc); callout_reset(&sc->wch, hz/SBNI_HZ, sbni_timeout, sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* attempt to start output */ sbni_start_locked(ifp); } static void sbni_start(struct ifnet *ifp) { struct sbni_softc *sc = ifp->if_softc; SBNI_LOCK(sc); sbni_start_locked(ifp); SBNI_UNLOCK(sc); } static void sbni_start_locked(struct ifnet *ifp) { struct sbni_softc *sc = ifp->if_softc; if (sc->tx_frameno == 0) prepare_to_send(sc); } static void sbni_stop(struct sbni_softc *sc) { sbni_outb(sc, CSR0, 0); drop_xmit_queue(sc); if (sc->rx_buf_p) { m_freem(sc->rx_buf_p); sc->rx_buf_p = NULL; } callout_stop(&sc->wch); sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } /* -------------------------------------------------------------------------- */ /* interrupt handler */ /* * SBNI12D-10, -11/ISA boards in "common interrupt" mode cannot be treated * as two independent single-channel devices. Each channel appears as an * Ethernet interface, but the interrupt handler must be shared: only the * first ("master") channel's driver registers the handler. Its struct softc * holds a pointer to the "slave" channel's struct softc, and it handles that * channel's interrupts too. * The softc of each successfully attached ISA SBNI board is linked onto a * list. When the next board's driver initializes, it scans this list.
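* (For example, with a dual board's channels at hypothetical I/O addresses
* 0x210 and 0x214, whichever channel attaches second finds the first on the
* list and is linked in as its slave; see connect_to_master().)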
If it finds a softc * with the same irq and an ioaddr differing by 4, it assumes that board * to be the "master". */ void sbni_intr(void *arg) { struct sbni_softc *sc; int repeat; sc = (struct sbni_softc *)arg; do { repeat = 0; SBNI_LOCK(sc); if (sbni_inb(sc, CSR0) & (RC_RDY | TR_RDY)) { handle_channel(sc); repeat = 1; } SBNI_UNLOCK(sc); if (sc->slave_sc) { /* second channel present */ SBNI_LOCK(sc->slave_sc); if (sbni_inb(sc->slave_sc, CSR0) & (RC_RDY | TR_RDY)) { handle_channel(sc->slave_sc); repeat = 1; } SBNI_UNLOCK(sc->slave_sc); } } while (repeat); } static void handle_channel(struct sbni_softc *sc) { int req_ans; u_char csr0; sbni_outb(sc, CSR0, (sbni_inb(sc, CSR0) & ~EN_INT) | TR_REQ); sc->timer_ticks = CHANGE_LEVEL_START_TICKS; for (;;) { csr0 = sbni_inb(sc, CSR0); if ((csr0 & (RC_RDY | TR_RDY)) == 0) break; req_ans = !(sc->state & FL_PREV_OK); if (csr0 & RC_RDY) req_ans = recv_frame(sc); /* * TR_RDY is always set here because we own the marker, * and we set TR_REQ while interrupts were disabled */ csr0 = sbni_inb(sc, CSR0); if ((csr0 & TR_RDY) == 0 || (csr0 & RC_RDY) != 0) if_printf(sc->ifp, "internal error!\n"); /* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */ if (req_ans || sc->tx_frameno != 0) send_frame(sc); else { /* send the marker without any data */ sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) & ~TR_REQ); } } sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) | EN_INT); } /* * Returns 1 if the received frame needs to be acknowledged. * An empty frame received without errors is not acknowledged. */ static int recv_frame(struct sbni_softc *sc) { u_int32_t crc; u_int framelen, frameno, ack; u_int is_first, frame_ok; crc = CRC32_INITIAL; if (check_fhdr(sc, &framelen, &frameno, &ack, &is_first, &crc)) { frame_ok = framelen > 4 ? upload_data(sc, framelen, frameno, is_first, crc) : skip_tail(sc, framelen, crc); if (frame_ok) interpret_ack(sc, ack); } else { framelen = 0; frame_ok = 0; } sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) ^ CT_ZER); if (frame_ok) { sc->state |= FL_PREV_OK; if (framelen > 4) sc->in_stats.all_rx_number++; } else { sc->state &= ~FL_PREV_OK; change_level(sc); sc->in_stats.all_rx_number++; sc->in_stats.bad_rx_number++; } return (!frame_ok || framelen > 4); } static void send_frame(struct sbni_softc *sc) { u_int32_t crc; u_char csr0; crc = CRC32_INITIAL; if (sc->state & FL_NEED_RESEND) { /* if the frame was sent but not ACK'ed, resend it */ if (sc->trans_errors) { sc->trans_errors--; if (sc->framelen != 0) sc->in_stats.resend_tx_number++; } else { /* could not transmit after many attempts */ drop_xmit_queue(sc); goto do_send; } } else sc->trans_errors = TR_ERROR_COUNT; send_frame_header(sc, &crc); sc->state |= FL_NEED_RESEND; /* * FL_NEED_RESEND is cleared on ACK; if an empty frame was * sent, it is cleared in prepare_to_send for the next frame */ if (sc->framelen) { download_data(sc, &crc); sc->in_stats.all_tx_number++; sc->state |= FL_WAIT_ACK; } sbni_outsb(sc, (u_char *)&crc, sizeof crc); do_send: csr0 = sbni_inb(sc, CSR0); sbni_outb(sc, CSR0, csr0 & ~TR_REQ); if (sc->tx_frameno) { /* next frame exists - request to send */ sbni_outb(sc, CSR0, csr0 | TR_REQ); } } static void download_data(struct sbni_softc *sc, u_int32_t *crc_p) { struct mbuf *m; caddr_t data_p; u_int data_len, pos, slice; data_p = NULL; /* initialized to avoid warn */ pos = 0; for (m = sc->tx_buf_p; m != NULL && pos < sc->pktlen; m = m->m_next) { if (pos + m->m_len > sc->outpos) { data_len = m->m_len - (sc->outpos - pos); data_p = mtod(m, caddr_t) + (sc->outpos - pos); goto do_copy; } else pos += m->m_len; } data_len = 0;
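	/*
	 * At this point data_p/data_len describe the chunk of the mbuf
	 * chain at offset sc->outpos; data_len == 0 means outpos is past
	 * the end of the chain and the rest of the frame is zero-padded
	 * in the loop below.
	 */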
do_copy: pos = 0; do { if (data_len) { slice = min(data_len, sc->framelen - pos); sbni_outsb(sc, data_p, slice); *crc_p = calc_crc32(*crc_p, data_p, slice); pos += slice; if (data_len -= slice) data_p += slice; else { do { m = m->m_next; } while (m != NULL && m->m_len == 0); if (m) { data_len = m->m_len; data_p = mtod(m, caddr_t); } } } else { /* frame too short - zero padding */ pos = sc->framelen - pos; while (pos--) { sbni_outb(sc, DAT, 0); *crc_p = CRC32(0, *crc_p); } return; } } while (pos < sc->framelen); } static int upload_data(struct sbni_softc *sc, u_int framelen, u_int frameno, u_int is_first, u_int32_t crc) { int frame_ok; if (is_first) { sc->wait_frameno = frameno; sc->inppos = 0; } if (sc->wait_frameno == frameno) { if (sc->inppos + framelen <= ETHER_MAX_LEN) { frame_ok = append_frame_to_pkt(sc, framelen, crc); /* * if the CRC is right but framelen is incorrect then a * transmitter error occurred... drop the entire packet */ } else if ((frame_ok = skip_tail(sc, framelen, crc)) != 0) { sc->wait_frameno = 0; sc->inppos = 0; if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); /* now skip all frames until is_first != 0 */ } } else frame_ok = skip_tail(sc, framelen, crc); if (is_first && !frame_ok) { /* * The frame was corrupted, but we have already stored * is_first... Drop the entire packet. */ sc->wait_frameno = 0; if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); } return (frame_ok); } static __inline void send_complete(struct sbni_softc *); static __inline void send_complete(struct sbni_softc *sc) { m_freem(sc->tx_buf_p); sc->tx_buf_p = NULL; if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); } static void interpret_ack(struct sbni_softc *sc, u_int ack) { if (ack == FRAME_SENT_OK) { sc->state &= ~FL_NEED_RESEND; if (sc->state & FL_WAIT_ACK) { sc->outpos += sc->framelen; if (--sc->tx_frameno) { sc->framelen = min( sc->maxframe, sc->pktlen - sc->outpos); } else { send_complete(sc); prepare_to_send(sc); } } } sc->state &= ~FL_WAIT_ACK; } /* * Glue the received frame to the previous fragments of the packet. * Indicate the packet once the last frame has been accepted. */ static int append_frame_to_pkt(struct sbni_softc *sc, u_int framelen, u_int32_t crc) { caddr_t p; if (sc->inppos + framelen > ETHER_MAX_LEN) return (0); if (!sc->rx_buf_p && !get_rx_buf(sc)) return (0); p = sc->rx_buf_p->m_data + sc->inppos; sbni_insb(sc, p, framelen); if (calc_crc32(crc, p, framelen) != CRC32_REMAINDER) return (0); sc->inppos += framelen - 4; if (--sc->wait_frameno == 0) { /* last frame received */ indicate_pkt(sc); if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1); } return (1); } /* * Prepare to start output on the adapter. The current priority must be set * to splimp before this routine is called. * The transmitter is actually activated once the marker has been accepted. */ static void prepare_to_send(struct sbni_softc *sc) { struct mbuf *m; u_int len; /* sc->tx_buf_p == NULL here! */ if (sc->tx_buf_p) printf("sbni: memory leak!\n"); sc->outpos = 0; sc->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); for (;;) { IF_DEQUEUE(&sc->ifp->if_snd, sc->tx_buf_p); if (!sc->tx_buf_p) { /* nothing to transmit...
*/ sc->pktlen = 0; sc->tx_frameno = 0; sc->framelen = 0; sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; return; } for (len = 0, m = sc->tx_buf_p; m; m = m->m_next) len += m->m_len; if (len != 0) break; m_freem(sc->tx_buf_p); } if (len < SBNI_MIN_LEN) len = SBNI_MIN_LEN; sc->pktlen = len; sc->tx_frameno = (len + sc->maxframe - 1) / sc->maxframe; sc->framelen = min(len, sc->maxframe); sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) | TR_REQ); sc->ifp->if_drv_flags |= IFF_DRV_OACTIVE; BPF_MTAP(sc->ifp, sc->tx_buf_p); } static void drop_xmit_queue(struct sbni_softc *sc) { struct mbuf *m; if (sc->tx_buf_p) { m_freem(sc->tx_buf_p); sc->tx_buf_p = NULL; if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); } for (;;) { IF_DEQUEUE(&sc->ifp->if_snd, m); if (m == NULL) break; m_freem(m); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); } sc->tx_frameno = 0; sc->framelen = 0; sc->outpos = 0; sc->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } static void send_frame_header(struct sbni_softc *sc, u_int32_t *crc_p) { u_int32_t crc; u_int len_field; u_char value; crc = *crc_p; len_field = sc->framelen + 6; /* CRC + frameno + reserved */ if (sc->state & FL_NEED_RESEND) len_field |= FRAME_RETRY; /* non-first attempt... */ if (sc->outpos == 0) len_field |= FRAME_FIRST; len_field |= (sc->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD; sbni_outb(sc, DAT, SBNI_SIG); value = (u_char)len_field; sbni_outb(sc, DAT, value); crc = CRC32(value, crc); value = (u_char)(len_field >> 8); sbni_outb(sc, DAT, value); crc = CRC32(value, crc); sbni_outb(sc, DAT, sc->tx_frameno); crc = CRC32(sc->tx_frameno, crc); sbni_outb(sc, DAT, 0); crc = CRC32(0, crc); *crc_p = crc; } /* * if the frame tail is not needed (incorrect number or received twice), * it is not stored, but the CRC is still calculated */ static int skip_tail(struct sbni_softc *sc, u_int tail_len, u_int32_t crc) { while (tail_len--) crc = CRC32(sbni_inb(sc, DAT), crc); return (crc == CRC32_REMAINDER); } static int check_fhdr(struct sbni_softc *sc, u_int *framelen, u_int *frameno, u_int *ack, u_int *is_first, u_int32_t *crc_p) { u_int32_t crc; u_char value; crc = *crc_p; if (sbni_inb(sc, DAT) != SBNI_SIG) return (0); value = sbni_inb(sc, DAT); *framelen = (u_int)value; crc = CRC32(value, crc); value = sbni_inb(sc, DAT); *framelen |= ((u_int)value) << 8; crc = CRC32(value, crc); *ack = *framelen & FRAME_ACK_MASK; *is_first = (*framelen & FRAME_FIRST) != 0; if ((*framelen &= FRAME_LEN_MASK) < 6 || *framelen > SBNI_MAX_FRAME - 3) return (0); value = sbni_inb(sc, DAT); *frameno = (u_int)value; crc = CRC32(value, crc); crc = CRC32(sbni_inb(sc, DAT), crc); /* reserved byte */ *framelen -= 2; *crc_p = crc; return (1); } static int get_rx_buf(struct sbni_softc *sc) { struct mbuf *m; MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { if_printf(sc->ifp, "cannot allocate header mbuf\n"); return (0); } /* * We always put the received packet in a single buffer - * either with just an mbuf header or in a cluster attached * to the header. The +2 is to compensate for the alignment * fixup below. */ if (ETHER_MAX_LEN + 2 > MHLEN) { /* Attach an mbuf cluster */ - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); return (0); } } m->m_pkthdr.len = m->m_len = ETHER_MAX_LEN + 2; /* * The +2 is to longword align the start of the real packet. * (sizeof ether_header == 14) * This is important for NFS.
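* A quick worked example: after the m_adj(m, 2) below, the 14-byte
* Ethernet header occupies offsets 2..15, so the IP header starts at
* offset 16, a longword boundary.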
*/ m_adj(m, 2); sc->rx_buf_p = m; return (1); } static void indicate_pkt(struct sbni_softc *sc) { struct ifnet *ifp = sc->ifp; struct mbuf *m; m = sc->rx_buf_p; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = sc->inppos; sc->rx_buf_p = NULL; SBNI_UNLOCK(sc); (*ifp->if_input)(ifp, m); SBNI_LOCK(sc); } /* -------------------------------------------------------------------------- */ /* * Periodically check wire activity and regenerate the marker if the * link has been inactive for a long time. */ static void sbni_timeout(void *xsc) { struct sbni_softc *sc; u_char csr0; sc = (struct sbni_softc *)xsc; SBNI_ASSERT_LOCKED(sc); csr0 = sbni_inb(sc, CSR0); if (csr0 & RC_CHK) { if (sc->timer_ticks) { if (csr0 & (RC_RDY | BU_EMP)) /* receiving not active */ sc->timer_ticks--; } else { sc->in_stats.timeout_number++; if (sc->delta_rxl) timeout_change_level(sc); sbni_outb(sc, CSR1, *(u_char *)&sc->csr1 | PR_RES); csr0 = sbni_inb(sc, CSR0); } } sbni_outb(sc, CSR0, csr0 | RC_CHK); callout_reset(&sc->wch, hz/SBNI_HZ, sbni_timeout, sc); } /* -------------------------------------------------------------------------- */ static void card_start(struct sbni_softc *sc) { sc->timer_ticks = CHANGE_LEVEL_START_TICKS; sc->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); sc->state |= FL_PREV_OK; sc->inppos = 0; sc->wait_frameno = 0; sbni_outb(sc, CSR1, *(u_char *)&sc->csr1 | PR_RES); sbni_outb(sc, CSR0, EN_INT); } /* -------------------------------------------------------------------------- */ static u_char rxl_tab[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08, 0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f }; #define SIZE_OF_TIMEOUT_RXL_TAB 4 static u_char timeout_rxl_tab[] = { 0x03, 0x05, 0x08, 0x0b }; static void set_initial_values(struct sbni_softc *sc, struct sbni_flags flags) { if (flags.fixed_rxl) { sc->delta_rxl = 0; /* disable receive level autodetection */ sc->cur_rxl_index = flags.rxl; } else { sc->delta_rxl = DEF_RXL_DELTA; sc->cur_rxl_index = DEF_RXL; } sc->csr1.rate = flags.fixed_rate ?
flags.rate : DEFAULT_RATE; sc->csr1.rxl = rxl_tab[sc->cur_rxl_index]; sc->maxframe = DEFAULT_FRAME_LEN; /* * generate Ethernet address (0x00ff01xxxxxx) */ *(u_int16_t *) sc->enaddr = htons(0x00ff); if (flags.mac_addr) { *(u_int32_t *) (sc->enaddr + 2) = htonl(flags.mac_addr | 0x01000000); } else { *(u_char *) (sc->enaddr + 2) = 0x01; read_random(sc->enaddr + 3, 3); } } #ifdef SBNI_DUAL_COMPOUND void sbni_add(struct sbni_softc *sc) { mtx_lock(&headlist_lock); sc->link = sbni_headlist; sbni_headlist = sc; mtx_unlock(&headlist_lock); } struct sbni_softc * connect_to_master(struct sbni_softc *sc) { struct sbni_softc *p, *p_prev; mtx_lock(&headlist_lock); for (p = sbni_headlist, p_prev = NULL; p; p_prev = p, p = p->link) { if (rman_get_start(p->io_res) == rman_get_start(sc->io_res) + 4 || rman_get_start(p->io_res) == rman_get_start(sc->io_res) - 4) { p->slave_sc = sc; if (p_prev) p_prev->link = p->link; else sbni_headlist = p->link; mtx_unlock(&headlist_lock); return p; } } mtx_unlock(&headlist_lock); return (NULL); } #endif /* SBNI_DUAL_COMPOUND */ /* Receive level auto-selection */ static void change_level(struct sbni_softc *sc) { if (sc->delta_rxl == 0) /* do not auto-negotiate RxL */ return; if (sc->cur_rxl_index == 0) sc->delta_rxl = 1; else if (sc->cur_rxl_index == 15) sc->delta_rxl = -1; else if (sc->cur_rxl_rcvd < sc->prev_rxl_rcvd) sc->delta_rxl = -sc->delta_rxl; sc->csr1.rxl = rxl_tab[sc->cur_rxl_index += sc->delta_rxl]; sbni_inb(sc, CSR0); /* it needed for PCI cards */ sbni_outb(sc, CSR1, *(u_char *)&sc->csr1); sc->prev_rxl_rcvd = sc->cur_rxl_rcvd; sc->cur_rxl_rcvd = 0; } static void timeout_change_level(struct sbni_softc *sc) { sc->cur_rxl_index = timeout_rxl_tab[sc->timeout_rxl]; if (++sc->timeout_rxl >= 4) sc->timeout_rxl = 0; sc->csr1.rxl = rxl_tab[sc->cur_rxl_index]; sbni_inb(sc, CSR0); sbni_outb(sc, CSR1, *(u_char *)&sc->csr1); sc->prev_rxl_rcvd = sc->cur_rxl_rcvd; sc->cur_rxl_rcvd = 0; } /* -------------------------------------------------------------------------- */ /* * Process an ioctl request. This code needs some work - it looks * pretty ugly. */ static int sbni_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct sbni_softc *sc; struct ifreq *ifr; struct thread *td; struct sbni_in_stats *in_stats; struct sbni_flags flags; int error; sc = ifp->if_softc; ifr = (struct ifreq *)data; td = curthread; error = 0; switch (command) { case SIOCSIFFLAGS: /* * If the interface is marked up and stopped, then start it. * If it is marked down and running, then stop it. */ SBNI_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) sbni_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { sbni_stop(sc); } } SBNI_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware filter * accordingly. 
*/ error = 0; /* if (ifr == NULL) error = EAFNOSUPPORT; */ break; /* * SBNI specific ioctl */ case SIOCGHWFLAGS: /* get flags */ SBNI_LOCK(sc); bcopy((caddr_t)IF_LLADDR(sc->ifp)+3, (caddr_t) &flags, 3); flags.rxl = sc->cur_rxl_index; flags.rate = sc->csr1.rate; flags.fixed_rxl = (sc->delta_rxl == 0); flags.fixed_rate = 1; SBNI_UNLOCK(sc); ifr->ifr_data = *(caddr_t*) &flags; break; case SIOCGINSTATS: in_stats = malloc(sizeof(struct sbni_in_stats), M_DEVBUF, M_WAITOK); SBNI_LOCK(sc); bcopy(&sc->in_stats, in_stats, sizeof(struct sbni_in_stats)); SBNI_UNLOCK(sc); error = copyout(ifr->ifr_data, in_stats, sizeof(struct sbni_in_stats)); free(in_stats, M_DEVBUF); break; case SIOCSHWFLAGS: /* set flags */ /* root only */ error = priv_check(td, PRIV_DRIVER); if (error) break; flags = *(struct sbni_flags*)&ifr->ifr_data; SBNI_LOCK(sc); if (flags.fixed_rxl) { sc->delta_rxl = 0; sc->cur_rxl_index = flags.rxl; } else { sc->delta_rxl = DEF_RXL_DELTA; sc->cur_rxl_index = DEF_RXL; } sc->csr1.rxl = rxl_tab[sc->cur_rxl_index]; sc->csr1.rate = flags.fixed_rate ? flags.rate : DEFAULT_RATE; if (flags.mac_addr) bcopy((caddr_t) &flags, (caddr_t) IF_LLADDR(sc->ifp)+3, 3); /* Don't be afraid... */ sbni_outb(sc, CSR1, *(char*)(&sc->csr1) | PR_RES); SBNI_UNLOCK(sc); break; case SIOCRINSTATS: SBNI_LOCK(sc); if (!(error = priv_check(td, PRIV_DRIVER))) /* root only */ bzero(&sc->in_stats, sizeof(struct sbni_in_stats)); SBNI_UNLOCK(sc); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } /* -------------------------------------------------------------------------- */ static u_int32_t calc_crc32(u_int32_t crc, caddr_t p, u_int len) { while (len--) crc = CRC32(*p++, crc); return (crc); } static u_int32_t crc32tab[] __aligned(8) = { 0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37, 0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E, 0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605, 0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C, 0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53, 0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A, 0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661, 0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278, 0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF, 0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6, 0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD, 0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4, 0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B, 0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82, 0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9, 0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0, 0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7, 0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE, 0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795, 0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C, 0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3, 0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA, 0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1, 0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8, 0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F, 0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76, 0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D, 0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344, 0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B, 0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12, 0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739, 0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320, 0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17, 0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E, 0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525, 0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C, 0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73, 0x2560B8D0, 
0x52678846, 0xCB6ED9FC, 0xBC69E96A, 0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541, 0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158, 0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF, 0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6, 0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED, 0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4, 0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB, 0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2, 0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589, 0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190, 0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87, 0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E, 0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5, 0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC, 0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3, 0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA, 0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1, 0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8, 0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F, 0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856, 0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D, 0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064, 0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B, 0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832, 0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419, 0x660951BA, 0x110E612C, 0x88073096, 0xFF000000 }; Index: head/sys/dev/smc/if_smc.c =================================================================== --- head/sys/dev/smc/if_smc.c (revision 276749) +++ head/sys/dev/smc/if_smc.c (revision 276750) @@ -1,1310 +1,1309 @@ /*- * Copyright (c) 2008 Benno Rice. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for SMSC LAN91C111, may work for older variants. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include #include #include #include #define SMC_LOCK(sc) mtx_lock(&(sc)->smc_mtx) #define SMC_UNLOCK(sc) mtx_unlock(&(sc)->smc_mtx) #define SMC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->smc_mtx, MA_OWNED) #define SMC_INTR_PRIORITY 0 #define SMC_RX_PRIORITY 5 #define SMC_TX_PRIORITY 10 devclass_t smc_devclass; static const char *smc_chip_ids[16] = { NULL, NULL, NULL, /* 3 */ "SMSC LAN91C90 or LAN91C92", /* 4 */ "SMSC LAN91C94", /* 5 */ "SMSC LAN91C95", /* 6 */ "SMSC LAN91C96", /* 7 */ "SMSC LAN91C100", /* 8 */ "SMSC LAN91C100FD", /* 9 */ "SMSC LAN91C110FD or LAN91C111FD", NULL, NULL, NULL, NULL, NULL, NULL }; static void smc_init(void *); static void smc_start(struct ifnet *); static void smc_stop(struct smc_softc *); static int smc_ioctl(struct ifnet *, u_long, caddr_t); static void smc_init_locked(struct smc_softc *); static void smc_start_locked(struct ifnet *); static void smc_reset(struct smc_softc *); static int smc_mii_ifmedia_upd(struct ifnet *); static void smc_mii_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void smc_mii_tick(void *); static void smc_mii_mediachg(struct smc_softc *); static int smc_mii_mediaioctl(struct smc_softc *, struct ifreq *, u_long); static void smc_task_intr(void *, int); static void smc_task_rx(void *, int); static void smc_task_tx(void *, int); static driver_filter_t smc_intr; static timeout_t smc_watchdog; #ifdef DEVICE_POLLING static poll_handler_t smc_poll; #endif /* * MII bit-bang glue */ static uint32_t smc_mii_bitbang_read(device_t); static void smc_mii_bitbang_write(device_t, uint32_t); static const struct mii_bitbang_ops smc_mii_bitbang_ops = { smc_mii_bitbang_read, smc_mii_bitbang_write, { MGMT_MDO, /* MII_BIT_MDO */ MGMT_MDI, /* MII_BIT_MDI */ MGMT_MCLK, /* MII_BIT_MDC */ MGMT_MDOE, /* MII_BIT_DIR_HOST_PHY */ 0, /* MII_BIT_DIR_PHY_HOST */ } }; static __inline void smc_select_bank(struct smc_softc *sc, uint16_t bank) { bus_barrier(sc->smc_reg, BSR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); bus_write_2(sc->smc_reg, BSR, bank & BSR_BANK_MASK); bus_barrier(sc->smc_reg, BSR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } /* Never call this when not in bank 2. 
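* (The MMU command register decodes only in bank 2 on these parts, so a
* read from another bank would hit an unrelated register; the KASSERT
* below guards against exactly that.)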
*/ static __inline void smc_mmu_wait(struct smc_softc *sc) { KASSERT((bus_read_2(sc->smc_reg, BSR) & BSR_BANK_MASK) == 2, ("%s: smc_mmu_wait called when not in bank 2", device_get_nameunit(sc->smc_dev))); while (bus_read_2(sc->smc_reg, MMUCR) & MMUCR_BUSY) ; } static __inline uint8_t smc_read_1(struct smc_softc *sc, bus_size_t offset) { return (bus_read_1(sc->smc_reg, offset)); } static __inline void smc_write_1(struct smc_softc *sc, bus_size_t offset, uint8_t val) { bus_write_1(sc->smc_reg, offset, val); } static __inline uint16_t smc_read_2(struct smc_softc *sc, bus_size_t offset) { return (bus_read_2(sc->smc_reg, offset)); } static __inline void smc_write_2(struct smc_softc *sc, bus_size_t offset, uint16_t val) { bus_write_2(sc->smc_reg, offset, val); } static __inline void smc_read_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap, bus_size_t count) { bus_read_multi_2(sc->smc_reg, offset, datap, count); } static __inline void smc_write_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap, bus_size_t count) { bus_write_multi_2(sc->smc_reg, offset, datap, count); } static __inline void smc_barrier(struct smc_softc *sc, bus_size_t offset, bus_size_t length, int flags) { bus_barrier(sc->smc_reg, offset, length, flags); } int smc_probe(device_t dev) { int rid, type, error; uint16_t val; struct smc_softc *sc; struct resource *reg; sc = device_get_softc(dev); rid = 0; type = SYS_RES_IOPORT; error = 0; if (sc->smc_usemem) type = SYS_RES_MEMORY; reg = bus_alloc_resource(dev, type, &rid, 0, ~0, 16, RF_ACTIVE); if (reg == NULL) { if (bootverbose) device_printf(dev, "could not allocate I/O resource for probe\n"); return (ENXIO); } /* Check for the identification value in the BSR. */ val = bus_read_2(reg, BSR); if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) { if (bootverbose) device_printf(dev, "identification value not in BSR\n"); error = ENXIO; goto done; } /* * Try switching banks and make sure we still get the identification * value. */ bus_write_2(reg, BSR, 0); val = bus_read_2(reg, BSR); if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) { if (bootverbose) device_printf(dev, "identification value not in BSR after write\n"); error = ENXIO; goto done; } #if 0 /* Check the BAR. */ bus_write_2(reg, BSR, 1); val = bus_read_2(reg, BAR); val = BAR_ADDRESS(val); if (rman_get_start(reg) != val) { if (bootverbose) device_printf(dev, "BAR address %x does not match " "I/O resource address %lx\n", val, rman_get_start(reg)); error = ENXIO; goto done; } #endif /* Compare REV against known chip revisions. */ bus_write_2(reg, BSR, 3); val = bus_read_2(reg, REV); val = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT; if (smc_chip_ids[val] == NULL) { if (bootverbose) device_printf(dev, "Unknown chip revision: %d\n", val); error = ENXIO; goto done; } device_set_desc(dev, smc_chip_ids[val]); done: bus_release_resource(dev, type, rid, reg); return (error); } int smc_attach(device_t dev) { int type, error; uint16_t val; u_char eaddr[ETHER_ADDR_LEN]; struct smc_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); error = 0; sc->smc_dev = dev; ifp = sc->smc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { error = ENOSPC; goto done; } mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF); /* Set up watchdog callout. 
*/ callout_init_mtx(&sc->smc_watchdog, &sc->smc_mtx, 0); type = SYS_RES_IOPORT; if (sc->smc_usemem) type = SYS_RES_MEMORY; sc->smc_reg_rid = 0; sc->smc_reg = bus_alloc_resource(dev, type, &sc->smc_reg_rid, 0, ~0, 16, RF_ACTIVE); if (sc->smc_reg == NULL) { error = ENXIO; goto done; } sc->smc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->smc_irq_rid, 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); if (sc->smc_irq == NULL) { error = ENXIO; goto done; } SMC_LOCK(sc); smc_reset(sc); SMC_UNLOCK(sc); smc_select_bank(sc, 3); val = smc_read_2(sc, REV); sc->smc_chip = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT; sc->smc_rev = (val & REV_REV_MASK) >> REV_REV_SHIFT; if (bootverbose) device_printf(dev, "revision %x\n", sc->smc_rev); callout_init_mtx(&sc->smc_mii_tick_ch, &sc->smc_mtx, CALLOUT_RETURNUNLOCKED); if (sc->smc_chip >= REV_CHIP_91110FD) { (void)mii_attach(dev, &sc->smc_miibus, ifp, smc_mii_ifmedia_upd, smc_mii_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (sc->smc_miibus != NULL) { sc->smc_mii_tick = smc_mii_tick; sc->smc_mii_mediachg = smc_mii_mediachg; sc->smc_mii_mediaioctl = smc_mii_mediaioctl; } } smc_select_bank(sc, 1); eaddr[0] = smc_read_1(sc, IAR0); eaddr[1] = smc_read_1(sc, IAR1); eaddr[2] = smc_read_1(sc, IAR2); eaddr[3] = smc_read_1(sc, IAR3); eaddr[4] = smc_read_1(sc, IAR4); eaddr[5] = smc_read_1(sc, IAR5); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = smc_init; ifp->if_ioctl = smc_ioctl; ifp->if_start = smc_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); IFQ_SET_READY(&ifp->if_snd); ifp->if_capabilities = ifp->if_capenable = 0; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ether_ifattach(ifp, eaddr); /* Set up taskqueue */ TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp); TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp); TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp); sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->smc_tq); taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->smc_dev)); /* Mask all interrupts.
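* Nothing can fire before bus_setup_intr() wires up the handler just below.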
*/ sc->smc_mask = 0; smc_write_1(sc, MSK, 0); /* Wire up interrupt */ error = bus_setup_intr(dev, sc->smc_irq, INTR_TYPE_NET|INTR_MPSAFE, smc_intr, NULL, sc, &sc->smc_ih); if (error != 0) goto done; done: if (error != 0) smc_detach(dev); return (error); } int smc_detach(device_t dev) { int type; struct smc_softc *sc; sc = device_get_softc(dev); SMC_LOCK(sc); smc_stop(sc); SMC_UNLOCK(sc); if (sc->smc_ifp != NULL) { ether_ifdetach(sc->smc_ifp); } callout_drain(&sc->smc_watchdog); callout_drain(&sc->smc_mii_tick_ch); #ifdef DEVICE_POLLING if (sc->smc_ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(sc->smc_ifp); #endif if (sc->smc_ih != NULL) bus_teardown_intr(sc->smc_dev, sc->smc_irq, sc->smc_ih); if (sc->smc_tq != NULL) { taskqueue_drain(sc->smc_tq, &sc->smc_intr); taskqueue_drain(sc->smc_tq, &sc->smc_rx); taskqueue_drain(sc->smc_tq, &sc->smc_tx); taskqueue_free(sc->smc_tq); sc->smc_tq = NULL; } if (sc->smc_ifp != NULL) { if_free(sc->smc_ifp); } if (sc->smc_miibus != NULL) { device_delete_child(sc->smc_dev, sc->smc_miibus); bus_generic_detach(sc->smc_dev); } if (sc->smc_reg != NULL) { type = SYS_RES_IOPORT; if (sc->smc_usemem) type = SYS_RES_MEMORY; bus_release_resource(sc->smc_dev, type, sc->smc_reg_rid, sc->smc_reg); } if (sc->smc_irq != NULL) bus_release_resource(sc->smc_dev, SYS_RES_IRQ, sc->smc_irq_rid, sc->smc_irq); if (mtx_initialized(&sc->smc_mtx)) mtx_destroy(&sc->smc_mtx); return (0); } static void smc_start(struct ifnet *ifp) { struct smc_softc *sc; sc = ifp->if_softc; SMC_LOCK(sc); smc_start_locked(ifp); SMC_UNLOCK(sc); } static void smc_start_locked(struct ifnet *ifp) { struct smc_softc *sc; struct mbuf *m; u_int len, npages, spin_count; sc = ifp->if_softc; SMC_ASSERT_LOCKED(sc); if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; if (IFQ_IS_EMPTY(&ifp->if_snd)) return; /* * Grab the next packet. If it's too big, drop it. */ IFQ_DRV_DEQUEUE(&ifp->if_snd, m); len = m_length(m, NULL); len += (len & 1); if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) { if_printf(ifp, "large packet discarded\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); return; /* XXX readcheck? */ } /* * Flag that we're busy. */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->smc_pending = m; /* * Work out how many 256 byte "pages" we need. We have to include the * control data for the packet in this calculation. */ npages = (len + PKT_CTRL_DATA_LEN) >> 8; if (npages == 0) npages = 1; /* * Request memory. */ smc_select_bank(sc, 2); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_TX_ALLOC | npages); /* * Spin briefly to see if the allocation succeeds. */ spin_count = TX_ALLOC_WAIT_TIME; do { if (smc_read_1(sc, IST) & ALLOC_INT) { smc_write_1(sc, ACK, ALLOC_INT); break; } } while (--spin_count); /* * If the allocation is taking too long, unmask the alloc interrupt * and wait. */ if (spin_count == 0) { sc->smc_mask |= ALLOC_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); return; } taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx); } static void smc_task_tx(void *context, int pending) { struct ifnet *ifp; struct smc_softc *sc; struct mbuf *m, *m0; u_int packet, len; int last_len; uint8_t *data; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; SMC_LOCK(sc); if (sc->smc_pending == NULL) { SMC_UNLOCK(sc); goto next_packet; } m = m0 = sc->smc_pending; sc->smc_pending = NULL; smc_select_bank(sc, 2); /* * Check the allocation result. */ packet = smc_read_1(sc, ARR); /* * If the allocation failed, requeue the packet and retry.
*/ if (packet & ARR_FAILED) { IFQ_DRV_PREPEND(&ifp->if_snd, m); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; smc_start_locked(ifp); SMC_UNLOCK(sc); return; } /* * Tell the device to write to our packet number. */ smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_AUTO_INCR); /* * Tell the device how long the packet is (including control data). */ len = m_length(m, 0); len += PKT_CTRL_DATA_LEN; smc_write_2(sc, DATA0, 0); smc_write_2(sc, DATA0, len); /* * Push the data out to the device. */ data = NULL; last_len = 0; for (; m != NULL; m = m->m_next) { data = mtod(m, uint8_t *); smc_write_multi_2(sc, DATA0, (uint16_t *)data, m->m_len / 2); last_len = m->m_len; } /* * Push out the control byte and the odd byte if needed. */ if ((len & 1) != 0 && data != NULL) smc_write_2(sc, DATA0, (CTRL_ODD << 8) | data[last_len - 1]); else smc_write_2(sc, DATA0, 0); /* * Unmask the TX empty interrupt. */ sc->smc_mask |= TX_EMPTY_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); /* * Enqueue the packet. */ smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_ENQUEUE); callout_reset(&sc->smc_watchdog, hz * 2, smc_watchdog, sc); /* * Finish up. */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; SMC_UNLOCK(sc); BPF_MTAP(ifp, m0); m_freem(m0); next_packet: /* * See if there's anything else to do. */ smc_start(ifp); } static void smc_task_rx(void *context, int pending) { u_int packet, status, len; uint8_t *data; struct ifnet *ifp; struct smc_softc *sc; struct mbuf *m, *mhead, *mtail; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; mhead = mtail = NULL; SMC_LOCK(sc); packet = smc_read_1(sc, FIFO_RX); while ((packet & FIFO_EMPTY) == 0) { /* * Grab an mbuf and attach a cluster. */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { break; } - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); break; } /* * Point to the start of the packet. */ smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_READ | PTR_RCV | PTR_AUTO_INCR); /* * Grab status and packet length. */ status = smc_read_2(sc, DATA0); len = smc_read_2(sc, DATA0) & RX_LEN_MASK; len -= 6; if (status & RX_ODDFRM) len += 1; /* * Check for errors. */ if (status & (RX_TOOSHORT | RX_TOOLNG | RX_BADCRC | RX_ALGNERR)) { smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); m_freem(m); break; } /* * Set the mbuf up the way we want it. */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len + 2; /* XXX: Is this right? */ m_adj(m, ETHER_ALIGN); /* * Pull the packet out of the device. Make sure we're in the * right bank first as things may have changed while we were * allocating our mbuf. */ smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 4 | PTR_READ | PTR_RCV | PTR_AUTO_INCR); data = mtod(m, uint8_t *); smc_read_multi_2(sc, DATA0, (uint16_t *)data, len >> 1); if (len & 1) { data += len & ~1; *data = smc_read_1(sc, DATA0); } /* * Tell the device we're done.
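* (MMUCR_CMD_RELEASE hands the packet's buffer pages back to the MMU.)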
*/ smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE); if (m == NULL) { break; } if (mhead == NULL) { mhead = mtail = m; m->m_next = NULL; } else { mtail->m_next = m; mtail = m; } packet = smc_read_1(sc, FIFO_RX); } sc->smc_mask |= RCV_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); SMC_UNLOCK(sc); while (mhead != NULL) { m = mhead; mhead = mhead->m_next; m->m_next = NULL; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); (*ifp->if_input)(ifp, m); } } #ifdef DEVICE_POLLING static void smc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct smc_softc *sc; sc = ifp->if_softc; SMC_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { SMC_UNLOCK(sc); return; } SMC_UNLOCK(sc); if (cmd == POLL_AND_CHECK_STATUS) taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr); } #endif static int smc_intr(void *context) { struct smc_softc *sc; sc = (struct smc_softc *)context; /* * Block interrupts in order to let smc_task_intr to kick in */ smc_write_1(sc, MSK, 0); taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr); return (FILTER_HANDLED); } static void smc_task_intr(void *context, int pending) { struct smc_softc *sc; struct ifnet *ifp; u_int status, packet, counter, tcr; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; SMC_LOCK(sc); smc_select_bank(sc, 2); /* * Find out what interrupts are flagged. */ status = smc_read_1(sc, IST) & sc->smc_mask; /* * Transmit error */ if (status & TX_INT) { /* * Kill off the packet if there is one and re-enable transmit. */ packet = smc_read_1(sc, FIFO_TX); if ((packet & FIFO_EMPTY) == 0) { smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_READ | PTR_AUTO_INCR); tcr = smc_read_2(sc, DATA0); if ((tcr & EPHSR_TX_SUC) == 0) device_printf(sc->smc_dev, "bad packet\n"); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE_PKT); smc_select_bank(sc, 0); tcr = smc_read_2(sc, TCR); tcr |= TCR_TXENA | TCR_PAD_EN; smc_write_2(sc, TCR, tcr); smc_select_bank(sc, 2); taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx); } /* * Ack the interrupt. */ smc_write_1(sc, ACK, TX_INT); } /* * Receive */ if (status & RCV_INT) { smc_write_1(sc, ACK, RCV_INT); sc->smc_mask &= ~RCV_INT; taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_rx); } /* * Allocation */ if (status & ALLOC_INT) { smc_write_1(sc, ACK, ALLOC_INT); sc->smc_mask &= ~ALLOC_INT; taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx); } /* * Receive overrun */ if (status & RX_OVRN_INT) { smc_write_1(sc, ACK, RX_OVRN_INT); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } /* * Transmit empty */ if (status & TX_EMPTY_INT) { smc_write_1(sc, ACK, TX_EMPTY_INT); sc->smc_mask &= ~TX_EMPTY_INT; callout_stop(&sc->smc_watchdog); /* * Update collision stats. */ smc_select_bank(sc, 0); counter = smc_read_2(sc, ECR); smc_select_bank(sc, 2); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, ((counter & ECR_SNGLCOL_MASK) >> ECR_SNGLCOL_SHIFT) + ((counter & ECR_MULCOL_MASK) >> ECR_MULCOL_SHIFT)); /* * See if there are any packets to transmit. */ taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx); } /* * Update the interrupt mask. 
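* When polling is enabled the mask register stays cleared; smc_poll()
* enqueues this task directly instead.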
*/ if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); SMC_UNLOCK(sc); } static uint32_t smc_mii_bitbang_read(device_t dev) { struct smc_softc *sc; uint32_t val; sc = device_get_softc(dev); SMC_ASSERT_LOCKED(sc); KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3, ("%s: smc_mii_bitbang_read called with bank %d (!= 3)", device_get_nameunit(sc->smc_dev), smc_read_2(sc, BSR) & BSR_BANK_MASK)); val = smc_read_2(sc, MGMT); smc_barrier(sc, MGMT, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (val); } static void smc_mii_bitbang_write(device_t dev, uint32_t val) { struct smc_softc *sc; sc = device_get_softc(dev); SMC_ASSERT_LOCKED(sc); KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3, ("%s: smc_mii_bitbang_write called with bank %d (!= 3)", device_get_nameunit(sc->smc_dev), smc_read_2(sc, BSR) & BSR_BANK_MASK)); smc_write_2(sc, MGMT, val); smc_barrier(sc, MGMT, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } int smc_miibus_readreg(device_t dev, int phy, int reg) { struct smc_softc *sc; int val; sc = device_get_softc(dev); SMC_LOCK(sc); smc_select_bank(sc, 3); val = mii_bitbang_readreg(dev, &smc_mii_bitbang_ops, phy, reg); SMC_UNLOCK(sc); return (val); } int smc_miibus_writereg(device_t dev, int phy, int reg, int data) { struct smc_softc *sc; sc = device_get_softc(dev); SMC_LOCK(sc); smc_select_bank(sc, 3); mii_bitbang_writereg(dev, &smc_mii_bitbang_ops, phy, reg, data); SMC_UNLOCK(sc); return (0); } void smc_miibus_statchg(device_t dev) { struct smc_softc *sc; struct mii_data *mii; uint16_t tcr; sc = device_get_softc(dev); mii = device_get_softc(sc->smc_miibus); SMC_LOCK(sc); smc_select_bank(sc, 0); tcr = smc_read_2(sc, TCR); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) tcr |= TCR_SWFDUP; else tcr &= ~TCR_SWFDUP; smc_write_2(sc, TCR, tcr); SMC_UNLOCK(sc); } static int smc_mii_ifmedia_upd(struct ifnet *ifp) { struct smc_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->smc_miibus == NULL) return (ENXIO); mii = device_get_softc(sc->smc_miibus); return (mii_mediachg(mii)); } static void smc_mii_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct smc_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->smc_miibus == NULL) return; mii = device_get_softc(sc->smc_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static void smc_mii_tick(void *context) { struct smc_softc *sc; sc = (struct smc_softc *)context; if (sc->smc_miibus == NULL) return; SMC_UNLOCK(sc); mii_tick(device_get_softc(sc->smc_miibus)); callout_reset(&sc->smc_mii_tick_ch, hz, smc_mii_tick, sc); } static void smc_mii_mediachg(struct smc_softc *sc) { if (sc->smc_miibus == NULL) return; mii_mediachg(device_get_softc(sc->smc_miibus)); } static int smc_mii_mediaioctl(struct smc_softc *sc, struct ifreq *ifr, u_long command) { struct mii_data *mii; if (sc->smc_miibus == NULL) return (EINVAL); mii = device_get_softc(sc->smc_miibus); return (ifmedia_ioctl(sc->smc_ifp, ifr, &mii->mii_media, command)); } static void smc_reset(struct smc_softc *sc) { u_int ctr; SMC_ASSERT_LOCKED(sc); smc_select_bank(sc, 2); /* * Mask all interrupts. */ smc_write_1(sc, MSK, 0); /* * Tell the device to reset. */ smc_select_bank(sc, 0); smc_write_2(sc, RCR, RCR_SOFT_RST); /* * Set up the configuration register. */ smc_select_bank(sc, 1); smc_write_2(sc, CR, CR_EPH_POWER_EN); DELAY(1); /* * Turn off transmit and receive. 
*/ smc_select_bank(sc, 0); smc_write_2(sc, TCR, 0); smc_write_2(sc, RCR, 0); /* * Set up the control register. */ smc_select_bank(sc, 1); ctr = smc_read_2(sc, CTR); ctr |= CTR_LE_ENABLE | CTR_AUTO_RELEASE; smc_write_2(sc, CTR, ctr); /* * Reset the MMU. */ smc_select_bank(sc, 2); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_MMU_RESET); } static void smc_enable(struct smc_softc *sc) { struct ifnet *ifp; SMC_ASSERT_LOCKED(sc); ifp = sc->smc_ifp; /* * Set up the receive/PHY control register. */ smc_select_bank(sc, 0); smc_write_2(sc, RPCR, RPCR_ANEG | (RPCR_LED_LINK_ANY << RPCR_LSA_SHIFT) | (RPCR_LED_ACT_ANY << RPCR_LSB_SHIFT)); /* * Set up the transmit and receive control registers. */ smc_write_2(sc, TCR, TCR_TXENA | TCR_PAD_EN); smc_write_2(sc, RCR, RCR_RXEN | RCR_STRIP_CRC); /* * Set up the interrupt mask. */ smc_select_bank(sc, 2); sc->smc_mask = EPH_INT | RX_OVRN_INT | RCV_INT | TX_INT; if ((ifp->if_capenable & IFCAP_POLLING) != 0) smc_write_1(sc, MSK, sc->smc_mask); } static void smc_stop(struct smc_softc *sc) { SMC_ASSERT_LOCKED(sc); /* * Turn off callouts. */ callout_stop(&sc->smc_watchdog); callout_stop(&sc->smc_mii_tick_ch); /* * Mask all interrupts. */ smc_select_bank(sc, 2); sc->smc_mask = 0; smc_write_1(sc, MSK, 0); #ifdef DEVICE_POLLING ether_poll_deregister(sc->smc_ifp); sc->smc_ifp->if_capenable &= ~IFCAP_POLLING; sc->smc_ifp->if_capenable &= ~IFCAP_POLLING_NOCOUNT; #endif /* * Disable transmit and receive. */ smc_select_bank(sc, 0); smc_write_2(sc, TCR, 0); smc_write_2(sc, RCR, 0); sc->smc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } static void smc_watchdog(void *arg) { struct smc_softc *sc; sc = (struct smc_softc *)arg; device_printf(sc->smc_dev, "watchdog timeout\n"); taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr); } static void smc_init(void *context) { struct smc_softc *sc; sc = (struct smc_softc *)context; SMC_LOCK(sc); smc_init_locked(sc); SMC_UNLOCK(sc); } static void smc_init_locked(struct smc_softc *sc) { struct ifnet *ifp; SMC_ASSERT_LOCKED(sc); ifp = sc->smc_ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; smc_reset(sc); smc_enable(sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; smc_start_locked(ifp); if (sc->smc_mii_tick != NULL) callout_reset(&sc->smc_mii_tick_ch, hz, sc->smc_mii_tick, sc); #ifdef DEVICE_POLLING SMC_UNLOCK(sc); ether_poll_register(smc_poll, ifp); SMC_LOCK(sc); ifp->if_capenable |= IFCAP_POLLING; ifp->if_capenable |= IFCAP_POLLING_NOCOUNT; #endif } static int smc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct smc_softc *sc; int error; sc = ifp->if_softc; error = 0; switch (cmd) { case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { SMC_LOCK(sc); smc_stop(sc); SMC_UNLOCK(sc); } else { smc_init(sc); if (sc->smc_mii_mediachg != NULL) sc->smc_mii_mediachg(sc); } break; case SIOCADDMULTI: case SIOCDELMULTI: /* XXX SMC_LOCK(sc); smc_setmcast(sc); SMC_UNLOCK(sc); */ error = EINVAL; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->smc_mii_mediaioctl == NULL) { error = EINVAL; break; } sc->smc_mii_mediaioctl(sc, (struct ifreq *)data, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } Index: head/sys/dev/sn/if_sn.c =================================================================== --- head/sys/dev/sn/if_sn.c (revision 276749) +++ head/sys/dev/sn/if_sn.c (revision 276750) @@ -1,1440 +1,1435 @@ /*- * Copyright (c) 1996 Gardner Buchanan * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Gardner Buchanan. * 4. The name of Gardner Buchanan may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * This is a driver for SMC's 9000 series of Ethernet adapters. * * This FreeBSD driver is derived from the smc9194 Linux driver by * Erik Stahlman and is Copyright (C) 1996 by Erik Stahlman. * This driver also shamelessly borrows from the FreeBSD ep driver * which is Copyright (C) 1994 Herb Peyerl * All rights reserved. * * It is set up for my SMC91C92 equipped Ampro LittleBoard embedded * PC. It is adapted from Erik Stahlman's Linux driver which worked * with his EFA Info*Express SVC VLB adaptor. According to SMC's databook, * it will work for the entire SMC 9xxx series. (Ha Ha) * * "Features" of the SMC chip: * 4608 byte packet memory. (for the 91C92. Others have more) * EEPROM for configuration * AUI/TP selection * * Authors: * Erik Stahlman erik@vt.edu * Herb Peyerl hpeyerl@novatel.ca * Andres Vega Garcia avega@sophia.inria.fr * Serge Babkin babkin@hq.icb.chel.su * Gardner Buchanan gbuchanan@shl.com * * Sources: * o SMC databook * o "smc9194.c:v0.10(FIXED) 02/15/96 by Erik Stahlman (erik@vt.edu)" * o "if_ep.c,v 1.19 1995/01/24 20:53:45 davidg Exp" * * Known Bugs: * o Setting of the hardware address isn't supported. * o Hardware padding isn't used. */ /* * Modifications for Megahertz X-Jack Ethernet Card (XJ-10BT) * * Copyright (c) 1996 by Tatsumi Hosokawa * BSD-nomads, Tokyo, Japan. 
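 */

/*
 * The if_sn.c hunks below are part of this revision's mbuf(9) API
 * conversion: MCLGET() now returns an int that is non-zero on success,
 * so callers no longer need the separate (m_flags & M_EXT) test the old
 * macro required.  A minimal sketch of the new idiom as applied in
 * snread() below (hypothetical helper, for illustration only):
 *
 *	static struct mbuf *
 *	get_packet_buffer(void)
 *	{
 *		struct mbuf *m;
 *
 *		MGETHDR(m, M_NOWAIT, MT_DATA);
 *		if (m == NULL)
 *			return (NULL);
 *		if (!(MCLGET(m, M_NOWAIT))) {
 *			/* Old code: MCLGET(m, M_NOWAIT); followed by a
 *			 * check of (m->m_flags & M_EXT) for failure. */
 *			m_freem(m);
 *			return (NULL);
 *		}
 *		return (m);
 *	}
 */

/*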
*/ /* * Multicast support by Kei TANAKA * Special thanks to itojun@itojun.org */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include /* Exported variables */ devclass_t sn_devclass; static int snioctl(struct ifnet * ifp, u_long, caddr_t); static void snresume(struct ifnet *); static void snintr_locked(struct sn_softc *); static void sninit_locked(void *); static void snstart_locked(struct ifnet *); static void sninit(void *); static void snread(struct ifnet *); static void snstart(struct ifnet *); static void snstop(struct sn_softc *); static void snwatchdog(void *); static void sn_setmcast(struct sn_softc *); static int sn_getmcf(struct ifnet *ifp, u_char *mcf); /* I (GB) have been unlucky getting the hardware padding * to work properly. */ #define SW_PAD static const char *chip_ids[15] = { NULL, NULL, NULL, /* 3 */ "SMC91C90/91C92", /* 4 */ "SMC91C94/91C96", /* 5 */ "SMC91C95", NULL, /* 7 */ "SMC91C100", /* 8 */ "SMC91C100FD", /* 9 */ "SMC91C110", NULL, NULL, NULL, NULL, NULL }; int sn_attach(device_t dev) { struct sn_softc *sc = device_get_softc(dev); struct ifnet *ifp; uint16_t i; uint8_t *p; int rev; uint16_t address; int err; u_char eaddr[6]; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return (ENOSPC); } SN_LOCK_INIT(sc); callout_init_mtx(&sc->watchdog, &sc->sc_mtx, 0); snstop(sc); sc->pages_wanted = -1; if (bootverbose || 1) { SMC_SELECT_BANK(sc, 3); rev = (CSR_READ_2(sc, REVISION_REG_W) >> 4) & 0xf; if (chip_ids[rev]) device_printf(dev, " %s ", chip_ids[rev]); else device_printf(dev, " unsupported chip: rev %d ", rev); SMC_SELECT_BANK(sc, 1); i = CSR_READ_2(sc, CONFIG_REG_W); printf("%s\n", i & CR_AUI_SELECT ? "AUI" : "UTP"); } /* * Read the station address from the chip. The MAC address is bank 1, * regs 4 - 9 */ SMC_SELECT_BANK(sc, 1); p = (uint8_t *) eaddr; for (i = 0; i < 6; i += 2) { address = CSR_READ_2(sc, IAR_ADDR0_REG_W + i); p[i + 1] = address >> 8; p[i] = address & 0xFF; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = snstart; ifp->if_ioctl = snioctl; ifp->if_init = sninit; ifp->if_baudrate = 10000000; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ether_ifattach(ifp, eaddr); /* * Activate the interrupt so we can get card interrupts. This * needs to be done last so that we don't have/hold the lock * during startup to avoid LORs in the network layer. */ if ((err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, sn_intr, sc, &sc->intrhand)) != 0) { sn_detach(dev); return err; } return 0; } int sn_detach(device_t dev) { struct sn_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->ifp; ether_ifdetach(ifp); SN_LOCK(sc); snstop(sc); SN_UNLOCK(sc); callout_drain(&sc->watchdog); sn_deactivate(dev); if_free(ifp); SN_LOCK_DESTROY(sc); return 0; } static void sninit(void *xsc) { struct sn_softc *sc = xsc; SN_LOCK(sc); sninit_locked(sc); SN_UNLOCK(sc); } /* * Reset and initialize the chip */ static void sninit_locked(void *xsc) { struct sn_softc *sc = xsc; struct ifnet *ifp = sc->ifp; int flags; int mask; SN_ASSERT_LOCKED(sc); /* * This resets the registers mostly to defaults, but doesn't affect * EEPROM. 
After the reset cycle, we pause briefly for the chip to * be happy. */ SMC_SELECT_BANK(sc, 0); CSR_WRITE_2(sc, RECV_CONTROL_REG_W, RCR_SOFTRESET); SMC_DELAY(sc); CSR_WRITE_2(sc, RECV_CONTROL_REG_W, 0x0000); SMC_DELAY(sc); SMC_DELAY(sc); CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, 0x0000); /* * Set the control register to automatically release successfully * transmitted packets (making the best use of our limited * memory) and to enable the EPH interrupt on certain TX errors. */ SMC_SELECT_BANK(sc, 1); CSR_WRITE_2(sc, CONTROL_REG_W, (CTR_AUTO_RELEASE | CTR_TE_ENABLE | CTR_CR_ENABLE | CTR_LE_ENABLE)); /* Set squelch level to 240mV (default 480mV) */ flags = CSR_READ_2(sc, CONFIG_REG_W); flags |= CR_SET_SQLCH; CSR_WRITE_2(sc, CONFIG_REG_W, flags); /* * Reset the MMU and wait for it to be un-busy. */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_RESET); while (CSR_READ_2(sc, MMU_CMD_REG_W) & MMUCR_BUSY) /* NOTHING */ ; /* * Disable all interrupts. */ CSR_WRITE_1(sc, INTR_MASK_REG_B, 0x00); sn_setmcast(sc); /* * Set the transmitter control. We want it enabled. */ flags = TCR_ENABLE; #ifndef SW_PAD /* * I (GB) have been unlucky getting this to work. */ flags |= TCR_PAD_ENABLE; #endif /* SW_PAD */ CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, flags); /* * Now, enable interrupts. */ SMC_SELECT_BANK(sc, 2); mask = IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT | IM_TX_INT; CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; sc->pages_wanted = -1; /* * Mark the interface running but not active. */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->watchdog, hz, snwatchdog, sc); /* * Attempt to push out any waiting packets. */ snstart_locked(ifp); } static void snstart(struct ifnet *ifp) { struct sn_softc *sc = ifp->if_softc; SN_LOCK(sc); snstart_locked(ifp); SN_UNLOCK(sc); } static void snstart_locked(struct ifnet *ifp) { struct sn_softc *sc = ifp->if_softc; u_int len; struct mbuf *m; struct mbuf *top; int pad; int mask; uint16_t length; uint16_t numPages; uint8_t packet_no; int time_out; int junk = 0; SN_ASSERT_LOCKED(sc); if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; if (sc->pages_wanted != -1) { if_printf(ifp, "snstart() while memory allocation pending\n"); return; } startagain: /* * Sneak a peek at the next packet */ m = ifp->if_snd.ifq_head; if (m == 0) return; /* * Compute the frame length and set pad to give an overall even * number of bytes. Below we assume that the packet length is even. */ for (len = 0, top = m; m; m = m->m_next) len += m->m_len; pad = (len & 1); /* * We drop packets that are too large. Perhaps we should truncate * them instead? */ if (len + pad > ETHER_MAX_LEN - ETHER_CRC_LEN) { if_printf(ifp, "large packet discarded (A)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); IFQ_DRV_DEQUEUE(&ifp->if_snd, m); m_freem(m); goto readcheck; } #ifdef SW_PAD /* * If HW padding is not turned on, then pad to ETHER_MIN_LEN. */ if (len < ETHER_MIN_LEN - ETHER_CRC_LEN) pad = ETHER_MIN_LEN - ETHER_CRC_LEN - len; #endif /* SW_PAD */ length = pad + len; /* * The MMU wants the number of pages to be the number of 256-byte * 'pages', minus 1 (a packet can never have 0 pages). We also * include space for the status word, byte count and control bytes in * the allocation request. */ numPages = (length + 6) >> 8; /* * Now, try to allocate the memory. */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_ALLOC | numPages); /* * Wait a short amount of time to see if the allocation request * completes.
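 */

/*
 * A worked example of the allocation request above: a maximum-length
 * frame has len = ETHER_MAX_LEN - ETHER_CRC_LEN = 1514 bytes and
 * pad = 0, so length + 6 = 1520 and numPages = 1520 >> 8 = 5.  The MMU
 * adds the implied extra page, granting 6 * 256 = 1536 bytes, enough
 * for the 1520 bytes of status word, byte count, data and control
 * bytes.  A minimum 60-byte (software-padded) frame gives 66 >> 8 = 0,
 * i.e. a single 256-byte page.
 */

/*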
Otherwise, I enable the interrupt and wait for * completion asynchronously. */ time_out = MEMORY_WAIT_TIME; do { if (CSR_READ_1(sc, INTR_STAT_REG_B) & IM_ALLOC_INT) break; } while (--time_out); if (!time_out || junk > 10) { /* * No memory now. Oh well, wait until the chip finds memory * later. Remember how many pages we were asking for and * enable the allocation completion interrupt. Also set a * watchdog in case we miss the interrupt. We mark the * interface active since there is no point in attempting an * snstart() until after the memory is available. */ mask = CSR_READ_1(sc, INTR_MASK_REG_B) | IM_ALLOC_INT; CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; sc->timer = 1; ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->pages_wanted = numPages; return; } /* * The memory allocation completed. Check the results. */ packet_no = CSR_READ_1(sc, ALLOC_RESULT_REG_B); if (packet_no & ARR_FAILED) { if (junk++ > 10) if_printf(ifp, "Memory allocation failed\n"); goto startagain; } /* * We have a packet number, so tell the card to use it. */ CSR_WRITE_1(sc, PACKET_NUM_REG_B, packet_no); /* * Point to the beginning of the packet */ CSR_WRITE_2(sc, POINTER_REG_W, PTR_AUTOINC | 0x0000); /* * Send the packet length (+6 for status, length and control byte) * and the status word (set to zeros) */ CSR_WRITE_2(sc, DATA_REG_W, 0); CSR_WRITE_1(sc, DATA_REG_B, (length + 6) & 0xFF); CSR_WRITE_1(sc, DATA_REG_B, (length + 6) >> 8); /* * Get the packet from the kernel. This will include the Ethernet * frame header, MAC addresses, etc. */ IFQ_DRV_DEQUEUE(&ifp->if_snd, m); /* * Push out the data to the card. */ for (top = m; m != 0; m = m->m_next) { /* * Push out words. */ CSR_WRITE_MULTI_2(sc, DATA_REG_W, mtod(m, uint16_t *), m->m_len / 2); /* * Push out remaining byte. */ if (m->m_len & 1) CSR_WRITE_1(sc, DATA_REG_B, *(mtod(m, caddr_t) + m->m_len - 1)); } /* * Push out padding. */ while (pad > 1) { CSR_WRITE_2(sc, DATA_REG_W, 0); pad -= 2; } if (pad) CSR_WRITE_1(sc, DATA_REG_B, 0); /* * Push out the control byte and unused packet byte. The control byte * is 0, meaning the packet is of even length and no special CRC * handling is desired. */ CSR_WRITE_2(sc, DATA_REG_W, 0); /* * Enable the interrupts and let the chipset deal with it. Also set * a watchdog in case we miss the interrupt. */ mask = CSR_READ_1(sc, INTR_MASK_REG_B) | (IM_TX_INT | IM_TX_EMPTY_INT); CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_ENQUEUE); ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->timer = 1; BPF_MTAP(ifp, top); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); m_freem(top); readcheck: /* * Is another packet coming in? We don't want to overflow the tiny * RX FIFO. If nothing has arrived then attempt to queue another * transmit packet. */ if (CSR_READ_2(sc, FIFO_PORTS_REG_W) & FIFO_REMPTY) goto startagain; return; } /* Resume a packet transmit operation after a memory allocation * has completed. * * This is basically a hacked-up copy of snstart() which handles * a completed memory allocation the same way snstart() does. * It then passes control to snstart() to handle any other queued * packets.
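 */

/*
 * The writes in snstart_locked() above build the following layout in
 * the adapter's packet memory; the 6 bytes of framing are the reason
 * for the "+6" in both the byte count and the page computation:
 *
 *	+--------------+----------------+------------------+--------------+
 *	| status word  | byte count     | frame data + pad | control word |
 *	| (2, zeroed)  | (2, length+6)  | (length bytes)   | (2, zeroed)  |
 *	+--------------+----------------+------------------+--------------+
 *
 * A zero control byte marks the packet as even-length with no special
 * CRC handling, which is why the frame is padded to an even size first.
 */

/*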
*/ static void snresume(struct ifnet *ifp) { struct sn_softc *sc = ifp->if_softc; u_int len; struct mbuf *m; struct mbuf *top; int pad; int mask; uint16_t length; uint16_t numPages; uint16_t pages_wanted; uint8_t packet_no; if (sc->pages_wanted < 0) return; pages_wanted = sc->pages_wanted; sc->pages_wanted = -1; /* * Sneak a peek at the next packet */ m = ifp->if_snd.ifq_head; if (m == 0) { if_printf(ifp, "snresume() with nothing to send\n"); return; } /* * Compute the frame length and set pad to give an overall even * number of bytes. Below we assume that the packet length is even. */ for (len = 0, top = m; m; m = m->m_next) len += m->m_len; pad = (len & 1); /* * We drop packets that are too large. Perhaps we should truncate * them instead? */ if (len + pad > ETHER_MAX_LEN - ETHER_CRC_LEN) { if_printf(ifp, "large packet discarded (B)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); IFQ_DRV_DEQUEUE(&ifp->if_snd, m); m_freem(m); return; } #ifdef SW_PAD /* * If HW padding is not turned on, then pad to ETHER_MIN_LEN. */ if (len < ETHER_MIN_LEN - ETHER_CRC_LEN) pad = ETHER_MIN_LEN - ETHER_CRC_LEN - len; #endif /* SW_PAD */ length = pad + len; /* * The MMU wants the number of pages to be the number of 256-byte * 'pages', minus 1 (a packet can never have 0 pages). We also * include space for the status word, byte count and control bytes in * the allocation request. */ numPages = (length + 6) >> 8; SMC_SELECT_BANK(sc, 2); /* * The memory allocation completed. Check the results. If it failed, * we simply set a watchdog timer and hope for the best. */ packet_no = CSR_READ_1(sc, ALLOC_RESULT_REG_B); if (packet_no & ARR_FAILED) { if_printf(ifp, "Memory allocation failed. Weird.\n"); sc->timer = 1; goto try_start; } /* * We have a packet number, so tell the card to use it. */ CSR_WRITE_1(sc, PACKET_NUM_REG_B, packet_no); /* * Now, numPages should match the pages_wanted recorded when the * memory allocation was initiated. */ if (pages_wanted != numPages) { if_printf(ifp, "memory allocation wrong size. Weird.\n"); /* * If the allocation was the wrong size we simply release the * memory once it is granted. Wait for the MMU to be un-busy. */ while (CSR_READ_2(sc, MMU_CMD_REG_W) & MMUCR_BUSY) /* NOTHING */ ; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_FREEPKT); return; } /* * Point to the beginning of the packet */ CSR_WRITE_2(sc, POINTER_REG_W, PTR_AUTOINC | 0x0000); /* * Send the packet length (+6 for status, length and control byte) * and the status word (set to zeros) */ CSR_WRITE_2(sc, DATA_REG_W, 0); CSR_WRITE_1(sc, DATA_REG_B, (length + 6) & 0xFF); CSR_WRITE_1(sc, DATA_REG_B, (length + 6) >> 8); /* * Get the packet from the kernel. This will include the Ethernet * frame header, MAC addresses, etc. */ IFQ_DRV_DEQUEUE(&ifp->if_snd, m); /* * Push out the data to the card. */ for (top = m; m != 0; m = m->m_next) { /* * Push out words. */ CSR_WRITE_MULTI_2(sc, DATA_REG_W, mtod(m, uint16_t *), m->m_len / 2); /* * Push out remaining byte. */ if (m->m_len & 1) CSR_WRITE_1(sc, DATA_REG_B, *(mtod(m, caddr_t) + m->m_len - 1)); } /* * Push out padding. */ while (pad > 1) { CSR_WRITE_2(sc, DATA_REG_W, 0); pad -= 2; } if (pad) CSR_WRITE_1(sc, DATA_REG_B, 0); /* * Push out the control byte and unused packet byte. The control byte * is 0, meaning the packet is of even length and no special CRC * handling is desired. */ CSR_WRITE_2(sc, DATA_REG_W, 0); /* * Enable the interrupts and let the chipset deal with it. Also set * a watchdog in case we miss the interrupt.
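 */

/*
 * The interrupt mask is always updated with a read-modify-write of
 * INTR_MASK_REG_B plus a refresh of the sc->intr_mask soft copy, as in
 * the code that follows.  The recurring idiom, reduced to a
 * hypothetical helper (illustration only, not part of the driver):
 *
 *	static void
 *	sn_intr_enable(struct sn_softc *sc, uint8_t bits)
 *	{
 *		uint8_t mask;
 *
 *		mask = CSR_READ_1(sc, INTR_MASK_REG_B) | bits;
 *		CSR_WRITE_1(sc, INTR_MASK_REG_B, mask);
 *		sc->intr_mask = mask;	/* keep the soft copy in sync */
 *	}
 */

/*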
*/ mask = CSR_READ_1(sc, INTR_MASK_REG_B) | (IM_TX_INT | IM_TX_EMPTY_INT); CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_ENQUEUE); BPF_MTAP(ifp, top); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); m_freem(top); try_start: /* * Now pass control to snstart() to queue any additional packets */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; snstart_locked(ifp); /* * We've sent something, so we're active. Set a watchdog in case the * TX_EMPTY interrupt is lost. */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->timer = 1; return; } void sn_intr(void *arg) { struct sn_softc *sc = (struct sn_softc *) arg; SN_LOCK(sc); snintr_locked(sc); SN_UNLOCK(sc); } static void snintr_locked(struct sn_softc *sc) { int status, interrupts; struct ifnet *ifp = sc->ifp; /* * Chip state registers */ uint8_t mask; uint8_t packet_no; uint16_t tx_status; uint16_t card_stats; /* * Clear the watchdog. */ sc->timer = 0; SMC_SELECT_BANK(sc, 2); /* * Obtain the current interrupt mask and clear the hardware mask * while servicing interrupts. */ mask = CSR_READ_1(sc, INTR_MASK_REG_B); CSR_WRITE_1(sc, INTR_MASK_REG_B, 0x00); /* * Get the set of interrupts which occurred and eliminate any which * are masked. */ interrupts = CSR_READ_1(sc, INTR_STAT_REG_B); status = interrupts & mask; /* * Now, process each of the interrupt types. */ /* * Receive Overrun. */ if (status & IM_RX_OVRN_INT) { /* * Acknowledge Interrupt */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_1(sc, INTR_ACK_REG_B, IM_RX_OVRN_INT); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } /* * Got a packet. */ if (status & IM_RCV_INT) { int packet_number; SMC_SELECT_BANK(sc, 2); packet_number = CSR_READ_2(sc, FIFO_PORTS_REG_W); if (packet_number & FIFO_REMPTY) { /* * we got called, but nothing was on the FIFO */ printf("sn: Receive interrupt with nothing on FIFO\n"); goto out; } snread(ifp); } /* * An on-card memory allocation came through. */ if (status & IM_ALLOC_INT) { /* * Disable this interrupt. */ mask &= ~IM_ALLOC_INT; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; snresume(ifp); } /* * TX Completion. Handle a transmit error message. This will only be * called when there is an error, because of the AUTO_RELEASE mode. */ if (status & IM_TX_INT) { /* * Acknowledge Interrupt */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_1(sc, INTR_ACK_REG_B, IM_TX_INT); packet_no = CSR_READ_2(sc, FIFO_PORTS_REG_W); packet_no &= FIFO_TX_MASK; /* * select this as the packet to read from */ CSR_WRITE_1(sc, PACKET_NUM_REG_B, packet_no); /* * Position the pointer to the first word from this packet */ CSR_WRITE_2(sc, POINTER_REG_W, PTR_AUTOINC | PTR_READ | 0x0000); /* * Fetch the TX status word. The value found here will be a * copy of the EPH_STATUS_REG_W at the time the transmit * failed. */ tx_status = CSR_READ_2(sc, DATA_REG_W); if (tx_status & EPHSR_TX_SUC) { device_printf(sc->dev, "Successful packet caused interrupt\n"); } else { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } if (tx_status & EPHSR_LATCOL) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); /* * Some of these errors will have disabled transmit. * Re-enable transmit now. */ SMC_SELECT_BANK(sc, 0); #ifdef SW_PAD CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, TCR_ENABLE); #else CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, TCR_ENABLE | TCR_PAD_ENABLE); #endif /* SW_PAD */ /* * kill the failed packet. Wait for the MMU to be un-busy. */ SMC_SELECT_BANK(sc, 2); while (CSR_READ_2(sc, MMU_CMD_REG_W) & MMUCR_BUSY) /* NOTHING */ ; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_FREEPKT); /* * Attempt to queue more transmits.
*/ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; snstart_locked(ifp); } /* * Transmit underrun. We use this opportunity to update transmit * statistics from the card. */ if (status & IM_TX_EMPTY_INT) { /* * Acknowledge Interrupt */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_1(sc, INTR_ACK_REG_B, IM_TX_EMPTY_INT); /* * Disable this interrupt. */ mask &= ~IM_TX_EMPTY_INT; SMC_SELECT_BANK(sc, 0); card_stats = CSR_READ_2(sc, COUNTER_REG_W); /* * Single collisions */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, card_stats & ECR_COLN_MASK); /* * Multiple collisions */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (card_stats & ECR_MCOLN_MASK) >> 4); SMC_SELECT_BANK(sc, 2); /* * Attempt to enqueue some more stuff. */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; snstart_locked(ifp); } /* * Some other error. Try to fix it by resetting the adapter. */ if (status & IM_EPH_INT) { snstop(sc); sninit_locked(sc); } out: /* * Handled all interrupt sources. */ SMC_SELECT_BANK(sc, 2); /* * Reestablish interrupts from mask which have not been deselected * during this interrupt. Note that the hardware mask, which was set * to 0x00 at the start of this service routine, may have been * updated by one or more of the interrupt handlers and we must let * those new interrupts stay enabled here. */ mask |= CSR_READ_1(sc, INTR_MASK_REG_B); CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; } static void snread(struct ifnet *ifp) { struct sn_softc *sc = ifp->if_softc; struct ether_header *eh; struct mbuf *m; short status; int packet_number; uint16_t packet_length; uint8_t *data; SMC_SELECT_BANK(sc, 2); #if 0 packet_number = CSR_READ_2(sc, FIFO_PORTS_REG_W); if (packet_number & FIFO_REMPTY) { /* * we got called, but nothing was on the FIFO */ printf("sn: Receive interrupt with nothing on FIFO\n"); return; } #endif read_another: /* * Start reading from the start of the packet. Since PTR_RCV is set, * packet number is found in FIFO_PORTS_REG_W, FIFO_RX_MASK. */ CSR_WRITE_2(sc, POINTER_REG_W, PTR_READ | PTR_RCV | PTR_AUTOINC | 0x0000); /* * First two words are status and packet_length */ status = CSR_READ_2(sc, DATA_REG_W); packet_length = CSR_READ_2(sc, DATA_REG_W) & RLEN_MASK; /* * The packet length contains 3 extra words: status, length, and an * extra word with the control byte. */ packet_length -= 6; /* * Account for receive errors and discard. */ if (status & RS_ERRORS) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto out; } /* * A packet is received. */ /* * Adjust for odd-length packet. */ if (status & RS_ODDFRAME) packet_length++; /* * Allocate a header mbuf from the kernel. */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) goto out; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = packet_length; /* - * Attach an mbuf cluster + * Attach an mbuf cluster. */ - MCLGET(m, M_NOWAIT); - - /* - * Insist on getting a cluster - */ - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); printf("sn: snread() kernel memory allocation problem\n"); goto out; } eh = mtod(m, struct ether_header *); /* * Get packet, including link layer address, from interface. */ data = (uint8_t *) eh; CSR_READ_MULTI_2(sc, DATA_REG_W, (uint16_t *) data, packet_length >> 1); if (packet_length & 1) { data += packet_length & ~1; *data = CSR_READ_1(sc, DATA_REG_B); } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); /* * Remove link layer addresses and whatnot. */ m->m_pkthdr.len = m->m_len = packet_length; /* * Drop locks before calling if_input() since it may re-enter * snstart() in the netisr case.
This would result in a * lock reversal. Better performance might be obtained by * chaining all packets received, dropping the lock, and then * calling if_input() on each one. */ SN_UNLOCK(sc); (*ifp->if_input)(ifp, m); SN_LOCK(sc); out: /* * Error or good, tell the card to get rid of this packet. Wait for * the MMU to be un-busy. */ SMC_SELECT_BANK(sc, 2); while (CSR_READ_2(sc, MMU_CMD_REG_W) & MMUCR_BUSY) /* NOTHING */ ; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_RELEASE); /* * Check whether another packet is ready */ packet_number = CSR_READ_2(sc, FIFO_PORTS_REG_W); if (packet_number & FIFO_REMPTY) { return; } goto read_another; } /* * Handle IOCTLS. This function is completely stolen from if_ep.c. * As with its progenitor, it does not handle hardware address * changes. */ static int snioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct sn_softc *sc = ifp->if_softc; int error = 0; switch (cmd) { case SIOCSIFFLAGS: SN_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) { snstop(sc); } else { /* reinitialize card on any parameter change */ sninit_locked(sc); } SN_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* update multicast filter list. */ SN_LOCK(sc); sn_setmcast(sc); error = 0; SN_UNLOCK(sc); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void snwatchdog(void *arg) { struct sn_softc *sc; sc = arg; SN_ASSERT_LOCKED(sc); callout_reset(&sc->watchdog, hz, snwatchdog, sc); if (sc->timer == 0 || --sc->timer > 0) return; snintr_locked(sc); } /* 1. zero the interrupt mask * 2. clear the enable receive flag * 3. clear the enable xmit flags */ static void snstop(struct sn_softc *sc) { struct ifnet *ifp = sc->ifp; /* * Clear interrupt mask; disable all interrupts. */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_1(sc, INTR_MASK_REG_B, 0x00); /* * Disable transmitter and receiver. */ SMC_SELECT_BANK(sc, 0); CSR_WRITE_2(sc, RECV_CONTROL_REG_W, 0x0000); CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, 0x0000); /* * Cancel watchdog. */ sc->timer = 0; callout_stop(&sc->watchdog); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } int sn_activate(device_t dev) { struct sn_softc *sc = device_get_softc(dev); sc->port_rid = 0; sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->port_rid, 0, ~0, SMC_IO_EXTENT, RF_ACTIVE); if (!sc->port_res) { if (bootverbose) device_printf(dev, "Cannot allocate ioport\n"); return ENOMEM; } sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq_res) { if (bootverbose) device_printf(dev, "Cannot allocate irq\n"); sn_deactivate(dev); return ENOMEM; } return (0); } void sn_deactivate(device_t dev) { struct sn_softc *sc = device_get_softc(dev); if (sc->intrhand) bus_teardown_intr(dev, sc->irq_res, sc->intrhand); sc->intrhand = 0; if (sc->port_res) bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); sc->port_res = 0; if (sc->modem_res) bus_release_resource(dev, SYS_RES_IOPORT, sc->modem_rid, sc->modem_res); sc->modem_res = 0; if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = 0; return; } /* * Function: sn_probe(device_t dev) * * Purpose: * Tests to see if a given ioaddr points to an SMC9xxx chip. * Tries to cause as little damage as possible if it's not an SMC chip.
* Returns 0 on success * * Algorithm: * (1) see if the high byte of BANK_SELECT is 0x33 * (2) compare the ioaddr with the base register's address * (3) see if I recognize the chip ID in the appropriate register * * */ int sn_probe(device_t dev) { struct sn_softc *sc = device_get_softc(dev); uint16_t bank; uint16_t revision_register; uint16_t base_address_register; int err; if ((err = sn_activate(dev)) != 0) return err; /* * First, see if the high byte is 0x33 */ bank = CSR_READ_2(sc, BANK_SELECT_REG_W); if ((bank & BSR_DETECT_MASK) != BSR_DETECT_VALUE) { #ifdef SN_DEBUG device_printf(dev, "test1 failed\n"); #endif goto error; } /* * The above MIGHT indicate a device, but I need to write to further * test this. Go to bank 0, then test that the register still * reports the high byte is 0x33. */ CSR_WRITE_2(sc, BANK_SELECT_REG_W, 0x0000); bank = CSR_READ_2(sc, BANK_SELECT_REG_W); if ((bank & BSR_DETECT_MASK) != BSR_DETECT_VALUE) { #ifdef SN_DEBUG device_printf(dev, "test2 failed\n"); #endif goto error; } /* * Well, we've already written once, so hopefully another time won't * hurt. This time, I need to switch the bank register to bank 1, so * I can access the base address register. The contents of the * BASE_ADDR_REG_W register, after some jiggery pokery, are expected * to match the I/O port address where the adapter is being probed. */ CSR_WRITE_2(sc, BANK_SELECT_REG_W, 0x0001); base_address_register = (CSR_READ_2(sc, BASE_ADDR_REG_W) >> 3) & 0x3e0; if (rman_get_start(sc->port_res) != base_address_register) { /* * Well, the base address register didn't match. Must not * have been an SMC chip after all. */ #ifdef SN_DEBUG device_printf(dev, "test3 failed ioaddr = 0x%x, " "base_address_register = 0x%x\n", rman_get_start(sc->port_res), base_address_register); #endif goto error; } /* * Check if the revision register is something that I recognize. * These might need to be added to later, as future revisions appear. */ CSR_WRITE_2(sc, BANK_SELECT_REG_W, 0x3); revision_register = CSR_READ_2(sc, REVISION_REG_W); if (!chip_ids[(revision_register >> 4) & 0xF]) { /* * I don't recognize this chip, so... */ #ifdef SN_DEBUG device_printf(dev, "test4 failed\n"); #endif goto error; } /* * At this point I'll assume that the chip is an SMC9xxx. It might be * prudent to check a listing of MAC addresses against the hardware * address, or do some other tests. */ sn_deactivate(dev); return 0; error: sn_deactivate(dev); return ENXIO; } #define MCFSZ 8 static void sn_setmcast(struct sn_softc *sc) { struct ifnet *ifp = sc->ifp; int flags; uint8_t mcf[MCFSZ]; SN_ASSERT_LOCKED(sc); /* * Set the receiver filter. We want receive enabled and auto strip * of CRC from received packet. If we are promiscuous then set that * bit too.
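 */

/*
 * sn_getmcf() below condenses the multicast list into a 64-bit filter:
 * the low 6 bits of the little-endian CRC-32 of each address are
 * bit-reversed and used as a bit index into the 8-byte table that is
 * then loaded into the MULTICAST1..4 registers.  The kernel of that
 * computation (variable names shortened for illustration):
 *
 *	crc = ether_crc32_le(enaddr, ETHER_ADDR_LEN) & 0x3f;
 *	idx = 0;
 *	for (i = 0; i < 6; i++) {		/* reverse the 6 bits */
 *		idx = (idx << 1) | (crc & 1);
 *		crc >>= 1;
 *	}
 *	mcf[idx >> 3] |= 1 << (idx & 7);	/* set 1 bit of 64 */
 */

/*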
*/ flags = RCR_ENABLE | RCR_STRIP_CRC; if (ifp->if_flags & IFF_PROMISC) { flags |= RCR_PROMISC | RCR_ALMUL; } else if (ifp->if_flags & IFF_ALLMULTI) { flags |= RCR_ALMUL; } else { if (sn_getmcf(ifp, mcf)) { /* set filter */ SMC_SELECT_BANK(sc, 3); CSR_WRITE_2(sc, MULTICAST1_REG_W, ((uint16_t)mcf[1] << 8) | mcf[0]); CSR_WRITE_2(sc, MULTICAST2_REG_W, ((uint16_t)mcf[3] << 8) | mcf[2]); CSR_WRITE_2(sc, MULTICAST3_REG_W, ((uint16_t)mcf[5] << 8) | mcf[4]); CSR_WRITE_2(sc, MULTICAST4_REG_W, ((uint16_t)mcf[7] << 8) | mcf[6]); } else { flags |= RCR_ALMUL; } } SMC_SELECT_BANK(sc, 0); CSR_WRITE_2(sc, RECV_CONTROL_REG_W, flags); } static int sn_getmcf(struct ifnet *ifp, uint8_t *mcf) { int i; uint32_t index, index2; uint8_t *af = mcf; struct ifmultiaddr *ifma; bzero(mcf, MCFSZ); if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) { if_maddr_runlock(ifp); return 0; } index = ether_crc32_le(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3f; index2 = 0; for (i = 0; i < 6; i++) { index2 <<= 1; index2 |= (index & 0x01); index >>= 1; } af[index2 >> 3] |= 1 << (index2 & 7); } if_maddr_runlock(ifp); return 1; /* use multicast filter */ } Index: head/sys/dev/snc/dp83932.c =================================================================== --- head/sys/dev/snc/dp83932.c (revision 276749) +++ head/sys/dev/snc/dp83932.c (revision 276750) @@ -1,1199 +1,1198 @@ /* $FreeBSD$ */ /* $NecBSD: dp83932.c,v 1.5 1999/07/29 05:08:44 kmatsuda Exp $ */ /* $NetBSD: if_snc.c,v 1.18 1998/04/25 21:27:40 scottr Exp $ */ /*- * Copyright (c) 1997, 1998, 1999 * Kouichi Matsuda. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Kouichi Matsuda for * NetBSD/pc98. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Modified for FreeBSD(98) 4.0 from NetBSD/pc98 1.4.2 by Motomichi Matsuzaki. */ /* * Modified for NetBSD/pc98 1.2G from NetBSD/mac68k 1.2G by Kouichi Matsuda. 
* Make adapted for NEC PC-9801-83, 84, PC-9801-103, 104, PC-9801N-25 and * PC-9801N-J02, J02R, which uses National Semiconductor DP83934AVQB as * Ethernet Controller and National Semiconductor NS46C46 as * (64 * 16 bits) Microwire Serial EEPROM. */ /*- * National Semiconductor DP8393X SONIC Driver * Copyright (c) 1991 Algorithmics Ltd (http://www.algor.co.uk) * You may use, copy, and modify this program so long as you retain the * copyright line. * * This driver has been substantially modified since Algorithmics donated * it. * * Denton Gentry * and also * Yanagisawa Takeshi * did the work to get this running on the Macintosh. */ #include "opt_inet.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void sncwatchdog(void *); static void sncinit(void *); static void sncinit_locked(struct snc_softc *); static int sncstop(struct snc_softc *sc); static int sncioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static void sncstart(struct ifnet *ifp); static void sncstart_locked(struct ifnet *ifp); static void sncreset(struct snc_softc *sc); static void caminitialise(struct snc_softc *); static void camentry(struct snc_softc *, int, u_char *ea); static void camprogram(struct snc_softc *); static void initialise_tda(struct snc_softc *); static void initialise_rda(struct snc_softc *); static void initialise_rra(struct snc_softc *); #ifdef SNCDEBUG static void camdump(struct snc_softc *sc); #endif static void sonictxint(struct snc_softc *); static void sonicrxint(struct snc_softc *); static u_int sonicput(struct snc_softc *sc, struct mbuf *m0, int mtd_next); static int sonic_read(struct snc_softc *, u_int32_t, int); static struct mbuf *sonic_get(struct snc_softc *, u_int32_t, int); int snc_enable(struct snc_softc *); void snc_disable(struct snc_softc *); int snc_mediachange(struct ifnet *); void snc_mediastatus(struct ifnet *, struct ifmediareq *); #undef assert #undef _assert #ifdef NDEBUG #define assert(e) ((void)0) #define _assert(e) ((void)0) #else #define _assert(e) assert(e) #ifdef __STDC__ #define assert(e) ((e) ? (void)0 : __assert("snc ", __FILE__, __LINE__, #e)) #else /* PCC */ #define assert(e) ((e) ? (void)0 : __assert("snc "__FILE__, __LINE__, "e")) #endif #endif #ifdef SNCDEBUG #define SNC_SHOWTXHDR 0x01 /* show tx ether_header */ #define SNC_SHOWRXHDR 0x02 /* show rx ether_header */ #define SNC_SHOWCAMENT 0x04 /* show CAM entry */ #endif /* SNCDEBUG */ int sncdebug = 0; int sncconfig(struct snc_softc *sc, int *media, int nmedia, int defmedia, u_int8_t *myea) { struct ifnet *ifp; int i; #ifdef SNCDEBUG if ((sncdebug & SNC_SHOWCAMENT) != 0) { camdump(sc); } #endif ifp = sc->sc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); return (ENOMEM); } #ifdef SNCDEBUG device_printf(sc->sc_dev, "buffers: rra=0x%x cda=0x%x rda=0x%x tda=0x%x\n", sc->v_rra[0], sc->v_cda, sc->v_rda, sc->mtda[0].mtd_vtxp); #endif ifp->if_softc = sc; if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); ifp->if_ioctl = sncioctl; ifp->if_start = sncstart; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = sncinit; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); /* Initialize media goo. 
*/ ifmedia_init(&sc->sc_media, 0, snc_mediachange, snc_mediastatus); if (media != NULL) { for (i = 0; i < nmedia; i++) ifmedia_add(&sc->sc_media, media[i], 0, NULL); ifmedia_set(&sc->sc_media, defmedia); } else { ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL); } ether_ifattach(ifp, myea); return (0); } void sncshutdown(void *arg) { struct snc_softc *sc = arg; SNC_ASSERT_LOCKED(sc); sncstop(sc); } /* * Media change callback. */ int snc_mediachange(struct ifnet *ifp) { struct snc_softc *sc = ifp->if_softc; int error; SNC_LOCK(sc); if (sc->sc_mediachange) error = (*sc->sc_mediachange)(sc); else error = EINVAL; SNC_UNLOCK(sc); return (error); } /* * Media status callback. */ void snc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct snc_softc *sc = ifp->if_softc; SNC_LOCK(sc); if (sc->sc_enabled == 0) { ifmr->ifm_active = IFM_ETHER | IFM_NONE; ifmr->ifm_status = 0; SNC_UNLOCK(sc); return; } if (sc->sc_mediastatus) (*sc->sc_mediastatus)(sc, ifmr); SNC_UNLOCK(sc); } static int sncioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ifreq *ifr; struct snc_softc *sc = ifp->if_softc; int err = 0; switch (cmd) { case SIOCSIFFLAGS: SNC_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { /* * If interface is marked down and it is running, * then stop it. */ sncstop(sc); snc_disable(sc); } else if ((ifp->if_flags & IFF_UP) != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { /* * If interface is marked up and it is stopped, * then start it. */ if ((err = snc_enable(sc)) != 0) break; sncinit_locked(sc); } else if (sc->sc_enabled) { /* * reset the interface to pick up any other changes * in flags */ sncreset(sc); sncstart_locked(ifp); } SNC_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: SNC_LOCK(sc); if (sc->sc_enabled == 0) { err = EIO; SNC_UNLOCK(sc); break; } sncreset(sc); SNC_UNLOCK(sc); err = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: ifr = (struct ifreq *) data; err = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); break; default: err = ether_ioctl(ifp, cmd, data); break; } return (err); } /* * Encapsulate a packet of type family for the local net. */ static void sncstart(struct ifnet *ifp) { struct snc_softc *sc = ifp->if_softc; SNC_LOCK(sc); sncstart_locked(ifp); SNC_UNLOCK(sc); } static void sncstart_locked(struct ifnet *ifp) { struct snc_softc *sc = ifp->if_softc; struct mbuf *m; int mtd_next; if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; outloop: /* Check for room in the xmit buffer. */ if ((mtd_next = (sc->mtd_free + 1)) == NTDA) mtd_next = 0; if (mtd_next == sc->mtd_hw) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } IF_DEQUEUE(&ifp->if_snd, m); if (m == 0) return; /* We need the header for m_pkthdr.len. */ M_ASSERTPKTHDR(m); /* * If there is nothing in the o/p queue, and there is room in * the Tx ring, then send the packet directly. Otherwise append * it to the o/p queue. */ if ((sonicput(sc, m, mtd_next)) == 0) { IF_PREPEND(&ifp->if_snd, m); return; } /* * If bpf is listening on this interface, let it see the packet * before we commit it to the wire, but only if we are really * committed to send it. * * XXX: Locking must protect m against premature m_freem() in * sonictxint(). */ BPF_MTAP(ifp, m); sc->mtd_prev = sc->mtd_free; sc->mtd_free = mtd_next; if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); /* # of pkts */ /* Jump back for possibly more punishment. */ goto outloop; } /* * reset and restart the SONIC. 
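 */

/*
 * sncstart_locked() above manages the NTDA transmit descriptors as a
 * circular list: mtd_free is the slot software fills next and mtd_hw is
 * the slot the chip drains next, and the ring is declared full one
 * entry early so a completely full ring cannot be confused with an
 * empty one.  The occupancy test, reduced to its essentials
 * (illustrative only):
 *
 *	if ((mtd_next = sc->mtd_free + 1) == NTDA)
 *		mtd_next = 0;			/* wrap around */
 *	if (mtd_next == sc->mtd_hw)		/* would catch the chip */
 *		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 */

/*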
Called in case of fatal * hardware/software errors. */ static void sncreset(struct snc_softc *sc) { sncstop(sc); sncinit_locked(sc); } static void sncinit(void *xsc) { struct snc_softc *sc = xsc; SNC_LOCK(sc); sncinit_locked(sc); SNC_UNLOCK(sc); } static void sncinit_locked(struct snc_softc *sc) { u_long s_rcr; if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) /* already running */ return; NIC_PUT(sc, SNCR_CR, CR_RST); /* DCR only accessable in reset mode! */ /* config it */ NIC_PUT(sc, SNCR_DCR, (sc->sncr_dcr | (sc->bitmode ? DCR_DW32 : DCR_DW16))); NIC_PUT(sc, SNCR_DCR2, sc->sncr_dcr2); s_rcr = RCR_BRD | RCR_LBNONE; if (sc->sc_ifp->if_flags & IFF_PROMISC) s_rcr |= RCR_PRO; if (sc->sc_ifp->if_flags & IFF_ALLMULTI) s_rcr |= RCR_AMC; NIC_PUT(sc, SNCR_RCR, s_rcr); NIC_PUT(sc, SNCR_IMR, (IMR_PRXEN | IMR_PTXEN | IMR_TXEREN | IMR_LCDEN)); /* clear pending interrupts */ NIC_PUT(sc, SNCR_ISR, ISR_ALL); /* clear tally counters */ NIC_PUT(sc, SNCR_CRCT, -1); NIC_PUT(sc, SNCR_FAET, -1); NIC_PUT(sc, SNCR_MPT, -1); initialise_tda(sc); initialise_rda(sc); initialise_rra(sc); /* enable the chip */ NIC_PUT(sc, SNCR_CR, 0); wbflush(); /* program the CAM */ camprogram(sc); /* get it to read resource descriptors */ NIC_PUT(sc, SNCR_CR, CR_RRRA); wbflush(); while ((NIC_GET(sc, SNCR_CR)) & CR_RRRA) continue; /* enable rx */ NIC_PUT(sc, SNCR_CR, CR_RXEN); wbflush(); /* flag interface as "running" */ sc->sc_ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->sc_timer, hz, sncwatchdog, sc); return; } /* * close down an interface and free its buffers * Called on final close of device, or if sncinit() fails * part way through. */ static int sncstop(struct snc_softc *sc) { struct mtd *mtd; SNC_ASSERT_LOCKED(sc); /* stick chip in reset */ NIC_PUT(sc, SNCR_CR, CR_RST); wbflush(); /* free all receive buffers (currently static so nothing to do) */ /* free all pending transmit mbufs */ while (sc->mtd_hw != sc->mtd_free) { mtd = &sc->mtda[sc->mtd_hw]; if (mtd->mtd_mbuf) m_freem(mtd->mtd_mbuf); if (++sc->mtd_hw == NTDA) sc->mtd_hw = 0; } callout_stop(&sc->sc_timer); sc->sc_tx_timeout = 0; sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); return (0); } /* * Called if any Tx packets remain unsent after 5 seconds, * In all cases we just reset the chip, and any retransmission * will be handled by higher level protocol timeouts. */ static void sncwatchdog(void *arg) { struct snc_softc *sc = arg; struct mtd *mtd; SNC_ASSERT_LOCKED(sc); if (sc->sc_tx_timeout && --sc->sc_tx_timeout == 0) { if (sc->mtd_hw != sc->mtd_free) { /* something still pending for transmit */ mtd = &sc->mtda[sc->mtd_hw]; if (SRO(sc, mtd->mtd_vtxp, TXP_STATUS) == 0) log(LOG_ERR, "%s: Tx - timeout\n", device_get_nameunit(sc->sc_dev)); else log(LOG_ERR, "%s: Tx - lost interrupt\n", device_get_nameunit(sc->sc_dev)); sncreset(sc); } } callout_reset(&sc->sc_timer, hz, sncwatchdog, sc); } /* * stuff packet into sonic */ static u_int sonicput(struct snc_softc *sc, struct mbuf *m0, int mtd_next) { struct mtd *mtdp; struct mbuf *m; u_int32_t buff; u_int32_t txp; u_int len = 0; u_int totlen = 0; #ifdef whyonearthwouldyoudothis if (NIC_GET(sc, SNCR_CR) & CR_TXP) return (0); #endif /* grab the replacement mtd */ mtdp = &sc->mtda[sc->mtd_free]; buff = mtdp->mtd_vbuf; /* this packet goes to mtdnext fill in the TDA */ mtdp->mtd_mbuf = m0; txp = mtdp->mtd_vtxp; /* Write to the config word. 
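 */

/*
 * Note the interrupt batching here: TCR_PINT is requested on only every
 * (NTDA/2)+1-th transmit descriptor via the mtd_pint countdown below,
 * so queueing a full ring of NTDA packets costs roughly two
 * transmit-complete interrupts rather than NTDA of them, and
 * sonictxint() reaps every completed descriptor in one pass when an
 * interrupt does arrive.
 */

/*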
Every (NTDA/2)+1 packets we set an intr */ if (sc->mtd_pint == 0) { sc->mtd_pint = NTDA/2; SWO(sc, txp, TXP_CONFIG, TCR_PINT); } else { sc->mtd_pint--; SWO(sc, txp, TXP_CONFIG, 0); } for (m = m0; m; m = m->m_next) { len = m->m_len; totlen += len; (*sc->sc_copytobuf)(sc, mtod(m, caddr_t), buff, len); buff += len; } if (totlen >= TXBSIZE) { panic("%s: sonicput: packet overflow", device_get_nameunit(sc->sc_dev)); } SWO(sc, txp, TXP_FRAGOFF + (0 * TXP_FRAGSIZE) + TXP_FPTRLO, LOWER(mtdp->mtd_vbuf)); SWO(sc, txp, TXP_FRAGOFF + (0 * TXP_FRAGSIZE) + TXP_FPTRHI, UPPER(mtdp->mtd_vbuf)); if (totlen < ETHERMIN + sizeof(struct ether_header)) { int pad = ETHERMIN + sizeof(struct ether_header) - totlen; (*sc->sc_zerobuf)(sc, mtdp->mtd_vbuf + totlen, pad); totlen = ETHERMIN + sizeof(struct ether_header); } SWO(sc, txp, TXP_FRAGOFF + (0 * TXP_FRAGSIZE) + TXP_FSIZE, totlen); SWO(sc, txp, TXP_FRAGCNT, 1); SWO(sc, txp, TXP_PKTSIZE, totlen); /* link onto the next mtd that will be used */ SWO(sc, txp, TXP_FRAGOFF + (1 * TXP_FRAGSIZE) + TXP_FPTRLO, LOWER(sc->mtda[mtd_next].mtd_vtxp) | EOL); /* * The previous txp.tlink currently contains a pointer to * our txp | EOL. Want to clear the EOL, so write our * pointer to the previous txp. */ SWO(sc, sc->mtda[sc->mtd_prev].mtd_vtxp, sc->mtd_tlinko, LOWER(mtdp->mtd_vtxp)); /* make sure chip is running */ wbflush(); NIC_PUT(sc, SNCR_CR, CR_TXP); wbflush(); /* 5 seconds to watch for failing to transmit */ sc->sc_tx_timeout = 5; return (totlen); } /* * These are called from sonicioctl() when /etc/ifconfig is run to set * the address or switch the i/f on. */ /* * CAM support */ static void caminitialise(struct snc_softc *sc) { u_int32_t v_cda = sc->v_cda; int i; int camoffset; for (i = 0; i < MAXCAM; i++) { camoffset = i * CDA_CAMDESC; SWO(sc, v_cda, (camoffset + CDA_CAMEP), i); SWO(sc, v_cda, (camoffset + CDA_CAMAP2), 0); SWO(sc, v_cda, (camoffset + CDA_CAMAP1), 0); SWO(sc, v_cda, (camoffset + CDA_CAMAP0), 0); } SWO(sc, v_cda, CDA_ENABLE, 0); #ifdef SNCDEBUG if ((sncdebug & SNC_SHOWCAMENT) != 0) { camdump(sc); } #endif } static void camentry(struct snc_softc *sc, int entry, u_char *ea) { u_int32_t v_cda = sc->v_cda; int camoffset = entry * CDA_CAMDESC; SWO(sc, v_cda, camoffset + CDA_CAMEP, entry); SWO(sc, v_cda, camoffset + CDA_CAMAP2, (ea[5] << 8) | ea[4]); SWO(sc, v_cda, camoffset + CDA_CAMAP1, (ea[3] << 8) | ea[2]); SWO(sc, v_cda, camoffset + CDA_CAMAP0, (ea[1] << 8) | ea[0]); SWO(sc, v_cda, CDA_ENABLE, (SRO(sc, v_cda, CDA_ENABLE) | (1 << entry))); } static void camprogram(struct snc_softc *sc) { struct ifmultiaddr *ifma; struct ifnet *ifp; int timeout; int mcount = 0; caminitialise(sc); ifp = sc->sc_ifp; /* Always load our own address first. */ camentry (sc, mcount, IF_LLADDR(sc->sc_ifp)); mcount++; /* Assume we won't need allmulti bit. 
*/ ifp->if_flags &= ~IFF_ALLMULTI; /* Loop through multicast addresses */ if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcount == MAXCAM) { ifp->if_flags |= IFF_ALLMULTI; break; } /* program the CAM with the specified entry */ camentry(sc, mcount, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); mcount++; } if_maddr_runlock(ifp); NIC_PUT(sc, SNCR_CDP, LOWER(sc->v_cda)); NIC_PUT(sc, SNCR_CDC, MAXCAM); NIC_PUT(sc, SNCR_CR, CR_LCAM); wbflush(); timeout = 10000; while ((NIC_GET(sc, SNCR_CR) & CR_LCAM) && timeout--) continue; if (timeout == 0) { /* XXX */ panic("%s: CAM initialisation failed\n", device_get_nameunit(sc->sc_dev)); } timeout = 10000; while (((NIC_GET(sc, SNCR_ISR) & ISR_LCD) == 0) && timeout--) continue; if (NIC_GET(sc, SNCR_ISR) & ISR_LCD) NIC_PUT(sc, SNCR_ISR, ISR_LCD); else device_printf(sc->sc_dev, "CAM initialisation without interrupt\n"); } #ifdef SNCDEBUG static void camdump(struct snc_softc *sc) { int i; printf("CAM entries:\n"); NIC_PUT(sc, SNCR_CR, CR_RST); wbflush(); for (i = 0; i < 16; i++) { u_short ap2, ap1, ap0; NIC_PUT(sc, SNCR_CEP, i); wbflush(); ap2 = NIC_GET(sc, SNCR_CAP2); ap1 = NIC_GET(sc, SNCR_CAP1); ap0 = NIC_GET(sc, SNCR_CAP0); printf("%d: ap2=0x%x ap1=0x%x ap0=0x%x\n", i, ap2, ap1, ap0); } printf("CAM enable 0x%x\n", NIC_GET(sc, SNCR_CEP)); NIC_PUT(sc, SNCR_CR, 0); wbflush(); } #endif static void initialise_tda(struct snc_softc *sc) { struct mtd *mtd; int i; for (i = 0; i < NTDA; i++) { mtd = &sc->mtda[i]; mtd->mtd_mbuf = 0; } sc->mtd_hw = 0; sc->mtd_prev = NTDA - 1; sc->mtd_free = 0; sc->mtd_tlinko = TXP_FRAGOFF + 1*TXP_FRAGSIZE + TXP_FPTRLO; sc->mtd_pint = NTDA/2; NIC_PUT(sc, SNCR_UTDA, UPPER(sc->mtda[0].mtd_vtxp)); NIC_PUT(sc, SNCR_CTDA, LOWER(sc->mtda[0].mtd_vtxp)); } static void initialise_rda(struct snc_softc *sc) { int i; u_int32_t vv_rda = 0; u_int32_t v_rda = 0; /* link the RDA's together into a circular list */ for (i = 0; i < (sc->sc_nrda - 1); i++) { v_rda = sc->v_rda + (i * RXPKT_SIZE(sc)); vv_rda = sc->v_rda + ((i+1) * RXPKT_SIZE(sc)); SWO(sc, v_rda, RXPKT_RLINK, LOWER(vv_rda)); SWO(sc, v_rda, RXPKT_INUSE, 1); } v_rda = sc->v_rda + ((sc->sc_nrda - 1) * RXPKT_SIZE(sc)); SWO(sc, v_rda, RXPKT_RLINK, LOWER(sc->v_rda) | EOL); SWO(sc, v_rda, RXPKT_INUSE, 1); /* mark end of receive descriptor list */ sc->sc_rdamark = sc->sc_nrda - 1; sc->sc_rxmark = 0; NIC_PUT(sc, SNCR_URDA, UPPER(sc->v_rda)); NIC_PUT(sc, SNCR_CRDA, LOWER(sc->v_rda)); wbflush(); } static void initialise_rra(struct snc_softc *sc) { int i; u_int v; int bitmode = sc->bitmode; if (bitmode) NIC_PUT(sc, SNCR_EOBC, RBASIZE(sc) / 2 - 2); else NIC_PUT(sc, SNCR_EOBC, RBASIZE(sc) / 2 - 1); NIC_PUT(sc, SNCR_URRA, UPPER(sc->v_rra[0])); NIC_PUT(sc, SNCR_RSA, LOWER(sc->v_rra[0])); /* rea must point just past the end of the rra space */ NIC_PUT(sc, SNCR_REA, LOWER(sc->v_rea)); NIC_PUT(sc, SNCR_RRP, LOWER(sc->v_rra[0])); NIC_PUT(sc, SNCR_RSC, 0); /* fill up SOME of the rra with buffers */ for (i = 0; i < NRBA; i++) { v = SONIC_GETDMA(sc->rbuf[i]); SWO(sc, sc->v_rra[i], RXRSRC_PTRHI, UPPER(v)); SWO(sc, sc->v_rra[i], RXRSRC_PTRLO, LOWER(v)); SWO(sc, sc->v_rra[i], RXRSRC_WCHI, UPPER(PAGE_SIZE/2)); SWO(sc, sc->v_rra[i], RXRSRC_WCLO, LOWER(PAGE_SIZE/2)); } sc->sc_rramark = NRBA; NIC_PUT(sc, SNCR_RWP, LOWER(sc->v_rra[sc->sc_rramark])); wbflush(); } void sncintr(void *arg) { struct snc_softc *sc = (struct snc_softc *)arg; int isr; if (sc->sc_enabled == 0) return; SNC_LOCK(sc); while ((isr = (NIC_GET(sc, SNCR_ISR) & 
ISR_ALL)) != 0) { /* scrub the interrupts that we are going to service */ NIC_PUT(sc, SNCR_ISR, isr); wbflush(); if (isr & (ISR_BR | ISR_LCD | ISR_TC)) device_printf(sc->sc_dev, "unexpected interrupt status 0x%x\n", isr); if (isr & (ISR_TXDN | ISR_TXER | ISR_PINT)) sonictxint(sc); if (isr & ISR_PKTRX) sonicrxint(sc); if (isr & (ISR_HBL | ISR_RDE | ISR_RBE | ISR_RBAE | ISR_RFO)) { if (isr & ISR_HBL) /* * The repeater is not providing a heartbeat. * In itself this isn't harmful, lots of the * cheap repeater hubs don't supply a heartbeat. * So ignore the lack of heartbeat. Its only * if we can't detect a carrier that we have a * problem. */ ; if (isr & ISR_RDE) device_printf(sc->sc_dev, "receive descriptors exhausted\n"); if (isr & ISR_RBE) device_printf(sc->sc_dev, "receive buffers exhausted\n"); if (isr & ISR_RBAE) device_printf(sc->sc_dev, "receive buffer area exhausted\n"); if (isr & ISR_RFO) device_printf(sc->sc_dev, "receive FIFO overrun\n"); } if (isr & (ISR_CRC | ISR_FAE | ISR_MP)) { #ifdef notdef if (isr & ISR_CRC) sc->sc_crctally++; if (isr & ISR_FAE) sc->sc_faetally++; if (isr & ISR_MP) sc->sc_mptally++; #endif } sncstart_locked(sc->sc_ifp); } SNC_UNLOCK(sc); return; } /* * Transmit interrupt routine */ static void sonictxint(struct snc_softc *sc) { struct mtd *mtd; u_int32_t txp; unsigned short txp_status; int mtd_hw; struct ifnet *ifp = sc->sc_ifp; mtd_hw = sc->mtd_hw; if (mtd_hw == sc->mtd_free) return; while (mtd_hw != sc->mtd_free) { mtd = &sc->mtda[mtd_hw]; txp = mtd->mtd_vtxp; if (SRO(sc, txp, TXP_STATUS) == 0) { break; /* it hasn't really gone yet */ } #ifdef SNCDEBUG if ((sncdebug & SNC_SHOWTXHDR) != 0) { struct ether_header eh; (*sc->sc_copyfrombuf)(sc, &eh, mtd->mtd_vbuf, sizeof(eh)); device_printf(sc->sc_dev, "xmit status=0x%x len=%d type=0x%x from %6D", SRO(sc, txp, TXP_STATUS), SRO(sc, txp, TXP_PKTSIZE), htons(eh.ether_type), eh.ether_shost, ":"); printf(" (to %6D)\n", eh.ether_dhost, ":"); } #endif /* SNCDEBUG */ sc->sc_tx_timeout = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (mtd->mtd_mbuf != 0) { m_freem(mtd->mtd_mbuf); mtd->mtd_mbuf = 0; } if (++mtd_hw == NTDA) mtd_hw = 0; txp_status = SRO(sc, txp, TXP_STATUS); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txp_status & TCR_EXC) ? 16 : ((txp_status & TCR_NC) >> 12)); if ((txp_status & TCR_PTX) == 0) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); device_printf(sc->sc_dev, "Tx packet status=0x%x\n", txp_status); /* XXX - DG This looks bogus */ if (mtd_hw != sc->mtd_free) { printf("resubmitting remaining packets\n"); mtd = &sc->mtda[mtd_hw]; NIC_PUT(sc, SNCR_CTDA, LOWER(mtd->mtd_vtxp)); NIC_PUT(sc, SNCR_CR, CR_TXP); wbflush(); break; } } } sc->mtd_hw = mtd_hw; return; } /* * Receive interrupt routine */ static void sonicrxint(struct snc_softc *sc) { u_int32_t rda; int orra; int len; int rramark; int rdamark; u_int16_t rxpkt_ptr; rda = sc->v_rda + (sc->sc_rxmark * RXPKT_SIZE(sc)); while (SRO(sc, rda, RXPKT_INUSE) == 0) { u_int status = SRO(sc, rda, RXPKT_STATUS); orra = RBASEQ(SRO(sc, rda, RXPKT_SEQNO)) & RRAMASK; rxpkt_ptr = SRO(sc, rda, RXPKT_PTRLO); /* * Do not trunc ether_header length. * Our sonic_read() and sonic_get() require it. */ len = SRO(sc, rda, RXPKT_BYTEC) - FCSSIZE; if (status & RCR_PRX) { /* XXX: Does PAGE_MASK require? 
*/ u_int32_t pkt = sc->rbuf[orra & RBAMASK] + (rxpkt_ptr & PAGE_MASK); if (sonic_read(sc, pkt, len)) if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); else if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1); } else if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1); /* * give the receive buffer area back to the chip. * * If this was the last packet in the RRA, give the RRA to * the chip again. * If sonic_read() didn't copy it out then we would have to * wait! * (Don't bother adding it back in again straight away.) * * Really, we're doing v_rra[rramark] = v_rra[orra] but * we have to use the macros because SONIC might be in * 16 or 32 bit mode. */ if (status & RCR_LPKT) { u_int32_t tmp1, tmp2; rramark = sc->sc_rramark; tmp1 = sc->v_rra[rramark]; tmp2 = sc->v_rra[orra]; SWO(sc, tmp1, RXRSRC_PTRLO, SRO(sc, tmp2, RXRSRC_PTRLO)); SWO(sc, tmp1, RXRSRC_PTRHI, SRO(sc, tmp2, RXRSRC_PTRHI)); SWO(sc, tmp1, RXRSRC_WCLO, SRO(sc, tmp2, RXRSRC_WCLO)); SWO(sc, tmp1, RXRSRC_WCHI, SRO(sc, tmp2, RXRSRC_WCHI)); /* zap old rra for fun */ SWO(sc, tmp2, RXRSRC_WCHI, 0); SWO(sc, tmp2, RXRSRC_WCLO, 0); sc->sc_rramark = (++rramark) & RRAMASK; NIC_PUT(sc, SNCR_RWP, LOWER(sc->v_rra[rramark])); wbflush(); } /* * give the receive descriptor back to the chip; the simple * list is circular */ rdamark = sc->sc_rdamark; SWO(sc, rda, RXPKT_INUSE, 1); SWO(sc, rda, RXPKT_RLINK, SRO(sc, rda, RXPKT_RLINK) | EOL); SWO(sc, (sc->v_rda + (rdamark * RXPKT_SIZE(sc))), RXPKT_RLINK, SRO(sc, (sc->v_rda + (rdamark * RXPKT_SIZE(sc))), RXPKT_RLINK) & ~EOL); sc->sc_rdamark = sc->sc_rxmark; if (++sc->sc_rxmark >= sc->sc_nrda) sc->sc_rxmark = 0; rda = sc->v_rda + (sc->sc_rxmark * RXPKT_SIZE(sc)); } } /* * sonic_read -- pull packet off interface and forward to * appropriate protocol handler */ static int sonic_read(struct snc_softc *sc, u_int32_t pkt, int len) { struct ifnet *ifp = sc->sc_ifp; struct ether_header *et; struct mbuf *m; if (len <= sizeof(struct ether_header) || len > ETHERMTU + sizeof(struct ether_header)) { device_printf(sc->sc_dev, "invalid packet length %d bytes\n", len); return (0); } /* Pull packet off interface. */ m = sonic_get(sc, pkt, len); if (m == 0) { return (0); } /* We assume that the header fit entirely in one mbuf. */ et = mtod(m, struct ether_header *); #ifdef SNCDEBUG if ((sncdebug & SNC_SHOWRXHDR) != 0) { device_printf(sc->sc_dev, "rcvd 0x%x len=%d type=0x%x from %6D", pkt, len, htons(et->ether_type), et->ether_shost, ":"); printf(" (to %6D)\n", et->ether_dhost, ":"); } #endif /* SNCDEBUG */ /* Pass the packet up. */ SNC_UNLOCK(sc); (*ifp->if_input)(ifp, m); SNC_LOCK(sc); return (1); } /* * munge the received packet into an mbuf chain */ static struct mbuf * sonic_get(struct snc_softc *sc, u_int32_t pkt, int datalen) { struct mbuf *m, *top, **mp; int len; /* * Do not truncate the ether_header length. * Our sonic_read() and sonic_get() require it. */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == 0) return (0); m->m_pkthdr.rcvif = sc->sc_ifp; m->m_pkthdr.len = datalen; len = MHLEN; top = 0; mp = &top; while (datalen > 0) { if (top) { MGET(m, M_NOWAIT, MT_DATA); if (m == 0) { m_freem(top); return (0); } len = MLEN; } if (datalen >= MINCLSIZE) { - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { if (top) m_freem(top); return (0); } len = MCLBYTES; } #if 0 /* XXX: Require?
*/ if (!top) { register int pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header); m->m_data += pad; len -= pad; } #endif m->m_len = len = min(datalen, len); (*sc->sc_copyfrombuf)(sc, mtod(m, caddr_t), pkt, len); pkt += len; datalen -= len; *mp = m; mp = &m->m_next; } return (top); } /* * Enable power on the interface. */ int snc_enable(struct snc_softc *sc) { #ifdef SNCDEBUG device_printf(sc->sc_dev, "snc_enable()\n"); #endif /* SNCDEBUG */ if (sc->sc_enabled == 0 && sc->sc_enable != NULL) { if ((*sc->sc_enable)(sc) != 0) { device_printf(sc->sc_dev, "device enable failed\n"); return (EIO); } } sc->sc_enabled = 1; return (0); } /* * Disable power on the interface. */ void snc_disable(struct snc_softc *sc) { #ifdef SNCDEBUG device_printf(sc->sc_dev, "snc_disable()\n"); #endif /* SNCDEBUG */ if (sc->sc_enabled != 0 && sc->sc_disable != NULL) { (*sc->sc_disable)(sc); sc->sc_enabled = 0; } } Index: head/sys/dev/ti/if_ti.c =================================================================== --- head/sys/dev/ti/if_ti.c (revision 276749) +++ head/sys/dev/ti/if_ti.c (revision 276750) @@ -1,4058 +1,4057 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD. * Manuals, sample driver and firmware source kits are available * from http://www.alteon.com/support/openkits. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The Alteon Networks Tigon chip contains an embedded R4000 CPU, * gigabit MAC, dual DMA channels and a PCI interface unit. NICs * using the Tigon may have anywhere from 512K to 2MB of SRAM. The * Tigon supports hardware IP, TCP and UCP checksumming, multicast * filtering and jumbo (9014 byte) frames. The hardware is largely * controlled by firmware, which must be loaded into the NIC during * initialization. 
* * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware * revision, which supports new features such as extended commands, * extended jumbo receive ring desciptors and a mini receive ring. * * Alteon Networks is to be commended for releasing such a vast amount * of development material for the Tigon NIC without requiring an NDA * (although they really should have done it a long time ago). With * any luck, the other vendors will finally wise up and follow Alteon's * stellar example. * * The firmware for the Tigon 1 and 2 NICs is compiled directly into * this driver by #including it as a C header file. This bloats the * driver somewhat, but it's the easiest method considering that the * driver code and firmware code need to be kept in sync. The source * for the firmware is not provided with the FreeBSD distribution since * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3. * * The following people deserve special thanks: * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board * for testing * - Raymond Lee of Netgear, for providing a pair of Netgear * GA620 Tigon 2 boards for testing * - Ulf Zimmermann, for bringing the GA260 to my attention and * convincing me to write this driver. * - Andrew Gallatin for providing FreeBSD/Alpha support. */ #include __FBSDID("$FreeBSD$"); #include "opt_ti.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef TI_SF_BUF_JUMBO #include #include #endif #include #include #include #include #include #include #include #define TI_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * We can only turn on header splitting if we're using extended receive * BDs. */ #if defined(TI_JUMBO_HDRSPLIT) && !defined(TI_SF_BUF_JUMBO) #error "options TI_JUMBO_HDRSPLIT requires TI_SF_BUF_JUMBO" #endif /* TI_JUMBO_HDRSPLIT && !TI_SF_BUF_JUMBO */ typedef enum { TI_SWAP_HTON, TI_SWAP_NTOH } ti_swap_type; /* * Various supported device vendors/types and their names. 
*/ static const struct ti_type ti_devs[] = { { ALT_VENDORID, ALT_DEVICEID_ACENIC, "Alteon AceNIC 1000baseSX Gigabit Ethernet" }, { ALT_VENDORID, ALT_DEVICEID_ACENIC_COPPER, "Alteon AceNIC 1000baseT Gigabit Ethernet" }, { TC_VENDORID, TC_DEVICEID_3C985, "3Com 3c985-SX Gigabit Ethernet" }, { NG_VENDORID, NG_DEVICEID_GA620, "Netgear GA620 1000baseSX Gigabit Ethernet" }, { NG_VENDORID, NG_DEVICEID_GA620T, "Netgear GA620 1000baseT Gigabit Ethernet" }, { SGI_VENDORID, SGI_DEVICEID_TIGON, "Silicon Graphics Gigabit Ethernet" }, { DEC_VENDORID, DEC_DEVICEID_FARALLON_PN9000SX, "Farallon PN9000SX Gigabit Ethernet" }, { 0, 0, NULL } }; static d_open_t ti_open; static d_close_t ti_close; static d_ioctl_t ti_ioctl2; static struct cdevsw ti_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ti_open, .d_close = ti_close, .d_ioctl = ti_ioctl2, .d_name = "ti", }; static int ti_probe(device_t); static int ti_attach(device_t); static int ti_detach(device_t); static void ti_txeof(struct ti_softc *); static void ti_rxeof(struct ti_softc *); static int ti_encap(struct ti_softc *, struct mbuf **); static void ti_intr(void *); static void ti_start(struct ifnet *); static void ti_start_locked(struct ifnet *); static int ti_ioctl(struct ifnet *, u_long, caddr_t); static uint64_t ti_get_counter(struct ifnet *, ift_counter); static void ti_init(void *); static void ti_init_locked(void *); static void ti_init2(struct ti_softc *); static void ti_stop(struct ti_softc *); static void ti_watchdog(void *); static int ti_shutdown(device_t); static int ti_ifmedia_upd(struct ifnet *); static int ti_ifmedia_upd_locked(struct ti_softc *); static void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *); static uint32_t ti_eeprom_putbyte(struct ti_softc *, int); static uint8_t ti_eeprom_getbyte(struct ti_softc *, int, uint8_t *); static int ti_read_eeprom(struct ti_softc *, caddr_t, int, int); static void ti_add_mcast(struct ti_softc *, struct ether_addr *); static void ti_del_mcast(struct ti_softc *, struct ether_addr *); static void ti_setmulti(struct ti_softc *); static void ti_mem_read(struct ti_softc *, uint32_t, uint32_t, void *); static void ti_mem_write(struct ti_softc *, uint32_t, uint32_t, void *); static void ti_mem_zero(struct ti_softc *, uint32_t, uint32_t); static int ti_copy_mem(struct ti_softc *, uint32_t, uint32_t, caddr_t, int, int); static int ti_copy_scratch(struct ti_softc *, uint32_t, uint32_t, caddr_t, int, int, int); static int ti_bcopy_swap(const void *, void *, size_t, ti_swap_type); static void ti_loadfw(struct ti_softc *); static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *); static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, caddr_t, int); static void ti_handle_events(struct ti_softc *); static void ti_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int ti_dma_alloc(struct ti_softc *); static void ti_dma_free(struct ti_softc *); static int ti_dma_ring_alloc(struct ti_softc *, bus_size_t, bus_size_t, bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *); static void ti_dma_ring_free(struct ti_softc *, bus_dma_tag_t *, uint8_t **, bus_dmamap_t, bus_addr_t *); static int ti_newbuf_std(struct ti_softc *, int); static int ti_newbuf_mini(struct ti_softc *, int); static int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *); static int ti_init_rx_ring_std(struct ti_softc *); static void ti_free_rx_ring_std(struct ti_softc *); static int ti_init_rx_ring_jumbo(struct ti_softc *); static void ti_free_rx_ring_jumbo(struct ti_softc *); static int 
ti_init_rx_ring_mini(struct ti_softc *);
static void ti_free_rx_ring_mini(struct ti_softc *);
static void ti_free_tx_ring(struct ti_softc *);
static int ti_init_tx_ring(struct ti_softc *);
static void ti_discard_std(struct ti_softc *, int);
#ifndef TI_SF_BUF_JUMBO
static void ti_discard_jumbo(struct ti_softc *, int);
#endif
static void ti_discard_mini(struct ti_softc *, int);

static int ti_64bitslot_war(struct ti_softc *);
static int ti_chipinit(struct ti_softc *);
static int ti_gibinit(struct ti_softc *);

#ifdef TI_JUMBO_HDRSPLIT
static __inline void ti_hdr_split(struct mbuf *top, int hdr_len,
    int pkt_len, int idx);
#endif /* TI_JUMBO_HDRSPLIT */

static void ti_sysctl_node(struct ti_softc *);

static device_method_t ti_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ti_probe),
	DEVMETHOD(device_attach,	ti_attach),
	DEVMETHOD(device_detach,	ti_detach),
	DEVMETHOD(device_shutdown,	ti_shutdown),
	{ 0, 0 }
};

static driver_t ti_driver = {
	"ti",
	ti_methods,
	sizeof(struct ti_softc)
};

static devclass_t ti_devclass;

DRIVER_MODULE(ti, pci, ti_driver, ti_devclass, 0, 0);
MODULE_DEPEND(ti, pci, 1, 1, 1);
MODULE_DEPEND(ti, ether, 1, 1, 1);

/*
 * Send an instruction or address to the EEPROM, check for ACK.
 */
static uint32_t
ti_eeprom_putbyte(struct ti_softc *sc, int byte)
{
	int i, ack = 0;

	/*
	 * Make sure we're in TX mode.
	 */
	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x80; i; i >>= 1) {
		if (byte & i) {
			TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
		} else {
			TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
		}
		DELAY(1);
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
	}

	/*
	 * Turn off TX mode.
	 */
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);

	/*
	 * Check for ack.
	 */
	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
	ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN;
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);

	return (ack);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'
 * We have to send two address bytes since the EEPROM can hold
 * more than 256 bytes of data.
 */
static uint8_t
ti_eeprom_getbyte(struct ti_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint8_t byte = 0;

	EEPROM_START;

	/*
	 * Send write control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
		device_printf(sc->ti_dev,
		    "failed to send write command, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	/*
	 * Send the first byte of the address of the byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) {
		device_printf(sc->ti_dev,
		    "failed to send address, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}
	/*
	 * Send the second byte of the address of the byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, addr & 0xFF)) {
		device_printf(sc->ti_dev,
		    "failed to send address, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	EEPROM_STOP;
	EEPROM_START;
	/*
	 * Send read control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
		device_printf(sc->ti_dev,
		    "failed to send read command, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	/*
	 * Start reading bits from EEPROM.
	 */
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
	for (i = 0x80; i; i >>= 1) {
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
		if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN)
			byte |= i;
		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
	}

	EEPROM_STOP;

	/*
	 * No ACK generated for read, so just return byte.
*/
	*dest = byte;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = ti_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * NIC memory read function.
 * Can be used to copy data from NIC local memory.
 */
static void
ti_mem_read(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
{
	int segptr, segsize, cnt;
	char *ptr;

	segptr = addr;
	cnt = len;
	ptr = buf;

	while (cnt) {
		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
		bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), (uint32_t *)ptr,
		    segsize / 4);
		ptr += segsize;
		segptr += segsize;
		cnt -= segsize;
	}
}

/*
 * NIC memory write function.
 * Can be used to copy data into NIC local memory.
 */
static void
ti_mem_write(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
{
	int segptr, segsize, cnt;
	char *ptr;

	segptr = addr;
	cnt = len;
	ptr = buf;

	while (cnt) {
		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
		bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), (uint32_t *)ptr,
		    segsize / 4);
		ptr += segsize;
		segptr += segsize;
		cnt -= segsize;
	}
}

/*
 * NIC memory clear function.
 * Can be used to clear a section of NIC local memory.
 */
static void
ti_mem_zero(struct ti_softc *sc, uint32_t addr, uint32_t len)
{
	int segptr, segsize, cnt;

	segptr = addr;
	cnt = len;

	while (cnt) {
		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
		bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle,
		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0, segsize / 4);
		segptr += segsize;
		cnt -= segsize;
	}
}

static int
ti_copy_mem(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len,
    caddr_t buf, int useraddr, int readdata)
{
	int segptr, segsize, cnt;
	caddr_t ptr;
	uint32_t origwin;
	int resid, segresid;
	int first_pass;

	TI_LOCK_ASSERT(sc);

	/*
	 * At the moment, we don't handle non-aligned cases, we just bail.
	 * If this proves to be a problem, it will be fixed.
	 */
	if (readdata == 0 && (tigon_addr & 0x3) != 0) {
		device_printf(sc->ti_dev, "%s: tigon address %#x isn't "
		    "word-aligned\n", __func__, tigon_addr);
		device_printf(sc->ti_dev, "%s: unaligned writes aren't "
		    "yet supported\n", __func__);
		return (EINVAL);
	}

	segptr = tigon_addr & ~0x3;
	segresid = tigon_addr - segptr;

	/*
	 * This is the non-aligned amount left over that we'll need to
	 * copy.
	 */
	resid = len & 0x3;

	/* Add in the left over amount at the front of the buffer. */
	resid += segresid;

	cnt = len & ~0x3;
	/*
	 * If resid + segresid is >= 4, add multiples of 4 to the count and
	 * decrease the residual by that much.
	 */
	cnt += resid & ~0x3;
	resid -= resid & ~0x3;

	ptr = buf;

	first_pass = 1;

	/*
	 * Save the old window base value.
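*/

	/*
	 * Editorial sketch, not part of this commit: the alignment
	 * bookkeeping above, restated.  A transfer of 'len' bytes at
	 * 'tigon_addr' splits into an aligned bulk region plus a small
	 * residual handled later by read/modify/write:
	 *
	 *	segptr   = tigon_addr & ~0x3;    word-aligned start
	 *	segresid = tigon_addr - segptr;  leading misalignment
	 *	resid    = (len & 0x3) + segresid;
	 *	cnt      = (len & ~0x3) + (resid & ~0x3);
	 *	resid   &= 0x3;                  0-3 trailing bytes
	 */

	/*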
*/ origwin = CSR_READ_4(sc, TI_WINBASE); while (cnt) { bus_size_t ti_offset; if (cnt < TI_WINLEN) segsize = cnt; else segsize = TI_WINLEN - (segptr % TI_WINLEN); CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); ti_offset = TI_WINDOW + (segptr & (TI_WINLEN -1)); if (readdata) { bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle, ti_offset, (uint32_t *)sc->ti_membuf, segsize >> 2); if (useraddr) { /* * Yeah, this is a little on the kludgy * side, but at least this code is only * used for debugging. */ ti_bcopy_swap(sc->ti_membuf, sc->ti_membuf2, segsize, TI_SWAP_NTOH); TI_UNLOCK(sc); if (first_pass) { copyout(&sc->ti_membuf2[segresid], ptr, segsize - segresid); first_pass = 0; } else copyout(sc->ti_membuf2, ptr, segsize); TI_LOCK(sc); } else { if (first_pass) { ti_bcopy_swap(sc->ti_membuf, sc->ti_membuf2, segsize, TI_SWAP_NTOH); TI_UNLOCK(sc); bcopy(&sc->ti_membuf2[segresid], ptr, segsize - segresid); TI_LOCK(sc); first_pass = 0; } else ti_bcopy_swap(sc->ti_membuf, ptr, segsize, TI_SWAP_NTOH); } } else { if (useraddr) { TI_UNLOCK(sc); copyin(ptr, sc->ti_membuf2, segsize); TI_LOCK(sc); ti_bcopy_swap(sc->ti_membuf2, sc->ti_membuf, segsize, TI_SWAP_HTON); } else ti_bcopy_swap(ptr, sc->ti_membuf, segsize, TI_SWAP_HTON); bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle, ti_offset, (uint32_t *)sc->ti_membuf, segsize >> 2); } segptr += segsize; ptr += segsize; cnt -= segsize; } /* * Handle leftover, non-word-aligned bytes. */ if (resid != 0) { uint32_t tmpval, tmpval2; bus_size_t ti_offset; /* * Set the segment pointer. */ CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); ti_offset = TI_WINDOW + (segptr & (TI_WINLEN - 1)); /* * First, grab whatever is in our source/destination. * We'll obviously need this for reads, but also for * writes, since we'll be doing read/modify/write. */ bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle, ti_offset, &tmpval, 1); /* * Next, translate this from little-endian to big-endian * (at least on i386 boxes). */ tmpval2 = ntohl(tmpval); if (readdata) { /* * If we're reading, just copy the leftover number * of bytes from the host byte order buffer to * the user's buffer. */ if (useraddr) { TI_UNLOCK(sc); copyout(&tmpval2, ptr, resid); TI_LOCK(sc); } else bcopy(&tmpval2, ptr, resid); } else { /* * If we're writing, first copy the bytes to be * written into the network byte order buffer, * leaving the rest of the buffer with whatever was * originally in there. Then, swap the bytes * around into host order and write them out. * * XXX KDM the read side of this has been verified * to work, but the write side of it has not been * verified. So user beware. */ if (useraddr) { TI_UNLOCK(sc); copyin(ptr, &tmpval2, resid); TI_LOCK(sc); } else bcopy(ptr, &tmpval2, resid); tmpval = htonl(tmpval2); bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle, ti_offset, &tmpval, 1); } } CSR_WRITE_4(sc, TI_WINBASE, origwin); return (0); } static int ti_copy_scratch(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len, caddr_t buf, int useraddr, int readdata, int cpu) { uint32_t segptr; int cnt; uint32_t tmpval, tmpval2; caddr_t ptr; TI_LOCK_ASSERT(sc); /* * At the moment, we don't handle non-aligned cases, we just bail. * If this proves to be a problem, it will be fixed. 
*/ if (tigon_addr & 0x3) { device_printf(sc->ti_dev, "%s: tigon address %#x " "isn't word-aligned\n", __func__, tigon_addr); return (EINVAL); } if (len & 0x3) { device_printf(sc->ti_dev, "%s: transfer length %d " "isn't word-aligned\n", __func__, len); return (EINVAL); } segptr = tigon_addr; cnt = len; ptr = buf; while (cnt) { CSR_WRITE_4(sc, CPU_REG(TI_SRAM_ADDR, cpu), segptr); if (readdata) { tmpval2 = CSR_READ_4(sc, CPU_REG(TI_SRAM_DATA, cpu)); tmpval = ntohl(tmpval2); /* * Note: I've used this debugging interface * extensively with Alteon's 12.3.15 firmware, * compiled with GCC 2.7.2.1 and binutils 2.9.1. * * When you compile the firmware without * optimization, which is necessary sometimes in * order to properly step through it, you sometimes * read out a bogus value of 0xc0017c instead of * whatever was supposed to be in that scratchpad * location. That value is on the stack somewhere, * but I've never been able to figure out what was * causing the problem. * * The address seems to pop up in random places, * often not in the same place on two subsequent * reads. * * In any case, the underlying data doesn't seem * to be affected, just the value read out. * * KDM, 3/7/2000 */ if (tmpval2 == 0xc0017c) device_printf(sc->ti_dev, "found 0xc0017c at " "%#x (tmpval2)\n", segptr); if (tmpval == 0xc0017c) device_printf(sc->ti_dev, "found 0xc0017c at " "%#x (tmpval)\n", segptr); if (useraddr) copyout(&tmpval, ptr, 4); else bcopy(&tmpval, ptr, 4); } else { if (useraddr) copyin(ptr, &tmpval2, 4); else bcopy(ptr, &tmpval2, 4); tmpval = htonl(tmpval2); CSR_WRITE_4(sc, CPU_REG(TI_SRAM_DATA, cpu), tmpval); } cnt -= 4; segptr += 4; ptr += 4; } return (0); } static int ti_bcopy_swap(const void *src, void *dst, size_t len, ti_swap_type swap_type) { const uint8_t *tmpsrc; uint8_t *tmpdst; size_t tmplen; if (len & 0x3) { printf("ti_bcopy_swap: length %zd isn't 32-bit aligned\n", len); return (-1); } tmpsrc = src; tmpdst = dst; tmplen = len; while (tmplen) { if (swap_type == TI_SWAP_NTOH) *(uint32_t *)tmpdst = ntohl(*(const uint32_t *)tmpsrc); else *(uint32_t *)tmpdst = htonl(*(const uint32_t *)tmpsrc); tmpsrc += 4; tmpdst += 4; tmplen -= 4; } return (0); } /* * Load firmware image into the NIC. Check that the firmware revision * is acceptable and see if we want the firmware for the Tigon 1 or * Tigon 2. 
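*/

/*
 * Editorial sketch, not part of this commit: ti_bcopy_swap() above in
 * use.  The helper copies a word at a time, byte-swapping as it goes,
 * and refuses lengths that aren't multiples of four; the wrapper name
 * is illustrative only.
 */
static int
example_copy_to_nic_order(const void *src, void *dst, size_t len)
{
	/* Returns -1 if len isn't 32-bit aligned, 0 on success. */
	return (ti_bcopy_swap(src, dst, len, TI_SWAP_HTON));
}

/*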
*/ static void ti_loadfw(struct ti_softc *sc) { TI_LOCK_ASSERT(sc); switch (sc->ti_hwrev) { case TI_HWREV_TIGON: if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR || tigonFwReleaseMinor != TI_FIRMWARE_MINOR || tigonFwReleaseFix != TI_FIRMWARE_FIX) { device_printf(sc->ti_dev, "firmware revision mismatch; " "want %d.%d.%d, got %d.%d.%d\n", TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, TI_FIRMWARE_FIX, tigonFwReleaseMajor, tigonFwReleaseMinor, tigonFwReleaseFix); return; } ti_mem_write(sc, tigonFwTextAddr, tigonFwTextLen, tigonFwText); ti_mem_write(sc, tigonFwDataAddr, tigonFwDataLen, tigonFwData); ti_mem_write(sc, tigonFwRodataAddr, tigonFwRodataLen, tigonFwRodata); ti_mem_zero(sc, tigonFwBssAddr, tigonFwBssLen); ti_mem_zero(sc, tigonFwSbssAddr, tigonFwSbssLen); CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr); break; case TI_HWREV_TIGON_II: if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR || tigon2FwReleaseMinor != TI_FIRMWARE_MINOR || tigon2FwReleaseFix != TI_FIRMWARE_FIX) { device_printf(sc->ti_dev, "firmware revision mismatch; " "want %d.%d.%d, got %d.%d.%d\n", TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, TI_FIRMWARE_FIX, tigon2FwReleaseMajor, tigon2FwReleaseMinor, tigon2FwReleaseFix); return; } ti_mem_write(sc, tigon2FwTextAddr, tigon2FwTextLen, tigon2FwText); ti_mem_write(sc, tigon2FwDataAddr, tigon2FwDataLen, tigon2FwData); ti_mem_write(sc, tigon2FwRodataAddr, tigon2FwRodataLen, tigon2FwRodata); ti_mem_zero(sc, tigon2FwBssAddr, tigon2FwBssLen); ti_mem_zero(sc, tigon2FwSbssAddr, tigon2FwSbssLen); CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr); break; default: device_printf(sc->ti_dev, "can't load firmware: unknown hardware rev\n"); break; } } /* * Send the NIC a command via the command ring. */ static void ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd) { int index; index = sc->ti_cmd_saved_prodidx; CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd)); TI_INC(index, TI_CMD_RING_CNT); CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); sc->ti_cmd_saved_prodidx = index; } /* * Send the NIC an extended command. The 'len' parameter specifies the * number of command slots to include after the initial command. */ static void ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg, int len) { int index; int i; index = sc->ti_cmd_saved_prodidx; CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd)); TI_INC(index, TI_CMD_RING_CNT); for (i = 0; i < len; i++) { CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(&arg[i * 4])); TI_INC(index, TI_CMD_RING_CNT); } CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); sc->ti_cmd_saved_prodidx = index; } /* * Handle events that have triggered interrupts. 
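*/

/*
 * Editorial sketch, not part of this commit: the producer-index
 * pattern ti_cmd() and ti_cmd_ext() above rely on.  TI_INC() is
 * assumed to be the driver's modular increment; the open-coded
 * equivalent looks like this.
 */
static int
example_cmd_ring_advance(int index)
{
	/* Wrap around at the end of the command ring. */
	index = (index + 1) % TI_CMD_RING_CNT;
	return (index);
}

/*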
*/ static void ti_handle_events(struct ti_softc *sc) { struct ti_event_desc *e; if (sc->ti_rdata.ti_event_ring == NULL) return; bus_dmamap_sync(sc->ti_cdata.ti_event_ring_tag, sc->ti_cdata.ti_event_ring_map, BUS_DMASYNC_POSTREAD); while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) { e = &sc->ti_rdata.ti_event_ring[sc->ti_ev_saved_considx]; switch (TI_EVENT_EVENT(e)) { case TI_EV_LINKSTAT_CHANGED: sc->ti_linkstat = TI_EVENT_CODE(e); if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) { if_link_state_change(sc->ti_ifp, LINK_STATE_UP); sc->ti_ifp->if_baudrate = IF_Mbps(100); if (bootverbose) device_printf(sc->ti_dev, "10/100 link up\n"); } else if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) { if_link_state_change(sc->ti_ifp, LINK_STATE_UP); sc->ti_ifp->if_baudrate = IF_Gbps(1UL); if (bootverbose) device_printf(sc->ti_dev, "gigabit link up\n"); } else if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) { if_link_state_change(sc->ti_ifp, LINK_STATE_DOWN); sc->ti_ifp->if_baudrate = 0; if (bootverbose) device_printf(sc->ti_dev, "link down\n"); } break; case TI_EV_ERROR: if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD) device_printf(sc->ti_dev, "invalid command\n"); else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD) device_printf(sc->ti_dev, "unknown command\n"); else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG) device_printf(sc->ti_dev, "bad config data\n"); break; case TI_EV_FIRMWARE_UP: ti_init2(sc); break; case TI_EV_STATS_UPDATED: case TI_EV_RESET_JUMBO_RING: case TI_EV_MCAST_UPDATED: /* Who cares. */ break; default: device_printf(sc->ti_dev, "unknown event: %d\n", TI_EVENT_EVENT(e)); break; } /* Advance the consumer index. */ TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT); CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx); } bus_dmamap_sync(sc->ti_cdata.ti_event_ring_tag, sc->ti_cdata.ti_event_ring_map, BUS_DMASYNC_PREREAD); } struct ti_dmamap_arg { bus_addr_t ti_busaddr; }; static void ti_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct ti_dmamap_arg *ctx; if (error) return; KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg)); ctx = arg; ctx->ti_busaddr = segs->ds_addr; } static int ti_dma_ring_alloc(struct ti_softc *sc, bus_size_t alignment, bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr, const char *msg) { struct ti_dmamap_arg ctx; int error; error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, alignment, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag); if (error != 0) { device_printf(sc->ti_dev, "could not create %s dma tag\n", msg); return (error); } /* Allocate DMA'able memory for ring. */ error = bus_dmamem_alloc(*tag, (void **)ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map); if (error != 0) { device_printf(sc->ti_dev, "could not allocate DMA'able memory for %s\n", msg); return (error); } /* Load the address of the ring. 
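*/

	/*
	 * Editorial sketch, not part of this commit: the callback
	 * contract behind the bus_dmamap_load() below, which is what
	 * ti_dma_map_addr() above implements.  The callback receives
	 * the segment list once the load completes:
	 *
	 *	static void
	 *	example_map_addr(void *arg, bus_dma_segment_t *segs,
	 *	    int nseg, int error)
	 *	{
	 *		bus_addr_t *paddr = arg;
	 *
	 *		if (error != 0)
	 *			return;
	 *		KASSERT(nseg == 1, ("%d segments", nseg));
	 *		*paddr = segs[0].ds_addr;
	 *	}
	 */

	/*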
*/ ctx.ti_busaddr = 0; error = bus_dmamap_load(*tag, *map, *ring, maxsize, ti_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->ti_dev, "could not load DMA'able memory for %s\n", msg); return (error); } *paddr = ctx.ti_busaddr; return (0); } static void ti_dma_ring_free(struct ti_softc *sc, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t map, bus_addr_t *paddr) { if (*paddr != 0) { bus_dmamap_unload(*tag, map); *paddr = 0; } if (*ring != NULL) { bus_dmamem_free(*tag, *ring, map); *ring = NULL; } if (*tag) { bus_dma_tag_destroy(*tag); *tag = NULL; } } static int ti_dma_alloc(struct ti_softc *sc) { bus_addr_t lowaddr; int i, error; lowaddr = BUS_SPACE_MAXADDR; if (sc->ti_dac == 0) lowaddr = BUS_SPACE_MAXADDR_32BIT; error = bus_dma_tag_create(bus_get_dma_tag(sc->ti_dev), 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->ti_cdata.ti_parent_tag); if (error != 0) { device_printf(sc->ti_dev, "could not allocate parent dma tag\n"); return (ENOMEM); } error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, sizeof(struct ti_gib), &sc->ti_cdata.ti_gib_tag, (uint8_t **)&sc->ti_rdata.ti_info, &sc->ti_cdata.ti_gib_map, &sc->ti_rdata.ti_info_paddr, "GIB"); if (error) return (error); /* Producer/consumer status */ error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, sizeof(struct ti_status), &sc->ti_cdata.ti_status_tag, (uint8_t **)&sc->ti_rdata.ti_status, &sc->ti_cdata.ti_status_map, &sc->ti_rdata.ti_status_paddr, "event ring"); if (error) return (error); /* Event ring */ error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_EVENT_RING_SZ, &sc->ti_cdata.ti_event_ring_tag, (uint8_t **)&sc->ti_rdata.ti_event_ring, &sc->ti_cdata.ti_event_ring_map, &sc->ti_rdata.ti_event_ring_paddr, "event ring"); if (error) return (error); /* Command ring lives in shared memory so no need to create DMA area. */ /* Standard RX ring */ error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_STD_RX_RING_SZ, &sc->ti_cdata.ti_rx_std_ring_tag, (uint8_t **)&sc->ti_rdata.ti_rx_std_ring, &sc->ti_cdata.ti_rx_std_ring_map, &sc->ti_rdata.ti_rx_std_ring_paddr, "RX ring"); if (error) return (error); /* Jumbo RX ring */ error = ti_dma_ring_alloc(sc, TI_JUMBO_RING_ALIGN, TI_JUMBO_RX_RING_SZ, &sc->ti_cdata.ti_rx_jumbo_ring_tag, (uint8_t **)&sc->ti_rdata.ti_rx_jumbo_ring, &sc->ti_cdata.ti_rx_jumbo_ring_map, &sc->ti_rdata.ti_rx_jumbo_ring_paddr, "jumbo RX ring"); if (error) return (error); /* RX return ring */ error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_RX_RETURN_RING_SZ, &sc->ti_cdata.ti_rx_return_ring_tag, (uint8_t **)&sc->ti_rdata.ti_rx_return_ring, &sc->ti_cdata.ti_rx_return_ring_map, &sc->ti_rdata.ti_rx_return_ring_paddr, "RX return ring"); if (error) return (error); /* Create DMA tag for standard RX mbufs. */ error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->ti_cdata.ti_rx_std_tag); if (error) { device_printf(sc->ti_dev, "could not allocate RX dma tag\n"); return (error); } /* Create DMA tag for jumbo RX mbufs. */ #ifdef TI_SF_BUF_JUMBO /* * The VM system will take care of providing aligned pages. Alignment * is set to 1 here so that busdma resources won't be wasted. 
*/ error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE * 4, 4, PAGE_SIZE, 0, NULL, NULL, &sc->ti_cdata.ti_rx_jumbo_tag); #else error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL, &sc->ti_cdata.ti_rx_jumbo_tag); #endif if (error) { device_printf(sc->ti_dev, "could not allocate jumbo RX dma tag\n"); return (error); } /* Create DMA tag for TX mbufs. */ error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * TI_MAXTXSEGS, TI_MAXTXSEGS, MCLBYTES, 0, NULL, NULL, &sc->ti_cdata.ti_tx_tag); if (error) { device_printf(sc->ti_dev, "could not allocate TX dma tag\n"); return (ENOMEM); } /* Create DMA maps for RX buffers. */ for (i = 0; i < TI_STD_RX_RING_CNT; i++) { error = bus_dmamap_create(sc->ti_cdata.ti_rx_std_tag, 0, &sc->ti_cdata.ti_rx_std_maps[i]); if (error) { device_printf(sc->ti_dev, "could not create DMA map for RX\n"); return (error); } } error = bus_dmamap_create(sc->ti_cdata.ti_rx_std_tag, 0, &sc->ti_cdata.ti_rx_std_sparemap); if (error) { device_printf(sc->ti_dev, "could not create spare DMA map for RX\n"); return (error); } /* Create DMA maps for jumbo RX buffers. */ for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { error = bus_dmamap_create(sc->ti_cdata.ti_rx_jumbo_tag, 0, &sc->ti_cdata.ti_rx_jumbo_maps[i]); if (error) { device_printf(sc->ti_dev, "could not create DMA map for jumbo RX\n"); return (error); } } error = bus_dmamap_create(sc->ti_cdata.ti_rx_jumbo_tag, 0, &sc->ti_cdata.ti_rx_jumbo_sparemap); if (error) { device_printf(sc->ti_dev, "could not create spare DMA map for jumbo RX\n"); return (error); } /* Create DMA maps for TX buffers. */ for (i = 0; i < TI_TX_RING_CNT; i++) { error = bus_dmamap_create(sc->ti_cdata.ti_tx_tag, 0, &sc->ti_cdata.ti_txdesc[i].tx_dmamap); if (error) { device_printf(sc->ti_dev, "could not create DMA map for TX\n"); return (ENOMEM); } } /* Mini ring and TX ring is not available on Tigon 1. */ if (sc->ti_hwrev == TI_HWREV_TIGON) return (0); /* TX ring */ error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_TX_RING_SZ, &sc->ti_cdata.ti_tx_ring_tag, (uint8_t **)&sc->ti_rdata.ti_tx_ring, &sc->ti_cdata.ti_tx_ring_map, &sc->ti_rdata.ti_tx_ring_paddr, "TX ring"); if (error) return (error); /* Mini RX ring */ error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_MINI_RX_RING_SZ, &sc->ti_cdata.ti_rx_mini_ring_tag, (uint8_t **)&sc->ti_rdata.ti_rx_mini_ring, &sc->ti_cdata.ti_rx_mini_ring_map, &sc->ti_rdata.ti_rx_mini_ring_paddr, "mini RX ring"); if (error) return (error); /* Create DMA tag for mini RX mbufs. */ error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1, MHLEN, 0, NULL, NULL, &sc->ti_cdata.ti_rx_mini_tag); if (error) { device_printf(sc->ti_dev, "could not allocate mini RX dma tag\n"); return (error); } /* Create DMA maps for mini RX buffers. 
*/ for (i = 0; i < TI_MINI_RX_RING_CNT; i++) { error = bus_dmamap_create(sc->ti_cdata.ti_rx_mini_tag, 0, &sc->ti_cdata.ti_rx_mini_maps[i]); if (error) { device_printf(sc->ti_dev, "could not create DMA map for mini RX\n"); return (error); } } error = bus_dmamap_create(sc->ti_cdata.ti_rx_mini_tag, 0, &sc->ti_cdata.ti_rx_mini_sparemap); if (error) { device_printf(sc->ti_dev, "could not create spare DMA map for mini RX\n"); return (error); } return (0); } static void ti_dma_free(struct ti_softc *sc) { int i; /* Destroy DMA maps for RX buffers. */ for (i = 0; i < TI_STD_RX_RING_CNT; i++) { if (sc->ti_cdata.ti_rx_std_maps[i]) { bus_dmamap_destroy(sc->ti_cdata.ti_rx_std_tag, sc->ti_cdata.ti_rx_std_maps[i]); sc->ti_cdata.ti_rx_std_maps[i] = NULL; } } if (sc->ti_cdata.ti_rx_std_sparemap) { bus_dmamap_destroy(sc->ti_cdata.ti_rx_std_tag, sc->ti_cdata.ti_rx_std_sparemap); sc->ti_cdata.ti_rx_std_sparemap = NULL; } if (sc->ti_cdata.ti_rx_std_tag) { bus_dma_tag_destroy(sc->ti_cdata.ti_rx_std_tag); sc->ti_cdata.ti_rx_std_tag = NULL; } /* Destroy DMA maps for jumbo RX buffers. */ for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { if (sc->ti_cdata.ti_rx_jumbo_maps[i]) { bus_dmamap_destroy(sc->ti_cdata.ti_rx_jumbo_tag, sc->ti_cdata.ti_rx_jumbo_maps[i]); sc->ti_cdata.ti_rx_jumbo_maps[i] = NULL; } } if (sc->ti_cdata.ti_rx_jumbo_sparemap) { bus_dmamap_destroy(sc->ti_cdata.ti_rx_jumbo_tag, sc->ti_cdata.ti_rx_jumbo_sparemap); sc->ti_cdata.ti_rx_jumbo_sparemap = NULL; } if (sc->ti_cdata.ti_rx_jumbo_tag) { bus_dma_tag_destroy(sc->ti_cdata.ti_rx_jumbo_tag); sc->ti_cdata.ti_rx_jumbo_tag = NULL; } /* Destroy DMA maps for mini RX buffers. */ for (i = 0; i < TI_MINI_RX_RING_CNT; i++) { if (sc->ti_cdata.ti_rx_mini_maps[i]) { bus_dmamap_destroy(sc->ti_cdata.ti_rx_mini_tag, sc->ti_cdata.ti_rx_mini_maps[i]); sc->ti_cdata.ti_rx_mini_maps[i] = NULL; } } if (sc->ti_cdata.ti_rx_mini_sparemap) { bus_dmamap_destroy(sc->ti_cdata.ti_rx_mini_tag, sc->ti_cdata.ti_rx_mini_sparemap); sc->ti_cdata.ti_rx_mini_sparemap = NULL; } if (sc->ti_cdata.ti_rx_mini_tag) { bus_dma_tag_destroy(sc->ti_cdata.ti_rx_mini_tag); sc->ti_cdata.ti_rx_mini_tag = NULL; } /* Destroy DMA maps for TX buffers. */ for (i = 0; i < TI_TX_RING_CNT; i++) { if (sc->ti_cdata.ti_txdesc[i].tx_dmamap) { bus_dmamap_destroy(sc->ti_cdata.ti_tx_tag, sc->ti_cdata.ti_txdesc[i].tx_dmamap); sc->ti_cdata.ti_txdesc[i].tx_dmamap = NULL; } } if (sc->ti_cdata.ti_tx_tag) { bus_dma_tag_destroy(sc->ti_cdata.ti_tx_tag); sc->ti_cdata.ti_tx_tag = NULL; } /* Destroy standard RX ring. */ ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_std_ring_tag, (void *)&sc->ti_rdata.ti_rx_std_ring, sc->ti_cdata.ti_rx_std_ring_map, &sc->ti_rdata.ti_rx_std_ring_paddr); /* Destroy jumbo RX ring. */ ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_jumbo_ring_tag, (void *)&sc->ti_rdata.ti_rx_jumbo_ring, sc->ti_cdata.ti_rx_jumbo_ring_map, &sc->ti_rdata.ti_rx_jumbo_ring_paddr); /* Destroy mini RX ring. */ ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_mini_ring_tag, (void *)&sc->ti_rdata.ti_rx_mini_ring, sc->ti_cdata.ti_rx_mini_ring_map, &sc->ti_rdata.ti_rx_mini_ring_paddr); /* Destroy RX return ring. */ ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_return_ring_tag, (void *)&sc->ti_rdata.ti_rx_return_ring, sc->ti_cdata.ti_rx_return_ring_map, &sc->ti_rdata.ti_rx_return_ring_paddr); /* Destroy TX ring. */ ti_dma_ring_free(sc, &sc->ti_cdata.ti_tx_ring_tag, (void *)&sc->ti_rdata.ti_tx_ring, sc->ti_cdata.ti_tx_ring_map, &sc->ti_rdata.ti_tx_ring_paddr); /* Destroy status block. 
*/
	ti_dma_ring_free(sc, &sc->ti_cdata.ti_status_tag,
	    (void *)&sc->ti_rdata.ti_status, sc->ti_cdata.ti_status_map,
	    &sc->ti_rdata.ti_status_paddr);

	/* Destroy event ring. */
	ti_dma_ring_free(sc, &sc->ti_cdata.ti_event_ring_tag,
	    (void *)&sc->ti_rdata.ti_event_ring,
	    sc->ti_cdata.ti_event_ring_map,
	    &sc->ti_rdata.ti_event_ring_paddr);

	/* Destroy GIB. */
	ti_dma_ring_free(sc, &sc->ti_cdata.ti_gib_tag,
	    (void *)&sc->ti_rdata.ti_info, sc->ti_cdata.ti_gib_map,
	    &sc->ti_rdata.ti_info_paddr);

	/* Destroy the parent tag. */
	if (sc->ti_cdata.ti_parent_tag) {
		bus_dma_tag_destroy(sc->ti_cdata.ti_parent_tag);
		sc->ti_cdata.ti_parent_tag = NULL;
	}
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
ti_newbuf_std(struct ti_softc *sc, int i)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	struct mbuf *m;
	struct ti_rx_desc *r;
	int error, nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_std_tag,
	    sc->ti_cdata.ti_rx_std_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
		bus_dmamap_sync(sc->ti_cdata.ti_rx_std_tag,
		    sc->ti_cdata.ti_rx_std_maps[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->ti_cdata.ti_rx_std_tag,
		    sc->ti_cdata.ti_rx_std_maps[i]);
	}

	map = sc->ti_cdata.ti_rx_std_maps[i];
	sc->ti_cdata.ti_rx_std_maps[i] = sc->ti_cdata.ti_rx_std_sparemap;
	sc->ti_cdata.ti_rx_std_sparemap = map;
	sc->ti_cdata.ti_rx_std_chain[i] = m;

	r = &sc->ti_rdata.ti_rx_std_ring[i];
	ti_hostaddr64(&r->ti_addr, segs[0].ds_addr);
	r->ti_len = segs[0].ds_len;
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = 0;
	r->ti_vlan_tag = 0;
	r->ti_tcp_udp_cksum = 0;
	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_idx = i;

	bus_dmamap_sync(sc->ti_cdata.ti_rx_std_tag,
	    sc->ti_cdata.ti_rx_std_maps[i], BUS_DMASYNC_PREREAD);
	return (0);
}

/*
 * Initialize a mini receive ring descriptor.  This only applies to
 * the Tigon 2.
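*/

/*
 * Editorial sketch, not part of this commit: the spare-map idiom used
 * by ti_newbuf_std() above.  The replacement mbuf is loaded into a
 * dedicated spare map first, so if the load fails the ring slot still
 * holds its old, valid mapping; only on success do the two maps trade
 * places.  Names here are illustrative only.
 */
static int
example_refill_rx_slot(bus_dma_tag_t tag, bus_dmamap_t *slot,
    bus_dmamap_t *spare, struct mbuf *m, bus_addr_t *paddr)
{
	bus_dma_segment_t segs[1];
	bus_dmamap_t tmp;
	int error, nsegs;

	/* Load into the spare map; the old buffer stays mapped. */
	error = bus_dmamap_load_mbuf_sg(tag, *spare, m, segs, &nsegs, 0);
	if (error != 0)
		return (error);
	/* Success: swap the slot's map with the spare. */
	tmp = *slot;
	*slot = *spare;
	*spare = tmp;
	*paddr = segs[0].ds_addr;
	return (0);
}

/*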
*/ static int ti_newbuf_mini(struct ti_softc *sc, int i) { bus_dmamap_t map; bus_dma_segment_t segs[1]; struct mbuf *m; struct ti_rx_desc *r; int error, nsegs; MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MHLEN; m_adj(m, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_mini_tag, sc->ti_cdata.ti_rx_mini_sparemap, m, segs, &nsegs, 0); if (error != 0) { m_freem(m); return (error); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) { bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_tag, sc->ti_cdata.ti_rx_mini_maps[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->ti_cdata.ti_rx_mini_tag, sc->ti_cdata.ti_rx_mini_maps[i]); } map = sc->ti_cdata.ti_rx_mini_maps[i]; sc->ti_cdata.ti_rx_mini_maps[i] = sc->ti_cdata.ti_rx_mini_sparemap; sc->ti_cdata.ti_rx_mini_sparemap = map; sc->ti_cdata.ti_rx_mini_chain[i] = m; r = &sc->ti_rdata.ti_rx_mini_ring[i]; ti_hostaddr64(&r->ti_addr, segs[0].ds_addr); r->ti_len = segs[0].ds_len; r->ti_type = TI_BDTYPE_RECV_BD; r->ti_flags = TI_BDFLAG_MINI_RING; r->ti_vlan_tag = 0; r->ti_tcp_udp_cksum = 0; if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM) r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; r->ti_idx = i; bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_tag, sc->ti_cdata.ti_rx_mini_maps[i], BUS_DMASYNC_PREREAD); return (0); } #ifndef TI_SF_BUF_JUMBO /* * Initialize a jumbo receive ring descriptor. This allocates * a jumbo buffer from the pool managed internally by the driver. */ static int ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *dummy) { bus_dmamap_t map; bus_dma_segment_t segs[1]; struct mbuf *m; struct ti_rx_desc *r; int error, nsegs; (void)dummy; m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MJUM9BYTES; m_adj(m, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_jumbo_tag, sc->ti_cdata.ti_rx_jumbo_sparemap, m, segs, &nsegs, 0); if (error != 0) { m_freem(m); return (error); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) { bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, sc->ti_cdata.ti_rx_jumbo_maps[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->ti_cdata.ti_rx_jumbo_tag, sc->ti_cdata.ti_rx_jumbo_maps[i]); } map = sc->ti_cdata.ti_rx_jumbo_maps[i]; sc->ti_cdata.ti_rx_jumbo_maps[i] = sc->ti_cdata.ti_rx_jumbo_sparemap; sc->ti_cdata.ti_rx_jumbo_sparemap = map; sc->ti_cdata.ti_rx_jumbo_chain[i] = m; r = &sc->ti_rdata.ti_rx_jumbo_ring[i]; ti_hostaddr64(&r->ti_addr, segs[0].ds_addr); r->ti_len = segs[0].ds_len; r->ti_type = TI_BDTYPE_RECV_JUMBO_BD; r->ti_flags = TI_BDFLAG_JUMBO_RING; r->ti_vlan_tag = 0; r->ti_tcp_udp_cksum = 0; if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM) r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; r->ti_idx = i; bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, sc->ti_cdata.ti_rx_jumbo_maps[i], BUS_DMASYNC_PREREAD); return (0); } #else #if (PAGE_SIZE == 4096) #define NPAYLOAD 2 #else #define NPAYLOAD 1 #endif #define TCP_HDR_LEN (52 + sizeof(struct ether_header)) #define UDP_HDR_LEN (28 + sizeof(struct ether_header)) #define NFS_HDR_LEN (UDP_HDR_LEN) static int HDR_LEN = TCP_HDR_LEN; /* * Initialize a jumbo receive ring descriptor. This allocates * a jumbo buffer from the pool managed internally by the driver. 
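*/

/*
 * Editorial sketch, not part of this commit: the ETHER_ALIGN trim
 * used by each ti_newbuf_*() routine above.  Dropping two bytes from
 * the front of a fresh buffer makes the IP header that follows the
 * 14-byte Ethernet header land on a 32-bit boundary.
 */
static struct mbuf *
example_aligned_rx_mbuf(void)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);		/* reserve 2 bytes of headroom */
	return (m);
}

/*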
*/ static int ti_newbuf_jumbo(struct ti_softc *sc, int idx, struct mbuf *m_old) { bus_dmamap_t map; struct mbuf *cur, *m_new = NULL; struct mbuf *m[3] = {NULL, NULL, NULL}; struct ti_rx_desc_ext *r; vm_page_t frame; /* 1 extra buf to make nobufs easy*/ struct sf_buf *sf[3] = {NULL, NULL, NULL}; int i; bus_dma_segment_t segs[4]; int nsegs; if (m_old != NULL) { m_new = m_old; cur = m_old->m_next; for (i = 0; i <= NPAYLOAD; i++){ m[i] = cur; cur = cur->m_next; } } else { /* Allocate the mbufs. */ MGETHDR(m_new, M_NOWAIT, MT_DATA); if (m_new == NULL) { device_printf(sc->ti_dev, "mbuf allocation failed " "-- packet dropped!\n"); goto nobufs; } MGET(m[NPAYLOAD], M_NOWAIT, MT_DATA); if (m[NPAYLOAD] == NULL) { device_printf(sc->ti_dev, "cluster mbuf allocation " "failed -- packet dropped!\n"); goto nobufs; } - MCLGET(m[NPAYLOAD], M_NOWAIT); - if ((m[NPAYLOAD]->m_flags & M_EXT) == 0) { + if (!(MCLGET(m[NPAYLOAD], M_NOWAIT))) { device_printf(sc->ti_dev, "mbuf allocation failed " "-- packet dropped!\n"); goto nobufs; } m[NPAYLOAD]->m_len = MCLBYTES; for (i = 0; i < NPAYLOAD; i++){ MGET(m[i], M_NOWAIT, MT_DATA); if (m[i] == NULL) { device_printf(sc->ti_dev, "mbuf allocation " "failed -- packet dropped!\n"); goto nobufs; } frame = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); if (frame == NULL) { device_printf(sc->ti_dev, "buffer allocation " "failed -- packet dropped!\n"); printf(" index %d page %d\n", idx, i); goto nobufs; } sf[i] = sf_buf_alloc(frame, SFB_NOWAIT); if (sf[i] == NULL) { vm_page_unwire(frame, PQ_INACTIVE); vm_page_free(frame); device_printf(sc->ti_dev, "buffer allocation " "failed -- packet dropped!\n"); printf(" index %d page %d\n", idx, i); goto nobufs; } } for (i = 0; i < NPAYLOAD; i++){ /* Attach the buffer to the mbuf. */ m[i]->m_data = (void *)sf_buf_kva(sf[i]); m[i]->m_len = PAGE_SIZE; MEXTADD(m[i], sf_buf_kva(sf[i]), PAGE_SIZE, sf_buf_mext, (void*)sf_buf_kva(sf[i]), sf[i], 0, EXT_DISPOSABLE); m[i]->m_next = m[i+1]; } /* link the buffers to the header */ m_new->m_next = m[0]; m_new->m_data += ETHER_ALIGN; if (sc->ti_hdrsplit) m_new->m_len = MHLEN - ETHER_ALIGN; else m_new->m_len = HDR_LEN; m_new->m_pkthdr.len = NPAYLOAD * PAGE_SIZE + m_new->m_len; } /* Set up the descriptor. */ r = &sc->ti_rdata.ti_rx_jumbo_ring[idx]; sc->ti_cdata.ti_rx_jumbo_chain[idx] = m_new; map = sc->ti_cdata.ti_rx_jumbo_maps[i]; if (bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_jumbo_tag, map, m_new, segs, &nsegs, 0)) return (ENOBUFS); if ((nsegs < 1) || (nsegs > 4)) return (ENOBUFS); ti_hostaddr64(&r->ti_addr0, segs[0].ds_addr); r->ti_len0 = m_new->m_len; ti_hostaddr64(&r->ti_addr1, segs[1].ds_addr); r->ti_len1 = PAGE_SIZE; ti_hostaddr64(&r->ti_addr2, segs[2].ds_addr); r->ti_len2 = m[1]->m_ext.ext_size; /* could be PAGE_SIZE or MCLBYTES */ if (PAGE_SIZE == 4096) { ti_hostaddr64(&r->ti_addr3, segs[3].ds_addr); r->ti_len3 = MCLBYTES; } else { r->ti_len3 = 0; } r->ti_type = TI_BDTYPE_RECV_JUMBO_BD; r->ti_flags = TI_BDFLAG_JUMBO_RING|TI_RCB_FLAG_USE_EXT_RX_BD; if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM) r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM|TI_BDFLAG_IP_CKSUM; r->ti_idx = idx; bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, map, BUS_DMASYNC_PREREAD); return (0); nobufs: /* * Warning! : * This can only be called before the mbufs are strung together. * If the mbufs are strung together, m_freem() will free the chain, * so that the later mbufs will be freed multiple times. 
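*/

	/*
	 * Editorial sketch, not part of this commit: the MEXTADD()
	 * usage above, reduced to its essentials.  The free routine
	 * and its two opaque arguments run when the last reference to
	 * the external storage is dropped (names illustrative only):
	 *
	 *	MGET(m, M_NOWAIT, MT_DATA);
	 *	if (m != NULL) {
	 *		MEXTADD(m, buf, size, example_ext_free,
	 *		    arg1, arg2, 0, EXT_DISPOSABLE);
	 *		m->m_len = size;
	 *	}
	 */

	/*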
*/ if (m_new) m_freem(m_new); for (i = 0; i < 3; i++) { if (m[i]) m_freem(m[i]); if (sf[i]) sf_buf_mext((void *)sf_buf_kva(sf[i]), sf[i]); } return (ENOBUFS); } #endif /* * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, * that's 1MB or memory, which is a lot. For now, we fill only the first * 256 ring entries and hope that our CPU is fast enough to keep up with * the NIC. */ static int ti_init_rx_ring_std(struct ti_softc *sc) { int i; struct ti_cmd_desc cmd; for (i = 0; i < TI_STD_RX_RING_CNT; i++) { if (ti_newbuf_std(sc, i) != 0) return (ENOBUFS); }; sc->ti_std = TI_STD_RX_RING_CNT - 1; TI_UPDATE_STDPROD(sc, TI_STD_RX_RING_CNT - 1); return (0); } static void ti_free_rx_ring_std(struct ti_softc *sc) { bus_dmamap_t map; int i; for (i = 0; i < TI_STD_RX_RING_CNT; i++) { if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) { map = sc->ti_cdata.ti_rx_std_maps[i]; bus_dmamap_sync(sc->ti_cdata.ti_rx_std_tag, map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->ti_cdata.ti_rx_std_tag, map); m_freem(sc->ti_cdata.ti_rx_std_chain[i]); sc->ti_cdata.ti_rx_std_chain[i] = NULL; } } bzero(sc->ti_rdata.ti_rx_std_ring, TI_STD_RX_RING_SZ); bus_dmamap_sync(sc->ti_cdata.ti_rx_std_ring_tag, sc->ti_cdata.ti_rx_std_ring_map, BUS_DMASYNC_PREWRITE); } static int ti_init_rx_ring_jumbo(struct ti_softc *sc) { struct ti_cmd_desc cmd; int i; for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { if (ti_newbuf_jumbo(sc, i, NULL) != 0) return (ENOBUFS); }; sc->ti_jumbo = TI_JUMBO_RX_RING_CNT - 1; TI_UPDATE_JUMBOPROD(sc, TI_JUMBO_RX_RING_CNT - 1); return (0); } static void ti_free_rx_ring_jumbo(struct ti_softc *sc) { bus_dmamap_t map; int i; for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) { map = sc->ti_cdata.ti_rx_jumbo_maps[i]; bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->ti_cdata.ti_rx_jumbo_tag, map); m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]); sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL; } } bzero(sc->ti_rdata.ti_rx_jumbo_ring, TI_JUMBO_RX_RING_SZ); bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_ring_tag, sc->ti_cdata.ti_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); } static int ti_init_rx_ring_mini(struct ti_softc *sc) { int i; for (i = 0; i < TI_MINI_RX_RING_CNT; i++) { if (ti_newbuf_mini(sc, i) != 0) return (ENOBUFS); }; sc->ti_mini = TI_MINI_RX_RING_CNT - 1; TI_UPDATE_MINIPROD(sc, TI_MINI_RX_RING_CNT - 1); return (0); } static void ti_free_rx_ring_mini(struct ti_softc *sc) { bus_dmamap_t map; int i; if (sc->ti_rdata.ti_rx_mini_ring == NULL) return; for (i = 0; i < TI_MINI_RX_RING_CNT; i++) { if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) { map = sc->ti_cdata.ti_rx_mini_maps[i]; bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_tag, map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->ti_cdata.ti_rx_mini_tag, map); m_freem(sc->ti_cdata.ti_rx_mini_chain[i]); sc->ti_cdata.ti_rx_mini_chain[i] = NULL; } } bzero(sc->ti_rdata.ti_rx_mini_ring, TI_MINI_RX_RING_SZ); bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_ring_tag, sc->ti_cdata.ti_rx_mini_ring_map, BUS_DMASYNC_PREWRITE); } static void ti_free_tx_ring(struct ti_softc *sc) { struct ti_txdesc *txd; int i; if (sc->ti_rdata.ti_tx_ring == NULL) return; for (i = 0; i < TI_TX_RING_CNT; i++) { txd = &sc->ti_cdata.ti_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } bzero(sc->ti_rdata.ti_tx_ring, TI_TX_RING_SZ); 
bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag, sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_PREWRITE); } static int ti_init_tx_ring(struct ti_softc *sc) { struct ti_txdesc *txd; int i; STAILQ_INIT(&sc->ti_cdata.ti_txfreeq); STAILQ_INIT(&sc->ti_cdata.ti_txbusyq); for (i = 0; i < TI_TX_RING_CNT; i++) { txd = &sc->ti_cdata.ti_txdesc[i]; STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txfreeq, txd, tx_q); } sc->ti_txcnt = 0; sc->ti_tx_saved_considx = 0; sc->ti_tx_saved_prodidx = 0; CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0); return (0); } /* * The Tigon 2 firmware has a new way to add/delete multicast addresses, * but we have to support the old way too so that Tigon 1 cards will * work. */ static void ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr) { struct ti_cmd_desc cmd; uint16_t *m; uint32_t ext[2] = {0, 0}; m = (uint16_t *)&addr->octet[0]; switch (sc->ti_hwrev) { case TI_HWREV_TIGON: CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0); break; case TI_HWREV_TIGON_II: ext[0] = htons(m[0]); ext[1] = (htons(m[1]) << 16) | htons(m[2]); TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2); break; default: device_printf(sc->ti_dev, "unknown hwrev\n"); break; } } static void ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr) { struct ti_cmd_desc cmd; uint16_t *m; uint32_t ext[2] = {0, 0}; m = (uint16_t *)&addr->octet[0]; switch (sc->ti_hwrev) { case TI_HWREV_TIGON: CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0); break; case TI_HWREV_TIGON_II: ext[0] = htons(m[0]); ext[1] = (htons(m[1]) << 16) | htons(m[2]); TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2); break; default: device_printf(sc->ti_dev, "unknown hwrev\n"); break; } } /* * Configure the Tigon's multicast address filter. * * The actual multicast table management is a bit of a pain, thanks to * slight brain damage on the part of both Alteon and us. With our * multicast code, we are only alerted when the multicast address table * changes and at that point we only have the current list of addresses: * we only know the current state, not the previous state, so we don't * actually know what addresses were removed or added. The firmware has * state, but we can't get our grubby mits on it, and there is no 'delete * all multicast addresses' command. Hence, we have to maintain our own * state so we know what addresses have been programmed into the NIC at * any given time. */ static void ti_setmulti(struct ti_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; struct ti_cmd_desc cmd; struct ti_mc_entry *mc; uint32_t intrs; TI_LOCK_ASSERT(sc); ifp = sc->ti_ifp; if (ifp->if_flags & IFF_ALLMULTI) { TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0); return; } else { TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0); } /* Disable interrupts. */ intrs = CSR_READ_4(sc, TI_MB_HOSTINTR); CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); /* First, zot all the existing filters. */ while (SLIST_FIRST(&sc->ti_mc_listhead) != NULL) { mc = SLIST_FIRST(&sc->ti_mc_listhead); ti_del_mcast(sc, &mc->mc_addr); SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries); free(mc, M_DEVBUF); } /* Now program new ones. 
*/ if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_NOWAIT); if (mc == NULL) { device_printf(sc->ti_dev, "no memory for mcast filter entry\n"); continue; } bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), (char *)&mc->mc_addr, ETHER_ADDR_LEN); SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries); ti_add_mcast(sc, &mc->mc_addr); } if_maddr_runlock(ifp); /* Re-enable interrupts. */ CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs); } /* * Check to see if the BIOS has configured us for a 64 bit slot when * we aren't actually in one. If we detect this condition, we can work * around it on the Tigon 2 by setting a bit in the PCI state register, * but for the Tigon 1 we must give up and abort the interface attach. */ static int ti_64bitslot_war(struct ti_softc *sc) { if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) { CSR_WRITE_4(sc, 0x600, 0); CSR_WRITE_4(sc, 0x604, 0); CSR_WRITE_4(sc, 0x600, 0x5555AAAA); if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) { if (sc->ti_hwrev == TI_HWREV_TIGON) return (EINVAL); else { TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_32BIT_BUS); return (0); } } } return (0); } /* * Do endian, PCI and DMA initialization. Also check the on-board ROM * self-test results. */ static int ti_chipinit(struct ti_softc *sc) { uint32_t cacheline; uint32_t pci_writemax = 0; uint32_t hdrsplit; /* Initialize link to down state. */ sc->ti_linkstat = TI_EV_CODE_LINK_DOWN; /* Set endianness before we access any non-PCI registers. */ #if 0 && BYTE_ORDER == BIG_ENDIAN CSR_WRITE_4(sc, TI_MISC_HOST_CTL, TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24)); #else CSR_WRITE_4(sc, TI_MISC_HOST_CTL, TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24)); #endif /* Check the ROM failed bit to see if self-tests passed. */ if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) { device_printf(sc->ti_dev, "board self-diagnostics failed!\n"); return (ENODEV); } /* Halt the CPU. */ TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT); /* Figure out the hardware revision. */ switch (CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK) { case TI_REV_TIGON_I: sc->ti_hwrev = TI_HWREV_TIGON; break; case TI_REV_TIGON_II: sc->ti_hwrev = TI_HWREV_TIGON_II; break; default: device_printf(sc->ti_dev, "unsupported chip revision\n"); return (ENODEV); } /* Do special setup for Tigon 2. */ if (sc->ti_hwrev == TI_HWREV_TIGON_II) { TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT); TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K); TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS); } /* * We don't have firmware source for the Tigon 1, so Tigon 1 boards * can't do header splitting. */ #ifdef TI_JUMBO_HDRSPLIT if (sc->ti_hwrev != TI_HWREV_TIGON) sc->ti_hdrsplit = 1; else device_printf(sc->ti_dev, "can't do header splitting on a Tigon I board\n"); #endif /* TI_JUMBO_HDRSPLIT */ /* Set up the PCI state register. */ CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD); if (sc->ti_hwrev == TI_HWREV_TIGON_II) { TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT); } /* Clear the read/write max DMA parameters. */ TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA| TI_PCISTATE_READ_MAXDMA)); /* Get cache line size. */ cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF; /* * If the system has set enabled the PCI memory write * and invalidate command in the command register, set * the write max parameter accordingly. This is necessary * to use MWI with the Tigon 2. 
*/ if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCIM_CMD_MWIEN) { switch (cacheline) { case 1: case 4: case 8: case 16: case 32: case 64: break; default: /* Disable PCI memory write and invalidate. */ if (bootverbose) device_printf(sc->ti_dev, "cache line size %d" " not supported; disabling PCI MWI\n", cacheline); CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc, TI_PCI_CMDSTAT) & ~PCIM_CMD_MWIEN); break; } } TI_SETBIT(sc, TI_PCI_STATE, pci_writemax); /* This sets the min dma param all the way up (0xff). */ TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA); if (sc->ti_hdrsplit) hdrsplit = TI_OPMODE_JUMBO_HDRSPLIT; else hdrsplit = 0; /* Configure DMA variables. */ #if BYTE_ORDER == BIG_ENDIAN CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD | TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD | TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB | TI_OPMODE_DONT_FRAG_JUMBO | hdrsplit); #else /* BYTE_ORDER */ CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA| TI_OPMODE_WORDSWAP_BD|TI_OPMODE_DONT_FRAG_JUMBO| TI_OPMODE_WARN_ENB|TI_OPMODE_FATAL_ENB | hdrsplit); #endif /* BYTE_ORDER */ /* * Only allow 1 DMA channel to be active at a time. * I don't think this is a good idea, but without it * the firmware racks up lots of nicDmaReadRingFull * errors. This is not compatible with hardware checksums. */ if ((sc->ti_ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_RXCSUM)) == 0) TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE); /* Recommended settings from Tigon manual. */ CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W); CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W); if (ti_64bitslot_war(sc)) { device_printf(sc->ti_dev, "bios thinks we're in a 64 bit slot, " "but we aren't"); return (EINVAL); } return (0); } /* * Initialize the general information block and firmware, and * start the CPU(s) running. */ static int ti_gibinit(struct ti_softc *sc) { struct ifnet *ifp; struct ti_rcb *rcb; int i; TI_LOCK_ASSERT(sc); ifp = sc->ti_ifp; /* Disable interrupts for now. */ CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); /* Tell the chip where to find the general information block. */ CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, (uint64_t)sc->ti_rdata.ti_info_paddr >> 32); CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, sc->ti_rdata.ti_info_paddr & 0xFFFFFFFF); /* Load the firmware into SRAM. */ ti_loadfw(sc); /* Set up the contents of the general info and ring control blocks. */ /* Set up the event ring and producer pointer. */ bzero(sc->ti_rdata.ti_event_ring, TI_EVENT_RING_SZ); rcb = &sc->ti_rdata.ti_info->ti_ev_rcb; ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_event_ring_paddr); rcb->ti_flags = 0; ti_hostaddr64(&sc->ti_rdata.ti_info->ti_ev_prodidx_ptr, sc->ti_rdata.ti_status_paddr + offsetof(struct ti_status, ti_ev_prodidx_r)); sc->ti_ev_prodidx.ti_idx = 0; CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0); sc->ti_ev_saved_considx = 0; /* Set up the command ring and producer mailbox. */ rcb = &sc->ti_rdata.ti_info->ti_cmd_rcb; ti_hostaddr64(&rcb->ti_hostaddr, TI_GCR_NIC_ADDR(TI_GCR_CMDRING)); rcb->ti_flags = 0; rcb->ti_max_len = 0; for (i = 0; i < TI_CMD_RING_CNT; i++) { CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0); } CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0); CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0); sc->ti_cmd_saved_prodidx = 0; /* * Assign the address of the stats refresh buffer. * We re-use the current stats buffer for this to * conserve memory. 
*/ bzero(&sc->ti_rdata.ti_info->ti_stats, sizeof(struct ti_stats)); ti_hostaddr64(&sc->ti_rdata.ti_info->ti_refresh_stats_ptr, sc->ti_rdata.ti_info_paddr + offsetof(struct ti_gib, ti_stats)); /* Set up the standard receive ring. */ rcb = &sc->ti_rdata.ti_info->ti_std_rx_rcb; ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_std_ring_paddr); rcb->ti_max_len = TI_FRAMELEN; rcb->ti_flags = 0; if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM) rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; /* Set up the jumbo receive ring. */ rcb = &sc->ti_rdata.ti_info->ti_jumbo_rx_rcb; ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_jumbo_ring_paddr); #ifndef TI_SF_BUF_JUMBO rcb->ti_max_len = MJUM9BYTES - ETHER_ALIGN; rcb->ti_flags = 0; #else rcb->ti_max_len = PAGE_SIZE; rcb->ti_flags = TI_RCB_FLAG_USE_EXT_RX_BD; #endif if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM) rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; /* * Set up the mini ring. Only activated on the * Tigon 2 but the slot in the config block is * still there on the Tigon 1. */ rcb = &sc->ti_rdata.ti_info->ti_mini_rx_rcb; ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_mini_ring_paddr); rcb->ti_max_len = MHLEN - ETHER_ALIGN; if (sc->ti_hwrev == TI_HWREV_TIGON) rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED; else rcb->ti_flags = 0; if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM) rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; /* * Set up the receive return ring. */ rcb = &sc->ti_rdata.ti_info->ti_return_rcb; ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_return_ring_paddr); rcb->ti_flags = 0; rcb->ti_max_len = TI_RETURN_RING_CNT; ti_hostaddr64(&sc->ti_rdata.ti_info->ti_return_prodidx_ptr, sc->ti_rdata.ti_status_paddr + offsetof(struct ti_status, ti_return_prodidx_r)); /* * Set up the tx ring. Note: for the Tigon 2, we have the option * of putting the transmit ring in the host's address space and * letting the chip DMA it instead of leaving the ring in the NIC's * memory and accessing it through the shared memory region. We * do this for the Tigon 2, but it doesn't work on the Tigon 1, * so we have to revert to the shared memory scheme if we detect * a Tigon 1 chip. 
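*/

	/*
	 * Editorial sketch, not part of this commit: how 64-bit host
	 * addresses are handed to the NIC, as in the GENINFO_HI/LO
	 * writes earlier in this function.  ti_hostaddr64() is assumed
	 * to store the same split into a ti_hostaddr:
	 *
	 *	hi = (uint64_t)paddr >> 32;     upper 32 bits
	 *	lo = paddr & 0xFFFFFFFF;        lower 32 bits
	 */

	/*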
*/ CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE); if (sc->ti_rdata.ti_tx_ring != NULL) bzero(sc->ti_rdata.ti_tx_ring, TI_TX_RING_SZ); rcb = &sc->ti_rdata.ti_info->ti_tx_rcb; if (sc->ti_hwrev == TI_HWREV_TIGON) rcb->ti_flags = 0; else rcb->ti_flags = TI_RCB_FLAG_HOST_RING; if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; if (sc->ti_ifp->if_capenable & IFCAP_TXCSUM) rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; rcb->ti_max_len = TI_TX_RING_CNT; if (sc->ti_hwrev == TI_HWREV_TIGON) ti_hostaddr64(&rcb->ti_hostaddr, TI_TX_RING_BASE); else ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_tx_ring_paddr); ti_hostaddr64(&sc->ti_rdata.ti_info->ti_tx_considx_ptr, sc->ti_rdata.ti_status_paddr + offsetof(struct ti_status, ti_tx_considx_r)); bus_dmamap_sync(sc->ti_cdata.ti_gib_tag, sc->ti_cdata.ti_gib_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->ti_cdata.ti_status_tag, sc->ti_cdata.ti_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->ti_cdata.ti_event_ring_tag, sc->ti_cdata.ti_event_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (sc->ti_rdata.ti_tx_ring != NULL) bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag, sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_PREWRITE); /* Set up tunables */ #if 0 if (ifp->if_mtu > ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, (sc->ti_rx_coal_ticks / 10)); else #endif CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks); CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks); CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks); CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds); CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds); CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio); /* Turn interrupts on. */ CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0); CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); /* Start CPU. */ TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP)); return (0); } /* * Probe for a Tigon chip. Check the PCI vendor and device IDs * against our list and return its name if we find a match. */ static int ti_probe(device_t dev) { const struct ti_type *t; t = ti_devs; while (t->ti_name != NULL) { if ((pci_get_vendor(dev) == t->ti_vid) && (pci_get_device(dev) == t->ti_did)) { device_set_desc(dev, t->ti_name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static int ti_attach(device_t dev) { struct ifnet *ifp; struct ti_softc *sc; int error = 0, rid; u_char eaddr[6]; sc = device_get_softc(dev); sc->ti_dev = dev; mtx_init(&sc->ti_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->ti_watchdog, &sc->ti_mtx, 0); ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts); ifp = sc->ti_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } sc->ti_ifp->if_hwassist = TI_CSUM_FEATURES; sc->ti_ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM; sc->ti_ifp->if_capenable = sc->ti_ifp->if_capabilities; /* * Map control/status registers. 
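* (Bus mastering must be enabled before the NIC can DMA; all subsequent CSR_READ_4()/CSR_WRITE_4() accesses then go through the BAR(0) window via sc->ti_btag and sc->ti_bhandle.)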
*/ pci_enable_busmaster(dev); rid = PCIR_BAR(0); sc->ti_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->ti_res == NULL) { device_printf(dev, "couldn't map memory\n"); error = ENXIO; goto fail; } sc->ti_btag = rman_get_bustag(sc->ti_res); sc->ti_bhandle = rman_get_bushandle(sc->ti_res); /* Allocate interrupt */ rid = 0; sc->ti_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->ti_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } if (ti_chipinit(sc)) { device_printf(dev, "chip initialization failed\n"); error = ENXIO; goto fail; } /* Zero out the NIC's on-board SRAM. */ ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000); /* Init again -- zeroing memory may have clobbered some registers. */ if (ti_chipinit(sc)) { device_printf(dev, "chip initialization failed\n"); error = ENXIO; goto fail; } /* * Get station address from the EEPROM. Note: the manual states * that the MAC address is at offset 0x8c, however the data is * stored as two longwords (since that's how it's loaded into * the NIC). This means the MAC address is actually preceded * by two zero bytes. We need to skip over those. */ if (ti_read_eeprom(sc, eaddr, TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { device_printf(dev, "failed to read station address\n"); error = ENXIO; goto fail; } /* Allocate working area for memory dump. */ sc->ti_membuf = malloc(sizeof(uint8_t) * TI_WINLEN, M_DEVBUF, M_NOWAIT); sc->ti_membuf2 = malloc(sizeof(uint8_t) * TI_WINLEN, M_DEVBUF, M_NOWAIT); if (sc->ti_membuf == NULL || sc->ti_membuf2 == NULL) { device_printf(dev, "cannot allocate memory buffer\n"); error = ENOMEM; goto fail; } if ((error = ti_dma_alloc(sc)) != 0) goto fail; /* * We really need a better way to tell a 1000baseTX card * from a 1000baseSX one, since in theory there could be * OEMed 1000baseTX cards from lame vendors who aren't * clever enough to change the PCI ID. For the moment * though, the AceNIC is the only copper card available. */ if (pci_get_vendor(dev) == ALT_VENDORID && pci_get_device(dev) == ALT_DEVICEID_ACENIC_COPPER) sc->ti_copper = 1; /* Ok, it's not the only copper card available. */ if (pci_get_vendor(dev) == NG_VENDORID && pci_get_device(dev) == NG_DEVICEID_GA620T) sc->ti_copper = 1; /* Set default tunable values. */ ti_sysctl_node(sc); /* Set up ifnet structure */ ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ti_ioctl; ifp->if_start = ti_start; ifp->if_init = ti_init; ifp->if_get_counter = ti_get_counter; ifp->if_baudrate = IF_Gbps(1UL); ifp->if_snd.ifq_drv_maxlen = TI_TX_RING_CNT - 1; IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd); /* Set up ifmedia support. */ if (sc->ti_copper) { /* * Copper cards allow manual 10/100 mode selection, * but not manual 1000baseTX mode selection. Why? * Because currently there's no way to specify the * master/slave setting through the firmware interface, * so Alteon decided to just bag it and handle it * via autonegotiation. */ ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL); } else { /* Fiber cards don't support 10/100 modes.
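* (so only 1000baseSX and, below, autoselect are advertised)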
*/ ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); } ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO); /* * We're assuming here that card initialization is a sequential * thing. If it isn't, multiple cards probing at the same time * could stomp on the list of softcs here. */ /* Register the device */ sc->dev = make_dev(&ti_cdevsw, device_get_unit(dev), UID_ROOT, GID_OPERATOR, 0600, "ti%d", device_get_unit(dev)); sc->dev->si_drv1 = sc; /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* VLAN capability setup. */ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTAGGING; ifp->if_capenable = ifp->if_capabilities; /* Tell the upper layer we support VLAN over-sized frames. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* Driver supports link state tracking. */ ifp->if_capabilities |= IFCAP_LINKSTATE; ifp->if_capenable |= IFCAP_LINKSTATE; /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->ti_irq, INTR_TYPE_NET|INTR_MPSAFE, NULL, ti_intr, sc, &sc->ti_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); goto fail; } fail: if (error) ti_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int ti_detach(device_t dev) { struct ti_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); if (sc->dev) destroy_dev(sc->dev); KASSERT(mtx_initialized(&sc->ti_mtx), ("ti mutex not initialized")); ifp = sc->ti_ifp; if (device_is_attached(dev)) { ether_ifdetach(ifp); TI_LOCK(sc); ti_stop(sc); TI_UNLOCK(sc); } /* These should only be active if attach succeeded */ callout_drain(&sc->ti_watchdog); bus_generic_detach(dev); ti_dma_free(sc); ifmedia_removeall(&sc->ifmedia); if (sc->ti_intrhand) bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand); if (sc->ti_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq); if (sc->ti_res) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->ti_res); } if (ifp) if_free(ifp); if (sc->ti_membuf) free(sc->ti_membuf, M_DEVBUF); if (sc->ti_membuf2) free(sc->ti_membuf2, M_DEVBUF); mtx_destroy(&sc->ti_mtx); return (0); } #ifdef TI_JUMBO_HDRSPLIT /* * If hdr_len is 0, that means that header splitting wasn't done on * this packet for some reason. The two most likely reasons are that * the protocol isn't a supported protocol for splitting, or this * packet had a fragment offset that wasn't 0. * * The header length, if it is non-zero, will always be the length of * the headers on the packet, but that length could be longer than the * first mbuf. So we take the minimum of the two as the actual * length. 
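* For example, for a typical TCP/IPv4 packet the firmware would report a hdr_len of 54 (14-byte Ethernet + 20-byte IP + 20-byte TCP): the first mbuf is trimmed to those 54 bytes and the remaining pkt_len is clipped across the rest of the chain.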
*/ static __inline void ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len, int idx) { int i = 0; int lengths[4] = {0, 0, 0, 0}; struct mbuf *m, *mp; if (hdr_len != 0) top->m_len = min(hdr_len, top->m_len); pkt_len -= top->m_len; lengths[i++] = top->m_len; mp = top; for (m = top->m_next; m && pkt_len; m = m->m_next) { m->m_len = m->m_ext.ext_size = min(m->m_len, pkt_len); pkt_len -= m->m_len; lengths[i++] = m->m_len; mp = m; } #if 0 if (hdr_len != 0) printf("got split packet: "); else printf("got non-split packet: "); printf("%d,%d,%d,%d = %d\n", lengths[0], lengths[1], lengths[2], lengths[3], lengths[0] + lengths[1] + lengths[2] + lengths[3]); #endif if (pkt_len) panic("header splitting didn't"); if (m) { m_freem(m); mp->m_next = NULL; } if (mp->m_next != NULL) panic("ti_hdr_split: last mbuf in chain should be null"); } #endif /* TI_JUMBO_HDRSPLIT */ static void ti_discard_std(struct ti_softc *sc, int i) { struct ti_rx_desc *r; r = &sc->ti_rdata.ti_rx_std_ring[i]; r->ti_len = MCLBYTES - ETHER_ALIGN; r->ti_type = TI_BDTYPE_RECV_BD; r->ti_flags = 0; r->ti_vlan_tag = 0; r->ti_tcp_udp_cksum = 0; if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM) r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; r->ti_idx = i; } static void ti_discard_mini(struct ti_softc *sc, int i) { struct ti_rx_desc *r; r = &sc->ti_rdata.ti_rx_mini_ring[i]; r->ti_len = MHLEN - ETHER_ALIGN; r->ti_type = TI_BDTYPE_RECV_BD; r->ti_flags = TI_BDFLAG_MINI_RING; r->ti_vlan_tag = 0; r->ti_tcp_udp_cksum = 0; if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM) r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; r->ti_idx = i; } #ifndef TI_SF_BUF_JUMBO static void ti_discard_jumbo(struct ti_softc *sc, int i) { struct ti_rx_desc *r; r = &sc->ti_rdata.ti_rx_jumbo_ring[i]; r->ti_len = MJUM9BYTES - ETHER_ALIGN; r->ti_type = TI_BDTYPE_RECV_JUMBO_BD; r->ti_flags = TI_BDFLAG_JUMBO_RING; r->ti_vlan_tag = 0; r->ti_tcp_udp_cksum = 0; if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM) r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; r->ti_idx = i; } #endif /* * Frame reception handling. This is called if there's a frame * on the receive return list. 
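* (Entries on the return ring are completion descriptors only; the frame data has already been DMA'd into the mbuf posted at the matching index of the standard, mini or jumbo producer ring.)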
* * Note: we have to be able to handle three possibilities here: * 1) the frame is from the mini receive ring (can only happen * on Tigon 2 boards) * 2) the frame is from the jumbo receive ring * 3) the frame is from the standard receive ring */ static void ti_rxeof(struct ti_softc *sc) { struct ifnet *ifp; #ifdef TI_SF_BUF_JUMBO bus_dmamap_t map; #endif struct ti_cmd_desc cmd; int jumbocnt, minicnt, stdcnt, ti_len; TI_LOCK_ASSERT(sc); ifp = sc->ti_ifp; bus_dmamap_sync(sc->ti_cdata.ti_rx_std_ring_tag, sc->ti_cdata.ti_rx_std_ring_map, BUS_DMASYNC_POSTWRITE); if (ifp->if_mtu > ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_ring_tag, sc->ti_cdata.ti_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE); if (sc->ti_rdata.ti_rx_mini_ring != NULL) bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_ring_tag, sc->ti_cdata.ti_rx_mini_ring_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->ti_cdata.ti_rx_return_ring_tag, sc->ti_cdata.ti_rx_return_ring_map, BUS_DMASYNC_POSTREAD); jumbocnt = minicnt = stdcnt = 0; while (sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) { struct ti_rx_desc *cur_rx; uint32_t rxidx; struct mbuf *m = NULL; uint16_t vlan_tag = 0; int have_tag = 0; cur_rx = &sc->ti_rdata.ti_rx_return_ring[sc->ti_rx_saved_considx]; rxidx = cur_rx->ti_idx; ti_len = cur_rx->ti_len; TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT); if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) { have_tag = 1; vlan_tag = cur_rx->ti_vlan_tag; } if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) { jumbocnt++; TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT); m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx]; #ifndef TI_SF_BUF_JUMBO if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); ti_discard_jumbo(sc, rxidx); continue; } if (ti_newbuf_jumbo(sc, rxidx, NULL) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); ti_discard_jumbo(sc, rxidx); continue; } m->m_len = ti_len; #else /* !TI_SF_BUF_JUMBO */ sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL; map = sc->ti_cdata.ti_rx_jumbo_maps[rxidx]; bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->ti_cdata.ti_rx_jumbo_tag, map); if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); ti_newbuf_jumbo(sc, sc->ti_jumbo, m); continue; } if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) == ENOBUFS) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); ti_newbuf_jumbo(sc, sc->ti_jumbo, m); continue; } #ifdef TI_JUMBO_HDRSPLIT if (sc->ti_hdrsplit) ti_hdr_split(m, TI_HOSTADDR(cur_rx->ti_addr), ti_len, rxidx); else #endif /* TI_JUMBO_HDRSPLIT */ m_adj(m, ti_len - m->m_pkthdr.len); #endif /* TI_SF_BUF_JUMBO */ } else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) { minicnt++; TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT); m = sc->ti_cdata.ti_rx_mini_chain[rxidx]; if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); ti_discard_mini(sc, rxidx); continue; } if (ti_newbuf_mini(sc, rxidx) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); ti_discard_mini(sc, rxidx); continue; } m->m_len = ti_len; } else { stdcnt++; TI_INC(sc->ti_std, TI_STD_RX_RING_CNT); m = sc->ti_cdata.ti_rx_std_chain[rxidx]; if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); ti_discard_std(sc, rxidx); continue; } if (ti_newbuf_std(sc, rxidx) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); ti_discard_std(sc, rxidx); continue; } m->m_len = ti_len; } m->m_pkthdr.len = ti_len; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = ifp; if (ifp->if_capenable &
IFCAP_RXCSUM) { if (cur_rx->ti_flags & TI_BDFLAG_IP_CKSUM) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } if (cur_rx->ti_flags & TI_BDFLAG_TCP_UDP_CKSUM) { m->m_pkthdr.csum_data = cur_rx->ti_tcp_udp_cksum; m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; } } /* * If we received a packet with a vlan tag, * tag it before passing the packet upward. */ if (have_tag) { m->m_pkthdr.ether_vtag = vlan_tag; m->m_flags |= M_VLANTAG; } TI_UNLOCK(sc); (*ifp->if_input)(ifp, m); TI_LOCK(sc); } bus_dmamap_sync(sc->ti_cdata.ti_rx_return_ring_tag, sc->ti_cdata.ti_rx_return_ring_map, BUS_DMASYNC_PREREAD); /* Only necessary on the Tigon 1. */ if (sc->ti_hwrev == TI_HWREV_TIGON) CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, sc->ti_rx_saved_considx); if (stdcnt > 0) { bus_dmamap_sync(sc->ti_cdata.ti_rx_std_ring_tag, sc->ti_cdata.ti_rx_std_ring_map, BUS_DMASYNC_PREWRITE); TI_UPDATE_STDPROD(sc, sc->ti_std); } if (minicnt > 0) { bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_ring_tag, sc->ti_cdata.ti_rx_mini_ring_map, BUS_DMASYNC_PREWRITE); TI_UPDATE_MINIPROD(sc, sc->ti_mini); } if (jumbocnt > 0) { bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_ring_tag, sc->ti_cdata.ti_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo); } } static void ti_txeof(struct ti_softc *sc) { struct ti_txdesc *txd; struct ti_tx_desc txdesc; struct ti_tx_desc *cur_tx = NULL; struct ifnet *ifp; int idx; ifp = sc->ti_ifp; txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq); if (txd == NULL) return; if (sc->ti_rdata.ti_tx_ring != NULL) bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag, sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_POSTWRITE); /* * Go through our tx ring and free mbufs for those * frames that have been sent. */ for (idx = sc->ti_tx_saved_considx; idx != sc->ti_tx_considx.ti_idx; TI_INC(idx, TI_TX_RING_CNT)) { if (sc->ti_hwrev == TI_HWREV_TIGON) { ti_mem_read(sc, TI_TX_RING_BASE + idx * sizeof(txdesc), sizeof(txdesc), &txdesc); cur_tx = &txdesc; } else cur_tx = &sc->ti_rdata.ti_tx_ring[idx]; sc->ti_txcnt--; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if ((cur_tx->ti_flags & TI_BDFLAG_END) == 0) continue; bus_dmamap_sync(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); m_freem(txd->tx_m); txd->tx_m = NULL; STAILQ_REMOVE_HEAD(&sc->ti_cdata.ti_txbusyq, tx_q); STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txfreeq, txd, tx_q); txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq); } sc->ti_tx_saved_considx = idx; if (sc->ti_txcnt == 0) sc->ti_timer = 0; } static void ti_intr(void *xsc) { struct ti_softc *sc; struct ifnet *ifp; sc = xsc; TI_LOCK(sc); ifp = sc->ti_ifp; /* Make sure this is really our interrupt. */ if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE)) { TI_UNLOCK(sc); return; } /* Ack interrupt and stop others from occurring. */ CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { bus_dmamap_sync(sc->ti_cdata.ti_status_tag, sc->ti_cdata.ti_status_map, BUS_DMASYNC_POSTREAD); /* Check RX return ring producer/consumer */ ti_rxeof(sc); /* Check TX ring producer/consumer */ ti_txeof(sc); bus_dmamap_sync(sc->ti_cdata.ti_status_tag, sc->ti_cdata.ti_status_map, BUS_DMASYNC_PREREAD); } ti_handle_events(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { /* Re-enable interrupts.
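* (Writing 0 to the host interrupt mailbox unmasks NIC interrupts again; they were masked by the write of 1 at the top of ti_intr().)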
*/ CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) ti_start_locked(ifp); } TI_UNLOCK(sc); } static uint64_t ti_get_counter(struct ifnet *ifp, ift_counter cnt) { switch (cnt) { case IFCOUNTER_COLLISIONS: { struct ti_softc *sc; struct ti_stats *s; uint64_t rv; sc = if_getsoftc(ifp); s = &sc->ti_rdata.ti_info->ti_stats; TI_LOCK(sc); bus_dmamap_sync(sc->ti_cdata.ti_gib_tag, sc->ti_cdata.ti_gib_map, BUS_DMASYNC_POSTREAD); rv = s->dot3StatsSingleCollisionFrames + s->dot3StatsMultipleCollisionFrames + s->dot3StatsExcessiveCollisions + s->dot3StatsLateCollisions; bus_dmamap_sync(sc->ti_cdata.ti_gib_tag, sc->ti_cdata.ti_gib_map, BUS_DMASYNC_PREREAD); TI_UNLOCK(sc); return (rv); } default: return (if_get_counter_default(ifp, cnt)); } } /* * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data * pointers to descriptors. */ static int ti_encap(struct ti_softc *sc, struct mbuf **m_head) { struct ti_txdesc *txd; struct ti_tx_desc *f; struct ti_tx_desc txdesc; struct mbuf *m; bus_dma_segment_t txsegs[TI_MAXTXSEGS]; uint16_t csum_flags; int error, frag, i, nseg; if ((txd = STAILQ_FIRST(&sc->ti_cdata.ti_txfreeq)) == NULL) return (ENOBUFS); error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nseg, 0); if (error == EFBIG) { m = m_defrag(*m_head, M_NOWAIT); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOMEM); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nseg, 0); if (error) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nseg == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } if (sc->ti_txcnt + nseg >= TI_TX_RING_CNT) { bus_dmamap_unload(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap); return (ENOBUFS); } bus_dmamap_sync(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE); m = *m_head; csum_flags = 0; if (m->m_pkthdr.csum_flags & CSUM_IP) csum_flags |= TI_BDFLAG_IP_CKSUM; if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM; frag = sc->ti_tx_saved_prodidx; for (i = 0; i < nseg; i++) { if (sc->ti_hwrev == TI_HWREV_TIGON) { bzero(&txdesc, sizeof(txdesc)); f = &txdesc; } else f = &sc->ti_rdata.ti_tx_ring[frag]; ti_hostaddr64(&f->ti_addr, txsegs[i].ds_addr); f->ti_len = txsegs[i].ds_len; f->ti_flags = csum_flags; if (m->m_flags & M_VLANTAG) { f->ti_flags |= TI_BDFLAG_VLAN_TAG; f->ti_vlan_tag = m->m_pkthdr.ether_vtag; } else { f->ti_vlan_tag = 0; } if (sc->ti_hwrev == TI_HWREV_TIGON) ti_mem_write(sc, TI_TX_RING_BASE + frag * sizeof(txdesc), sizeof(txdesc), &txdesc); TI_INC(frag, TI_TX_RING_CNT); } sc->ti_tx_saved_prodidx = frag; /* set TI_BDFLAG_END on the last descriptor */ frag = (frag + TI_TX_RING_CNT - 1) % TI_TX_RING_CNT; if (sc->ti_hwrev == TI_HWREV_TIGON) { txdesc.ti_flags |= TI_BDFLAG_END; ti_mem_write(sc, TI_TX_RING_BASE + frag * sizeof(txdesc), sizeof(txdesc), &txdesc); } else sc->ti_rdata.ti_tx_ring[frag].ti_flags |= TI_BDFLAG_END; STAILQ_REMOVE_HEAD(&sc->ti_cdata.ti_txfreeq, tx_q); STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txbusyq, txd, tx_q); txd->tx_m = m; sc->ti_txcnt += nseg; return (0); } static void ti_start(struct ifnet *ifp) { struct ti_softc *sc; sc = ifp->if_softc; TI_LOCK(sc); ti_start_locked(ifp); TI_UNLOCK(sc); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. 
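* If a frame needs more than TI_MAXTXSEGS DMA segments, ti_encap() above falls back to m_defrag() once before giving up; ti_start_locked() below also stops queueing when fewer than 16 descriptors remain free.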
*/ static void ti_start_locked(struct ifnet *ifp) { struct ti_softc *sc; struct mbuf *m_head = NULL; int enq = 0; sc = ifp->if_softc; for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->ti_txcnt < (TI_TX_RING_CNT - 16);) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (ti_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); } if (enq > 0) { if (sc->ti_rdata.ti_tx_ring != NULL) bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag, sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_PREWRITE); /* Transmit */ CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, sc->ti_tx_saved_prodidx); /* * Set a timeout in case the chip goes out to lunch. */ sc->ti_timer = 5; } } static void ti_init(void *xsc) { struct ti_softc *sc; sc = xsc; TI_LOCK(sc); ti_init_locked(sc); TI_UNLOCK(sc); } static void ti_init_locked(void *xsc) { struct ti_softc *sc = xsc; if (sc->ti_ifp->if_drv_flags & IFF_DRV_RUNNING) return; /* Cancel pending I/O and flush buffers. */ ti_stop(sc); /* Init the gen info block, ring control blocks and firmware. */ if (ti_gibinit(sc)) { device_printf(sc->ti_dev, "initialization failure\n"); return; } } static void ti_init2(struct ti_softc *sc) { struct ti_cmd_desc cmd; struct ifnet *ifp; uint8_t *ea; struct ifmedia *ifm; int tmp; TI_LOCK_ASSERT(sc); ifp = sc->ti_ifp; /* Specify MTU and interface index. */ CSR_WRITE_4(sc, TI_GCR_IFINDEX, device_get_unit(sc->ti_dev)); CSR_WRITE_4(sc, TI_GCR_IFMTU, ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0); /* Load our MAC address. */ ea = IF_LLADDR(sc->ti_ifp); CSR_WRITE_4(sc, TI_GCR_PAR0, (ea[0] << 8) | ea[1]); CSR_WRITE_4(sc, TI_GCR_PAR1, (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5]); TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0); /* Enable or disable promiscuous mode as needed. */ if (ifp->if_flags & IFF_PROMISC) { TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0); } else { TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0); } /* Program multicast filter. */ ti_setmulti(sc); /* * If this is a Tigon 1, we should tell the * firmware to use software packet filtering. */ if (sc->ti_hwrev == TI_HWREV_TIGON) { TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0); } /* Init RX ring. */ if (ti_init_rx_ring_std(sc) != 0) { /* XXX */ device_printf(sc->ti_dev, "no memory for std Rx buffers.\n"); return; } /* Init jumbo RX ring. */ if (ifp->if_mtu > ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) { if (ti_init_rx_ring_jumbo(sc) != 0) { /* XXX */ device_printf(sc->ti_dev, "no memory for jumbo Rx buffers.\n"); return; } } /* * If this is a Tigon 2, we can also configure the * mini ring. */ if (sc->ti_hwrev == TI_HWREV_TIGON_II) { if (ti_init_rx_ring_mini(sc) != 0) { /* XXX */ device_printf(sc->ti_dev, "no memory for mini Rx buffers.\n"); return; } } CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0); sc->ti_rx_saved_considx = 0; /* Init TX ring. */ ti_init_tx_ring(sc); /* Tell firmware we're alive. */ TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0); /* Enable host interrupts. */ CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->ti_watchdog, hz, ti_watchdog, sc); /* * Make sure to set media properly. 
We have to do this * here since we have to issue commands in order to set * the link negotiation and we can't issue commands until * the firmware is running. */ ifm = &sc->ifmedia; tmp = ifm->ifm_media; ifm->ifm_media = ifm->ifm_cur->ifm_media; ti_ifmedia_upd_locked(sc); ifm->ifm_media = tmp; } /* * Set media options. */ static int ti_ifmedia_upd(struct ifnet *ifp) { struct ti_softc *sc; int error; sc = ifp->if_softc; TI_LOCK(sc); error = ti_ifmedia_upd_locked(sc); TI_UNLOCK(sc); return (error); } static int ti_ifmedia_upd_locked(struct ti_softc *sc) { struct ifmedia *ifm; struct ti_cmd_desc cmd; uint32_t flowctl; ifm = &sc->ifmedia; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); flowctl = 0; switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: /* * Transmit flow control doesn't work on the Tigon 1. */ flowctl = TI_GLNK_RX_FLOWCTL_Y; /* * Transmit flow control can also cause problems on the * Tigon 2, apparently with both the copper and fiber * boards. The symptom is that the interface will just * hang. This was reproduced with Alteon 180 switches. */ #if 0 if (sc->ti_hwrev != TI_HWREV_TIGON) flowctl |= TI_GLNK_TX_FLOWCTL_Y; #endif CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB| TI_GLNK_FULL_DUPLEX| flowctl | TI_GLNK_AUTONEGENB|TI_GLNK_ENB); flowctl = TI_LNK_RX_FLOWCTL_Y; #if 0 if (sc->ti_hwrev != TI_HWREV_TIGON) flowctl |= TI_LNK_TX_FLOWCTL_Y; #endif CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB| TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX| flowctl | TI_LNK_AUTONEGENB|TI_LNK_ENB); TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, TI_CMD_CODE_NEGOTIATE_BOTH, 0); break; case IFM_1000_SX: case IFM_1000_T: flowctl = TI_GLNK_RX_FLOWCTL_Y; #if 0 if (sc->ti_hwrev != TI_HWREV_TIGON) flowctl |= TI_GLNK_TX_FLOWCTL_Y; #endif CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB| flowctl |TI_GLNK_ENB); CSR_WRITE_4(sc, TI_GCR_LINK, 0); if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX); } TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, TI_CMD_CODE_NEGOTIATE_GIGABIT, 0); break; case IFM_100_FX: case IFM_10_FL: case IFM_100_TX: case IFM_10_T: flowctl = TI_LNK_RX_FLOWCTL_Y; #if 0 if (sc->ti_hwrev != TI_HWREV_TIGON) flowctl |= TI_LNK_TX_FLOWCTL_Y; #endif CSR_WRITE_4(sc, TI_GCR_GLINK, 0); CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF|flowctl); if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX || IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB); } else { TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB); } if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX); } else { TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX); } TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, TI_CMD_CODE_NEGOTIATE_10_100, 0); break; } return (0); } /* * Report current media status.
*/ static void ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct ti_softc *sc; uint32_t media = 0; sc = ifp->if_softc; TI_LOCK(sc); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) { TI_UNLOCK(sc); return; } ifmr->ifm_status |= IFM_ACTIVE; if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) { media = CSR_READ_4(sc, TI_GCR_GLINK_STAT); if (sc->ti_copper) ifmr->ifm_active |= IFM_1000_T; else ifmr->ifm_active |= IFM_1000_SX; if (media & TI_GLNK_FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) { media = CSR_READ_4(sc, TI_GCR_LINK_STAT); if (sc->ti_copper) { if (media & TI_LNK_100MB) ifmr->ifm_active |= IFM_100_TX; if (media & TI_LNK_10MB) ifmr->ifm_active |= IFM_10_T; } else { if (media & TI_LNK_100MB) ifmr->ifm_active |= IFM_100_FX; if (media & TI_LNK_10MB) ifmr->ifm_active |= IFM_10_FL; } if (media & TI_LNK_FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; if (media & TI_LNK_HALF_DUPLEX) ifmr->ifm_active |= IFM_HDX; } TI_UNLOCK(sc); } static int ti_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ti_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct ti_cmd_desc cmd; int mask, error = 0; switch (command) { case SIOCSIFMTU: TI_LOCK(sc); if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > TI_JUMBO_MTU) error = EINVAL; else { ifp->if_mtu = ifr->ifr_mtu; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ti_init_locked(sc); } } TI_UNLOCK(sc); break; case SIOCSIFFLAGS: TI_LOCK(sc); if (ifp->if_flags & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. 
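* (sc->ti_if_flags, saved at the bottom of this case, holds the previous flags so exactly that transition can be detected.)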
*/ if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->ti_if_flags & IFF_PROMISC)) { TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->ti_if_flags & IFF_PROMISC) { TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0); } else ti_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ti_stop(sc); } } sc->ti_if_flags = ifp->if_flags; TI_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: TI_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ti_setmulti(sc); TI_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); break; case SIOCSIFCAP: TI_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= TI_CSUM_FEATURES; else ifp->if_hwassist &= ~TI_CSUM_FEATURES; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) ifp->if_capenable ^= IFCAP_RXCSUM; if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if ((mask & IFCAP_VLAN_HWCSUM) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; if ((mask & (IFCAP_TXCSUM | IFCAP_RXCSUM | IFCAP_VLAN_HWTAGGING)) != 0) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ti_init_locked(sc); } } TI_UNLOCK(sc); VLAN_CAPABILITIES(ifp); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int ti_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct ti_softc *sc; sc = dev->si_drv1; if (sc == NULL) return (ENODEV); TI_LOCK(sc); sc->ti_flags |= TI_FLAG_DEBUGING; TI_UNLOCK(sc); return (0); } static int ti_close(struct cdev *dev, int flag, int fmt, struct thread *td) { struct ti_softc *sc; sc = dev->si_drv1; if (sc == NULL) return (ENODEV); TI_LOCK(sc); sc->ti_flags &= ~TI_FLAG_DEBUGING; TI_UNLOCK(sc); return (0); } /* * This ioctl routine goes along with the Tigon character device. 
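* It operates on the /dev/ti%d node created in ti_attach() and is intended for debugging tools rather than for normal operation.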
*/ static int ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ti_softc *sc; int error; sc = dev->si_drv1; if (sc == NULL) return (ENODEV); error = 0; switch (cmd) { case TIIOCGETSTATS: { struct ti_stats *outstats; outstats = (struct ti_stats *)addr; TI_LOCK(sc); bus_dmamap_sync(sc->ti_cdata.ti_gib_tag, sc->ti_cdata.ti_gib_map, BUS_DMASYNC_POSTREAD); bcopy(&sc->ti_rdata.ti_info->ti_stats, outstats, sizeof(struct ti_stats)); bus_dmamap_sync(sc->ti_cdata.ti_gib_tag, sc->ti_cdata.ti_gib_map, BUS_DMASYNC_PREREAD); TI_UNLOCK(sc); break; } case TIIOCGETPARAMS: { struct ti_params *params; params = (struct ti_params *)addr; TI_LOCK(sc); params->ti_stat_ticks = sc->ti_stat_ticks; params->ti_rx_coal_ticks = sc->ti_rx_coal_ticks; params->ti_tx_coal_ticks = sc->ti_tx_coal_ticks; params->ti_rx_max_coal_bds = sc->ti_rx_max_coal_bds; params->ti_tx_max_coal_bds = sc->ti_tx_max_coal_bds; params->ti_tx_buf_ratio = sc->ti_tx_buf_ratio; params->param_mask = TI_PARAM_ALL; TI_UNLOCK(sc); break; } case TIIOCSETPARAMS: { struct ti_params *params; params = (struct ti_params *)addr; TI_LOCK(sc); if (params->param_mask & TI_PARAM_STAT_TICKS) { sc->ti_stat_ticks = params->ti_stat_ticks; CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks); } if (params->param_mask & TI_PARAM_RX_COAL_TICKS) { sc->ti_rx_coal_ticks = params->ti_rx_coal_ticks; CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks); } if (params->param_mask & TI_PARAM_TX_COAL_TICKS) { sc->ti_tx_coal_ticks = params->ti_tx_coal_ticks; CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks); } if (params->param_mask & TI_PARAM_RX_COAL_BDS) { sc->ti_rx_max_coal_bds = params->ti_rx_max_coal_bds; CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds); } if (params->param_mask & TI_PARAM_TX_COAL_BDS) { sc->ti_tx_max_coal_bds = params->ti_tx_max_coal_bds; CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds); } if (params->param_mask & TI_PARAM_TX_BUF_RATIO) { sc->ti_tx_buf_ratio = params->ti_tx_buf_ratio; CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio); } TI_UNLOCK(sc); break; } case TIIOCSETTRACE: { ti_trace_type trace_type; trace_type = *(ti_trace_type *)addr; /* * Set tracing to whatever the user asked for. Setting * this register to 0 should have the effect of disabling * tracing. 
*/ TI_LOCK(sc); CSR_WRITE_4(sc, TI_GCR_NIC_TRACING, trace_type); TI_UNLOCK(sc); break; } case TIIOCGETTRACE: { struct ti_trace_buf *trace_buf; uint32_t trace_start, cur_trace_ptr, trace_len; trace_buf = (struct ti_trace_buf *)addr; TI_LOCK(sc); trace_start = CSR_READ_4(sc, TI_GCR_NICTRACE_START); cur_trace_ptr = CSR_READ_4(sc, TI_GCR_NICTRACE_PTR); trace_len = CSR_READ_4(sc, TI_GCR_NICTRACE_LEN); #if 0 if_printf(sc->ti_ifp, "trace_start = %#x, cur_trace_ptr = %#x, " "trace_len = %d\n", trace_start, cur_trace_ptr, trace_len); if_printf(sc->ti_ifp, "trace_buf->buf_len = %d\n", trace_buf->buf_len); #endif error = ti_copy_mem(sc, trace_start, min(trace_len, trace_buf->buf_len), (caddr_t)trace_buf->buf, 1, 1); if (error == 0) { trace_buf->fill_len = min(trace_len, trace_buf->buf_len); if (cur_trace_ptr < trace_start) trace_buf->cur_trace_ptr = trace_start - cur_trace_ptr; else trace_buf->cur_trace_ptr = cur_trace_ptr - trace_start; } else trace_buf->fill_len = 0; TI_UNLOCK(sc); break; } /* * For debugging, five ioctls are needed: * ALT_ATTACH * ALT_READ_TG_REG * ALT_WRITE_TG_REG * ALT_READ_TG_MEM * ALT_WRITE_TG_MEM */ case ALT_ATTACH: /* * From what I can tell, Alteon's Solaris Tigon driver * only has one character device, so you have to attach * to the Tigon board you're interested in. This seems * like a not-so-good way to do things, since unless you * subsequently specify the unit number of the device * you're interested in every ioctl, you'll only be * able to debug one board at a time. */ break; case ALT_READ_TG_MEM: case ALT_WRITE_TG_MEM: { struct tg_mem *mem_param; uint32_t sram_end, scratch_end; mem_param = (struct tg_mem *)addr; if (sc->ti_hwrev == TI_HWREV_TIGON) { sram_end = TI_END_SRAM_I; scratch_end = TI_END_SCRATCH_I; } else { sram_end = TI_END_SRAM_II; scratch_end = TI_END_SCRATCH_II; } /* * For now, we'll only handle accessing regular SRAM, * nothing else. */ TI_LOCK(sc); if (mem_param->tgAddr >= TI_BEG_SRAM && mem_param->tgAddr + mem_param->len <= sram_end) { /* * In this instance, we always copy to/from user * space, so the user space argument is set to 1. */ error = ti_copy_mem(sc, mem_param->tgAddr, mem_param->len, mem_param->userAddr, 1, cmd == ALT_READ_TG_MEM ? 1 : 0); } else if (mem_param->tgAddr >= TI_BEG_SCRATCH && mem_param->tgAddr <= scratch_end) { error = ti_copy_scratch(sc, mem_param->tgAddr, mem_param->len, mem_param->userAddr, 1, cmd == ALT_READ_TG_MEM ? 1 : 0, TI_PROCESSOR_A); } else if (mem_param->tgAddr >= TI_BEG_SCRATCH_B_DEBUG && mem_param->tgAddr <= TI_BEG_SCRATCH_B_DEBUG) { if (sc->ti_hwrev == TI_HWREV_TIGON) { if_printf(sc->ti_ifp, "invalid memory range for Tigon I\n"); error = EINVAL; break; } error = ti_copy_scratch(sc, mem_param->tgAddr - TI_SCRATCH_DEBUG_OFF, mem_param->len, mem_param->userAddr, 1, cmd == ALT_READ_TG_MEM ? 1 : 0, TI_PROCESSOR_B); } else { if_printf(sc->ti_ifp, "memory address %#x len %d is " "out of supported range\n", mem_param->tgAddr, mem_param->len); error = EINVAL; } TI_UNLOCK(sc); break; } case ALT_READ_TG_REG: case ALT_WRITE_TG_REG: { struct tg_reg *regs; uint32_t tmpval; regs = (struct tg_reg *)addr; /* * Make sure the address in question isn't out of range. 
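* (The value is byte-swapped with ntohl()/htonl() around the bus_space access below.)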
*/ if (regs->addr > TI_REG_MAX) { error = EINVAL; break; } TI_LOCK(sc); if (cmd == ALT_READ_TG_REG) { bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle, regs->addr, &tmpval, 1); regs->data = ntohl(tmpval); #if 0 if ((regs->addr == TI_CPU_STATE) || (regs->addr == TI_CPU_CTL_B)) { if_printf(sc->ti_ifp, "register %#x = %#x\n", regs->addr, tmpval); } #endif } else { tmpval = htonl(regs->data); bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle, regs->addr, &tmpval, 1); } TI_UNLOCK(sc); break; } default: error = ENOTTY; break; } return (error); } static void ti_watchdog(void *arg) { struct ti_softc *sc; struct ifnet *ifp; sc = arg; TI_LOCK_ASSERT(sc); callout_reset(&sc->ti_watchdog, hz, ti_watchdog, sc); if (sc->ti_timer == 0 || --sc->ti_timer > 0) return; /* * When we're debugging, the chip is often stopped for long periods * of time, and that would normally cause the watchdog timer to fire. * Since that impedes debugging, we don't want to do that. */ if (sc->ti_flags & TI_FLAG_DEBUGING) return; ifp = sc->ti_ifp; if_printf(ifp, "watchdog timeout -- resetting\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ti_init_locked(sc); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void ti_stop(struct ti_softc *sc) { struct ifnet *ifp; struct ti_cmd_desc cmd; TI_LOCK_ASSERT(sc); ifp = sc->ti_ifp; /* Disable host interrupts. */ CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); /* * Tell firmware we're shutting down. */ TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0); /* Halt and reinitialize. */ if (ti_chipinit(sc) == 0) { ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000); /* XXX ignore init errors. */ ti_chipinit(sc); } /* Free the RX lists. */ ti_free_rx_ring_std(sc); /* Free jumbo RX list. */ ti_free_rx_ring_jumbo(sc); /* Free mini RX list. */ ti_free_rx_ring_mini(sc); /* Free TX buffers. */ ti_free_tx_ring(sc); sc->ti_ev_prodidx.ti_idx = 0; sc->ti_return_prodidx.ti_idx = 0; sc->ti_tx_considx.ti_idx = 0; sc->ti_tx_saved_considx = TI_TXCONS_UNSET; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->ti_watchdog); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ static int ti_shutdown(device_t dev) { struct ti_softc *sc; sc = device_get_softc(dev); TI_LOCK(sc); ti_chipinit(sc); TI_UNLOCK(sc); return (0); } static void ti_sysctl_node(struct ti_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child; char tname[32]; ctx = device_get_sysctl_ctx(sc->ti_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ti_dev)); /* Use DAC */ sc->ti_dac = 1; snprintf(tname, sizeof(tname), "dev.ti.%d.dac", device_get_unit(sc->ti_dev)); TUNABLE_INT_FETCH(tname, &sc->ti_dac); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_coal_ticks", CTLFLAG_RW, &sc->ti_rx_coal_ticks, 0, "Receive coalesced ticks"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_max_coal_bds", CTLFLAG_RW, &sc->ti_rx_max_coal_bds, 0, "Receive max coalesced BDs"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_coal_ticks", CTLFLAG_RW, &sc->ti_tx_coal_ticks, 0, "Send coalesced ticks"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_max_coal_bds", CTLFLAG_RW, &sc->ti_tx_max_coal_bds, 0, "Send max coalesced BDs"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_buf_ratio", CTLFLAG_RW, &sc->ti_tx_buf_ratio, 0, "Ratio of NIC memory devoted to TX buffer"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "stat_ticks", CTLFLAG_RW, &sc->ti_stat_ticks, 0, "Number of clock ticks for statistics update interval"); /* Pull in device tunables. */ sc->ti_rx_coal_ticks = 170; resource_int_value(device_get_name(sc->ti_dev), device_get_unit(sc->ti_dev), "rx_coal_ticks", &sc->ti_rx_coal_ticks); sc->ti_rx_max_coal_bds = 64; resource_int_value(device_get_name(sc->ti_dev), device_get_unit(sc->ti_dev), "rx_max_coal_bds", &sc->ti_rx_max_coal_bds); sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500; resource_int_value(device_get_name(sc->ti_dev), device_get_unit(sc->ti_dev), "tx_coal_ticks", &sc->ti_tx_coal_ticks); sc->ti_tx_max_coal_bds = 32; resource_int_value(device_get_name(sc->ti_dev), device_get_unit(sc->ti_dev), "tx_max_coal_bds", &sc->ti_tx_max_coal_bds); sc->ti_tx_buf_ratio = 21; resource_int_value(device_get_name(sc->ti_dev), device_get_unit(sc->ti_dev), "tx_buf_ratio", &sc->ti_tx_buf_ratio); sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC; resource_int_value(device_get_name(sc->ti_dev), device_get_unit(sc->ti_dev), "stat_ticks", &sc->ti_stat_ticks); } Index: head/sys/dev/tl/if_tl.c =================================================================== --- head/sys/dev/tl/if_tl.c (revision 276749) +++ head/sys/dev/tl/if_tl.c (revision 276750) @@ -1,2277 +1,2276 @@ /*- * Copyright (c) 1997, 1998 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); /* * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x. * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller, * the National Semiconductor DP83840A physical interface and the * Microchip Technology 24Cxx series serial EEPROM. * * Written using the following four documents: * * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com) * National Semiconductor DP83840A data sheet (www.national.com) * Microchip Technology 24C02C data sheet (www.microchip.com) * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com) * * Written by Bill Paul <wpaul@ctr.columbia.edu> * Electrical Engineering Department * Columbia University, New York City */ /* * Some notes about the ThunderLAN: * * The ThunderLAN controller is a single chip containing PCI controller * logic, approximately 3K of on-board SRAM, a LAN controller, and a media * independent interface (MII) bus. The MII allows the ThunderLAN chip to * control up to 32 different physical interfaces (PHYs). The ThunderLAN * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller * to act as a complete ethernet interface. * * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec * in full or half duplex. Some of the Compaq Deskpro machines use a * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in * concert with the ThunderLAN's internal PHY to provide full 10/100 * support. This is cheaper than using a standalone external PHY for both * 10/100 modes and letting the ThunderLAN's internal PHY go to waste. * A serial EEPROM is also attached to the ThunderLAN chip to provide * power-up default register settings and for storing the adapter's * station address. Although not supported by this driver, the ThunderLAN * chip can also be connected to token ring PHYs. * * The ThunderLAN has a set of registers which can be used to issue * commands, acknowledge interrupts, and to manipulate other internal * registers on its DIO bus. The primary registers can be accessed * using either programmed I/O (inb/outb) or via PCI memory mapping, * depending on how the card is configured during the PCI probing * phase. It is even possible to have both PIO and memory mapped * access turned on at the same time. * * Frame reception and transmission with the ThunderLAN chip is done * using frame 'lists.'
A list structure looks more or less like this: * * struct tl_frag { * u_int32_t fragment_address; * u_int32_t fragment_size; * }; * struct tl_list { * u_int32_t forward_pointer; * u_int16_t cstat; * u_int16_t frame_size; * struct tl_frag fragments[10]; * }; * * The forward pointer in the list header can be either a 0 or the address * of another list, which allows several lists to be linked together. Each * list contains up to 10 fragment descriptors. This means the chip allows * ethernet frames to be broken up into up to 10 chunks for transfer to * and from the SRAM. Note that the forward pointer and fragment buffer * addresses are physical memory addresses, not virtual. Note also that * a single ethernet frame cannot span lists: if the host wants to * transmit a frame and the frame data is split up over more than 10 * buffers, the frame has to be collapsed before it can be transmitted. * * To receive frames, the driver sets up a number of lists and populates * the fragment descriptors, then it sends an RX GO command to the chip. * When a frame is received, the chip will DMA it into the memory regions * specified by the fragment descriptors and then trigger an RX 'end of * frame interrupt' when done. The driver may choose to use only one * fragment per list; this may result in slightly less efficient use * of memory in exchange for improved performance. * * To transmit frames, the driver again sets up lists and fragment * descriptors, only this time the buffers contain frame data that * is to be DMA'ed into the chip instead of out of it. Once the chip * has transferred the data into its on-board SRAM, it will trigger a * TX 'end of frame' interrupt. It will also generate an 'end of channel' * interrupt when it reaches the end of the list. */ /* * Some notes about this driver: * * The ThunderLAN chip provides a couple of different ways to organize * reception, transmission and interrupt handling. The simplest approach * is to use one list each for transmission and reception. In this mode, * the ThunderLAN will generate two interrupts for every received frame * (one RX EOF and one RX EOC) and two for each transmitted frame (one * TX EOF and one TX EOC). This may make the driver simpler but it hurts * performance to have to handle so many interrupts. * * Initially I wanted to create a circular list of receive buffers so * that the ThunderLAN chip would think there was an infinitely long * receive channel and never deliver an RXEOC interrupt. However this * doesn't work correctly under heavy load: while the manual says the * chip will trigger an RXEOF interrupt each time a frame is copied into * memory, you can't count on the chip waiting around for you to acknowledge * the interrupt before it starts trying to DMA the next frame. The result * is that the chip might traverse the entire circular list and then wrap * around before you have a chance to do anything about it. Consequently, * the receive list is terminated (with a 0 in the forward pointer in the * last element). Each time an RXEOF interrupt arrives, the used list * is shifted to the end of the list. This gives the appearance of an * infinitely large RX chain so long as the driver doesn't fall behind * the chip and allow all of the lists to be filled up. * * If all the lists are filled, the adapter will deliver an RX 'end of * channel' interrupt when it hits the 0 forward pointer at the end of * the chain.
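* (In practice this should only happen when the host falls an entire chain's worth of frames behind the chip.)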
The RXEOC handler then cleans out the RX chain and resets * the list head pointer in the ch_parm register and restarts the receiver. * * For frame transmission, it is possible to program the ThunderLAN's * transmit interrupt threshold so that the chip can acknowledge multiple * lists with only a single TX EOF interrupt. This allows the driver to * queue several frames in one shot, and only have to handle a total of * two interrupts (one TX EOF and one TX EOC) no matter how many frames * are transmitted. Frame transmission is done directly out of the * mbufs passed to the tl_start() routine via the interface send queue. * The driver simply sets up the fragment descriptors in the transmit * lists to point to the mbuf data regions and sends a TX GO command. * * Note that since the RX and TX lists themselves are always used * only by the driver, they are malloc()ed once at driver initialization * time and never free()ed. * * Also, in order to remain as platform independent as possible, this * driver uses memory mapped register access to manipulate the card * as opposed to programmed I/O. This avoids the use of the inb/outb * (and related) instructions which are specific to the i386 platform. * * Using these techniques, this driver achieves very high performance * by minimizing the amount of interrupts generated during large * transfers and by completely avoiding buffer copies. Frame transfer * to and from the ThunderLAN chip is performed entirely by the chip * itself thereby reducing the load on the host CPU. */ #include <sys/param.h> #include <sys/systm.h> #include <sys/sockio.h> #include <sys/mbuf.h> #include <sys/malloc.h> #include <sys/kernel.h> #include <sys/module.h> #include <sys/socket.h> #include <net/if.h> #include <net/if_var.h> #include <net/if_arp.h> #include <net/ethernet.h> #include <net/if_dl.h> #include <net/if_media.h> #include <net/if_types.h> #include <net/bpf.h> #include <vm/vm.h> /* for vtophys */ #include <vm/pmap.h> /* for vtophys */ #include <machine/bus.h> #include <machine/resource.h> #include <sys/bus.h> #include <sys/rman.h> #include <dev/mii/mii.h> #include <dev/mii/mii_bitbang.h> #include <dev/mii/miivar.h> #include <dev/pci/pcireg.h> #include <dev/pci/pcivar.h> /* * Default to using PIO register access mode to pacify certain * laptop docking stations with built-in ThunderLAN chips that * don't seem to handle memory mapped mode properly. */ #define TL_USEIOSPACE #include <dev/tl/if_tlreg.h> MODULE_DEPEND(tl, pci, 1, 1, 1); MODULE_DEPEND(tl, ether, 1, 1, 1); MODULE_DEPEND(tl, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names.
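* tl_probe() matches these PCI vendor/device ID pairs against the device being probed and uses the name of the first match as the device description.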
*/ static const struct tl_type tl_devs[] = { { TI_VENDORID, TI_DEVICEID_THUNDERLAN, "Texas Instruments ThunderLAN" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10, "Compaq Netelligent 10" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100, "Compaq Netelligent 10/100" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT, "Compaq Netelligent 10/100 Proliant" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL, "Compaq Netelligent 10/100 Dual Port" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED, "Compaq NetFlex-3/P Integrated" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P, "Compaq NetFlex-3/P" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC, "Compaq NetFlex 3/P w/ BNC" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED, "Compaq Netelligent 10/100 TX Embedded UTP" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX, "Compaq Netelligent 10 T/2 PCI UTP/Coax" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP, "Compaq Netelligent 10/100 TX UTP" }, { OLICOM_VENDORID, OLICOM_DEVICEID_OC2183, "Olicom OC-2183/2185" }, { OLICOM_VENDORID, OLICOM_DEVICEID_OC2325, "Olicom OC-2325" }, { OLICOM_VENDORID, OLICOM_DEVICEID_OC2326, "Olicom OC-2326 10/100 TX UTP" }, { 0, 0, NULL } }; static int tl_probe(device_t); static int tl_attach(device_t); static int tl_detach(device_t); static int tl_intvec_rxeoc(void *, u_int32_t); static int tl_intvec_txeoc(void *, u_int32_t); static int tl_intvec_txeof(void *, u_int32_t); static int tl_intvec_rxeof(void *, u_int32_t); static int tl_intvec_adchk(void *, u_int32_t); static int tl_intvec_netsts(void *, u_int32_t); static int tl_newbuf(struct tl_softc *, struct tl_chain_onefrag *); static void tl_stats_update(void *); static int tl_encap(struct tl_softc *, struct tl_chain *, struct mbuf *); static void tl_intr(void *); static void tl_start(struct ifnet *); static void tl_start_locked(struct ifnet *); static int tl_ioctl(struct ifnet *, u_long, caddr_t); static void tl_init(void *); static void tl_init_locked(struct tl_softc *); static void tl_stop(struct tl_softc *); static void tl_watchdog(struct tl_softc *); static int tl_shutdown(device_t); static int tl_ifmedia_upd(struct ifnet *); static void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *); static u_int8_t tl_eeprom_putbyte(struct tl_softc *, int); static u_int8_t tl_eeprom_getbyte(struct tl_softc *, int, u_int8_t *); static int tl_read_eeprom(struct tl_softc *, caddr_t, int, int); static int tl_miibus_readreg(device_t, int, int); static int tl_miibus_writereg(device_t, int, int, int); static void tl_miibus_statchg(device_t); static void tl_setmode(struct tl_softc *, int); static uint32_t tl_mchash(const uint8_t *); static void tl_setmulti(struct tl_softc *); static void tl_setfilt(struct tl_softc *, caddr_t, int); static void tl_softreset(struct tl_softc *, int); static void tl_hardreset(device_t); static int tl_list_rx_init(struct tl_softc *); static int tl_list_tx_init(struct tl_softc *); static u_int8_t tl_dio_read8(struct tl_softc *, int); static u_int16_t tl_dio_read16(struct tl_softc *, int); static u_int32_t tl_dio_read32(struct tl_softc *, int); static void tl_dio_write8(struct tl_softc *, int, int); static void tl_dio_write16(struct tl_softc *, int, int); static void tl_dio_write32(struct tl_softc *, int, int); static void tl_dio_setbit(struct tl_softc *, int, int); static void tl_dio_clrbit(struct tl_softc *, int, int); static void tl_dio_setbit16(struct tl_softc *, int, int); static void tl_dio_clrbit16(struct tl_softc *, int, int); /* * MII 
bit-bang glue */ static uint32_t tl_mii_bitbang_read(device_t); static void tl_mii_bitbang_write(device_t, uint32_t); static const struct mii_bitbang_ops tl_mii_bitbang_ops = { tl_mii_bitbang_read, tl_mii_bitbang_write, { TL_SIO_MDATA, /* MII_BIT_MDO */ TL_SIO_MDATA, /* MII_BIT_MDI */ TL_SIO_MCLK, /* MII_BIT_MDC */ TL_SIO_MTXEN, /* MII_BIT_DIR_HOST_PHY */ 0, /* MII_BIT_DIR_PHY_HOST */ } }; #ifdef TL_USEIOSPACE #define TL_RES SYS_RES_IOPORT #define TL_RID TL_PCI_LOIO #else #define TL_RES SYS_RES_MEMORY #define TL_RID TL_PCI_LOMEM #endif static device_method_t tl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tl_probe), DEVMETHOD(device_attach, tl_attach), DEVMETHOD(device_detach, tl_detach), DEVMETHOD(device_shutdown, tl_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, tl_miibus_readreg), DEVMETHOD(miibus_writereg, tl_miibus_writereg), DEVMETHOD(miibus_statchg, tl_miibus_statchg), DEVMETHOD_END }; static driver_t tl_driver = { "tl", tl_methods, sizeof(struct tl_softc) }; static devclass_t tl_devclass; DRIVER_MODULE(tl, pci, tl_driver, tl_devclass, 0, 0); DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0); static u_int8_t tl_dio_read8(sc, reg) struct tl_softc *sc; int reg; { CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3))); } static u_int16_t tl_dio_read16(sc, reg) struct tl_softc *sc; int reg; { CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3))); } static u_int32_t tl_dio_read32(sc, reg) struct tl_softc *sc; int reg; { CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3))); } static void tl_dio_write8(sc, reg, val) struct tl_softc *sc; int reg; int val; { CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val); } static void tl_dio_write16(sc, reg, val) struct tl_softc *sc; int reg; int val; { CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val); } static void tl_dio_write32(sc, reg, val) struct tl_softc *sc; int reg; int val; { CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val); } static void tl_dio_setbit(sc, reg, bit) struct tl_softc *sc; int reg; int bit; { u_int8_t f; CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); f |= bit; CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 1, BUS_SPACE_BARRIER_READ | 
BUS_SPACE_BARRIER_WRITE); CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); } static void tl_dio_clrbit(sc, reg, bit) struct tl_softc *sc; int reg; int bit; { u_int8_t f; CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); f &= ~bit; CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); } static void tl_dio_setbit16(sc, reg, bit) struct tl_softc *sc; int reg; int bit; { u_int16_t f; CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); f |= bit; CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); } static void tl_dio_clrbit16(sc, reg, bit) struct tl_softc *sc; int reg; int bit; { u_int16_t f; CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_BARRIER(sc, TL_DIO_ADDR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); f &= ~bit; CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); } /* * Send an instruction or address to the EEPROM, check for ACK. */ static u_int8_t tl_eeprom_putbyte(sc, byte) struct tl_softc *sc; int byte; { register int i, ack = 0; /* * Make sure we're in TX mode. */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN); /* * Feed in each bit and strobe the clock. */ for (i = 0x80; i; i >>= 1) { if (byte & i) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA); } else { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA); } DELAY(1); tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); DELAY(1); tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); } /* * Turn off TX mode. */ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); /* * Check for ack. */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA; tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); return(ack); } /* * Read a byte of data stored in the EEPROM at address 'addr.' */ static u_int8_t tl_eeprom_getbyte(sc, addr, dest) struct tl_softc *sc; int addr; u_int8_t *dest; { register int i; u_int8_t byte = 0; device_t tl_dev = sc->tl_dev; tl_dio_write8(sc, TL_NETSIO, 0); EEPROM_START; /* * Send write control code to EEPROM. */ if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { device_printf(tl_dev, "failed to send write command, status: %x\n", tl_dio_read8(sc, TL_NETSIO)); return(1); } /* * Send address of byte we want to read. */ if (tl_eeprom_putbyte(sc, addr)) { device_printf(tl_dev, "failed to send address, status: %x\n", tl_dio_read8(sc, TL_NETSIO)); return(1); } EEPROM_STOP; EEPROM_START; /* * Send read control code to EEPROM. */ if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) { device_printf(tl_dev, "failed to send read command, status: %x\n", tl_dio_read8(sc, TL_NETSIO)); return(1); } /* * Start reading bits from EEPROM.
*/ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); for (i = 0x80; i; i >>= 1) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); DELAY(1); if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA) byte |= i; tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); DELAY(1); } EEPROM_STOP; /* * No ACK generated for read, so just return byte. */ *dest = byte; return(0); } /* * Read a sequence of bytes from the EEPROM. */ static int tl_read_eeprom(sc, dest, off, cnt) struct tl_softc *sc; caddr_t dest; int off; int cnt; { int err = 0, i; u_int8_t byte = 0; for (i = 0; i < cnt; i++) { err = tl_eeprom_getbyte(sc, off + i, &byte); if (err) break; *(dest + i) = byte; } return(err ? 1 : 0); } #define TL_SIO_MII (TL_SIO_MCLK | TL_SIO_MDATA | TL_SIO_MTXEN) /* * Read the MII serial port for the MII bit-bang module. */ static uint32_t tl_mii_bitbang_read(device_t dev) { struct tl_softc *sc; uint32_t val; sc = device_get_softc(dev); val = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MII; CSR_BARRIER(sc, TL_NETSIO, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (val); } /* * Write the MII serial port for the MII bit-bang module. */ static void tl_mii_bitbang_write(device_t dev, uint32_t val) { struct tl_softc *sc; sc = device_get_softc(dev); val = (tl_dio_read8(sc, TL_NETSIO) & ~TL_SIO_MII) | val; CSR_BARRIER(sc, TL_NETSIO, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); tl_dio_write8(sc, TL_NETSIO, val); CSR_BARRIER(sc, TL_NETSIO, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } static int tl_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct tl_softc *sc; int minten, val; sc = device_get_softc(dev); /* * Turn off MII interrupt by forcing MINTEN low. */ minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; if (minten) { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); } val = mii_bitbang_readreg(dev, &tl_mii_bitbang_ops, phy, reg); /* Reenable interrupts. */ if (minten) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); } return (val); } static int tl_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct tl_softc *sc; int minten; sc = device_get_softc(dev); /* * Turn off MII interrupt by forcing MINTEN low. */ minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; if (minten) { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); } mii_bitbang_writereg(dev, &tl_mii_bitbang_ops, phy, reg, data); /* Reenable interrupts. */ if (minten) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); } return(0); } static void tl_miibus_statchg(dev) device_t dev; { struct tl_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->tl_miibus); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); } else { tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); } } /* * Set modes for bitrate devices. */ static void tl_setmode(sc, media) struct tl_softc *sc; int media; { if (IFM_SUBTYPE(media) == IFM_10_5) tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1); if (IFM_SUBTYPE(media) == IFM_10_T) { tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1); if ((media & IFM_GMASK) == IFM_FDX) { tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3); tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); } else { tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3); tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); } } } /* * Calculate the hash of a MAC address for programming the multicast hash * table. This hash is simply the address split into 6-bit chunks * XOR'd, e.g. 
* byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555 * bit: 765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210 * Bytes 0-2 and 3-5 are symmetrical, so are folded together. Then * the folded 24-bit value is split into 6-bit portions and XOR'd. */ static uint32_t tl_mchash(addr) const uint8_t *addr; { int t; t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 | (addr[2] ^ addr[5]); return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f; } /* * The ThunderLAN has a perfect MAC address filter in addition to * the multicast hash filter. The perfect filter can be programmed * with up to four MAC addresses. The first one is always used to * hold the station address, which leaves us free to use the other * three for multicast addresses. */ static void tl_setfilt(sc, addr, slot) struct tl_softc *sc; caddr_t addr; int slot; { int i; u_int16_t regaddr; regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN); for (i = 0; i < ETHER_ADDR_LEN; i++) tl_dio_write8(sc, regaddr + i, *(addr + i)); } /* * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly * linked list. This is fine, except addresses are added from the head * end of the list. We want to arrange for 224.0.0.1 (the "all hosts") * group to always be in the perfect filter, but as more groups are added, * the 224.0.0.1 entry (which is always added first) gets pushed down * the list and ends up at the tail. So after 3 or 4 multicast groups * are added, the all-hosts entry gets pushed out of the perfect filter * and into the hash table. * * Because the multicast list is a doubly-linked list as opposed to a * circular queue, we don't have the ability to just grab the tail of * the list and traverse it backwards. Instead, we have to traverse * the list once to find the tail, then traverse it again backwards to * update the multicast filter. */ static void tl_setmulti(sc) struct tl_softc *sc; { struct ifnet *ifp; u_int32_t hashes[2] = { 0, 0 }; int h, i; struct ifmultiaddr *ifma; u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; ifp = sc->tl_ifp; /* First, zot all the existing filters. */ for (i = 1; i < 4; i++) tl_setfilt(sc, (caddr_t)&dummy, i); tl_dio_write32(sc, TL_HASH1, 0); tl_dio_write32(sc, TL_HASH2, 0); /* Now program new ones. */ if (ifp->if_flags & IFF_ALLMULTI) { hashes[0] = 0xFFFFFFFF; hashes[1] = 0xFFFFFFFF; } else { i = 1; if_maddr_rlock(ifp); TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Program the first three multicast groups * into the perfect filter. For all others, * use the hash table. */ if (i < 4) { tl_setfilt(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); i++; continue; } h = tl_mchash( LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } if_maddr_runlock(ifp); } tl_dio_write32(sc, TL_HASH1, hashes[0]); tl_dio_write32(sc, TL_HASH2, hashes[1]); } /* * This routine is recommended by the ThunderLAN manual to insure that * the internal PHY is powered up correctly. It also recommends a one * second pause at the end to 'wait for the clocks to start' but in my * experience this isn't necessary. 
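 *
 * [Editor's sketch] To make the fold-and-XOR multicast hash described
 * above concrete, here is a minimal standalone version of the same
 * computation; the function name and the sample address are
 * illustrative only, not part of the driver.
 */

#include <stdint.h>

/* Same algorithm as tl_mchash() above, runnable outside the kernel. */
static uint32_t
example_tl_mchash(const uint8_t *addr)
{
	uint32_t t;

	/* Fold bytes 0-2 onto bytes 3-5, leaving a 24-bit value... */
	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
	    (addr[2] ^ addr[5]);
	/* ...then XOR its four 6-bit chunks down to a 6-bit hash. */
	return (((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f);
}

/*
 * For the all-hosts group 224.0.0.1 (MAC 01:00:5e:00:00:01) this gives
 * t = 0x01005f and a hash of 14, i.e. tl_setmulti() would set bit 14 of
 * TL_HASH1 if that group ever fell out of the perfect filter.
 *
 * Resuming the tl_hardreset() comment: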
*/ static void tl_hardreset(dev) device_t dev; { int i; u_int16_t flags; mii_bitbang_sync(dev, &tl_mii_bitbang_ops); flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN; for (i = 0; i < MII_NPHY; i++) tl_miibus_writereg(dev, i, MII_BMCR, flags); tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO); DELAY(50000); tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO); mii_bitbang_sync(dev, &tl_mii_bitbang_ops); while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET); DELAY(50000); } static void tl_softreset(sc, internal) struct tl_softc *sc; int internal; { u_int32_t cmd, dummy, i; /* Assert the adapter reset bit. */ CMD_SET(sc, TL_CMD_ADRST); /* Turn off interrupts */ CMD_SET(sc, TL_CMD_INTSOFF); /* First, clear the stats registers. */ for (i = 0; i < 5; i++) dummy = tl_dio_read32(sc, TL_TXGOODFRAMES); /* Clear Areg and Hash registers */ for (i = 0; i < 8; i++) tl_dio_write32(sc, TL_AREG0_B5, 0x00000000); /* * Set up Netconfig register. Enable one channel and * one fragment mode. */ tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG); if (internal && !sc->tl_bitrate) { tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN); } else { tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN); } /* Handle cards with bitrate devices. */ if (sc->tl_bitrate) tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE); /* * Load adapter irq pacing timer and tx threshold. * We make the transmit threshold 1 initially but we may * change that later. */ cmd = CSR_READ_4(sc, TL_HOSTCMD); cmd |= TL_CMD_NES; cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK); CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR)); CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003)); /* Unreset the MII */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST); /* Take the adapter out of reset */ tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP); /* Wait for things to settle down a little. */ DELAY(500); } /* * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs * against our list and return its name if we find a match. */ static int tl_probe(dev) device_t dev; { const struct tl_type *t; t = tl_devs; while(t->tl_name != NULL) { if ((pci_get_vendor(dev) == t->tl_vid) && (pci_get_device(dev) == t->tl_did)) { device_set_desc(dev, t->tl_name); return (BUS_PROBE_DEFAULT); } t++; } return(ENXIO); } static int tl_attach(dev) device_t dev; { u_int16_t did, vid; const struct tl_type *t; struct ifnet *ifp; struct tl_softc *sc; int error, flags, i, rid, unit; u_char eaddr[6]; vid = pci_get_vendor(dev); did = pci_get_device(dev); sc = device_get_softc(dev); sc->tl_dev = dev; unit = device_get_unit(dev); t = tl_devs; while(t->tl_name != NULL) { if (vid == t->tl_vid && did == t->tl_did) break; t++; } if (t->tl_name == NULL) { device_printf(dev, "unknown device!?\n"); return (ENXIO); } mtx_init(&sc->tl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); /* * Map control/status registers. */ pci_enable_busmaster(dev); #ifdef TL_USEIOSPACE rid = TL_PCI_LOIO; sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); /* * Some cards have the I/O and memory mapped address registers * reversed. Try both combinations before giving up. 
*/ if (sc->tl_res == NULL) { rid = TL_PCI_LOMEM; sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); } #else rid = TL_PCI_LOMEM; sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->tl_res == NULL) { rid = TL_PCI_LOIO; sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); } #endif if (sc->tl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } #ifdef notdef /* * The ThunderLAN manual suggests jacking the PCI latency * timer all the way up to its maximum value. I'm not sure * if this is really necessary, but what the manual wants, * the manual gets. */ command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4); command |= 0x0000FF00; pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4); #endif /* Allocate interrupt */ rid = 0; sc->tl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->tl_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* * Now allocate memory for the TX and RX lists. */ sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->tl_ldata == NULL) { device_printf(dev, "no memory for list buffers!\n"); error = ENXIO; goto fail; } bzero(sc->tl_ldata, sizeof(struct tl_list_data)); if (vid == COMPAQ_VENDORID || vid == TI_VENDORID) sc->tl_eeaddr = TL_EEPROM_EADDR; if (vid == OLICOM_VENDORID) sc->tl_eeaddr = TL_EEPROM_EADDR_OC; /* Reset the adapter. */ tl_softreset(sc, 1); tl_hardreset(dev); tl_softreset(sc, 1); /* * Get station address from the EEPROM. */ if (tl_read_eeprom(sc, eaddr, sc->tl_eeaddr, ETHER_ADDR_LEN)) { device_printf(dev, "failed to read station address\n"); error = ENXIO; goto fail; } /* * XXX Olicom, in its desire to be different from the * rest of the world, has done strange things with the * encoding of the station address in the EEPROM. First * of all, they store the address at offset 0xF8 rather * than at 0x83 like the ThunderLAN manual suggests. * Second, they store the address in three 16-bit words in * network byte order, as opposed to storing it sequentially * like all the other ThunderLAN cards. In order to get * the station address in a form that matches what the Olicom * diagnostic utility specifies, we have to byte-swap each * word. To make things even more confusing, neither 00:00:28 * nor 00:00:24 appear in the IEEE OUI database. */ if (vid == OLICOM_VENDORID) { for (i = 0; i < ETHER_ADDR_LEN; i += 2) { u_int16_t *p; p = (u_int16_t *)&eaddr[i]; *p = ntohs(*p); } } ifp = sc->tl_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = tl_ioctl; ifp->if_start = tl_start; ifp->if_init = tl_init; ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1; ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_MTU; callout_init_mtx(&sc->tl_stat_callout, &sc->tl_mtx, 0); /* Reset the adapter again. */ tl_softreset(sc, 1); tl_hardreset(dev); tl_softreset(sc, 1); /* * Do MII setup. If no PHYs are found, then this is a * bitrate ThunderLAN chip that only supports 10baseT * and AUI/BNC. * XXX mii_attach() can fail for reason different than * no PHYs found! 
*/ flags = 0; if (vid == COMPAQ_VENDORID) { if (did == COMPAQ_DEVICEID_NETEL_10_100_PROLIANT || did == COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED || did == COMPAQ_DEVICEID_NETFLEX_3P_BNC || did == COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX) flags |= MIIF_MACPRIV0; if (did == COMPAQ_DEVICEID_NETEL_10 || did == COMPAQ_DEVICEID_NETEL_10_100_DUAL || did == COMPAQ_DEVICEID_NETFLEX_3P || did == COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED) flags |= MIIF_MACPRIV1; } else if (vid == OLICOM_VENDORID && did == OLICOM_DEVICEID_OC2183) flags |= MIIF_MACPRIV0 | MIIF_MACPRIV1; if (mii_attach(dev, &sc->tl_miibus, ifp, tl_ifmedia_upd, tl_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0)) { struct ifmedia *ifm; sc->tl_bitrate = 1; ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T); /* Reset again, this time setting bitrate mode. */ tl_softreset(sc, 1); ifm = &sc->ifmedia; ifm->ifm_media = ifm->ifm_cur->ifm_media; tl_ifmedia_upd(ifp); } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, tl_intr, sc, &sc->tl_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) tl_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int tl_detach(dev) device_t dev; { struct tl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->tl_mtx), ("tl mutex not initialized")); ifp = sc->tl_ifp; /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { ether_ifdetach(ifp); TL_LOCK(sc); tl_stop(sc); TL_UNLOCK(sc); callout_drain(&sc->tl_stat_callout); } if (sc->tl_miibus) device_delete_child(dev, sc->tl_miibus); bus_generic_detach(dev); if (sc->tl_ldata) contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF); if (sc->tl_bitrate) ifmedia_removeall(&sc->ifmedia); if (sc->tl_intrhand) bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand); if (sc->tl_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq); if (sc->tl_res) bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res); if (ifp) if_free(ifp); mtx_destroy(&sc->tl_mtx); return(0); } /* * Initialize the transmit lists. */ static int tl_list_tx_init(sc) struct tl_softc *sc; { struct tl_chain_data *cd; struct tl_list_data *ld; int i; cd = &sc->tl_cdata; ld = sc->tl_ldata; for (i = 0; i < TL_TX_LIST_CNT; i++) { cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i]; if (i == (TL_TX_LIST_CNT - 1)) cd->tl_tx_chain[i].tl_next = NULL; else cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1]; } cd->tl_tx_free = &cd->tl_tx_chain[0]; cd->tl_tx_tail = cd->tl_tx_head = NULL; sc->tl_txeoc = 1; return(0); } /* * Initialize the RX lists and allocate mbufs for them. 
*/ static int tl_list_rx_init(sc) struct tl_softc *sc; { struct tl_chain_data *cd; struct tl_list_data *ld; int i; cd = &sc->tl_cdata; ld = sc->tl_ldata; for (i = 0; i < TL_RX_LIST_CNT; i++) { cd->tl_rx_chain[i].tl_ptr = (struct tl_list_onefrag *)&ld->tl_rx_list[i]; if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS) return(ENOBUFS); if (i == (TL_RX_LIST_CNT - 1)) { cd->tl_rx_chain[i].tl_next = NULL; ld->tl_rx_list[i].tlist_fptr = 0; } else { cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1]; ld->tl_rx_list[i].tlist_fptr = vtophys(&ld->tl_rx_list[i + 1]); } } cd->tl_rx_head = &cd->tl_rx_chain[0]; cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1]; return(0); } static int tl_newbuf(sc, c) struct tl_softc *sc; struct tl_chain_onefrag *c; { struct mbuf *m_new = NULL; m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m_new == NULL) return(ENOBUFS); c->tl_mbuf = m_new; c->tl_next = NULL; c->tl_ptr->tlist_frsize = MCLBYTES; c->tl_ptr->tlist_fptr = 0; c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t)); c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES; c->tl_ptr->tlist_cstat = TL_CSTAT_READY; return(0); } /* * Interrupt handler for RX 'end of frame' condition (EOF). This * tells us that a full ethernet frame has been captured and we need * to handle it. * * Reception is done using 'lists' which consist of a header and a * series of 10 data count/data address pairs that point to buffers. * Initially you're supposed to create a list, populate it with pointers * to buffers, then load the physical address of the list into the * ch_parm register. The adapter is then supposed to DMA the received * frame into the buffers for you. * * To make things as fast as possible, we have the chip DMA directly * into mbufs. This saves us from having to do a buffer copy: we can * just hand the mbufs directly to ether_input(). Once the frame has * been sent on its way, the 'list' structure is assigned a new buffer * and moved to the end of the RX chain. As long as we stay ahead of * the chip, it will always think it has an endless receive channel. * * If we happen to fall behind and the chip manages to fill up all of * the buffers, it will generate an end of channel interrupt and wait * for us to empty the chain and restart the receiver. */ static int tl_intvec_rxeof(xsc, type) void *xsc; u_int32_t type; { struct tl_softc *sc; int r = 0, total_len = 0; struct ether_header *eh; struct mbuf *m; struct ifnet *ifp; struct tl_chain_onefrag *cur_rx; sc = xsc; ifp = sc->tl_ifp; TL_LOCK_ASSERT(sc); while(sc->tl_cdata.tl_rx_head != NULL) { cur_rx = sc->tl_cdata.tl_rx_head; if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP)) break; r++; sc->tl_cdata.tl_rx_head = cur_rx->tl_next; m = cur_rx->tl_mbuf; total_len = cur_rx->tl_ptr->tlist_frsize; if (tl_newbuf(sc, cur_rx) == ENOBUFS) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); cur_rx->tl_ptr->tlist_frsize = MCLBYTES; cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY; cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES; continue; } sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr = vtophys(cur_rx->tl_ptr); sc->tl_cdata.tl_rx_tail->tl_next = cur_rx; sc->tl_cdata.tl_rx_tail = cur_rx; /* * Note: when the ThunderLAN chip is in 'capture all * frames' mode, it will receive its own transmissions. * We don't need to process our own transmissions, * so we drop them here and continue.
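 *
 * [Editor's sketch] The 'list' layout described above, reconstructed
 * only from the fields this file actually touches; the ex_-prefixed
 * names are illustrative stand-ins, not the authoritative definitions
 * from if_tlreg.h.
 */

#include <stdint.h>

#define EX_TL_MAXFRAGS	10	/* ten count/address pairs per list */

struct ex_tl_frag {
	uint32_t tlist_dadr;	/* physical address of one buffer */
	uint32_t tlist_dcnt;	/* byte count; TL_LAST_FRAG marks the end */
};

struct ex_tl_list {
	uint32_t tlist_fptr;	/* phys. forward pointer, 0 = end of chain */
	uint16_t tlist_cstat;	/* status: TL_CSTAT_READY, TL_CSTAT_FRAMECMP */
	uint16_t tlist_frsize;	/* frame size in bytes */
	struct ex_tl_frag tl_frag[EX_TL_MAXFRAGS];
};

/*
 * tl_intvec_rxeof() walks this chain: each completed descriptor's mbuf
 * is handed up the stack, tl_newbuf() attaches a fresh cluster, and the
 * descriptor is relinked at the tail so the receiver never runs dry.
 *
 * Back to the loop: drop our own transmissions, then pass the frame up.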
*/ eh = mtod(m, struct ether_header *); /*if (ifp->if_flags & IFF_PROMISC && */ if (!bcmp(eh->ether_shost, IF_LLADDR(sc->tl_ifp), ETHER_ADDR_LEN)) { m_freem(m); continue; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; TL_UNLOCK(sc); (*ifp->if_input)(ifp, m); TL_LOCK(sc); } return(r); } /* * The RX-EOC condition hits when the ch_parm address hasn't been * initialized or the adapter reached a list with a forward pointer * of 0 (which indicates the end of the chain). In our case, this means * the card has hit the end of the receive buffer chain and we need to * empty out the buffers and shift the pointer back to the beginning again. */ static int tl_intvec_rxeoc(xsc, type) void *xsc; u_int32_t type; { struct tl_softc *sc; int r; struct tl_chain_data *cd; sc = xsc; cd = &sc->tl_cdata; /* Flush out the receive queue and ack RXEOF interrupts. */ r = tl_intvec_rxeof(xsc, type); CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000))); r = 1; cd->tl_rx_head = &cd->tl_rx_chain[0]; cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1]; CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr)); r |= (TL_CMD_GO|TL_CMD_RT); return(r); } static int tl_intvec_txeof(xsc, type) void *xsc; u_int32_t type; { struct tl_softc *sc; int r = 0; struct tl_chain *cur_tx; sc = xsc; /* * Go through our tx list and free mbufs for those * frames that have been sent. */ while (sc->tl_cdata.tl_tx_head != NULL) { cur_tx = sc->tl_cdata.tl_tx_head; if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP)) break; sc->tl_cdata.tl_tx_head = cur_tx->tl_next; r++; m_freem(cur_tx->tl_mbuf); cur_tx->tl_mbuf = NULL; cur_tx->tl_next = sc->tl_cdata.tl_tx_free; sc->tl_cdata.tl_tx_free = cur_tx; if (!cur_tx->tl_ptr->tlist_fptr) break; } return(r); } /* * The transmit end of channel interrupt. The adapter triggers this * interrupt to tell us it hit the end of the current transmit list. * * A note about this: it's possible for a condition to arise where * tl_start() may try to send frames between TXEOF and TXEOC interrupts. * You have to avoid this since the chip expects things to go in a * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC. * When the TXEOF handler is called, it will free all of the transmitted * frames and reset the tx_head pointer to NULL. However, a TXEOC * interrupt should be received and acknowledged before any more frames * are queued for transmission. If tl_start() is called after TXEOF * resets the tx_head pointer but _before_ the TXEOC interrupt arrives, * it could attempt to issue a transmit command prematurely. * * To guard against this, tl_start() will only issue transmit commands * if the tl_txeoc flag is set, and only the TXEOC interrupt handler * can set this flag once tl_start() has cleared it. */ static int tl_intvec_txeoc(xsc, type) void *xsc; u_int32_t type; { struct tl_softc *sc; struct ifnet *ifp; u_int32_t cmd; sc = xsc; ifp = sc->tl_ifp; /* Clear the timeout timer. */ sc->tl_timer = 0; if (sc->tl_cdata.tl_tx_head == NULL) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->tl_cdata.tl_tx_tail = NULL; sc->tl_txeoc = 1; } else { sc->tl_txeoc = 0; /* First we have to ack the EOC interrupt. */ CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type); /* Then load the address of the next TX list. */ CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_tx_head->tl_ptr)); /* Restart TX channel.
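 *
 * [Editor's sketch] The tl_txeoc handshake described in the comment
 * above, reduced to its skeleton; everything here is illustrative
 * pseudo-driver code, not part of if_tl.c.
 */

struct ex_softc {			/* illustrative stand-in for tl_softc */
	void	*tx_head;		/* head of the in-flight TX chain */
	int	 txeoc;			/* 1 = safe to issue a TX GO */
};

/* Producer side, cf. tl_start_locked() below: */
static void
ex_start(struct ex_softc *exsc)
{
	if (exsc->tx_head == NULL && exsc->txeoc) {
		exsc->txeoc = 0;	/* block further GOs... */
		/* ...load CH_PARM with the new list and issue TX GO */
	}
	/* otherwise just append; the TXEOC handler restarts the channel */
}

/* Consumer side, cf. tl_intvec_txeoc() above: */
static void
ex_txeoc_intr(struct ex_softc *exsc)
{
	if (exsc->tx_head == NULL)
		exsc->txeoc = 1;	/* idle: ex_start() may GO again */
	/* else: ack EOC, reload CH_PARM and GO ourselves */
}

/*
 * Restarting the TX channel: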
*/ cmd = CSR_READ_4(sc, TL_HOSTCMD); cmd &= ~TL_CMD_RT; cmd |= TL_CMD_GO|TL_CMD_INTSON; CMD_PUT(sc, cmd); return(0); } return(1); } static int tl_intvec_adchk(xsc, type) void *xsc; u_int32_t type; { struct tl_softc *sc; sc = xsc; if (type) device_printf(sc->tl_dev, "adapter check: %x\n", (unsigned int)CSR_READ_4(sc, TL_CH_PARM)); tl_softreset(sc, 1); tl_stop(sc); tl_init_locked(sc); CMD_SET(sc, TL_CMD_INTSON); return(0); } static int tl_intvec_netsts(xsc, type) void *xsc; u_int32_t type; { struct tl_softc *sc; u_int16_t netsts; sc = xsc; netsts = tl_dio_read16(sc, TL_NETSTS); tl_dio_write16(sc, TL_NETSTS, netsts); device_printf(sc->tl_dev, "network status: %x\n", netsts); return(1); } static void tl_intr(xsc) void *xsc; { struct tl_softc *sc; struct ifnet *ifp; int r = 0; u_int32_t type = 0; u_int16_t ints = 0; u_int8_t ivec = 0; sc = xsc; TL_LOCK(sc); /* Disable interrupts */ ints = CSR_READ_2(sc, TL_HOST_INT); CSR_WRITE_2(sc, TL_HOST_INT, ints); type = (ints << 16) & 0xFFFF0000; ivec = (ints & TL_VEC_MASK) >> 5; ints = (ints & TL_INT_MASK) >> 2; ifp = sc->tl_ifp; switch(ints) { case (TL_INTR_INVALID): #ifdef DIAGNOSTIC device_printf(sc->tl_dev, "got an invalid interrupt!\n"); #endif /* Re-enable interrupts but don't ack this one. */ CMD_PUT(sc, type); r = 0; break; case (TL_INTR_TXEOF): r = tl_intvec_txeof((void *)sc, type); break; case (TL_INTR_TXEOC): r = tl_intvec_txeoc((void *)sc, type); break; case (TL_INTR_STATOFLOW): tl_stats_update(sc); r = 1; break; case (TL_INTR_RXEOF): r = tl_intvec_rxeof((void *)sc, type); break; case (TL_INTR_DUMMY): device_printf(sc->tl_dev, "got a dummy interrupt\n"); r = 1; break; case (TL_INTR_ADCHK): if (ivec) r = tl_intvec_adchk((void *)sc, type); else r = tl_intvec_netsts((void *)sc, type); break; case (TL_INTR_RXEOC): r = tl_intvec_rxeoc((void *)sc, type); break; default: device_printf(sc->tl_dev, "bogus interrupt type\n"); break; } /* Re-enable interrupts */ if (r) { CMD_PUT(sc, TL_CMD_ACK | r | type); } if (ifp->if_snd.ifq_head != NULL) tl_start_locked(ifp); TL_UNLOCK(sc); } static void tl_stats_update(xsc) void *xsc; { struct tl_softc *sc; struct ifnet *ifp; struct tl_stats tl_stats; struct mii_data *mii; u_int32_t *p; bzero((char *)&tl_stats, sizeof(struct tl_stats)); sc = xsc; TL_LOCK_ASSERT(sc); ifp = sc->tl_ifp; p = (u_int32_t *)&tl_stats; CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC); *p++ = CSR_READ_4(sc, TL_DIO_DATA); *p++ = CSR_READ_4(sc, TL_DIO_DATA); *p++ = CSR_READ_4(sc, TL_DIO_DATA); *p++ = CSR_READ_4(sc, TL_DIO_DATA); *p++ = CSR_READ_4(sc, TL_DIO_DATA); if_inc_counter(ifp, IFCOUNTER_OPACKETS, tl_tx_goodframes(tl_stats)); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, tl_stats.tl_tx_single_collision + tl_stats.tl_tx_multi_collision); if_inc_counter(ifp, IFCOUNTER_IPACKETS, tl_rx_goodframes(tl_stats)); if_inc_counter(ifp, IFCOUNTER_IERRORS, tl_stats.tl_crc_errors + tl_stats.tl_code_errors + tl_rx_overrun(tl_stats)); if_inc_counter(ifp, IFCOUNTER_OERRORS, tl_tx_underrun(tl_stats)); if (tl_tx_underrun(tl_stats)) { u_int8_t tx_thresh; tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH; if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) { tx_thresh >>= 4; tx_thresh++; device_printf(sc->tl_dev, "tx underrun -- increasing " "tx threshold to %d bytes\n", (64 * (tx_thresh * 4))); tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH); tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4); } } if (sc->tl_timer > 0 && --sc->tl_timer == 0) tl_watchdog(sc); callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc); if (!sc->tl_bitrate) { mii = 
device_get_softc(sc->tl_miibus); mii_tick(mii); } } /* * Encapsulate an mbuf chain in a list by coupling the mbuf data * pointers to the fragment pointers. */ static int tl_encap(sc, c, m_head) struct tl_softc *sc; struct tl_chain *c; struct mbuf *m_head; { int frag = 0; struct tl_frag *f = NULL; int total_len; struct mbuf *m; struct ifnet *ifp = sc->tl_ifp; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ m = m_head; total_len = 0; for (m = m_head, frag = 0; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (frag == TL_MAXFRAGS) break; total_len += m->m_len; c->tl_ptr->tl_frag[frag].tlist_dadr = vtophys(mtod(m, vm_offset_t)); c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len; frag++; } } /* * Handle special cases. * Special case #1: we used up all 10 fragments, but * we have more mbufs left in the chain. Copy the * data into an mbuf cluster. Note that we don't * bother clearing the values in the other fragment * pointers/counters; it wouldn't gain us anything, * and would waste cycles. */ if (m != NULL) { struct mbuf *m_new = NULL; MGETHDR(m_new, M_NOWAIT, MT_DATA); if (m_new == NULL) { if_printf(ifp, "no memory for tx list\n"); return(1); } if (m_head->m_pkthdr.len > MHLEN) { - MCLGET(m_new, M_NOWAIT); - if (!(m_new->m_flags & M_EXT)) { + if (!(MCLGET(m_new, M_NOWAIT))) { m_freem(m_new); if_printf(ifp, "no memory for tx list\n"); return(1); } } m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; f = &c->tl_ptr->tl_frag[0]; f->tlist_dadr = vtophys(mtod(m_new, caddr_t)); f->tlist_dcnt = total_len = m_new->m_len; frag = 1; } /* * Special case #2: the frame is smaller than the minimum * frame size. We have to pad it to make the chip happy. */ if (total_len < TL_MIN_FRAMELEN) { if (frag == TL_MAXFRAGS) if_printf(ifp, "all frags filled but frame still too small!\n"); f = &c->tl_ptr->tl_frag[frag]; f->tlist_dcnt = TL_MIN_FRAMELEN - total_len; f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad); total_len += f->tlist_dcnt; frag++; } c->tl_mbuf = m_head; c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG; c->tl_ptr->tlist_frsize = total_len; c->tl_ptr->tlist_cstat = TL_CSTAT_READY; c->tl_ptr->tlist_fptr = 0; return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void tl_start(ifp) struct ifnet *ifp; { struct tl_softc *sc; sc = ifp->if_softc; TL_LOCK(sc); tl_start_locked(ifp); TL_UNLOCK(sc); } static void tl_start_locked(ifp) struct ifnet *ifp; { struct tl_softc *sc; struct mbuf *m_head = NULL; u_int32_t cmd; struct tl_chain *prev = NULL, *cur_tx = NULL, *start_tx; sc = ifp->if_softc; TL_LOCK_ASSERT(sc); /* * Check for an available queue slot. If there are none, * punt. */ if (sc->tl_cdata.tl_tx_free == NULL) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } start_tx = sc->tl_cdata.tl_tx_free; while(sc->tl_cdata.tl_tx_free != NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a chain member off the free list. */ cur_tx = sc->tl_cdata.tl_tx_free; sc->tl_cdata.tl_tx_free = cur_tx->tl_next; cur_tx->tl_next = NULL; /* Pack the data into the list.
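 *
 * [Editor's sketch] The hunk in tl_encap() above tracks this commit's
 * mbuf(9) change: MCLGET() now evaluates to a success indication
 * (non-zero when the cluster was attached), so callers can test the
 * macro directly instead of checking (m->m_flags & M_EXT) afterwards.
 * A minimal sketch of the new pattern, assuming the post-change macro;
 * the function itself is illustrative, not part of the driver.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

static struct mbuf *
example_alloc_cluster_pkt(void)
{
	struct mbuf *m;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	/* New style: branch on MCLGET()'s own result... */
	if (!(MCLGET(m, M_NOWAIT))) {
		/* ...the old style peeked at M_EXT after the fact. */
		m_freem(m);
		return (NULL);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	return (m);
}

/*
 * The same conversion appears again in udbp_bulk_read_callback() further
 * down.  Back in tl_start_locked(): pack the data into the list.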
*/ tl_encap(sc, cur_tx, m_head); /* Chain it together */ if (prev != NULL) { prev->tl_next = cur_tx; prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr); } prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, cur_tx->tl_mbuf); } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) return; /* * That's all we can stands, we can't stands no more. * If there are no other transfers pending, then issue the * TX GO command to the adapter to start things moving. * Otherwise, just leave the data in the queue and let * the EOF/EOC interrupt handler send. */ if (sc->tl_cdata.tl_tx_head == NULL) { sc->tl_cdata.tl_tx_head = start_tx; sc->tl_cdata.tl_tx_tail = cur_tx; if (sc->tl_txeoc) { sc->tl_txeoc = 0; CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr)); cmd = CSR_READ_4(sc, TL_HOSTCMD); cmd &= ~TL_CMD_RT; cmd |= TL_CMD_GO|TL_CMD_INTSON; CMD_PUT(sc, cmd); } } else { sc->tl_cdata.tl_tx_tail->tl_next = start_tx; sc->tl_cdata.tl_tx_tail = cur_tx; } /* * Set a timeout in case the chip goes out to lunch. */ sc->tl_timer = 5; } static void tl_init(xsc) void *xsc; { struct tl_softc *sc = xsc; TL_LOCK(sc); tl_init_locked(sc); TL_UNLOCK(sc); } static void tl_init_locked(sc) struct tl_softc *sc; { struct ifnet *ifp = sc->tl_ifp; struct mii_data *mii; TL_LOCK_ASSERT(sc); ifp = sc->tl_ifp; /* * Cancel pending I/O. */ tl_stop(sc); /* Initialize TX FIFO threshold */ tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH); tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG); /* Set PCI burst size */ tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG); /* * Set 'capture all frames' bit for promiscuous mode. */ if (ifp->if_flags & IFF_PROMISC) tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF); else tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF); /* * Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX); else tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX); tl_dio_write16(sc, TL_MAXRX, MCLBYTES); /* Init our MAC address */ tl_setfilt(sc, IF_LLADDR(sc->tl_ifp), 0); /* Init multicast filter, if needed. */ tl_setmulti(sc); /* Init circular RX list. */ if (tl_list_rx_init(sc) == ENOBUFS) { device_printf(sc->tl_dev, "initialization failed: no memory for rx buffers\n"); tl_stop(sc); return; } /* Init TX pointers. */ tl_list_tx_init(sc); /* Enable PCI interrupts. */ CMD_SET(sc, TL_CMD_INTSON); /* Load the address of the rx list */ CMD_SET(sc, TL_CMD_RT); CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0])); if (!sc->tl_bitrate) { if (sc->tl_miibus != NULL) { mii = device_get_softc(sc->tl_miibus); mii_mediachg(mii); } } else { tl_ifmedia_upd(ifp); } /* Send the RX go command */ CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* Start the stats update counter */ callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc); } /* * Set media options. */ static int tl_ifmedia_upd(ifp) struct ifnet *ifp; { struct tl_softc *sc; struct mii_data *mii = NULL; sc = ifp->if_softc; TL_LOCK(sc); if (sc->tl_bitrate) tl_setmode(sc, sc->ifmedia.ifm_media); else { mii = device_get_softc(sc->tl_miibus); mii_mediachg(mii); } TL_UNLOCK(sc); return(0); } /* * Report current media status. 
*/ static void tl_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct tl_softc *sc; struct mii_data *mii; sc = ifp->if_softc; TL_LOCK(sc); ifmr->ifm_active = IFM_ETHER; if (sc->tl_bitrate) { if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1) ifmr->ifm_active = IFM_ETHER|IFM_10_5; else ifmr->ifm_active = IFM_ETHER|IFM_10_T; if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3) ifmr->ifm_active |= IFM_HDX; else ifmr->ifm_active |= IFM_FDX; return; } else { mii = device_get_softc(sc->tl_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } TL_UNLOCK(sc); } static int tl_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct tl_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int error = 0; switch(command) { case SIOCSIFFLAGS: TL_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->tl_if_flags & IFF_PROMISC)) { tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF); tl_setmulti(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->tl_if_flags & IFF_PROMISC) { tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF); tl_setmulti(sc); } else tl_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { tl_stop(sc); } } sc->tl_if_flags = ifp->if_flags; TL_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: TL_LOCK(sc); tl_setmulti(sc); TL_UNLOCK(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (sc->tl_bitrate) error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); else { struct mii_data *mii; mii = device_get_softc(sc->tl_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; default: error = ether_ioctl(ifp, command, data); break; } return(error); } static void tl_watchdog(sc) struct tl_softc *sc; { struct ifnet *ifp; TL_LOCK_ASSERT(sc); ifp = sc->tl_ifp; if_printf(ifp, "device timeout\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); tl_softreset(sc, 1); tl_init_locked(sc); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void tl_stop(sc) struct tl_softc *sc; { register int i; struct ifnet *ifp; TL_LOCK_ASSERT(sc); ifp = sc->tl_ifp; /* Stop the stats updater. */ callout_stop(&sc->tl_stat_callout); /* Stop the transmitter */ CMD_CLR(sc, TL_CMD_RT); CMD_SET(sc, TL_CMD_STOP); CSR_WRITE_4(sc, TL_CH_PARM, 0); /* Stop the receiver */ CMD_SET(sc, TL_CMD_RT); CMD_SET(sc, TL_CMD_STOP); CSR_WRITE_4(sc, TL_CH_PARM, 0); /* * Disable host interrupts. */ CMD_SET(sc, TL_CMD_INTSOFF); /* * Clear list pointer. */ CSR_WRITE_4(sc, TL_CH_PARM, 0); /* * Free the RX lists. */ for (i = 0; i < TL_RX_LIST_CNT; i++) { if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) { m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf); sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL; } } bzero((char *)&sc->tl_ldata->tl_rx_list, sizeof(sc->tl_ldata->tl_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < TL_TX_LIST_CNT; i++) { if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) { m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf); sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL; } } bzero((char *)&sc->tl_ldata->tl_tx_list, sizeof(sc->tl_ldata->tl_tx_list)); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ static int tl_shutdown(dev) device_t dev; { struct tl_softc *sc; sc = device_get_softc(dev); TL_LOCK(sc); tl_stop(sc); TL_UNLOCK(sc); return (0); } Index: head/sys/dev/usb/misc/udbp.c =================================================================== --- head/sys/dev/usb/misc/udbp.c (revision 276749) +++ head/sys/dev/usb/misc/udbp.c (revision 276750) @@ -1,859 +1,858 @@ /*- * Copyright (c) 1996-2000 Whistle Communications, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of author nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY NICK HIBMA AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* Driver for arbitrary double bulk pipe devices. * The driver assumes that there will be the same driver on the other side. * * XXX Some more information on what the framing of the IP packets looks like. * * To take full advantage of bulk transmission, packets should be chosen * between 1k and 5k in size (1k to make sure the sending side starts * streaming, and <5k to avoid overflowing the system with small TDs). */ /* probe/attach/detach: * Connect the driver to the hardware and netgraph * * The reason we submit a bulk in transfer is that USB does not know about * interrupts. The bulk transfer continuously polls the device for data. * While the device has no data available, the device NAKs the TDs. As soon * as there is data, the transfer happens and the data comes flowing in. * * In case you were wondering, interrupt transfers happen exactly that way. * It therefore doesn't make sense to use the interrupt pipe to signal * 'data ready' and then schedule a bulk transfer to fetch it. That would * incur a 2ms delay at least, without reducing bandwidth requirements. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR udbp_debug #include #include #include #include #include #include #include #ifdef USB_DEBUG static int udbp_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, udbp, CTLFLAG_RW, 0, "USB udbp"); SYSCTL_INT(_hw_usb_udbp, OID_AUTO, debug, CTLFLAG_RWTUN, &udbp_debug, 0, "udbp debug level"); #endif #define UDBP_TIMEOUT 2000 /* timeout on outbound transfers, in * msecs */ #define UDBP_BUFFERSIZE MCLBYTES /* maximum number of bytes in one * transfer */ #define UDBP_T_WR 0 #define UDBP_T_RD 1 #define UDBP_T_WR_CS 2 #define UDBP_T_RD_CS 3 #define UDBP_T_MAX 4 #define UDBP_Q_MAXLEN 50 struct udbp_softc { struct mtx sc_mtx; struct ng_bt_mbufq sc_xmitq_hipri; /* hi-priority transmit queue */ struct ng_bt_mbufq sc_xmitq; /* low-priority transmit queue */ struct usb_xfer *sc_xfer[UDBP_T_MAX]; node_p sc_node; /* back pointer to node */ hook_p sc_hook; /* pointer to the hook */ struct mbuf *sc_bulk_in_buffer; uint32_t sc_packets_in; /* packets in from downstream */ uint32_t sc_packets_out; /* packets out towards downstream */ uint8_t sc_flags; #define UDBP_FLAG_READ_STALL 0x01 /* read transfer stalled */ #define UDBP_FLAG_WRITE_STALL 0x02 /* write transfer stalled */ uint8_t sc_name[16]; }; /* prototypes */ static int udbp_modload(module_t mod, int event, void *data); static device_probe_t udbp_probe; static device_attach_t udbp_attach; static device_detach_t udbp_detach; static usb_callback_t udbp_bulk_read_callback; static usb_callback_t udbp_bulk_read_clear_stall_callback; static usb_callback_t udbp_bulk_write_callback; static usb_callback_t udbp_bulk_write_clear_stall_callback; static void udbp_bulk_read_complete(node_p, hook_p, void *, int); static ng_constructor_t ng_udbp_constructor; static ng_rcvmsg_t ng_udbp_rcvmsg; static ng_shutdown_t ng_udbp_rmnode; static ng_newhook_t ng_udbp_newhook; static ng_connect_t ng_udbp_connect; static ng_rcvdata_t ng_udbp_rcvdata; static ng_disconnect_t ng_udbp_disconnect; /* Parse type for struct ngudbpstat */ static const struct ng_parse_struct_field ng_udbp_stat_type_fields[] = NG_UDBP_STATS_TYPE_INFO; static const struct ng_parse_type ng_udbp_stat_type = { &ng_parse_struct_type, &ng_udbp_stat_type_fields }; /* List of commands and how to convert arguments to/from ASCII */ static const struct ng_cmdlist ng_udbp_cmdlist[] = { { NGM_UDBP_COOKIE, NGM_UDBP_GET_STATUS, "getstatus", NULL, &ng_udbp_stat_type, }, { NGM_UDBP_COOKIE, NGM_UDBP_SET_FLAG, "setflag", &ng_parse_int32_type, NULL }, {0} }; /* Netgraph node type descriptor */ static struct ng_type ng_udbp_typestruct = { .version = NG_ABI_VERSION, .name = NG_UDBP_NODE_TYPE, .constructor = ng_udbp_constructor, .rcvmsg = ng_udbp_rcvmsg, .shutdown = ng_udbp_rmnode, .newhook = ng_udbp_newhook, .connect = ng_udbp_connect, .rcvdata = ng_udbp_rcvdata, .disconnect = ng_udbp_disconnect, .cmdlist = ng_udbp_cmdlist, }; /* USB config */ static const struct usb_config udbp_config[UDBP_T_MAX] = { [UDBP_T_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = UDBP_BUFFERSIZE, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = &udbp_bulk_write_callback, .timeout = UDBP_TIMEOUT, }, [UDBP_T_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = UDBP_BUFFERSIZE, .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = 
&udbp_bulk_read_callback, }, [UDBP_T_WR_CS] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &udbp_bulk_write_clear_stall_callback, .timeout = 1000, /* 1 second */ .interval = 50, /* 50ms */ }, [UDBP_T_RD_CS] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &udbp_bulk_read_clear_stall_callback, .timeout = 1000, /* 1 second */ .interval = 50, /* 50ms */ }, }; static devclass_t udbp_devclass; static device_method_t udbp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, udbp_probe), DEVMETHOD(device_attach, udbp_attach), DEVMETHOD(device_detach, udbp_detach), DEVMETHOD_END }; static driver_t udbp_driver = { .name = "udbp", .methods = udbp_methods, .size = sizeof(struct udbp_softc), }; DRIVER_MODULE(udbp, uhub, udbp_driver, udbp_devclass, udbp_modload, 0); MODULE_DEPEND(udbp, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); MODULE_DEPEND(udbp, usb, 1, 1, 1); MODULE_VERSION(udbp, 1); static int udbp_modload(module_t mod, int event, void *data) { int error; switch (event) { case MOD_LOAD: error = ng_newtype(&ng_udbp_typestruct); if (error != 0) { printf("%s: Could not register " "Netgraph node type, error=%d\n", NG_UDBP_NODE_TYPE, error); } break; case MOD_UNLOAD: error = ng_rmtype(&ng_udbp_typestruct); break; default: error = EOPNOTSUPP; break; } return (error); } static const STRUCT_USB_HOST_ID udbp_devs[] = { {USB_VPI(USB_VENDOR_NETCHIP, USB_PRODUCT_NETCHIP_TURBOCONNECT, 0)}, {USB_VPI(USB_VENDOR_NETCHIP, USB_PRODUCT_NETCHIP_GADGETZERO, 0)}, {USB_VPI(USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_PL2301, 0)}, {USB_VPI(USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_PL2302, 0)}, {USB_VPI(USB_VENDOR_ANCHOR, USB_PRODUCT_ANCHOR_EZLINK, 0)}, {USB_VPI(USB_VENDOR_GENESYS, USB_PRODUCT_GENESYS_GL620USB, 0)}, }; static int udbp_probe(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != 0) return (ENXIO); if (uaa->info.bIfaceIndex != 0) return (ENXIO); return (usbd_lookup_id_by_uaa(udbp_devs, sizeof(udbp_devs), uaa)); } static int udbp_attach(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); struct udbp_softc *sc = device_get_softc(dev); int error; device_set_usb_desc(dev); snprintf(sc->sc_name, sizeof(sc->sc_name), "%s", device_get_nameunit(dev)); mtx_init(&sc->sc_mtx, "udbp lock", NULL, MTX_DEF | MTX_RECURSE); error = usbd_transfer_setup(uaa->device, &uaa->info.bIfaceIndex, sc->sc_xfer, udbp_config, UDBP_T_MAX, sc, &sc->sc_mtx); if (error) { DPRINTF("error=%s\n", usbd_errstr(error)); goto detach; } NG_BT_MBUFQ_INIT(&sc->sc_xmitq, UDBP_Q_MAXLEN); NG_BT_MBUFQ_INIT(&sc->sc_xmitq_hipri, UDBP_Q_MAXLEN); /* create Netgraph node */ if (ng_make_node_common(&ng_udbp_typestruct, &sc->sc_node) != 0) { printf("%s: Could not create Netgraph node\n", sc->sc_name); sc->sc_node = NULL; goto detach; } /* name node */ if (ng_name_node(sc->sc_node, sc->sc_name) != 0) { printf("%s: Could not name node\n", sc->sc_name); NG_NODE_UNREF(sc->sc_node); sc->sc_node = NULL; goto detach; } NG_NODE_SET_PRIVATE(sc->sc_node, sc); /* the device is now operational */ return (0); /* success */ detach: udbp_detach(dev); return (ENOMEM); /* failure */ } static int udbp_detach(device_t dev) { struct udbp_softc *sc = device_get_softc(dev); /* destroy Netgraph node */ if (sc->sc_node != NULL) { NG_NODE_SET_PRIVATE(sc->sc_node, 
NULL); ng_rmnode_self(sc->sc_node); sc->sc_node = NULL; } /* free USB transfers, if any */ usbd_transfer_unsetup(sc->sc_xfer, UDBP_T_MAX); mtx_destroy(&sc->sc_mtx); /* destroy queues */ NG_BT_MBUFQ_DESTROY(&sc->sc_xmitq); NG_BT_MBUFQ_DESTROY(&sc->sc_xmitq_hipri); /* extra check */ if (sc->sc_bulk_in_buffer) { m_freem(sc->sc_bulk_in_buffer); sc->sc_bulk_in_buffer = NULL; } return (0); /* success */ } static void udbp_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct udbp_softc *sc = usbd_xfer_softc(xfer); struct usb_page_cache *pc; struct mbuf *m; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: /* allocate new mbuf */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { goto tr_setup; } - MCLGET(m, M_NOWAIT); - if (!(m->m_flags & M_EXT)) { + if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); goto tr_setup; } m->m_pkthdr.len = m->m_len = actlen; pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, m->m_data, actlen); sc->sc_bulk_in_buffer = m; DPRINTF("received package %d bytes\n", actlen); case USB_ST_SETUP: tr_setup: if (sc->sc_bulk_in_buffer) { ng_send_fn(sc->sc_node, NULL, &udbp_bulk_read_complete, NULL, 0); return; } if (sc->sc_flags & UDBP_FLAG_READ_STALL) { usbd_transfer_start(sc->sc_xfer[UDBP_T_RD_CS]); return; } usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); return; default: /* Error */ if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ sc->sc_flags |= UDBP_FLAG_READ_STALL; usbd_transfer_start(sc->sc_xfer[UDBP_T_RD_CS]); } return; } } static void udbp_bulk_read_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error) { struct udbp_softc *sc = usbd_xfer_softc(xfer); struct usb_xfer *xfer_other = sc->sc_xfer[UDBP_T_RD]; if (usbd_clear_stall_callback(xfer, xfer_other)) { DPRINTF("stall cleared\n"); sc->sc_flags &= ~UDBP_FLAG_READ_STALL; usbd_transfer_start(xfer_other); } } static void udbp_bulk_read_complete(node_p node, hook_p hook, void *arg1, int arg2) { struct udbp_softc *sc = NG_NODE_PRIVATE(node); struct mbuf *m; int error; if (sc == NULL) { return; } mtx_lock(&sc->sc_mtx); m = sc->sc_bulk_in_buffer; if (m) { sc->sc_bulk_in_buffer = NULL; if ((sc->sc_hook == NULL) || NG_HOOK_NOT_VALID(sc->sc_hook)) { DPRINTF("No upstream hook\n"); goto done; } sc->sc_packets_in++; NG_SEND_DATA_ONLY(error, sc->sc_hook, m); m = NULL; } done: if (m) { m_freem(m); } /* start USB bulk-in transfer, if not already started */ usbd_transfer_start(sc->sc_xfer[UDBP_T_RD]); mtx_unlock(&sc->sc_mtx); } static void udbp_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct udbp_softc *sc = usbd_xfer_softc(xfer); struct usb_page_cache *pc; struct mbuf *m; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: sc->sc_packets_out++; case USB_ST_SETUP: if (sc->sc_flags & UDBP_FLAG_WRITE_STALL) { usbd_transfer_start(sc->sc_xfer[UDBP_T_WR_CS]); return; } /* get next mbuf, if any */ NG_BT_MBUFQ_DEQUEUE(&sc->sc_xmitq_hipri, m); if (m == NULL) { NG_BT_MBUFQ_DEQUEUE(&sc->sc_xmitq, m); if (m == NULL) { DPRINTF("Data queue is empty\n"); return; } } if (m->m_pkthdr.len > MCLBYTES) { DPRINTF("truncating large packet " "from %d to %d bytes\n", m->m_pkthdr.len, MCLBYTES); m->m_pkthdr.len = MCLBYTES; } pc = usbd_xfer_get_frame(xfer, 0); usbd_m_copy_in(pc, 0, m, 0, m->m_pkthdr.len); usbd_xfer_set_frame_len(xfer, 0, m->m_pkthdr.len); DPRINTF("packet out: %d bytes\n", m->m_pkthdr.len); m_freem(m); usbd_transfer_submit(xfer); return; default: /* Error */ if (error != 
USB_ERR_CANCELLED) { /* try to clear stall first */ sc->sc_flags |= UDBP_FLAG_WRITE_STALL; usbd_transfer_start(sc->sc_xfer[UDBP_T_WR_CS]); } return; } } static void udbp_bulk_write_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error) { struct udbp_softc *sc = usbd_xfer_softc(xfer); struct usb_xfer *xfer_other = sc->sc_xfer[UDBP_T_WR]; if (usbd_clear_stall_callback(xfer, xfer_other)) { DPRINTF("stall cleared\n"); sc->sc_flags &= ~UDBP_FLAG_WRITE_STALL; usbd_transfer_start(xfer_other); } } /*********************************************************************** * Start of Netgraph methods **********************************************************************/ /* * Since this is a device node, the real work is done in the attach() * routine; the constructor just returns EINVAL, as you should not be able * to create nodes that depend on hardware (unless you can add the hardware :) */ static int ng_udbp_constructor(node_p node) { return (EINVAL); } /* * Give our ok for a hook to be added... * If we are not running this might kick a device into life. * Possibly decode information out of the hook name. * Add the hook's private info to the hook structure. * (if we had some). In this example, we assume that there is * an array of structs, called 'channel' in the private info, * one for each active channel. The private * pointer of each hook points to the appropriate UDBP_hookinfo struct * so that the source of an input packet is easily identified. */ static int ng_udbp_newhook(node_p node, hook_p hook, const char *name) { struct udbp_softc *sc = NG_NODE_PRIVATE(node); int32_t error = 0; if (strcmp(name, NG_UDBP_HOOK_NAME)) { return (EINVAL); } mtx_lock(&sc->sc_mtx); if (sc->sc_hook != NULL) { error = EISCONN; } else { sc->sc_hook = hook; NG_HOOK_SET_PRIVATE(hook, NULL); } mtx_unlock(&sc->sc_mtx); return (error); } /* * Get a netgraph control message. * Check it is one we understand. If needed, send a response. * We could save the address for an async action later, but don't here. * Always free the message. * The response should be in a malloc'd region that the caller can 'free'. * A response is not required. * Theoretically you could respond differently to old message types if * the cookie in the header didn't match what we consider to be current * (so that old userland programs could continue to work). */ static int ng_udbp_rcvmsg(node_p node, item_p item, hook_p lasthook) { struct udbp_softc *sc = NG_NODE_PRIVATE(node); struct ng_mesg *resp = NULL; int error = 0; struct ng_mesg *msg; NGI_GET_MSG(item, msg); /* Deal with message according to cookie and command */ switch (msg->header.typecookie) { case NGM_UDBP_COOKIE: switch (msg->header.cmd) { case NGM_UDBP_GET_STATUS: { struct ngudbpstat *stats; NG_MKRESPONSE(resp, msg, sizeof(*stats), M_NOWAIT); if (!resp) { error = ENOMEM; break; } stats = (struct ngudbpstat *)resp->data; mtx_lock(&sc->sc_mtx); stats->packets_in = sc->sc_packets_in; stats->packets_out = sc->sc_packets_out; mtx_unlock(&sc->sc_mtx); break; } case NGM_UDBP_SET_FLAG: if (msg->header.arglen != sizeof(uint32_t)) { error = EINVAL; break; } DPRINTF("flags = 0x%08x\n", *((uint32_t *)msg->data)); break; default: error = EINVAL; /* unknown command */ break; } break; default: error = EINVAL; /* unknown cookie type */ break; } /* Take care of synchronous response, if any */ NG_RESPOND_MSG(error, node, item, resp); NG_FREE_MSG(msg); return (error); } /* * Accept data from the hook and queue it for output.
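 *
 * [Editor's sketch] ng_udbp_rcvdata() below files packets carrying an
 * NG_TAG_PRIO mbuf tag with priority above NG_PRIO_CUTOFF on the
 * high-priority queue.  This is how an upstream node could attach such
 * a tag; the helper is illustrative, and the struct ng_tag_prio field
 * names are assumed from netgraph's ng_message.h.
 */

#include <sys/param.h>
#include <sys/mbuf.h>
#include <netgraph/ng_message.h>

static int
example_tag_hipri(struct mbuf *m, char prio)
{
	struct ng_tag_prio *ptag;

	/* Allocate the tag payload (the m_tag header is added for us). */
	ptag = (struct ng_tag_prio *)m_tag_alloc(NGM_GENERIC_COOKIE,
	    NG_TAG_PRIO, sizeof(*ptag) - sizeof(struct m_tag), M_NOWAIT);
	if (ptag == NULL)
		return (ENOMEM);
	ptag->priority = prio;	/* > NG_PRIO_CUTOFF selects sc_xmitq_hipri */
	m_tag_prepend(m, &ptag->tag);	/* 'tag' assumed the first member */
	return (0);
}

/*
 * Accepting data from the hook: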
*/ static int ng_udbp_rcvdata(hook_p hook, item_p item) { struct udbp_softc *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); struct ng_bt_mbufq *queue_ptr; struct mbuf *m; struct ng_tag_prio *ptag; int error; if (sc == NULL) { NG_FREE_ITEM(item); return (EHOSTDOWN); } NGI_GET_M(item, m); NG_FREE_ITEM(item); /* * Now queue the data for when it can be sent */ ptag = (void *)m_tag_locate(m, NGM_GENERIC_COOKIE, NG_TAG_PRIO, NULL); if (ptag && (ptag->priority > NG_PRIO_CUTOFF)) queue_ptr = &sc->sc_xmitq_hipri; else queue_ptr = &sc->sc_xmitq; mtx_lock(&sc->sc_mtx); if (NG_BT_MBUFQ_FULL(queue_ptr)) { NG_BT_MBUFQ_DROP(queue_ptr); NG_FREE_M(m); error = ENOBUFS; } else { NG_BT_MBUFQ_ENQUEUE(queue_ptr, m); /* * start bulk-out transfer, if not already started: */ usbd_transfer_start(sc->sc_xfer[UDBP_T_WR]); error = 0; } mtx_unlock(&sc->sc_mtx); return (error); } /* * Do local shutdown processing... * We are a persistent device; we refuse to go away, and * only remove our links and reset ourselves. */ static int ng_udbp_rmnode(node_p node) { struct udbp_softc *sc = NG_NODE_PRIVATE(node); /* Let old node go */ NG_NODE_SET_PRIVATE(node, NULL); NG_NODE_UNREF(node); /* forget it ever existed */ if (sc == NULL) { goto done; } /* Create Netgraph node */ if (ng_make_node_common(&ng_udbp_typestruct, &sc->sc_node) != 0) { printf("%s: Could not create Netgraph node\n", sc->sc_name); sc->sc_node = NULL; goto done; } /* Name node */ if (ng_name_node(sc->sc_node, sc->sc_name) != 0) { printf("%s: Could not name Netgraph node\n", sc->sc_name); NG_NODE_UNREF(sc->sc_node); sc->sc_node = NULL; goto done; } NG_NODE_SET_PRIVATE(sc->sc_node, sc); done: if (sc) { mtx_unlock(&sc->sc_mtx); } return (0); } /* * This is called once we've already connected a new hook to the other node. * It gives us a chance to balk at the last minute. */ static int ng_udbp_connect(hook_p hook) { struct udbp_softc *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); /* probably not at splnet, force outward queueing */ NG_HOOK_FORCE_QUEUE(NG_HOOK_PEER(hook)); mtx_lock(&sc->sc_mtx); sc->sc_flags |= (UDBP_FLAG_READ_STALL | UDBP_FLAG_WRITE_STALL); /* start bulk-in transfer */ usbd_transfer_start(sc->sc_xfer[UDBP_T_RD]); /* start bulk-out transfer */ usbd_transfer_start(sc->sc_xfer[UDBP_T_WR]); mtx_unlock(&sc->sc_mtx); return (0); } /* * Hook disconnection * * For this type, removal of the last link destroys the node */ static int ng_udbp_disconnect(hook_p hook) { struct udbp_softc *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); int error = 0; if (sc != NULL) { mtx_lock(&sc->sc_mtx); if (hook != sc->sc_hook) { error = EINVAL; } else { /* stop bulk-in transfer */ usbd_transfer_stop(sc->sc_xfer[UDBP_T_RD_CS]); usbd_transfer_stop(sc->sc_xfer[UDBP_T_RD]); /* stop bulk-out transfer */ usbd_transfer_stop(sc->sc_xfer[UDBP_T_WR_CS]); usbd_transfer_stop(sc->sc_xfer[UDBP_T_WR]); /* cleanup queues */ NG_BT_MBUFQ_DRAIN(&sc->sc_xmitq); NG_BT_MBUFQ_DRAIN(&sc->sc_xmitq_hipri); if (sc->sc_bulk_in_buffer) { m_freem(sc->sc_bulk_in_buffer); sc->sc_bulk_in_buffer = NULL; } sc->sc_hook = NULL; } mtx_unlock(&sc->sc_mtx); } if ((NG_NODE_NUMHOOKS(NG_HOOK_NODE(hook)) == 0) && (NG_NODE_IS_VALID(NG_HOOK_NODE(hook)))) ng_rmnode_self(NG_HOOK_NODE(hook)); return (error); } Index: head/sys/dev/vx/if_vx.c =================================================================== --- head/sys/dev/vx/if_vx.c (revision 276749) +++ head/sys/dev/vx/if_vx.c (revision 276750) @@ -1,1078 +1,1077 @@ /*- * Copyright (c) 1994 Herb Peyerl * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Herb Peyerl. * 4. The name of Herb Peyerl may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * */ #include __FBSDID("$FreeBSD$"); /* * Created from if_ep.c driver by Fred Gray (fgray@rice.edu) to support * the 3c590 family. */ /* * Modified from the FreeBSD 1.1.5.1 version by: * Andres Vega Garcia * INRIA - Sophia Antipolis, France * avega@sophia.inria.fr */ /* * Promiscuous mode added and interrupt logic slightly changed * to reduce the number of adapter failures. Transceiver select * logic changed to use value from EEPROM. Autoconfiguration * features added. * Done by: * Serge Babkin * Chelindbank (Chelyabinsk, Russia) * babkin@hq.icb.chel.su */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ETHER_MAX_LEN 1518 #define ETHER_ADDR_LEN 6 #define ETHER_ALIGN 2 static struct connector_entry { int bit; char *name; } conn_tab[VX_CONNECTORS] = { #define CONNECTOR_UTP 0 { 0x08, "utp" }, #define CONNECTOR_AUI 1 { 0x20, "aui" }, /* dummy */ { 0, "???" }, #define CONNECTOR_BNC 3 { 0x10, "bnc" }, #define CONNECTOR_TX 4 { 0x02, "tx" }, #define CONNECTOR_FX 5 { 0x04, "fx" }, #define CONNECTOR_MII 6 { 0x40, "mii" }, { 0, "???" 
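/*
 * A worked example of the EEPROM address unpacking done in vx_attach()
 * below: the station address is stored as three big-endian 16-bit
 * words, so hypothetical words 0x0020, 0xaf12 and 0x3456 unpack as
 *
 *	eaddr[0] = 0x0020 >> 8 = 0x00;	eaddr[1] = 0x0020 & 0xff = 0x20;
 *	eaddr[2] = 0xaf12 >> 8 = 0xaf;	eaddr[3] = 0xaf12 & 0xff = 0x12;
 *	eaddr[4] = 0x3456 >> 8 = 0x34;	eaddr[5] = 0x3456 & 0xff = 0x56;
 *
 * yielding the MAC address 00:20:af:12:34:56.
 */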
} }; static void vx_txstat(struct vx_softc *); static int vx_status(struct vx_softc *); static void vx_init(void *); static void vx_init_locked(struct vx_softc *); static int vx_ioctl(struct ifnet *, u_long, caddr_t); static void vx_start(struct ifnet *); static void vx_start_locked(struct ifnet *); static void vx_watchdog(void *); static void vx_reset(struct vx_softc *); static void vx_read(struct vx_softc *); static struct mbuf *vx_get(struct vx_softc *, u_int); static void vx_mbuf_fill(void *); static void vx_mbuf_empty(struct vx_softc *); static void vx_setfilter(struct vx_softc *); static void vx_getlink(struct vx_softc *); static void vx_setlink(struct vx_softc *); int vx_attach(device_t dev) { struct vx_softc *sc = device_get_softc(dev); struct ifnet *ifp; int i; u_char eaddr[6]; ifp = sc->vx_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return 0; } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); mtx_init(&sc->vx_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->vx_callout, &sc->vx_mtx, 0); callout_init_mtx(&sc->vx_watchdog, &sc->vx_mtx, 0); GO_WINDOW(0); CSR_WRITE_2(sc, VX_COMMAND, GLOBAL_RESET); VX_BUSY_WAIT; vx_getlink(sc); /* * Read the station address from the eeprom */ GO_WINDOW(0); for (i = 0; i < 3; i++) { int x; if (vx_busy_eeprom(sc)) { mtx_destroy(&sc->vx_mtx); if_free(ifp); return 0; } CSR_WRITE_2(sc, VX_W0_EEPROM_COMMAND, EEPROM_CMD_RD | (EEPROM_OEM_ADDR0 + i)); if (vx_busy_eeprom(sc)) { mtx_destroy(&sc->vx_mtx); if_free(ifp); return 0; } x = CSR_READ_2(sc, VX_W0_EEPROM_DATA); eaddr[(i << 1)] = x >> 8; eaddr[(i << 1) + 1] = x; } ifp->if_snd.ifq_maxlen = ifqmaxlen; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = vx_start; ifp->if_ioctl = vx_ioctl; ifp->if_init = vx_init; ifp->if_softc = sc; ether_ifattach(ifp, eaddr); sc->vx_tx_start_thresh = 20; /* probably a good starting point. */ VX_LOCK(sc); vx_stop(sc); VX_UNLOCK(sc); return 1; } /* * The order in here seems important. Otherwise we may not receive * interrupts. ?! */ static void vx_init(void *xsc) { struct vx_softc *sc = (struct vx_softc *)xsc; VX_LOCK(sc); vx_init_locked(sc); VX_UNLOCK(sc); } static void vx_init_locked(struct vx_softc *sc) { struct ifnet *ifp = sc->vx_ifp; int i; VX_LOCK_ASSERT(sc); VX_BUSY_WAIT; GO_WINDOW(2); for (i = 0; i < 6; i++) /* Reload the ether_addr. */ CSR_WRITE_1(sc, VX_W2_ADDR_0 + i, IF_LLADDR(sc->vx_ifp)[i]); CSR_WRITE_2(sc, VX_COMMAND, RX_RESET); VX_BUSY_WAIT; CSR_WRITE_2(sc, VX_COMMAND, TX_RESET); VX_BUSY_WAIT; GO_WINDOW(1); /* Window 1 is operating window */ for (i = 0; i < 31; i++) CSR_READ_1(sc, VX_W1_TX_STATUS); CSR_WRITE_2(sc, VX_COMMAND, SET_RD_0_MASK | S_CARD_FAILURE | S_RX_COMPLETE | S_TX_COMPLETE | S_TX_AVAIL); CSR_WRITE_2(sc, VX_COMMAND, SET_INTR_MASK | S_CARD_FAILURE | S_RX_COMPLETE | S_TX_COMPLETE | S_TX_AVAIL); /* * Attempt to get rid of any stray interrupts that occurred during * configuration. On the i386 this isn't possible because one may * already be queued. However, a single stray interrupt is * unimportant. */ CSR_WRITE_2(sc, VX_COMMAND, ACK_INTR | 0xff); vx_setfilter(sc); vx_setlink(sc); CSR_WRITE_2(sc, VX_COMMAND, RX_ENABLE); CSR_WRITE_2(sc, VX_COMMAND, TX_ENABLE); vx_mbuf_fill(sc); /* Interface is now `running', with no output active. */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->vx_watchdog, hz, vx_watchdog, sc); /* Attempt to start output, if any. 
*/ vx_start_locked(ifp); } static void vx_setfilter(struct vx_softc *sc) { struct ifnet *ifp = sc->vx_ifp; VX_LOCK_ASSERT(sc); GO_WINDOW(1); /* Window 1 is operating window */ CSR_WRITE_2(sc, VX_COMMAND, SET_RX_FILTER | FIL_INDIVIDUAL | FIL_BRDCST | FIL_MULTICAST | ((ifp->if_flags & IFF_PROMISC) ? FIL_PROMISC : 0)); } static void vx_getlink(struct vx_softc *sc) { int n, k; GO_WINDOW(3); sc->vx_connectors = CSR_READ_2(sc, VX_W3_RESET_OPT) & 0x7f; for (n = 0, k = 0; k < VX_CONNECTORS; k++) { if (sc->vx_connectors & conn_tab[k].bit) { if (n > 0) printf("/"); printf("%s", conn_tab[k].name); n++; } } if (sc->vx_connectors == 0) { printf("no connectors!\n"); return; } GO_WINDOW(3); sc->vx_connector = (CSR_READ_4(sc, VX_W3_INTERNAL_CFG) & INTERNAL_CONNECTOR_MASK) >> INTERNAL_CONNECTOR_BITS; if (sc->vx_connector & 0x10) { sc->vx_connector &= 0x0f; printf("[*%s*]", conn_tab[(int)sc->vx_connector].name); printf(": disable 'auto select' with DOS util!\n"); } else { printf("[*%s*]\n", conn_tab[(int)sc->vx_connector].name); } } static void vx_setlink(struct vx_softc *sc) { struct ifnet *ifp = sc->vx_ifp; int i, j, k; char *reason, *warning; static int prev_flags; static signed char prev_conn = -1; VX_LOCK_ASSERT(sc); if (prev_conn == -1) prev_conn = sc->vx_connector; /* * S.B. * * The behavior has been slightly changed: * * if any of the flags link[0-2] is set and its connector is * physically present, the following connectors are used: * * link0 - AUI * highest precedence * link1 - BNC * link2 - UTP * lowest precedence * * If none of them is specified, then the * connector specified in the EEPROM is used * (if present on the card, or UTP if not). */ i = sc->vx_connector; /* default in EEPROM */ reason = "default"; warning = 0; if (ifp->if_flags & IFF_LINK0) { if (sc->vx_connectors & conn_tab[CONNECTOR_AUI].bit) { i = CONNECTOR_AUI; reason = "link0"; } else { warning = "aui not present! (link0)"; } } else if (ifp->if_flags & IFF_LINK1) { if (sc->vx_connectors & conn_tab[CONNECTOR_BNC].bit) { i = CONNECTOR_BNC; reason = "link1"; } else { warning = "bnc not present! (link1)"; } } else if (ifp->if_flags & IFF_LINK2) { if (sc->vx_connectors & conn_tab[CONNECTOR_UTP].bit) { i = CONNECTOR_UTP; reason = "link2"; } else { warning = "utp not present! (link2)"; } } else if ((sc->vx_connectors & conn_tab[(int)sc->vx_connector].bit) == 0) { warning = "strange connector type in EEPROM."; reason = "forced"; i = CONNECTOR_UTP; } /* Avoid unnecessary message. */ k = (prev_flags ^ ifp->if_flags) & (IFF_LINK0 | IFF_LINK1 | IFF_LINK2); if ((k != 0) || (prev_conn != i)) { if (warning != NULL) if_printf(ifp, "warning: %s\n", warning); if_printf(ifp, "selected %s. (%s)\n", conn_tab[i].name, reason); } /* Set the selected connector. */ GO_WINDOW(3); j = CSR_READ_4(sc, VX_W3_INTERNAL_CFG) & ~INTERNAL_CONNECTOR_MASK; CSR_WRITE_4(sc, VX_W3_INTERNAL_CFG, j | (i << INTERNAL_CONNECTOR_BITS)); /* First, disable all. */ CSR_WRITE_2(sc, VX_COMMAND, STOP_TRANSCEIVER); DELAY(800); GO_WINDOW(4); CSR_WRITE_2(sc, VX_W4_MEDIA_TYPE, 0); /* Second, enable the selected one. 
*/ switch (i) { case CONNECTOR_UTP: GO_WINDOW(4); CSR_WRITE_2(sc, VX_W4_MEDIA_TYPE, ENABLE_UTP); break; case CONNECTOR_BNC: CSR_WRITE_2(sc, VX_COMMAND, START_TRANSCEIVER); DELAY(800); break; case CONNECTOR_TX: case CONNECTOR_FX: GO_WINDOW(4); CSR_WRITE_2(sc, VX_W4_MEDIA_TYPE, LINKBEAT_ENABLE); break; default: /* AUI and MII fall here */ break; } GO_WINDOW(1); prev_flags = ifp->if_flags; prev_conn = i; } static void vx_start(struct ifnet *ifp) { struct vx_softc *sc = ifp->if_softc; VX_LOCK(sc); vx_start_locked(ifp); VX_UNLOCK(sc); } static void vx_start_locked(struct ifnet *ifp) { struct vx_softc *sc = ifp->if_softc; struct mbuf *m; int len, pad; VX_LOCK_ASSERT(sc); /* Don't transmit if interface is busy or not running */ if ((sc->vx_ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; startagain: /* Sneak a peek at the next packet */ m = ifp->if_snd.ifq_head; if (m == NULL) { return; } /* We need to use m->m_pkthdr.len, so require the header */ M_ASSERTPKTHDR(m); len = m->m_pkthdr.len; pad = (4 - len) & 3; /* * The 3c509 automatically pads short packets to minimum ethernet * length, but we drop packets that are too large. Perhaps we should * truncate them instead? */ if (len + pad > ETHER_MAX_LEN) { /* packet is obviously too large: toss it */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); IF_DEQUEUE(&ifp->if_snd, m); m_freem(m); goto readcheck; } VX_BUSY_WAIT; if (CSR_READ_2(sc, VX_W1_FREE_TX) < len + pad + 4) { CSR_WRITE_2(sc, VX_COMMAND, SET_TX_AVAIL_THRESH | ((len + pad + 4) >> 2)); /* not enough room in FIFO - make sure */ if (CSR_READ_2(sc, VX_W1_FREE_TX) < len + pad + 4) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->vx_timer = 1; return; } } CSR_WRITE_2(sc, VX_COMMAND, SET_TX_AVAIL_THRESH | (8188 >> 2)); IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) /* not really needed */ return; VX_BUSY_WAIT; CSR_WRITE_2(sc, VX_COMMAND, SET_TX_START_THRESH | ((len / 4 + sc->vx_tx_start_thresh) >> 2)); BPF_MTAP(sc->vx_ifp, m); /* * Do the output at splhigh() so that an interrupt from another device * won't cause a FIFO underrun. * * XXX: Can't enforce that anymore. */ CSR_WRITE_4(sc, VX_W1_TX_PIO_WR_1, len | TX_INDICATE); while (m) { if (m->m_len > 3) bus_space_write_multi_4(sc->vx_bst, sc->vx_bsh, VX_W1_TX_PIO_WR_1, (u_int32_t *)mtod(m, caddr_t), m->m_len / 4); if (m->m_len & 3) bus_space_write_multi_1(sc->vx_bst, sc->vx_bsh, VX_W1_TX_PIO_WR_1, mtod(m, caddr_t) + (m->m_len & ~3), m->m_len & 3); m = m_free(m); } while (pad--) CSR_WRITE_1(sc, VX_W1_TX_PIO_WR_1, 0); /* Padding */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); sc->vx_timer = 1; readcheck: if ((CSR_READ_2(sc, VX_W1_RX_STATUS) & ERR_INCOMPLETE) == 0) { /* We received a complete packet. */ if ((CSR_READ_2(sc, VX_STATUS) & S_INTR_LATCH) == 0) { /* * No interrupt, read the packet and continue * Is this supposed to happen? Is my motherboard * completely busted? */ vx_read(sc); } else /* * Got an interrupt, return so that it gets * serviced. */ return; } else { /* Check if we are stuck and reset [see XXX comment] */ if (vx_status(sc)) { if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "adapter reset\n"); vx_reset(sc); } } goto startagain; } /* * XXX: The 3c509 card can get in a mode where both the fifo status bit * FIFOS_RX_OVERRUN and the status bit ERR_INCOMPLETE are set * We detect this situation and we reset the adapter. * It happens at times when there is a lot of broadcast traffic * on the cable (once in a blue moon). 
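 *
 * An aside on the PIO arithmetic in vx_start_locked() above: frames
 * are padded out to a 32-bit boundary with pad = (4 - len) & 3, so a
 * 61-byte frame gets pad = 3 and 64 bytes hit the FIFO.  Each mbuf is
 * pushed as whole 32-bit words first, with the 0-3 leftover bytes
 * written individually afterwards; a sketch of that split:
 *
 *	bus_space_write_multi_4(bst, bsh, reg,
 *	    (u_int32_t *)data, len / 4);
 *	if (len & 3)
 *		bus_space_write_multi_1(bst, bsh, reg,
 *		    data + (len & ~3), len & 3);
 *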
*/ static int vx_status(struct vx_softc *sc) { struct ifnet *ifp; int fifost; VX_LOCK_ASSERT(sc); /* * Check the FIFO status and act accordingly */ GO_WINDOW(4); fifost = CSR_READ_2(sc, VX_W4_FIFO_DIAG); GO_WINDOW(1); ifp = sc->vx_ifp; if (fifost & FIFOS_RX_UNDERRUN) { if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "RX underrun\n"); vx_reset(sc); return 0; } if (fifost & FIFOS_RX_STATUS_OVERRUN) { if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "RX Status overrun\n"); return 1; } if (fifost & FIFOS_RX_OVERRUN) { if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "RX overrun\n"); return 1; } if (fifost & FIFOS_TX_OVERRUN) { if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "TX overrun\n"); vx_reset(sc); return 0; } return 0; } static void vx_txstat(struct vx_softc *sc) { struct ifnet *ifp; int i; VX_LOCK_ASSERT(sc); /* * We need to read+write TX_STATUS until we get a 0 status * in order to turn off the interrupt flag. */ ifp = sc->vx_ifp; while ((i = CSR_READ_1(sc, VX_W1_TX_STATUS)) & TXS_COMPLETE) { CSR_WRITE_1(sc, VX_W1_TX_STATUS, 0x0); if (i & TXS_JABBER) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "jabber (%x)\n", i); vx_reset(sc); } else if (i & TXS_UNDERRUN) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "fifo underrun (%x) @%d\n", i, sc->vx_tx_start_thresh); if (sc->vx_tx_succ_ok < 100) sc->vx_tx_start_thresh = min(ETHER_MAX_LEN, sc->vx_tx_start_thresh + 20); sc->vx_tx_succ_ok = 0; vx_reset(sc); } else if (i & TXS_MAX_COLLISION) { if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); CSR_WRITE_2(sc, VX_COMMAND, TX_ENABLE); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } else sc->vx_tx_succ_ok = (sc->vx_tx_succ_ok + 1) & 127; } } void vx_intr(void *voidsc) { short status; struct vx_softc *sc = voidsc; struct ifnet *ifp = sc->vx_ifp; VX_LOCK(sc); for (;;) { CSR_WRITE_2(sc, VX_COMMAND, C_INTR_LATCH); status = CSR_READ_2(sc, VX_STATUS); if ((status & (S_TX_COMPLETE | S_TX_AVAIL | S_RX_COMPLETE | S_CARD_FAILURE)) == 0) break; /* * Acknowledge any interrupts. It's important that we do this * first, since there would otherwise be a race condition. * Due to the i386 interrupt queueing, we may get spurious * interrupts occasionally. */ CSR_WRITE_2(sc, VX_COMMAND, ACK_INTR | status); if (status & S_RX_COMPLETE) vx_read(sc); if (status & S_TX_AVAIL) { sc->vx_timer = 0; sc->vx_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; vx_start_locked(sc->vx_ifp); } if (status & S_CARD_FAILURE) { if_printf(ifp, "adapter failure (%x)\n", status); sc->vx_timer = 0; vx_reset(sc); break; } if (status & S_TX_COMPLETE) { sc->vx_timer = 0; vx_txstat(sc); vx_start_locked(ifp); } } VX_UNLOCK(sc); /* no more interrupts */ return; } static void vx_read(struct vx_softc *sc) { struct ifnet *ifp = sc->vx_ifp; struct mbuf *m; struct ether_header *eh; u_int len; VX_LOCK_ASSERT(sc); len = CSR_READ_2(sc, VX_W1_RX_STATUS); again: if (ifp->if_flags & IFF_DEBUG) { int err = len & ERR_MASK; char *s = NULL; if (len & ERR_INCOMPLETE) s = "incomplete packet"; else if (err == ERR_OVERRUN) s = "packet overrun"; else if (err == ERR_RUNT) s = "runt packet"; else if (err == ERR_ALIGNMENT) s = "bad alignment"; else if (err == ERR_CRC) s = "bad crc"; else if (err == ERR_OVERSIZE) s = "oversized packet"; else if (err == ERR_DRIBBLE) s = "dribble bits"; if (s) if_printf(ifp, "%s\n", s); } if (len & ERR_INCOMPLETE) return; if (len & ERR_RX) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto abort; } len &= RX_BYTES_MASK; /* Lower 11 bits = RX bytes. */ /* Pull packet off interface. 
*/ m = vx_get(sc, len); if (m == 0) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto abort; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); { struct mbuf *m0; m0 = m_devget(mtod(m, char *), m->m_pkthdr.len, ETHER_ALIGN, ifp, NULL); if (m0 == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto abort; } m_freem(m); m = m0; } /* We assume the header fit entirely in one mbuf. */ eh = mtod(m, struct ether_header *); /* * XXX: Some cards seem to be in promiscuous mode all the time. * We need to make sure we always get only our own stuff. * bleah! */ if (!(ifp->if_flags & IFF_PROMISC) && (eh->ether_dhost[0] & 1) == 0 /* !mcast and !bcast */ && bcmp(eh->ether_dhost, IF_LLADDR(sc->vx_ifp), ETHER_ADDR_LEN) != 0) { m_freem(m); return; } VX_UNLOCK(sc); (*ifp->if_input)(ifp, m); VX_LOCK(sc); /* * In periods of high traffic we can actually receive enough * packets so that the fifo overrun bit will be set at this point, * even though we just read a packet. In this case we * are not going to receive any more interrupts. We check for * this condition and read again until the fifo is not full. * We could simplify this test by not using vx_status(), but * rechecking the RX_STATUS register directly. This test could * result in unnecessary looping in cases where there is a new * packet but the fifo is not full, but it will not fix the * stuck behavior. * * Even with this improvement, we still get packet overrun errors * which are hurting performance. Maybe when I get some more time * I'll modify vx_read() so that it can handle RX_EARLY interrupts. */ if (vx_status(sc)) { len = CSR_READ_2(sc, VX_W1_RX_STATUS); /* Check if we are stuck and reset [see XXX comment] */ if (len & ERR_INCOMPLETE) { if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "adapter reset\n"); vx_reset(sc); return; } goto again; } return; abort: CSR_WRITE_2(sc, VX_COMMAND, RX_DISCARD_TOP_PACK); } static struct mbuf * vx_get(struct vx_softc *sc, u_int totlen) { struct ifnet *ifp = sc->vx_ifp; struct mbuf *top, **mp, *m; int len; VX_LOCK_ASSERT(sc); m = sc->vx_mb[sc->vx_next_mb]; sc->vx_mb[sc->vx_next_mb] = NULL; if (m == NULL) { MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return NULL; } else { /* If the queue is no longer full, refill. */ if (sc->vx_last_mb == sc->vx_next_mb && sc->vx_buffill_pending == 0) { callout_reset(&sc->vx_callout, hz / 100, vx_mbuf_fill, sc); sc->vx_buffill_pending = 1; } /* Convert one of our saved mbufs. */ sc->vx_next_mb = (sc->vx_next_mb + 1) % MAX_MBS; m->m_data = m->m_pktdat; m->m_flags = M_PKTHDR; bzero(&m->m_pkthdr, sizeof(m->m_pkthdr)); } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = totlen; len = MHLEN; top = NULL; mp = &top; /* * We read the packet at splhigh() so that an interrupt from another * device doesn't cause the card's buffer to overflow while we're * reading it. We may still lose packets at other times. * * XXX: Can't enforce this anymore. */ /* * Since we don't set allowLargePackets bit in MacControl register, * we can assume that totlen <= 1500 bytes. * The while loop will be performed iff we have a packet with * MLEN < m_len < MINCLSIZE. 
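 *
 * With the newer allocation API the sizing decision can be handed to
 * the allocator instead; a sketch only, not a drop-in replacement,
 * since this driver recycles preallocated mbufs from vx_mb[]:
 *
 *	m = m_get2(totlen, M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (NULL);
 *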
*/ while (totlen > 0) { if (top) { m = sc->vx_mb[sc->vx_next_mb]; sc->vx_mb[sc->vx_next_mb] = NULL; if (m == NULL) { MGET(m, M_NOWAIT, MT_DATA); if (m == NULL) { m_freem(top); return NULL; } } else { sc->vx_next_mb = (sc->vx_next_mb + 1) % MAX_MBS; } len = MLEN; } if (totlen >= MINCLSIZE) { - MCLGET(m, M_NOWAIT); - if (m->m_flags & M_EXT) + if (MCLGET(m, M_NOWAIT)) len = MCLBYTES; } len = min(totlen, len); if (len > 3) bus_space_read_multi_4(sc->vx_bst, sc->vx_bsh, VX_W1_RX_PIO_RD_1, mtod(m, u_int32_t *), len / 4); if (len & 3) { bus_space_read_multi_1(sc->vx_bst, sc->vx_bsh, VX_W1_RX_PIO_RD_1, mtod(m, u_int8_t *) + (len & ~3), len & 3); } m->m_len = len; totlen -= len; *mp = m; mp = &m->m_next; } CSR_WRITE_2(sc, VX_COMMAND, RX_DISCARD_TOP_PACK); return top; } static int vx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct vx_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int error = 0; switch (cmd) { case SIOCSIFFLAGS: VX_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { /* * If interface is marked down and it is running, then * stop it. */ vx_stop(sc); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } else if ((ifp->if_flags & IFF_UP) != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { /* * If interface is marked up and it is stopped, then * start it. */ vx_init_locked(sc); } else { /* * deal with flags changes: * IFF_MULTICAST, IFF_PROMISC, * IFF_LINK0, IFF_LINK1, */ vx_setfilter(sc); vx_setlink(sc); } VX_UNLOCK(sc); break; case SIOCSIFMTU: /* * Set the interface MTU. */ VX_LOCK(sc); if (ifr->ifr_mtu > ETHERMTU) { error = EINVAL; } else { ifp->if_mtu = ifr->ifr_mtu; } VX_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware filter * accordingly. 
*/ VX_LOCK(sc); vx_reset(sc); VX_UNLOCK(sc); error = 0; break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void vx_reset(struct vx_softc *sc) { VX_LOCK_ASSERT(sc); vx_stop(sc); vx_init_locked(sc); } static void vx_watchdog(void *arg) { struct vx_softc *sc; struct ifnet *ifp; sc = arg; VX_LOCK_ASSERT(sc); callout_reset(&sc->vx_watchdog, hz, vx_watchdog, sc); if (sc->vx_timer == 0 || --sc->vx_timer > 0) return; ifp = sc->vx_ifp; if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "device timeout\n"); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; vx_start_locked(ifp); vx_intr(sc); } void vx_stop(struct vx_softc *sc) { VX_LOCK_ASSERT(sc); sc->vx_timer = 0; callout_stop(&sc->vx_watchdog); CSR_WRITE_2(sc, VX_COMMAND, RX_DISABLE); CSR_WRITE_2(sc, VX_COMMAND, RX_DISCARD_TOP_PACK); VX_BUSY_WAIT; CSR_WRITE_2(sc, VX_COMMAND, TX_DISABLE); CSR_WRITE_2(sc, VX_COMMAND, STOP_TRANSCEIVER); DELAY(800); CSR_WRITE_2(sc, VX_COMMAND, RX_RESET); VX_BUSY_WAIT; CSR_WRITE_2(sc, VX_COMMAND, TX_RESET); VX_BUSY_WAIT; CSR_WRITE_2(sc, VX_COMMAND, C_INTR_LATCH); CSR_WRITE_2(sc, VX_COMMAND, SET_RD_0_MASK); CSR_WRITE_2(sc, VX_COMMAND, SET_INTR_MASK); CSR_WRITE_2(sc, VX_COMMAND, SET_RX_FILTER); vx_mbuf_empty(sc); } int vx_busy_eeprom(struct vx_softc *sc) { int j, i = 100; while (i--) { j = CSR_READ_2(sc, VX_W0_EEPROM_COMMAND); if (j & EEPROM_BUSY) DELAY(100); else break; } if (!i) { if_printf(sc->vx_ifp, "eeprom failed to come ready\n"); return (1); } return (0); } static void vx_mbuf_fill(void *sp) { struct vx_softc *sc = (struct vx_softc *)sp; int i; VX_LOCK_ASSERT(sc); i = sc->vx_last_mb; do { if (sc->vx_mb[i] == NULL) MGET(sc->vx_mb[i], M_NOWAIT, MT_DATA); if (sc->vx_mb[i] == NULL) break; i = (i + 1) % MAX_MBS; } while (i != sc->vx_next_mb); sc->vx_last_mb = i; /* If the queue was not filled, try again. */ if (sc->vx_last_mb != sc->vx_next_mb) { callout_reset(&sc->vx_callout, hz / 100, vx_mbuf_fill, sc); sc->vx_buffill_pending = 1; } else { sc->vx_buffill_pending = 0; } } static void vx_mbuf_empty(struct vx_softc *sc) { int i; VX_LOCK_ASSERT(sc); for (i = 0; i < MAX_MBS; i++) { if (sc->vx_mb[i]) { m_freem(sc->vx_mb[i]); sc->vx_mb[i] = NULL; } } sc->vx_last_mb = sc->vx_next_mb = 0; if (sc->vx_buffill_pending != 0) callout_stop(&sc->vx_callout); } Index: head/sys/dev/wb/if_wb.c =================================================================== --- head/sys/dev/wb/if_wb.c (revision 276749) +++ head/sys/dev/wb/if_wb.c (revision 276750) @@ -1,1637 +1,1636 @@ /*- * Copyright (c) 1997, 1998 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
 * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Winbond fast ethernet PCI NIC driver * * Supports various cheap network adapters based on the Winbond W89C840F * fast ethernet controller chip. This includes adapters manufactured by * Winbond itself and some made by Linksys. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The Winbond W89C840F chip is a bus master; in some ways it resembles * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has * one major difference which is that while the registers do many of * the same things as a tulip adapter, the offsets are different: where * tulip registers are typically spaced 8 bytes apart, the Winbond * registers are spaced 4 bytes apart. The receiver filter is also * programmed differently. * * Like the tulip, the Winbond chip uses small descriptors containing * a status word, a control word and 32-bit areas that can either be used * to point to two external data blocks, or to point to a single block * and another descriptor in a linked list. Descriptors can be grouped * together in blocks to form fixed length rings or can be chained * together in linked lists. A single packet may be spread out over * several descriptors if necessary. * * For the receive ring, this driver uses a linked list of descriptors, * each pointing to a single mbuf cluster buffer, which is large enough * to hold an entire packet. The linked list is looped back to create a * closed ring. * * For transmission, the driver creates a linked list of 'super descriptors' * which each contain several individual descriptors linked together. * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we * abuse as fragment pointers. This allows us to use a buffer management * scheme very similar to that used in the ThunderLAN and Etherlink XL * drivers. * * Autonegotiation is performed using the external PHY via the MII bus. * The sample boards I have all use a Davicom PHY. * * Note: the author of the Linux driver for the Winbond chip alludes * to some sort of flaw in the chip's design that seems to mandate some * drastic workaround which significantly impairs transmit performance. * I have no idea what he's on about: transmit performance with all * three of my test boards seems fine. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include /* "device miibus" required. See GENERIC if you get errors here. 
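 *
 * Returning to the descriptor format described above, an illustrative
 * shape of one such descriptor, with field names matching the accessors
 * used below; the field order here is a guess and the authoritative
 * layout lives in if_wbreg.h:
 *
 *	struct wb_desc {
 *		u_int32_t	wb_status;	(ownership/error bits, RX length)
 *		u_int32_t	wb_ctl;		(buffer length, link/frag flags)
 *		u_int32_t	wb_data;	(physical address of data buffer)
 *		u_int32_t	wb_next;	(physical address of next descriptor)
 *	};
 *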
*/ #include "miibus_if.h" #define WB_USEIOSPACE #include MODULE_DEPEND(wb, pci, 1, 1, 1); MODULE_DEPEND(wb, ether, 1, 1, 1); MODULE_DEPEND(wb, miibus, 1, 1, 1); /* * Various supported device vendors/types and their names. */ static const struct wb_type wb_devs[] = { { WB_VENDORID, WB_DEVICEID_840F, "Winbond W89C840F 10/100BaseTX" }, { CP_VENDORID, CP_DEVICEID_RL100, "Compex RL100-ATX 10/100baseTX" }, { 0, 0, NULL } }; static int wb_probe(device_t); static int wb_attach(device_t); static int wb_detach(device_t); static void wb_bfree(struct mbuf *, void *addr, void *args); static int wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *, struct mbuf *); static int wb_encap(struct wb_softc *, struct wb_chain *, struct mbuf *); static void wb_rxeof(struct wb_softc *); static void wb_rxeoc(struct wb_softc *); static void wb_txeof(struct wb_softc *); static void wb_txeoc(struct wb_softc *); static void wb_intr(void *); static void wb_tick(void *); static void wb_start(struct ifnet *); static void wb_start_locked(struct ifnet *); static int wb_ioctl(struct ifnet *, u_long, caddr_t); static void wb_init(void *); static void wb_init_locked(struct wb_softc *); static void wb_stop(struct wb_softc *); static void wb_watchdog(struct wb_softc *); static int wb_shutdown(device_t); static int wb_ifmedia_upd(struct ifnet *); static void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void wb_eeprom_putbyte(struct wb_softc *, int); static void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *); static void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int); static void wb_setcfg(struct wb_softc *, u_int32_t); static void wb_setmulti(struct wb_softc *); static void wb_reset(struct wb_softc *); static void wb_fixmedia(struct wb_softc *); static int wb_list_rx_init(struct wb_softc *); static int wb_list_tx_init(struct wb_softc *); static int wb_miibus_readreg(device_t, int, int); static int wb_miibus_writereg(device_t, int, int, int); static void wb_miibus_statchg(device_t); /* * MII bit-bang glue */ static uint32_t wb_mii_bitbang_read(device_t); static void wb_mii_bitbang_write(device_t, uint32_t); static const struct mii_bitbang_ops wb_mii_bitbang_ops = { wb_mii_bitbang_read, wb_mii_bitbang_write, { WB_SIO_MII_DATAOUT, /* MII_BIT_MDO */ WB_SIO_MII_DATAIN, /* MII_BIT_MDI */ WB_SIO_MII_CLK, /* MII_BIT_MDC */ WB_SIO_MII_DIR, /* MII_BIT_DIR_HOST_PHY */ 0, /* MII_BIT_DIR_PHY_HOST */ } }; #ifdef WB_USEIOSPACE #define WB_RES SYS_RES_IOPORT #define WB_RID WB_PCI_LOIO #else #define WB_RES SYS_RES_MEMORY #define WB_RID WB_PCI_LOMEM #endif static device_method_t wb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, wb_probe), DEVMETHOD(device_attach, wb_attach), DEVMETHOD(device_detach, wb_detach), DEVMETHOD(device_shutdown, wb_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, wb_miibus_readreg), DEVMETHOD(miibus_writereg, wb_miibus_writereg), DEVMETHOD(miibus_statchg, wb_miibus_statchg), DEVMETHOD_END }; static driver_t wb_driver = { "wb", wb_methods, sizeof(struct wb_softc) }; static devclass_t wb_devclass; DRIVER_MODULE(wb, pci, wb_driver, wb_devclass, 0, 0); DRIVER_MODULE(miibus, wb, miibus_driver, miibus_devclass, 0, 0); #define WB_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) | (x)) #define WB_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) \ CSR_WRITE_4(sc, WB_SIO, \ CSR_READ_4(sc, WB_SIO) | (x)) #define SIO_CLR(x) \ CSR_WRITE_4(sc, WB_SIO, \ CSR_READ_4(sc, WB_SIO) & ~(x)) /* * Send a read command and address to 
the EEPROM, check for ACK. */ static void wb_eeprom_putbyte(sc, addr) struct wb_softc *sc; int addr; { register int d, i; d = addr | WB_EECMD_READ; /* * Feed in each bit and strobe the clock. */ for (i = 0x400; i; i >>= 1) { if (d & i) { SIO_SET(WB_SIO_EE_DATAIN); } else { SIO_CLR(WB_SIO_EE_DATAIN); } DELAY(100); SIO_SET(WB_SIO_EE_CLK); DELAY(150); SIO_CLR(WB_SIO_EE_CLK); DELAY(100); } } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void wb_eeprom_getword(sc, addr, dest) struct wb_softc *sc; int addr; u_int16_t *dest; { register int i; u_int16_t word = 0; /* Enter EEPROM access mode. */ CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS); /* * Send address of word we want to read. */ wb_eeprom_putbyte(sc, addr); CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { SIO_SET(WB_SIO_EE_CLK); DELAY(100); if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT) word |= i; SIO_CLR(WB_SIO_EE_CLK); DELAY(100); } /* Turn off EEPROM access mode. */ CSR_WRITE_4(sc, WB_SIO, 0); *dest = word; } /* * Read a sequence of words from the EEPROM. */ static void wb_read_eeprom(sc, dest, off, cnt, swap) struct wb_softc *sc; caddr_t dest; int off; int cnt; int swap; { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { wb_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } } /* * Read the MII serial port for the MII bit-bang module. */ static uint32_t wb_mii_bitbang_read(device_t dev) { struct wb_softc *sc; uint32_t val; sc = device_get_softc(dev); val = CSR_READ_4(sc, WB_SIO); CSR_BARRIER(sc, WB_SIO, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (val); } /* * Write the MII serial port for the MII bit-bang module. */ static void wb_mii_bitbang_write(device_t dev, uint32_t val) { struct wb_softc *sc; sc = device_get_softc(dev); CSR_WRITE_4(sc, WB_SIO, val); CSR_BARRIER(sc, WB_SIO, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } static int wb_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { return (mii_bitbang_readreg(dev, &wb_mii_bitbang_ops, phy, reg)); } static int wb_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { mii_bitbang_writereg(dev, &wb_mii_bitbang_ops, phy, reg, data); return(0); } static void wb_miibus_statchg(dev) device_t dev; { struct wb_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->wb_miibus); wb_setcfg(sc, mii->mii_media_active); } /* * Program the 64-bit multicast hash filter. 
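 * Each multicast address selects one of 64 filter bits: the bit index
 * is the top six bits of the inverted big-endian CRC of the address,
 * split across the two 32-bit MAR registers.  In outline:
 *
 *	h = ~ether_crc32_be(addr, ETHER_ADDR_LEN) >> 26;
 *	if (h < 32)
 *		hashes[0] |= 1 << h;		(WB_MAR0)
 *	else
 *		hashes[1] |= 1 << (h - 32);	(WB_MAR1)
 *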
*/ static void wb_setmulti(sc) struct wb_softc *sc; { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; u_int32_t rxfilt; int mcnt = 0; ifp = sc->wb_ifp; rxfilt = CSR_READ_4(sc, WB_NETCFG); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= WB_NETCFG_RX_MULTI; CSR_WRITE_4(sc, WB_NETCFG, rxfilt); CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF); return; } /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, WB_MAR0, 0); CSR_WRITE_4(sc, WB_MAR1, 0); /* now program new ones */ if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } if_maddr_runlock(ifp); if (mcnt) rxfilt |= WB_NETCFG_RX_MULTI; else rxfilt &= ~WB_NETCFG_RX_MULTI; CSR_WRITE_4(sc, WB_MAR0, hashes[0]); CSR_WRITE_4(sc, WB_MAR1, hashes[1]); CSR_WRITE_4(sc, WB_NETCFG, rxfilt); } /* * The Winbond manual states that in order to fiddle with the * 'full-duplex' and '100Mbps' bits in the netconfig register, we * first have to put the transmit and/or receive logic in the idle state. */ static void wb_setcfg(sc, media) struct wb_softc *sc; u_int32_t media; { int i, restart = 0; if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) { restart = 1; WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)); for (i = 0; i < WB_TIMEOUT; i++) { DELAY(10); if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) && (CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE)) break; } if (i == WB_TIMEOUT) device_printf(sc->wb_dev, "failed to force tx and rx to idle state\n"); } if (IFM_SUBTYPE(media) == IFM_10_T) WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS); else WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS); if ((media & IFM_GMASK) == IFM_FDX) WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX); else WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX); if (restart) WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON); } static void wb_reset(sc) struct wb_softc *sc; { register int i; struct mii_data *mii; struct mii_softc *miisc; CSR_WRITE_4(sc, WB_NETCFG, 0); CSR_WRITE_4(sc, WB_BUSCTL, 0); CSR_WRITE_4(sc, WB_TXADDR, 0); CSR_WRITE_4(sc, WB_RXADDR, 0); WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET); WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET); for (i = 0; i < WB_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET)) break; } if (i == WB_TIMEOUT) device_printf(sc->wb_dev, "reset never completed!\n"); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); if (sc->wb_miibus == NULL) return; mii = device_get_softc(sc->wb_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); } static void wb_fixmedia(sc) struct wb_softc *sc; { struct mii_data *mii = NULL; struct ifnet *ifp; u_int32_t media; mii = device_get_softc(sc->wb_miibus); ifp = sc->wb_ifp; mii_pollstat(mii); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) { media = mii->mii_media_active & ~IFM_10_T; media |= IFM_100_TX; } else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { media = mii->mii_media_active & ~IFM_100_TX; media |= IFM_10_T; } else return; ifmedia_set(&mii->mii_media, media); } /* * Probe for a Winbond chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. 
*/ static int wb_probe(dev) device_t dev; { const struct wb_type *t; t = wb_devs; while(t->wb_name != NULL) { if ((pci_get_vendor(dev) == t->wb_vid) && (pci_get_device(dev) == t->wb_did)) { device_set_desc(dev, t->wb_name); return (BUS_PROBE_DEFAULT); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int wb_attach(dev) device_t dev; { u_char eaddr[ETHER_ADDR_LEN]; struct wb_softc *sc; struct ifnet *ifp; int error = 0, rid; sc = device_get_softc(dev); sc->wb_dev = dev; mtx_init(&sc->wb_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->wb_stat_callout, &sc->wb_mtx, 0); /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = WB_RID; sc->wb_res = bus_alloc_resource_any(dev, WB_RES, &rid, RF_ACTIVE); if (sc->wb_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } /* Allocate interrupt */ rid = 0; sc->wb_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->wb_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Save the cache line size. */ sc->wb_cachesize = pci_read_config(dev, WB_PCI_CACHELEN, 4) & 0xFF; /* Reset the adapter. */ wb_reset(sc); /* * Get station address from the EEPROM. */ wb_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 0); sc->wb_ldata = contigmalloc(sizeof(struct wb_list_data) + 8, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->wb_ldata == NULL) { device_printf(dev, "no memory for list buffers!\n"); error = ENXIO; goto fail; } bzero(sc->wb_ldata, sizeof(struct wb_list_data)); ifp = sc->wb_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = wb_ioctl; ifp->if_start = wb_start; ifp->if_init = wb_init; ifp->if_snd.ifq_maxlen = WB_TX_LIST_CNT - 1; /* * Do MII setup. */ error = mii_attach(dev, &sc->wb_miibus, ifp, wb_ifmedia_upd, wb_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->wb_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, wb_intr, sc, &sc->wb_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) wb_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int wb_detach(dev) device_t dev; { struct wb_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->wb_mtx), ("wb mutex not initialized")); ifp = sc->wb_ifp; /* * Delete any miibus and phy devices attached to this interface. * This should only be done if attach succeeded. 
*/ if (device_is_attached(dev)) { ether_ifdetach(ifp); WB_LOCK(sc); wb_stop(sc); WB_UNLOCK(sc); callout_drain(&sc->wb_stat_callout); } if (sc->wb_miibus) device_delete_child(dev, sc->wb_miibus); bus_generic_detach(dev); if (sc->wb_intrhand) bus_teardown_intr(dev, sc->wb_irq, sc->wb_intrhand); if (sc->wb_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->wb_irq); if (sc->wb_res) bus_release_resource(dev, WB_RES, WB_RID, sc->wb_res); if (ifp) if_free(ifp); if (sc->wb_ldata) { contigfree(sc->wb_ldata, sizeof(struct wb_list_data) + 8, M_DEVBUF); } mtx_destroy(&sc->wb_mtx); return(0); } /* * Initialize the transmit descriptors. */ static int wb_list_tx_init(sc) struct wb_softc *sc; { struct wb_chain_data *cd; struct wb_list_data *ld; int i; cd = &sc->wb_cdata; ld = sc->wb_ldata; for (i = 0; i < WB_TX_LIST_CNT; i++) { cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i]; if (i == (WB_TX_LIST_CNT - 1)) { cd->wb_tx_chain[i].wb_nextdesc = &cd->wb_tx_chain[0]; } else { cd->wb_tx_chain[i].wb_nextdesc = &cd->wb_tx_chain[i + 1]; } } cd->wb_tx_free = &cd->wb_tx_chain[0]; cd->wb_tx_tail = cd->wb_tx_head = NULL; return(0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int wb_list_rx_init(sc) struct wb_softc *sc; { struct wb_chain_data *cd; struct wb_list_data *ld; int i; cd = &sc->wb_cdata; ld = sc->wb_ldata; for (i = 0; i < WB_RX_LIST_CNT; i++) { cd->wb_rx_chain[i].wb_ptr = (struct wb_desc *)&ld->wb_rx_list[i]; cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i]; if (wb_newbuf(sc, &cd->wb_rx_chain[i], NULL) == ENOBUFS) return(ENOBUFS); if (i == (WB_RX_LIST_CNT - 1)) { cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0]; ld->wb_rx_list[i].wb_next = vtophys(&ld->wb_rx_list[0]); } else { cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[i + 1]; ld->wb_rx_list[i].wb_next = vtophys(&ld->wb_rx_list[i + 1]); } } cd->wb_rx_head = &cd->wb_rx_chain[0]; return(0); } static void wb_bfree(struct mbuf *m, void *buf, void *args) { } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int wb_newbuf(sc, c, m) struct wb_softc *sc; struct wb_chain_onefrag *c; struct mbuf *m; { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_NOWAIT, MT_DATA); if (m_new == NULL) return(ENOBUFS); m_new->m_data = c->wb_buf; m_new->m_pkthdr.len = m_new->m_len = WB_BUFBYTES; MEXTADD(m_new, c->wb_buf, WB_BUFBYTES, wb_bfree, c->wb_buf, NULL, 0, EXT_NET_DRV); } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = WB_BUFBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, sizeof(u_int64_t)); c->wb_mbuf = m_new; c->wb_ptr->wb_data = vtophys(mtod(m_new, caddr_t)); c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | 1536; c->wb_ptr->wb_status = WB_RXSTAT; return(0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
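 * m_devget(9), used in the receive loop below, copies the frame out of
 * the driver-owned DMA buffer into a freshly allocated chain, offset
 * by ETHER_ALIGN bytes so that the IP header ends up 32-bit aligned:
 *
 *	m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, NULL);
 *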
*/ static void wb_rxeof(sc) struct wb_softc *sc; { struct mbuf *m = NULL; struct ifnet *ifp; struct wb_chain_onefrag *cur_rx; int total_len = 0; u_int32_t rxstat; WB_LOCK_ASSERT(sc); ifp = sc->wb_ifp; while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) & WB_RXSTAT_OWN)) { struct mbuf *m0 = NULL; cur_rx = sc->wb_cdata.wb_rx_head; sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc; m = cur_rx->wb_mbuf; if ((rxstat & WB_RXSTAT_MIIERR) || (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) || (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > 1536) || !(rxstat & WB_RXSTAT_LASTFRAG) || !(rxstat & WB_RXSTAT_RXCMP)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); wb_newbuf(sc, cur_rx, m); device_printf(sc->wb_dev, "receiver babbling: possible chip bug," " forcing reset\n"); wb_fixmedia(sc); wb_reset(sc); wb_init_locked(sc); return; } if (rxstat & WB_RXSTAT_RXERR) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); wb_newbuf(sc, cur_rx, m); break; } /* No errors; receive the packet. */ total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status); /* * XXX The Winbond chip includes the CRC with every * received frame, and there's no way to turn this * behavior off (at least, I can't find anything in * the manual that explains how to do it) so we have * to trim off the CRC manually. */ total_len -= ETHER_CRC_LEN; m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, NULL); wb_newbuf(sc, cur_rx, m); if (m0 == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); break; } m = m0; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); WB_UNLOCK(sc); (*ifp->if_input)(ifp, m); WB_LOCK(sc); } } static void wb_rxeoc(sc) struct wb_softc *sc; { wb_rxeof(sc); WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0])); WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND) CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void wb_txeof(sc) struct wb_softc *sc; { struct wb_chain *cur_tx; struct ifnet *ifp; ifp = sc->wb_ifp; /* Clear the timeout timer. */ sc->wb_timer = 0; if (sc->wb_cdata.wb_tx_head == NULL) return; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) { u_int32_t txstat; cur_tx = sc->wb_cdata.wb_tx_head; txstat = WB_TXSTATUS(cur_tx); if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT) break; if (txstat & WB_TXSTAT_TXERR) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if (txstat & WB_TXSTAT_ABORT) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); if (txstat & WB_TXSTAT_LATECOLL) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); } if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & WB_TXSTAT_COLLCNT) >> 3); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); m_freem(cur_tx->wb_mbuf); cur_tx->wb_mbuf = NULL; if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) { sc->wb_cdata.wb_tx_head = NULL; sc->wb_cdata.wb_tx_tail = NULL; break; } sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc; } } /* * TX 'end of channel' interrupt handler. 
*/ static void wb_txeoc(sc) struct wb_softc *sc; { struct ifnet *ifp; ifp = sc->wb_ifp; sc->wb_timer = 0; if (sc->wb_cdata.wb_tx_head == NULL) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->wb_cdata.wb_tx_tail = NULL; } else { if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) { WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN; sc->wb_timer = 5; CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF); } } } static void wb_intr(arg) void *arg; { struct wb_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; WB_LOCK(sc); ifp = sc->wb_ifp; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { WB_UNLOCK(sc); return; } /* Disable interrupts. */ CSR_WRITE_4(sc, WB_IMR, 0x00000000); for (;;) { status = CSR_READ_4(sc, WB_ISR); if (status) CSR_WRITE_4(sc, WB_ISR, status); if ((status & WB_INTRS) == 0) break; if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); wb_reset(sc); if (status & WB_ISR_RX_ERR) wb_fixmedia(sc); wb_init_locked(sc); continue; } if (status & WB_ISR_RX_OK) wb_rxeof(sc); if (status & WB_ISR_RX_IDLE) wb_rxeoc(sc); if (status & WB_ISR_TX_OK) wb_txeof(sc); if (status & WB_ISR_TX_NOBUF) wb_txeoc(sc); if (status & WB_ISR_TX_IDLE) { wb_txeof(sc); if (sc->wb_cdata.wb_tx_head != NULL) { WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF); } } if (status & WB_ISR_TX_UNDERRUN) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); wb_txeof(sc); WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); /* Jack up TX threshold */ sc->wb_txthresh += WB_TXTHRESH_CHUNK; WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH); WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh)); WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); } if (status & WB_ISR_BUS_ERR) { wb_reset(sc); wb_init_locked(sc); } } /* Re-enable interrupts. */ CSR_WRITE_4(sc, WB_IMR, WB_INTRS); if (ifp->if_snd.ifq_head != NULL) { wb_start_locked(ifp); } WB_UNLOCK(sc); } static void wb_tick(xsc) void *xsc; { struct wb_softc *sc; struct mii_data *mii; sc = xsc; WB_LOCK_ASSERT(sc); mii = device_get_softc(sc->wb_miibus); mii_tick(mii); if (sc->wb_timer > 0 && --sc->wb_timer == 0) wb_watchdog(sc); callout_reset(&sc->wb_stat_callout, hz, wb_tick, sc); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int wb_encap(sc, c, m_head) struct wb_softc *sc; struct wb_chain *c; struct mbuf *m_head; { int frag = 0; struct wb_desc *f = NULL; int total_len; struct mbuf *m; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ m = m_head; total_len = 0; for (m = m_head, frag = 0; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (frag == WB_MAXFRAGS) break; total_len += m->m_len; f = &c->wb_ptr->wb_frag[frag]; f->wb_ctl = WB_TXCTL_TLINK | m->m_len; if (frag == 0) { f->wb_ctl |= WB_TXCTL_FIRSTFRAG; f->wb_status = 0; } else f->wb_status = WB_TXSTAT_OWN; f->wb_next = vtophys(&c->wb_ptr->wb_frag[frag + 1]); f->wb_data = vtophys(mtod(m, vm_offset_t)); frag++; } } /* * Handle special case: we used up all 16 fragments, * but we have more mbufs left in the chain. Copy the * data into an mbuf cluster. Note that we don't * bother clearing the values in the other fragment * pointers/counters; it wouldn't gain us anything, * and would waste cycles. 
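 *
 * On later kernels the same fallback could be written with m_defrag(9),
 * which collapses a chain into the fewest possible mbufs and clusters;
 * a sketch under that assumption, not what this driver does:
 *
 *	struct mbuf *m_new = m_defrag(m_head, M_NOWAIT);
 *	if (m_new == NULL)
 *		return (1);	(original chain is left intact on failure)
 *	m_head = m_new;
 *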
*/ if (m != NULL) { struct mbuf *m_new = NULL; MGETHDR(m_new, M_NOWAIT, MT_DATA); if (m_new == NULL) return(1); if (m_head->m_pkthdr.len > MHLEN) { - MCLGET(m_new, M_NOWAIT); - if (!(m_new->m_flags & M_EXT)) { + if (!(MCLGET(m_new, M_NOWAIT))) { m_freem(m_new); return(1); } } m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; f = &c->wb_ptr->wb_frag[0]; f->wb_status = 0; f->wb_data = vtophys(mtod(m_new, caddr_t)); f->wb_ctl = total_len = m_new->m_len; f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG; frag = 1; } if (total_len < WB_MIN_FRAMELEN) { f = &c->wb_ptr->wb_frag[frag]; f->wb_ctl = WB_MIN_FRAMELEN - total_len; f->wb_data = vtophys(&sc->wb_cdata.wb_pad); f->wb_ctl |= WB_TXCTL_TLINK; f->wb_status = WB_TXSTAT_OWN; frag++; } c->wb_mbuf = m_head; c->wb_lastdesc = frag - 1; WB_TXCTL(c) |= WB_TXCTL_LASTFRAG; WB_TXNEXT(c) = vtophys(&c->wb_nextdesc->wb_ptr->wb_frag[0]); return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void wb_start(ifp) struct ifnet *ifp; { struct wb_softc *sc; sc = ifp->if_softc; WB_LOCK(sc); wb_start_locked(ifp); WB_UNLOCK(sc); } static void wb_start_locked(ifp) struct ifnet *ifp; { struct wb_softc *sc; struct mbuf *m_head = NULL; struct wb_chain *cur_tx = NULL, *start_tx; sc = ifp->if_softc; WB_LOCK_ASSERT(sc); /* * Check for an available queue slot. If there are none, * punt. */ if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } start_tx = sc->wb_cdata.wb_tx_free; while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a descriptor off the free list. */ cur_tx = sc->wb_cdata.wb_tx_free; sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc; /* Pack the data into the descriptor. */ wb_encap(sc, cur_tx, m_head); if (cur_tx != start_tx) WB_TXOWN(cur_tx) = WB_TXSTAT_OWN; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, cur_tx->wb_mbuf); } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) return; /* * Place the request for the upload interrupt * in the last descriptor in the chain. This way, if * we're chaining several packets at once, we'll only * get an interrupt once for the whole chain rather than * once for each packet. */ WB_TXCTL(cur_tx) |= WB_TXCTL_FINT; cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT; sc->wb_cdata.wb_tx_tail = cur_tx; if (sc->wb_cdata.wb_tx_head == NULL) { sc->wb_cdata.wb_tx_head = start_tx; WB_TXOWN(start_tx) = WB_TXSTAT_OWN; CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF); } else { /* * We need to distinguish between the case where * the own bit is clear because the chip cleared it * and where the own bit is clear because we haven't * set it yet. The magic value WB_UNSENT is just some * randomly chosen number which doesn't have the own * bit set. When we actually transmit the frame, the * status word will have _only_ the own bit set, so * the txeoc handler will be able to tell if it needs * to initiate another transmission to flush out pending * frames. */ WB_TXOWN(start_tx) = WB_UNSENT; } /* * Set a timeout in case the chip goes out to lunch. 
*/ sc->wb_timer = 5; } static void wb_init(xsc) void *xsc; { struct wb_softc *sc = xsc; WB_LOCK(sc); wb_init_locked(sc); WB_UNLOCK(sc); } static void wb_init_locked(sc) struct wb_softc *sc; { struct ifnet *ifp = sc->wb_ifp; int i; struct mii_data *mii; WB_LOCK_ASSERT(sc); mii = device_get_softc(sc->wb_miibus); /* * Cancel pending I/O and free all RX/TX buffers. */ wb_stop(sc); wb_reset(sc); sc->wb_txthresh = WB_TXTHRESH_INIT; /* * Set cache alignment and burst length. */ #ifdef foo CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG); WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH); WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh)); #endif CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION); WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG); switch(sc->wb_cachesize) { case 32: WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG); break; case 16: WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG); break; case 8: WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG); break; case 0: default: WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE); break; } /* This doesn't tend to work too well at 100Mbps. */ WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON); /* Init our MAC address */ for (i = 0; i < ETHER_ADDR_LEN; i++) { CSR_WRITE_1(sc, WB_NODE0 + i, IF_LLADDR(sc->wb_ifp)[i]); } /* Init circular RX list. */ if (wb_list_rx_init(sc) == ENOBUFS) { device_printf(sc->wb_dev, "initialization failed: no memory for rx buffers\n"); wb_stop(sc); return; } /* Init TX descriptors. */ wb_list_tx_init(sc); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS); } else { WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS); } /* * Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD); } else { WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD); } /* * Program the multicast filter, if necessary. */ wb_setmulti(sc); /* * Load the address of the RX list. */ WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0])); /* * Enable interrupts. */ CSR_WRITE_4(sc, WB_IMR, WB_INTRS); CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF); /* Enable receiver and transmitter. */ WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF); WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); CSR_WRITE_4(sc, WB_TXADDR, vtophys(&sc->wb_ldata->wb_tx_list[0])); WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); mii_mediachg(mii); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->wb_stat_callout, hz, wb_tick, sc); } /* * Set media options. */ static int wb_ifmedia_upd(ifp) struct ifnet *ifp; { struct wb_softc *sc; sc = ifp->if_softc; WB_LOCK(sc); if (ifp->if_flags & IFF_UP) wb_init_locked(sc); WB_UNLOCK(sc); return(0); } /* * Report current media status. 
*/ static void wb_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct wb_softc *sc; struct mii_data *mii; sc = ifp->if_softc; WB_LOCK(sc); mii = device_get_softc(sc->wb_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; WB_UNLOCK(sc); } static int wb_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct wb_softc *sc = ifp->if_softc; struct mii_data *mii; struct ifreq *ifr = (struct ifreq *) data; int error = 0; switch(command) { case SIOCSIFFLAGS: WB_LOCK(sc); if (ifp->if_flags & IFF_UP) { wb_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) wb_stop(sc); } WB_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: WB_LOCK(sc); wb_setmulti(sc); WB_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->wb_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); break; } return(error); } static void wb_watchdog(sc) struct wb_softc *sc; { struct ifnet *ifp; WB_LOCK_ASSERT(sc); ifp = sc->wb_ifp; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if_printf(ifp, "watchdog timeout\n"); #ifdef foo if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT)) if_printf(ifp, "no carrier - transceiver cable problem?\n"); #endif wb_stop(sc); wb_reset(sc); wb_init_locked(sc); if (ifp->if_snd.ifq_head != NULL) wb_start_locked(ifp); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void wb_stop(sc) struct wb_softc *sc; { register int i; struct ifnet *ifp; WB_LOCK_ASSERT(sc); ifp = sc->wb_ifp; sc->wb_timer = 0; callout_stop(&sc->wb_stat_callout); WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON)); CSR_WRITE_4(sc, WB_IMR, 0x00000000); CSR_WRITE_4(sc, WB_TXADDR, 0x00000000); CSR_WRITE_4(sc, WB_RXADDR, 0x00000000); /* * Free data in the RX lists. */ for (i = 0; i < WB_RX_LIST_CNT; i++) { if (sc->wb_cdata.wb_rx_chain[i].wb_mbuf != NULL) { m_freem(sc->wb_cdata.wb_rx_chain[i].wb_mbuf); sc->wb_cdata.wb_rx_chain[i].wb_mbuf = NULL; } } bzero((char *)&sc->wb_ldata->wb_rx_list, sizeof(sc->wb_ldata->wb_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < WB_TX_LIST_CNT; i++) { if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) { m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf); sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL; } } bzero((char *)&sc->wb_ldata->wb_tx_list, sizeof(sc->wb_ldata->wb_tx_list)); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int wb_shutdown(dev) device_t dev; { struct wb_softc *sc; sc = device_get_softc(dev); WB_LOCK(sc); wb_stop(sc); WB_UNLOCK(sc); return (0); } Index: head/sys/dev/xe/if_xe.c =================================================================== --- head/sys/dev/xe/if_xe.c (revision 276749) +++ head/sys/dev/xe/if_xe.c (revision 276750) @@ -1,2074 +1,2073 @@ /*- * Copyright (c) 1998, 1999, 2003 Scott Mitchell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Portions of this software were derived from Werner Koch's xirc2ps driver * for Linux under the terms of the following license (from v1.30 of the * xirc2ps driver): * * Copyright (c) 1997 by Werner Koch (dd9jn) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, and the entire permission notice in its entirety, * including the disclaimer of warranties. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * FreeBSD device driver for Xircom CreditCard PCMCIA Ethernet adapters. The * following cards are currently known to work with the driver: * Xircom CreditCard 10/100 (CE3) * Xircom CreditCard Ethernet + Modem 28 (CEM28) * Xircom CreditCard Ethernet 10/100 + Modem 56 (CEM56) * Xircom RealPort Ethernet 10 * Xircom RealPort Ethernet 10/100 * Xircom RealPort Ethernet 10/100 + Modem 56 (REM56, REM56G) * Intel EtherExpress Pro/100 PC Card Mobile Adapter 16 (Pro/100 M16A) * Compaq Netelligent 10/100 PC Card (CPQ-10/100) * * Some other cards *should* work, but support for them is either broken or in * an unknown state at the moment. I'm always interested in hearing from * people who own any of these cards: * Xircom CreditCard 10Base-T (PS-CE2-10) * Xircom CreditCard Ethernet + ModemII (CEM2) * Xircom CEM28 and CEM33 Ethernet/Modem cards (may be variants of CEM2?) 
* * Thanks to all who assisted with the development and testing of the driver, * especially: Werner Koch, Duke Kamstra, Duncan Barclay, Jason George, Dru * Nelson, Mike Kephart, Bill Rainey and Douglas Rand. Apologies if I've left * out anyone who deserves a mention here. * * Special thanks to Ade Lovett for both hosting the mailing list and doing * the CEM56/REM56 support code; and the FreeBSD UK Users' Group for hosting * the web pages. * * Author email: * Driver web page: http://ukug.uk.freebsd.org/~scott/xe_drv/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * MII command structure */ struct xe_mii_frame { uint8_t mii_stdelim; uint8_t mii_opcode; uint8_t mii_phyaddr; uint8_t mii_regaddr; uint8_t mii_turnaround; uint16_t mii_data; }; /* * Media autonegotiation progress constants */ #define XE_AUTONEG_NONE 0 /* No autonegotiation in progress */ #define XE_AUTONEG_WAITING 1 /* Waiting for transmitter to go idle */ #define XE_AUTONEG_STARTED 2 /* Waiting for autonegotiation to complete */ #define XE_AUTONEG_100TX 3 /* Trying to force 100baseTX link */ #define XE_AUTONEG_FAIL 4 /* Autonegotiation failed */ /* * Prototypes start here */ static void xe_init(void *xscp); static void xe_init_locked(struct xe_softc *scp); static void xe_start(struct ifnet *ifp); static void xe_start_locked(struct ifnet *ifp); static int xe_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void xe_watchdog(void *arg); static void xe_intr(void *xscp); static void xe_txintr(struct xe_softc *scp, uint8_t txst1); static void xe_macintr(struct xe_softc *scp, uint8_t rst0, uint8_t txst0, uint8_t txst1); static void xe_rxintr(struct xe_softc *scp, uint8_t rst0); static int xe_media_change(struct ifnet *ifp); static void xe_media_status(struct ifnet *ifp, struct ifmediareq *mrp); static void xe_setmedia(void *arg); static void xe_reset(struct xe_softc *scp); static void xe_enable_intr(struct xe_softc *scp); static void xe_disable_intr(struct xe_softc *scp); static void xe_set_multicast(struct xe_softc *scp); static void xe_set_addr(struct xe_softc *scp, uint8_t* addr, unsigned idx); static void xe_mchash(struct xe_softc *scp, const uint8_t *addr); static int xe_pio_write_packet(struct xe_softc *scp, struct mbuf *mbp); /* * MII functions */ static void xe_mii_sync(struct xe_softc *scp); static int xe_mii_init(struct xe_softc *scp); static void xe_mii_send(struct xe_softc *scp, uint32_t bits, int cnt); static int xe_mii_readreg(struct xe_softc *scp, struct xe_mii_frame *frame); static int xe_mii_writereg(struct xe_softc *scp, struct xe_mii_frame *frame); static uint16_t xe_phy_readreg(struct xe_softc *scp, uint16_t reg); static void xe_phy_writereg(struct xe_softc *scp, uint16_t reg, uint16_t data); /* * Debugging functions */ static void xe_mii_dump(struct xe_softc *scp); #if 0 static void xe_reg_dump(struct xe_softc *scp); #endif /* * Debug logging levels - set with hw.xe.debug sysctl * 0 = None * 1 = More hardware details, probe/attach progress * 2 = Most function calls, ioctls and media selection progress * 3 = Everything - interrupts, packets in/out and multicast address setup */ #define XE_DEBUG #ifdef XE_DEBUG /* sysctl vars */ static SYSCTL_NODE(_hw, OID_AUTO, xe, CTLFLAG_RD, 0, "if_xe parameters"); int xe_debug = 0; SYSCTL_INT(_hw_xe, OID_AUTO, debug, CTLFLAG_RW, &xe_debug, 0, "if_xe debug level"); #define 
DEVPRINTF(level, arg) if (xe_debug >= (level)) device_printf arg #define DPRINTF(level, arg) if (xe_debug >= (level)) printf arg #define XE_MII_DUMP(scp) if (xe_debug >= 3) xe_mii_dump(scp) #if 0 #define XE_REG_DUMP(scp) if (xe_debug >= 3) xe_reg_dump(scp) #endif #else #define DEVPRINTF(level, arg) #define DPRINTF(level, arg) #define XE_MII_DUMP(scp) #if 0 #define XE_REG_DUMP(scp) #endif #endif /* * Attach a device. */ int xe_attach(device_t dev) { struct xe_softc *scp = device_get_softc(dev); int err; DEVPRINTF(2, (dev, "attach\n")); /* Initialise stuff... */ scp->dev = dev; scp->ifp = if_alloc(IFT_ETHER); if (scp->ifp == NULL) return (ENOSPC); scp->ifm = &scp->ifmedia; scp->autoneg_status = XE_AUTONEG_NONE; mtx_init(&scp->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&scp->wdog_timer, &scp->lock, 0); /* Initialise the ifnet structure */ scp->ifp->if_softc = scp; if_initname(scp->ifp, device_get_name(dev), device_get_unit(dev)); scp->ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); scp->ifp->if_linkmib = &scp->mibdata; scp->ifp->if_linkmiblen = sizeof(scp->mibdata); scp->ifp->if_start = xe_start; scp->ifp->if_ioctl = xe_ioctl; scp->ifp->if_init = xe_init; scp->ifp->if_baudrate = 100000000; IFQ_SET_MAXLEN(&scp->ifp->if_snd, ifqmaxlen); /* Initialise the ifmedia structure */ ifmedia_init(scp->ifm, 0, xe_media_change, xe_media_status); callout_init_mtx(&scp->media_timer, &scp->lock, 0); /* Add supported media types */ if (scp->mohawk) { ifmedia_add(scp->ifm, IFM_ETHER|IFM_100_TX, 0, NULL); ifmedia_add(scp->ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(scp->ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); } ifmedia_add(scp->ifm, IFM_ETHER|IFM_10_T, 0, NULL); if (scp->ce2) ifmedia_add(scp->ifm, IFM_ETHER|IFM_10_2, 0, NULL); ifmedia_add(scp->ifm, IFM_ETHER|IFM_AUTO, 0, NULL); /* Default is to autoselect best supported media type */ ifmedia_set(scp->ifm, IFM_ETHER|IFM_AUTO); /* Get the hardware into a known state */ XE_LOCK(scp); xe_reset(scp); XE_UNLOCK(scp); /* Get hardware version numbers */ XE_SELECT_PAGE(4); scp->version = XE_INB(XE_BOV); if (scp->mohawk) scp->srev = (XE_INB(XE_BOV) & 0x70) >> 4; else scp->srev = (XE_INB(XE_BOV) & 0x30) >> 4; /* Print some useful information */ device_printf(dev, "version 0x%02x/0x%02x%s%s\n", scp->version, scp->srev, scp->mohawk ? ", 100Mbps capable" : "", scp->modem ? ", with modem" : ""); if (scp->mohawk) { XE_SELECT_PAGE(0x10); DEVPRINTF(1, (dev, "DingoID=0x%04x, RevisionID=0x%04x, VendorID=0x%04x\n", XE_INW(XE_DINGOID), XE_INW(XE_RevID), XE_INW(XE_VendorID))); } if (scp->ce2) { XE_SELECT_PAGE(0x45); DEVPRINTF(1, (dev, "CE2 version = 0x%02x\n", XE_INB(XE_REV))); } /* Attach the interface */ ether_ifattach(scp->ifp, scp->enaddr); err = bus_setup_intr(dev, scp->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, xe_intr, scp, &scp->intrhand); if (err) { ether_ifdetach(scp->ifp); mtx_destroy(&scp->lock); return (err); } /* Done */ return (0); } /* * Complete hardware initialisation and enable output. Exits without doing * anything if there's no address assigned to the card, or if media selection * is in progress (the latter implies we've already run this function).
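 *
 * (xe_init() itself is only the unlocked wrapper: it takes the
 * softc mutex and calls xe_init_locked(), the same locked/unlocked
 * split used by xe_start()/xe_start_locked() below.)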
*/ static void xe_init(void *xscp) { struct xe_softc *scp = xscp; XE_LOCK(scp); xe_init_locked(scp); XE_UNLOCK(scp); } static void xe_init_locked(struct xe_softc *scp) { unsigned i; if (scp->autoneg_status != XE_AUTONEG_NONE) return; DEVPRINTF(2, (scp->dev, "init\n")); /* Reset transmitter flags */ scp->tx_queued = 0; scp->tx_tpr = 0; scp->tx_timeouts = 0; scp->tx_thres = 64; scp->tx_min = ETHER_MIN_LEN - ETHER_CRC_LEN; scp->tx_timeout = 0; /* Soft reset the card */ XE_SELECT_PAGE(0); XE_OUTB(XE_CR, XE_CR_SOFT_RESET); DELAY(40000); XE_OUTB(XE_CR, 0); DELAY(40000); if (scp->mohawk) { /* * set GP1 and GP2 as outputs (bits 2 & 3) * set GP1 low to power on the ML6692 (bit 0) * set GP2 high to power on the 10Mhz chip (bit 1) */ XE_SELECT_PAGE(4); XE_OUTB(XE_GPR0, XE_GPR0_GP2_SELECT | XE_GPR0_GP1_SELECT | XE_GPR0_GP2_OUT); } /* Shut off interrupts */ xe_disable_intr(scp); /* Wait for everything to wake up */ DELAY(500000); /* Check for PHY */ if (scp->mohawk) scp->phy_ok = xe_mii_init(scp); /* Disable 'source insertion' (not sure what that means) */ XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC0, XE_SWC0_NO_SRC_INSERT); /* Set 8K/24K Tx/Rx buffer split */ if (scp->srev != 1) { XE_SELECT_PAGE(2); XE_OUTW(XE_RBS, 0x2000); } /* Enable early transmit mode on Mohawk/Dingo */ if (scp->mohawk) { XE_SELECT_PAGE(0x03); XE_OUTW(XE_TPT, scp->tx_thres); XE_SELECT_PAGE(0x01); XE_OUTB(XE_ECR, XE_INB(XE_ECR) | XE_ECR_EARLY_TX); } /* Put MAC address in first 'individual address' register */ XE_SELECT_PAGE(0x50); for (i = 0; i < ETHER_ADDR_LEN; i++) XE_OUTB(0x08 + i, IF_LLADDR(scp->ifp)[scp->mohawk ? 5 - i : i]); /* Set up multicast addresses */ xe_set_multicast(scp); /* Fix the receive data offset -- reset can leave it off-by-one */ XE_SELECT_PAGE(0); XE_OUTW(XE_DO, 0x2000); /* Set interrupt masks */ XE_SELECT_PAGE(1); XE_OUTB(XE_IMR0, XE_IMR0_TX_PACKET | XE_IMR0_MAC_INTR | XE_IMR0_RX_PACKET); /* Set MAC interrupt masks */ XE_SELECT_PAGE(0x40); XE_OUTB(XE_RX0Msk, ~(XE_RX0M_RX_OVERRUN | XE_RX0M_CRC_ERROR | XE_RX0M_ALIGN_ERROR | XE_RX0M_LONG_PACKET)); XE_OUTB(XE_TX0Msk, ~(XE_TX0M_SQE_FAIL | XE_TX0M_LATE_COLLISION | XE_TX0M_TX_UNDERRUN | XE_TX0M_16_COLLISIONS | XE_TX0M_NO_CARRIER)); /* Clear MAC status registers */ XE_SELECT_PAGE(0x40); XE_OUTB(XE_RST0, 0x00); XE_OUTB(XE_TXST0, 0x00); /* Enable receiver and put MAC online */ XE_SELECT_PAGE(0x40); XE_OUTB(XE_CMD0, XE_CMD0_RX_ENABLE|XE_CMD0_ONLINE); /* Set up IMR, enable interrupts */ xe_enable_intr(scp); /* Start media selection */ xe_setmedia(scp); /* Enable output */ scp->ifp->if_drv_flags |= IFF_DRV_RUNNING; scp->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&scp->wdog_timer, hz, xe_watchdog, scp); } /* * Start output on interface. Should be called at splimp() priority. Check * that the output is idle (ie, IFF_DRV_OACTIVE is not set) before calling this * function. If media selection is in progress we set IFF_DRV_OACTIVE ourselves * and return immediately. */ static void xe_start(struct ifnet *ifp) { struct xe_softc *scp = ifp->if_softc; XE_LOCK(scp); xe_start_locked(ifp); XE_UNLOCK(scp); } static void xe_start_locked(struct ifnet *ifp) { struct xe_softc *scp = ifp->if_softc; struct mbuf *mbp; if (scp->autoneg_status != XE_AUTONEG_NONE) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } DEVPRINTF(3, (scp->dev, "start\n")); /* * Loop while there are packets to be sent, and space to send * them. 
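 *
 * (If xe_pio_write_packet() fails for lack of buffer space, the
 * packet goes back on the front of the queue with IF_PREPEND() and
 * IFF_DRV_OACTIVE is set, so nothing is reordered or lost while the
 * card catches up.)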
*/ for (;;) { /* Suck a packet off the send queue */ IF_DEQUEUE(&ifp->if_snd, mbp); if (mbp == NULL) { /* * We are using the !OACTIVE flag to indicate * to the outside world that we can accept an * additional packet rather than that the * transmitter is _actually_ active. Indeed, * the transmitter may be active, but if we * haven't filled all the buffers with data * then we still want to accept more. */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; return; } if (xe_pio_write_packet(scp, mbp) != 0) { /* Push the packet back onto the queue */ IF_PREPEND(&ifp->if_snd, mbp); ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } /* Tap off here if there is a bpf listener */ BPF_MTAP(ifp, mbp); /* In case we don't hear from the card again... */ scp->tx_timeout = 5; scp->tx_queued++; m_freem(mbp); } } /* * Process an ioctl request. Adapted from the ed driver. */ static int xe_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct xe_softc *scp; int error; scp = ifp->if_softc; error = 0; switch (command) { case SIOCSIFFLAGS: DEVPRINTF(2, (scp->dev, "ioctl: SIOCSIFFLAGS: 0x%04x\n", ifp->if_flags)); /* * If the interface is marked up and stopped, then * start it. If it is marked down and running, then * stop it. */ XE_LOCK(scp); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { xe_reset(scp); xe_init_locked(scp); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) xe_stop(scp); } /* handle changes to PROMISC/ALLMULTI flags */ xe_set_multicast(scp); XE_UNLOCK(scp); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: DEVPRINTF(2, (scp->dev, "ioctl: SIOC{ADD,DEL}MULTI\n")); /* * Multicast list has (maybe) changed; set the * hardware filters accordingly. */ XE_LOCK(scp); xe_set_multicast(scp); XE_UNLOCK(scp); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: DEVPRINTF(3, (scp->dev, "ioctl: bounce to ifmedia_ioctl\n")); /* * Someone wants to get/set media options. */ error = ifmedia_ioctl(ifp, (struct ifreq *)data, &scp->ifmedia, command); break; default: DEVPRINTF(3, (scp->dev, "ioctl: bounce to ether_ioctl\n")); error = ether_ioctl(ifp, command, data); } return (error); } /* * Card interrupt handler. * * This function is probably more complicated than it needs to be, as it * attempts to deal with the case where multiple packets get sent between * interrupts. This is especially annoying when working out the collision * stats. Not sure whether this case ever really happens or not (maybe on a * slow/heavily loaded machine?) so it's probably best to leave this like it * is. * * Note that the crappy PIO used to get packets on and off the card means that * you will spend a lot of time in this routine -- I can get my P150 to spend * 90% of its time servicing interrupts if I really hammer the network. Could * fix this, but then you'd start dropping/losing packets. The moral of this * story? If you want good network performance _and_ some cycles left over to * get your work done, don't buy a Xircom card. 
Or convince them to tell me * how to do memory-mapped I/O :) */ static void xe_txintr(struct xe_softc *scp, uint8_t txst1) { struct ifnet *ifp; uint8_t tpr, sent, coll; ifp = scp->ifp; /* Update packet count, accounting for rollover */ tpr = XE_INB(XE_TPR); sent = -scp->tx_tpr + tpr; /* Update statistics if we actually sent anything */ if (sent > 0) { coll = txst1 & XE_TXST1_RETRY_COUNT; scp->tx_tpr = tpr; scp->tx_queued -= sent; if_inc_counter(ifp, IFCOUNTER_OPACKETS, sent); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, coll); /* * According to the Xircom manual, Dingo will * sometimes manage to transmit a packet without * triggering an interrupt. If this happens, we have * sent > 1 and the collision count only reflects * collisions on the last packet sent (the one that * triggered the interrupt). Collision stats might * therefore be a bit low, but there doesn't seem to * be anything we can do about that. */ switch (coll) { case 0: break; case 1: scp->mibdata.dot3StatsSingleCollisionFrames++; scp->mibdata.dot3StatsCollFrequencies[0]++; break; default: scp->mibdata.dot3StatsMultipleCollisionFrames++; scp->mibdata.dot3StatsCollFrequencies[coll-1]++; } } scp->tx_timeout = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } /* Handle most MAC interrupts */ static void xe_macintr(struct xe_softc *scp, uint8_t rst0, uint8_t txst0, uint8_t txst1) { struct ifnet *ifp; ifp = scp->ifp; #if 0 /* Carrier sense lost -- only in 10Mbit HDX mode */ if (txst0 & XE_TXST0_NO_CARRIER || !(txst1 & XE_TXST1_LINK_STATUS)) { /* XXX - Need to update media status here */ device_printf(scp->dev, "no carrier\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); scp->mibdata.dot3StatsCarrierSenseErrors++; } #endif /* Excessive collisions -- try sending again */ if (txst0 & XE_TXST0_16_COLLISIONS) { if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); scp->mibdata.dot3StatsExcessiveCollisions++; scp->mibdata.dot3StatsMultipleCollisionFrames++; scp->mibdata.dot3StatsCollFrequencies[15]++; XE_OUTB(XE_CR, XE_CR_RESTART_TX); } /* Transmit underrun -- increase early transmit threshold */ if (txst0 & XE_TXST0_TX_UNDERRUN && scp->mohawk) { DEVPRINTF(1, (scp->dev, "transmit underrun")); if (scp->tx_thres < ETHER_MAX_LEN) { if ((scp->tx_thres += 64) > ETHER_MAX_LEN) scp->tx_thres = ETHER_MAX_LEN; DPRINTF(1, (": increasing transmit threshold to %u", scp->tx_thres)); XE_SELECT_PAGE(0x3); XE_OUTW(XE_TPT, scp->tx_thres); XE_SELECT_PAGE(0x0); } DPRINTF(1, ("\n")); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); scp->mibdata.dot3StatsInternalMacTransmitErrors++; } /* Late collision -- just complain about it */ if (txst0 & XE_TXST0_LATE_COLLISION) { device_printf(scp->dev, "late collision\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); scp->mibdata.dot3StatsLateCollisions++; } /* SQE test failure -- just complain about it */ if (txst0 & XE_TXST0_SQE_FAIL) { device_printf(scp->dev, "SQE test failure\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); scp->mibdata.dot3StatsSQETestErrors++; } /* Packet too long -- what happens to these? */ if (rst0 & XE_RST0_LONG_PACKET) { device_printf(scp->dev, "received giant packet\n"); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); scp->mibdata.dot3StatsFrameTooLongs++; } /* CRC error -- packet dropped */ if (rst0 & XE_RST0_CRC_ERROR) { device_printf(scp->dev, "CRC error\n"); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); scp->mibdata.dot3StatsFCSErrors++; } } static void xe_rxintr(struct xe_softc *scp, uint8_t rst0) { struct ifnet *ifp; uint8_t esr, rsr; ifp = scp->ifp; /* Handle received packet(s)
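 *
 * (The +2 data offset applied below is the usual ETHER_ALIGN trick:
 * with a 14-byte Ethernet header, starting the frame two bytes into
 * the buffer puts the IP header on a 32-bit boundary, 2 + 14 = 16.)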
*/ while ((esr = XE_INB(XE_ESR)) & XE_ESR_FULL_PACKET_RX) { rsr = XE_INB(XE_RSR); DEVPRINTF(3, (scp->dev, "intr: ESR=0x%02x, RSR=0x%02x\n", esr, rsr)); /* Make sure packet is a good one */ if (rsr & XE_RSR_RX_OK) { struct ether_header *ehp; struct mbuf *mbp; uint16_t len; len = XE_INW(XE_RBC) - ETHER_CRC_LEN; DEVPRINTF(3, (scp->dev, "intr: receive length = %d\n", len)); if (len == 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); continue; } /* * Allocate mbuf to hold received packet. If * the mbuf header isn't big enough, we attach * an mbuf cluster to hold the packet. Note * the +=2 to align the packet data on a * 32-bit boundary, and the +3 to allow for * the possibility of reading one more byte * than the actual packet length (we always * read 16-bit words). XXX - Surely there's a * better way to do this alignment? */ MGETHDR(mbp, M_NOWAIT, MT_DATA); if (mbp == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); continue; } if (len + 3 > MHLEN) { - MCLGET(mbp, M_NOWAIT); - if ((mbp->m_flags & M_EXT) == 0) { + if (!(MCLGET(mbp, M_NOWAIT))) { m_freem(mbp); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); continue; } } mbp->m_data += 2; ehp = mtod(mbp, struct ether_header *); /* * Now get the packet in PIO mode, including * the Ethernet header but omitting the * trailing CRC. */ /* * Work around a bug in CE2 cards. There * seems to be a problem with duplicated and * extraneous bytes in the receive buffer, but * without any real documentation for the CE2 * it's hard to tell for sure. XXX - Needs * testing on CE2 hardware */ if (scp->srev == 0) { u_short rhs; XE_SELECT_PAGE(5); rhs = XE_INW(XE_RHSA); XE_SELECT_PAGE(0); rhs += 3; /* Skip control info */ if (rhs >= 0x8000) rhs = 0; if (rhs + len > 0x8000) { int i; for (i = 0; i < len; i++, rhs++) { ((char *)ehp)[i] = XE_INB(XE_EDP); if (rhs == 0x8000) { rhs = 0; i--; } } } else bus_read_multi_2(scp->port_res, XE_EDP, (uint16_t *)ehp, (len + 1) >> 1); } else bus_read_multi_2(scp->port_res, XE_EDP, (uint16_t *)ehp, (len + 1) >> 1); /* Deliver packet to upper layers */ mbp->m_pkthdr.rcvif = ifp; mbp->m_pkthdr.len = mbp->m_len = len; XE_UNLOCK(scp); (*ifp->if_input)(ifp, mbp); XE_LOCK(scp); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); } else if (rsr & XE_RSR_ALIGN_ERROR) { /* Packet alignment error -- drop packet */ device_printf(scp->dev, "alignment error\n"); scp->mibdata.dot3StatsAlignmentErrors++; if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } /* Skip to next packet, if there is one */ XE_OUTW(XE_DO, 0x8000); } /* Clear receiver overruns now we have some free buffer space */ if (rst0 & XE_RST0_RX_OVERRUN) { DEVPRINTF(1, (scp->dev, "receive overrun\n")); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); scp->mibdata.dot3StatsInternalMacReceiveErrors++; XE_OUTB(XE_CR, XE_CR_CLEAR_OVERRUN); } } static void xe_intr(void *xscp) { struct xe_softc *scp = (struct xe_softc *) xscp; struct ifnet *ifp; uint8_t psr, isr, rst0, txst0, txst1; ifp = scp->ifp; XE_LOCK(scp); /* Disable interrupts */ if (scp->mohawk) XE_OUTB(XE_CR, 0); /* Cache current register page */ psr = XE_INB(XE_PR); /* Read ISR to see what caused this interrupt */ while ((isr = XE_INB(XE_ISR)) != 0) { /* 0xff might mean the card is no longer around */ if (isr == 0xff) { DEVPRINTF(3, (scp->dev, "intr: interrupt received for missing card?\n")); break; } /* Read other status registers */ XE_SELECT_PAGE(0x40); rst0 = XE_INB(XE_RST0); XE_OUTB(XE_RST0, 0); txst0 = XE_INB(XE_TXST0); txst1 = XE_INB(XE_TXST1); XE_OUTB(XE_TXST0, 0); XE_OUTB(XE_TXST1, 0); XE_SELECT_PAGE(0); DEVPRINTF(3, (scp->dev, "intr: ISR=0x%02x, 
RST=0x%02x, TXT=0x%02x%02x\n", isr, rst0, txst1, txst0)); if (isr & XE_ISR_TX_PACKET) xe_txintr(scp, txst1); if (isr & XE_ISR_MAC_INTR) xe_macintr(scp, rst0, txst0, txst1); xe_rxintr(scp, rst0); } /* Restore saved page */ XE_SELECT_PAGE(psr); /* Re-enable interrupts */ XE_OUTB(XE_CR, XE_CR_ENABLE_INTR); XE_UNLOCK(scp); } /* * Device timeout/watchdog routine. Called automatically if we queue a packet * for transmission but don't get an interrupt within a specified timeout * (usually 5 seconds). When this happens we assume the worst and reset the * card. */ static void xe_watchdog(void *arg) { struct xe_softc *scp = arg; XE_ASSERT_LOCKED(scp); if (scp->tx_timeout && --scp->tx_timeout == 0) { device_printf(scp->dev, "watchdog timeout: resetting card\n"); scp->tx_timeouts++; if_inc_counter(scp->ifp, IFCOUNTER_OERRORS, scp->tx_queued); xe_stop(scp); xe_reset(scp); xe_init_locked(scp); } callout_reset(&scp->wdog_timer, hz, xe_watchdog, scp); } /* * Change media selection. */ static int xe_media_change(struct ifnet *ifp) { struct xe_softc *scp = ifp->if_softc; DEVPRINTF(2, (scp->dev, "media_change\n")); XE_LOCK(scp); if (IFM_TYPE(scp->ifm->ifm_media) != IFM_ETHER) { XE_UNLOCK(scp); return(EINVAL); } /* * Some card/media combos aren't always possible -- filter * those out here. */ if ((IFM_SUBTYPE(scp->ifm->ifm_media) == IFM_AUTO || IFM_SUBTYPE(scp->ifm->ifm_media) == IFM_100_TX) && !scp->phy_ok) { XE_UNLOCK(scp); return (EINVAL); } xe_setmedia(scp); XE_UNLOCK(scp); return (0); } /* * Return current media selection. */ static void xe_media_status(struct ifnet *ifp, struct ifmediareq *mrp) { struct xe_softc *scp = ifp->if_softc; DEVPRINTF(3, (scp->dev, "media_status\n")); /* XXX - This is clearly wrong. Will fix once I have CE2 working */ XE_LOCK(scp); mrp->ifm_status = IFM_AVALID | IFM_ACTIVE; mrp->ifm_active = ((struct xe_softc *)ifp->if_softc)->media; XE_UNLOCK(scp); } /* * Select active media. */ static void xe_setmedia(void *xscp) { struct xe_softc *scp = xscp; uint16_t bmcr, bmsr, anar, lpar; DEVPRINTF(2, (scp->dev, "setmedia\n")); XE_ASSERT_LOCKED(scp); /* Cancel any pending timeout */ callout_stop(&scp->media_timer); xe_disable_intr(scp); /* Select media */ scp->media = IFM_ETHER; switch (IFM_SUBTYPE(scp->ifm->ifm_media)) { case IFM_AUTO: /* Autoselect media */ scp->media = IFM_ETHER|IFM_AUTO; /* * Autoselection is really awful. It goes something like this: * * Wait until the transmitter goes idle (2sec timeout). 
* Reset card * IF a 100Mbit PHY exists * Start NWAY autonegotiation (3.5sec timeout) * IF that succeeds * Select 100baseTX or 10baseT, whichever was detected * ELSE * Reset card * IF a 100Mbit PHY exists * Try to force a 100baseTX link (3sec timeout) * IF that succeeds * Select 100baseTX * ELSE * Disable the PHY * ENDIF * ENDIF * ENDIF * ENDIF * IF nothing selected so far * IF a 100Mbit PHY exists * Select 10baseT * ELSE * Select 10baseT or 10base2, whichever is connected * ENDIF * ENDIF */ switch (scp->autoneg_status) { case XE_AUTONEG_NONE: DEVPRINTF(2, (scp->dev, "Waiting for idle transmitter\n")); scp->ifp->if_drv_flags |= IFF_DRV_OACTIVE; scp->autoneg_status = XE_AUTONEG_WAITING; /* FALL THROUGH */ case XE_AUTONEG_WAITING: if (scp->tx_queued != 0) { callout_reset(&scp->media_timer, hz / 2, xe_setmedia, scp); return; } if (scp->phy_ok) { DEVPRINTF(2, (scp->dev, "Starting autonegotiation\n")); bmcr = xe_phy_readreg(scp, PHY_BMCR); bmcr &= ~(PHY_BMCR_AUTONEGENBL); xe_phy_writereg(scp, PHY_BMCR, bmcr); anar = xe_phy_readreg(scp, PHY_ANAR); anar &= ~(PHY_ANAR_100BT4 | PHY_ANAR_100BTXFULL | PHY_ANAR_10BTFULL); anar |= PHY_ANAR_100BTXHALF | PHY_ANAR_10BTHALF; xe_phy_writereg(scp, PHY_ANAR, anar); bmcr |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR; xe_phy_writereg(scp, PHY_BMCR, bmcr); scp->autoneg_status = XE_AUTONEG_STARTED; callout_reset(&scp->media_timer, hz * 7/2, xe_setmedia, scp); return; } else { scp->autoneg_status = XE_AUTONEG_FAIL; } break; case XE_AUTONEG_STARTED: bmsr = xe_phy_readreg(scp, PHY_BMSR); lpar = xe_phy_readreg(scp, PHY_LPAR); if (bmsr & (PHY_BMSR_AUTONEGCOMP | PHY_BMSR_LINKSTAT)) { DEVPRINTF(2, (scp->dev, "Autonegotiation complete!\n")); /* * XXX - Shouldn't have to do this, * but (on my hub at least) the * transmitter won't work after a * successful autoneg. So we see what * the negotiation result was and * force that mode. I'm sure there is * an easy fix for this. */ if (lpar & PHY_LPAR_100BTXHALF) { xe_phy_writereg(scp, PHY_BMCR, PHY_BMCR_SPEEDSEL); XE_MII_DUMP(scp); XE_SELECT_PAGE(2); XE_OUTB(XE_MSR, XE_INB(XE_MSR) | 0x08); scp->media = IFM_ETHER | IFM_100_TX; scp->autoneg_status = XE_AUTONEG_NONE; } else { /* * XXX - Bit of a hack going * on in here. This is * derived from Ken Hughes * patch to the Linux driver * to make it work with 10Mbit * _autonegotiated_ links on * CE3B cards. What's a CE3B * and how's it differ from a * plain CE3? these are the * things we need to find out. */ xe_phy_writereg(scp, PHY_BMCR, 0x0000); XE_SELECT_PAGE(2); /* BEGIN HACK */ XE_OUTB(XE_MSR, XE_INB(XE_MSR) | 0x08); XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, 0x80); scp->media = IFM_ETHER | IFM_10_T; scp->autoneg_status = XE_AUTONEG_NONE; /* END HACK */ #if 0 /* Display PHY? 
*/ XE_OUTB(XE_MSR, XE_INB(XE_MSR) & ~0x08); scp->autoneg_status = XE_AUTONEG_FAIL; #endif } } else { DEVPRINTF(2, (scp->dev, "Autonegotiation failed; trying 100baseTX\n")); XE_MII_DUMP(scp); if (scp->phy_ok) { xe_phy_writereg(scp, PHY_BMCR, PHY_BMCR_SPEEDSEL); scp->autoneg_status = XE_AUTONEG_100TX; callout_reset(&scp->media_timer, hz * 3, xe_setmedia, scp); return; } else { scp->autoneg_status = XE_AUTONEG_FAIL; } } break; case XE_AUTONEG_100TX: (void)xe_phy_readreg(scp, PHY_BMSR); bmsr = xe_phy_readreg(scp, PHY_BMSR); if (bmsr & PHY_BMSR_LINKSTAT) { DEVPRINTF(2, (scp->dev, "Got 100baseTX link!\n")); XE_MII_DUMP(scp); XE_SELECT_PAGE(2); XE_OUTB(XE_MSR, XE_INB(XE_MSR) | 0x08); scp->media = IFM_ETHER | IFM_100_TX; scp->autoneg_status = XE_AUTONEG_NONE; } else { DEVPRINTF(2, (scp->dev, "Autonegotiation failed; disabling PHY\n")); XE_MII_DUMP(scp); xe_phy_writereg(scp, PHY_BMCR, 0x0000); XE_SELECT_PAGE(2); /* Disable PHY? */ XE_OUTB(XE_MSR, XE_INB(XE_MSR) & ~0x08); scp->autoneg_status = XE_AUTONEG_FAIL; } break; } /* * If we got down here _and_ autoneg_status is * XE_AUTONEG_FAIL, then either autonegotiation * failed, or never got started to begin with. In * either case, select a suitable 10Mbit media and * hope it works. We don't need to reset the card * again, since it will have been done already by the * big switch above. */ if (scp->autoneg_status == XE_AUTONEG_FAIL) { DEVPRINTF(2, (scp->dev, "Selecting 10baseX\n")); if (scp->mohawk) { XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, 0x80); scp->media = IFM_ETHER | IFM_10_T; scp->autoneg_status = XE_AUTONEG_NONE; } else { XE_SELECT_PAGE(4); XE_OUTB(XE_GPR0, 4); DELAY(50000); XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, (XE_INB(XE_ESR) & XE_ESR_MEDIA_SELECT) ? 0x80 : 0xc0); scp->media = IFM_ETHER | ((XE_INB(XE_ESR) & XE_ESR_MEDIA_SELECT) ? IFM_10_T : IFM_10_2); scp->autoneg_status = XE_AUTONEG_NONE; } } break; /* * If a specific media has been requested, we just reset the * card and select it (one small exception -- if 100baseTX is * requested but there is no PHY, we fall back to 10baseT * operation). */ case IFM_100_TX: /* Force 100baseTX */ if (scp->phy_ok) { DEVPRINTF(2, (scp->dev, "Selecting 100baseTX\n")); XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, 0); xe_phy_writereg(scp, PHY_BMCR, PHY_BMCR_SPEEDSEL); XE_SELECT_PAGE(2); XE_OUTB(XE_MSR, XE_INB(XE_MSR) | 0x08); scp->media |= IFM_100_TX; break; } /* FALLTHROUGH */ case IFM_10_T: /* Force 10baseT */ DEVPRINTF(2, (scp->dev, "Selecting 10baseT\n")); if (scp->phy_ok) { xe_phy_writereg(scp, PHY_BMCR, 0x0000); XE_SELECT_PAGE(2); /* Disable PHY */ XE_OUTB(XE_MSR, XE_INB(XE_MSR) & ~0x08); } XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, 0x80); scp->media |= IFM_10_T; break; case IFM_10_2: DEVPRINTF(2, (scp->dev, "Selecting 10base2\n")); XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, 0xc0); scp->media |= IFM_10_2; break; } /* * Finally, the LEDs are set to match whatever media was * chosen and the transmitter is unblocked. */ DEVPRINTF(2, (scp->dev, "Setting LEDs\n")); XE_SELECT_PAGE(2); switch (IFM_SUBTYPE(scp->media)) { case IFM_100_TX: case IFM_10_T: XE_OUTB(XE_LED, 0x3b); if (scp->dingo) XE_OUTB(0x0b, 0x04); /* 100Mbit LED */ break; case IFM_10_2: XE_OUTB(XE_LED, 0x3a); break; } /* Restart output? */ xe_enable_intr(scp); scp->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; xe_start_locked(scp->ifp); } /* * Hard reset (power cycle) the card. 
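 *
 * (This really is a power cycle: GPR1 is dropped, the code waits
 * 40ms via DELAY(40000), then restores power, so any state the card
 * held -- including the media selection -- has to be set up again.)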
*/ static void xe_reset(struct xe_softc *scp) { DEVPRINTF(2, (scp->dev, "reset\n")); XE_ASSERT_LOCKED(scp); /* Power down */ XE_SELECT_PAGE(4); XE_OUTB(XE_GPR1, 0); DELAY(40000); /* Power up again */ if (scp->mohawk) XE_OUTB(XE_GPR1, XE_GPR1_POWER_DOWN); else XE_OUTB(XE_GPR1, XE_GPR1_POWER_DOWN | XE_GPR1_AIC); DELAY(40000); XE_SELECT_PAGE(0); } /* * Take interface offline. This is done by powering down the device, which I * assume means just shutting down the transceiver and Ethernet logic. This * requires a _hard_ reset to recover from, as we need to power up again. */ void xe_stop(struct xe_softc *scp) { DEVPRINTF(2, (scp->dev, "stop\n")); XE_ASSERT_LOCKED(scp); /* * Shut off interrupts. */ xe_disable_intr(scp); /* * Power down. */ XE_SELECT_PAGE(4); XE_OUTB(XE_GPR1, 0); XE_SELECT_PAGE(0); if (scp->mohawk) { /* * set GP1 and GP2 as outputs (bits 2 & 3) * set GP1 high to power on the ML6692 (bit 0) * set GP2 low to power on the 10Mhz chip (bit 1) */ XE_SELECT_PAGE(4); XE_OUTB(XE_GPR0, XE_GPR0_GP2_SELECT | XE_GPR0_GP1_SELECT | XE_GPR0_GP1_OUT); } /* * ~IFF_DRV_RUNNING == interface down. */ scp->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; scp->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; scp->tx_timeout = 0; callout_stop(&scp->wdog_timer); callout_stop(&scp->media_timer); } /* * Enable interrupts from the card. */ static void xe_enable_intr(struct xe_softc *scp) { DEVPRINTF(2, (scp->dev, "enable_intr\n")); XE_SELECT_PAGE(0); XE_OUTB(XE_CR, XE_CR_ENABLE_INTR); /* Enable interrupts */ if (scp->modem && !scp->dingo) { /* This bit is just magic */ if (!(XE_INB(0x10) & 0x01)) { XE_OUTB(0x10, 0x11); /* Unmask master int enable */ } } } /* * Disable interrupts from the card. */ static void xe_disable_intr(struct xe_softc *scp) { DEVPRINTF(2, (scp->dev, "disable_intr\n")); XE_SELECT_PAGE(0); XE_OUTB(XE_CR, 0); /* Disable interrupts */ if (scp->modem && !scp->dingo) { /* More magic */ XE_OUTB(0x10, 0x10); /* Mask the master int enable */ } } /* * Set up multicast filter and promiscuous modes. */ static void xe_set_multicast(struct xe_softc *scp) { struct ifnet *ifp; struct ifmultiaddr *maddr; unsigned count, i; DEVPRINTF(2, (scp->dev, "set_multicast\n")); ifp = scp->ifp; XE_SELECT_PAGE(0x42); /* Handle PROMISC flag */ if (ifp->if_flags & IFF_PROMISC) { XE_OUTB(XE_SWC1, XE_INB(XE_SWC1) | XE_SWC1_PROMISCUOUS); return; } else XE_OUTB(XE_SWC1, XE_INB(XE_SWC1) & ~XE_SWC1_PROMISCUOUS); /* Handle ALLMULTI flag */ if (ifp->if_flags & IFF_ALLMULTI) { XE_OUTB(XE_SWC1, XE_INB(XE_SWC1) | XE_SWC1_ALLMULTI); return; } else XE_OUTB(XE_SWC1, XE_INB(XE_SWC1) & ~XE_SWC1_ALLMULTI); /* Iterate over multicast address list */ count = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(maddr, &ifp->if_multiaddrs, ifma_link) { if (maddr->ifma_addr->sa_family != AF_LINK) continue; count++; if (count < 10) /* * First 9 use Individual Addresses for exact * matching. 
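 *
 * Slot 0 always holds the station address, so multicast
 * entries occupy IA slots 1-9; past that the hash filter
 * takes over on Mohawk/Dingo, while CE2 falls back to
 * promiscuous multicast (ALLMULTI) matching below.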
*/ xe_set_addr(scp, LLADDR((struct sockaddr_dl *)maddr->ifma_addr), count); else if (scp->mohawk) /* Use hash filter on Mohawk and Dingo */ xe_mchash(scp, LLADDR((struct sockaddr_dl *)maddr->ifma_addr)); else /* Nowhere else to put them on CE2 */ break; } if_maddr_runlock(ifp); DEVPRINTF(2, (scp->dev, "set_multicast: count = %u\n", count)); /* Now do some cleanup and enable multicast handling as needed */ if (count == 0) { /* Disable all multicast handling */ XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, XE_INB(XE_SWC1) & ~(XE_SWC1_IA_ENABLE | XE_SWC1_ALLMULTI)); if (scp->mohawk) { XE_SELECT_PAGE(0x02); XE_OUTB(XE_MSR, XE_INB(XE_MSR) & ~XE_MSR_HASH_TABLE); } } else if (count < 10) { /* * Fill in any unused Individual Addresses with our * MAC address. */ for (i = count + 1; i < 10; i++) xe_set_addr(scp, IF_LLADDR(scp->ifp), i); /* Enable Individual Address matching only */ XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, (XE_INB(XE_SWC1) & ~XE_SWC1_ALLMULTI) | XE_SWC1_IA_ENABLE); if (scp->mohawk) { XE_SELECT_PAGE(0x02); XE_OUTB(XE_MSR, XE_INB(XE_MSR) & ~XE_MSR_HASH_TABLE); } } else if (scp->mohawk) { /* Check whether hash table is full */ XE_SELECT_PAGE(0x58); for (i = 0x08; i < 0x10; i++) if (XE_INB(i) != 0xff) break; if (i == 0x10) { /* * Hash table full - enable * promiscuous multicast matching */ XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, (XE_INB(XE_SWC1) & ~XE_SWC1_IA_ENABLE) | XE_SWC1_ALLMULTI); XE_SELECT_PAGE(0x02); XE_OUTB(XE_MSR, XE_INB(XE_MSR) & ~XE_MSR_HASH_TABLE); } else { /* Enable hash table and Individual Address matching */ XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, (XE_INB(XE_SWC1) & ~XE_SWC1_ALLMULTI) | XE_SWC1_IA_ENABLE); XE_SELECT_PAGE(0x02); XE_OUTB(XE_MSR, XE_INB(XE_MSR) | XE_MSR_HASH_TABLE); } } else { /* Enable promiscuous multicast matching */ XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, (XE_INB(XE_SWC1) & ~XE_SWC1_IA_ENABLE) | XE_SWC1_ALLMULTI); } XE_SELECT_PAGE(0); } /* * Copy the Ethernet multicast address in addr to the on-chip registers for * Individual Address idx. Assumes that addr is really a multicast address * and that idx > 0 (slot 0 is always used for the card MAC address). */ static void xe_set_addr(struct xe_softc *scp, uint8_t* addr, unsigned idx) { uint8_t page, reg; unsigned i; /* * Individual Addresses are stored in registers 8-F of pages * 0x50-0x57. IA1 therefore starts at register 0xE on page * 0x50. The expressions below compute the starting page and * register for any IA index > 0. */ --idx; page = 0x50 + idx % 4 + idx / 4 * 3; reg = 0x0e - 2 * (idx % 4); DEVPRINTF(3, (scp->dev, "set_addr: idx = %u, page = 0x%02x, reg = 0x%02x\n", idx + 1, page, reg)); /* * Copy the IA bytes. Note that the byte order is reversed * for Mohawk and Dingo wrt. CE2 hardware. */ XE_SELECT_PAGE(page); for (i = 0; i < ETHER_ADDR_LEN; i++) { if (i > 0) { DPRINTF(3, (":%02x", addr[i])); } else { DEVPRINTF(3, (scp->dev, "set_addr: %02x", addr[0])); } XE_OUTB(reg, addr[scp->mohawk ? 5 - i : i]); if (++reg == 0x10) { reg = 0x08; XE_SELECT_PAGE(++page); } } DPRINTF(3, ("\n")); } /* * Set the appropriate bit in the multicast hash table for the supplied * Ethernet multicast address addr. Assumes that addr is really a multicast * address. */ static void xe_mchash(struct xe_softc* scp, const uint8_t *addr) { int bit; uint8_t byte, hash; hash = ether_crc32_le(addr, ETHER_ADDR_LEN) & 0x3F; /* * Top 3 bits of hash give register - 8, bottom 3 give bit * within register.
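 * For example, a hash of 0x2a gives byte = (0x2a >> 3) | 0x08 =
 * 0x0d and bit = 1 << (0x2a & 0x07) = 0x04, i.e. bit 2 of register
 * 0x0d on page 0x58.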
*/ byte = hash >> 3 | 0x08; bit = 0x01 << (hash & 0x07); DEVPRINTF(3, (scp->dev, "set_hash: hash = 0x%02x, byte = 0x%02x, bit = 0x%02x\n", hash, byte, bit)); XE_SELECT_PAGE(0x58); XE_OUTB(byte, XE_INB(byte) | bit); } /* * Write an outgoing packet to the card using programmed I/O. */ static int xe_pio_write_packet(struct xe_softc *scp, struct mbuf *mbp) { unsigned len, pad; unsigned char wantbyte; uint8_t *data; uint8_t savebyte[2]; /* Get total packet length */ if (mbp->m_flags & M_PKTHDR) len = mbp->m_pkthdr.len; else { struct mbuf* mbp2 = mbp; for (len = 0; mbp2 != NULL; len += mbp2->m_len, mbp2 = mbp2->m_next); } DEVPRINTF(3, (scp->dev, "pio_write_packet: len = %u\n", len)); /* Packets < minimum length may need to be padded out */ pad = 0; if (len < scp->tx_min) { pad = scp->tx_min - len; len = scp->tx_min; } /* Check transmit buffer space */ XE_SELECT_PAGE(0); XE_OUTW(XE_TRS, len + 2); /* Only effective on rev. 1 CE2 cards */ if ((XE_INW(XE_TSO) & 0x7fff) <= len + 2) return (1); /* Send packet length to card */ XE_OUTW(XE_EDP, len); /* * Write packet to card using PIO (code stolen from the ed driver) */ wantbyte = 0; while (mbp != NULL) { len = mbp->m_len; if (len > 0) { data = mtod(mbp, caddr_t); if (wantbyte) { /* Finish the last word */ savebyte[1] = *data; XE_OUTW(XE_EDP, *(u_short *)savebyte); data++; len--; wantbyte = 0; } if (len > 1) { /* Output contiguous words */ bus_write_multi_2(scp->port_res, XE_EDP, (uint16_t *)data, len >> 1); data += len & ~1; len &= 1; } if (len == 1) { /* Save last byte, if needed */ savebyte[0] = *data; wantbyte = 1; } } mbp = mbp->m_next; } /* * Send last byte of odd-length packets */ if (wantbyte) XE_OUTB(XE_EDP, savebyte[0]); /* * Can just tell CE3 cards to send; short packets will be * padded out with random cruft automatically. For CE2, * manually pad the packet with garbage; it will be sent when * the required number of bytes have been delivered to the * card. */ if (scp->mohawk) XE_OUTB(XE_CR, XE_CR_TX_PACKET | XE_CR_RESTART_TX | XE_CR_ENABLE_INTR); else if (pad > 0) { if (pad & 0x01) XE_OUTB(XE_EDP, 0xaa); pad >>= 1; while (pad > 0) { XE_OUTW(XE_EDP, 0xdead); pad--; } } return (0); } /************************************************************** * * * M I I F U N C T I O N S * * * **************************************************************/ /* * Alternative MII/PHY handling code adapted from the xl driver. It doesn't * seem to work any better than the xirc2_ps stuff, but it's cleaner code. * XXX - this stuff shouldn't be here. It should all be abstracted off to * XXX - some kind of common MII-handling code, shared by all drivers. But * XXX - that's a whole other mission. */ #define XE_MII_SET(x) XE_OUTB(XE_GPR2, (XE_INB(XE_GPR2) | 0x04) | (x)) #define XE_MII_CLR(x) XE_OUTB(XE_GPR2, (XE_INB(XE_GPR2) | 0x04) & ~(x)) /* * Sync the PHYs by setting data bit and strobing the clock 32 times. */ static void xe_mii_sync(struct xe_softc *scp) { int i; XE_SELECT_PAGE(2); XE_MII_SET(XE_MII_DIR|XE_MII_WRD); for (i = 0; i < 32; i++) { XE_MII_SET(XE_MII_CLK); DELAY(1); XE_MII_CLR(XE_MII_CLK); DELAY(1); } } /* * Look for a MII-compliant PHY. If we find one, reset it. 
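 *
 * (The test below keys on the upper byte of the BMSR: 0x78 means
 * 100baseTX and 10baseT, each in full- and half-duplex, with no
 * 100baseT4 -- the capability mask expected from the card's ML6692
 * PHY.)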
*/ static int xe_mii_init(struct xe_softc *scp) { uint16_t status; status = xe_phy_readreg(scp, PHY_BMSR); if ((status & 0xff00) != 0x7800) { DEVPRINTF(2, (scp->dev, "no PHY found, %0x\n", status)); return (0); } else { DEVPRINTF(2, (scp->dev, "PHY OK!\n")); /* Reset the PHY */ xe_phy_writereg(scp, PHY_BMCR, PHY_BMCR_RESET); DELAY(500); while(xe_phy_readreg(scp, PHY_BMCR) & PHY_BMCR_RESET) ; /* nothing */ XE_MII_DUMP(scp); return (1); } } /* * Clock a series of bits through the MII. */ static void xe_mii_send(struct xe_softc *scp, uint32_t bits, int cnt) { int i; XE_SELECT_PAGE(2); XE_MII_CLR(XE_MII_CLK); for (i = (0x1 << (cnt - 1)); i; i >>= 1) { if (bits & i) { XE_MII_SET(XE_MII_WRD); } else { XE_MII_CLR(XE_MII_WRD); } DELAY(1); XE_MII_CLR(XE_MII_CLK); DELAY(1); XE_MII_SET(XE_MII_CLK); } } /* * Read a PHY register through the MII. */ static int xe_mii_readreg(struct xe_softc *scp, struct xe_mii_frame *frame) { int i, ack; XE_ASSERT_LOCKED(scp); /* * Set up frame for RX. */ frame->mii_stdelim = XE_MII_STARTDELIM; frame->mii_opcode = XE_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; XE_SELECT_PAGE(2); XE_OUTB(XE_GPR2, 0); /* * Turn on data xmit. */ XE_MII_SET(XE_MII_DIR); xe_mii_sync(scp); /* * Send command/address info. */ xe_mii_send(scp, frame->mii_stdelim, 2); xe_mii_send(scp, frame->mii_opcode, 2); xe_mii_send(scp, frame->mii_phyaddr, 5); xe_mii_send(scp, frame->mii_regaddr, 5); /* Idle bit */ XE_MII_CLR((XE_MII_CLK|XE_MII_WRD)); DELAY(1); XE_MII_SET(XE_MII_CLK); DELAY(1); /* Turn off xmit. */ XE_MII_CLR(XE_MII_DIR); /* Check for ack */ XE_MII_CLR(XE_MII_CLK); DELAY(1); ack = XE_INB(XE_GPR2) & XE_MII_RDD; XE_MII_SET(XE_MII_CLK); DELAY(1); /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. */ if (ack) { for(i = 0; i < 16; i++) { XE_MII_CLR(XE_MII_CLK); DELAY(1); XE_MII_SET(XE_MII_CLK); DELAY(1); } goto fail; } for (i = 0x8000; i; i >>= 1) { XE_MII_CLR(XE_MII_CLK); DELAY(1); if (!ack) { if (XE_INB(XE_GPR2) & XE_MII_RDD) frame->mii_data |= i; DELAY(1); } XE_MII_SET(XE_MII_CLK); DELAY(1); } fail: XE_MII_CLR(XE_MII_CLK); DELAY(1); XE_MII_SET(XE_MII_CLK); DELAY(1); if (ack) return(1); return(0); } /* * Write to a PHY register through the MII. */ static int xe_mii_writereg(struct xe_softc *scp, struct xe_mii_frame *frame) { XE_ASSERT_LOCKED(scp); /* * Set up frame for TX. */ frame->mii_stdelim = XE_MII_STARTDELIM; frame->mii_opcode = XE_MII_WRITEOP; frame->mii_turnaround = XE_MII_TURNAROUND; XE_SELECT_PAGE(2); /* * Turn on data output. */ XE_MII_SET(XE_MII_DIR); xe_mii_sync(scp); xe_mii_send(scp, frame->mii_stdelim, 2); xe_mii_send(scp, frame->mii_opcode, 2); xe_mii_send(scp, frame->mii_phyaddr, 5); xe_mii_send(scp, frame->mii_regaddr, 5); xe_mii_send(scp, frame->mii_turnaround, 2); xe_mii_send(scp, frame->mii_data, 16); /* Idle bit. */ XE_MII_SET(XE_MII_CLK); DELAY(1); XE_MII_CLR(XE_MII_CLK); DELAY(1); /* * Turn off xmit. */ XE_MII_CLR(XE_MII_DIR); return(0); } /* * Read a register from the PHY. */ static uint16_t xe_phy_readreg(struct xe_softc *scp, uint16_t reg) { struct xe_mii_frame frame; bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = 0; frame.mii_regaddr = reg; xe_mii_readreg(scp, &frame); return (frame.mii_data); } /* * Write to a PHY register.
*/ static void xe_phy_writereg(struct xe_softc *scp, uint16_t reg, uint16_t data) { struct xe_mii_frame frame; bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = 0; frame.mii_regaddr = reg; frame.mii_data = data; xe_mii_writereg(scp, &frame); } /* * A bit of debugging code. */ static void xe_mii_dump(struct xe_softc *scp) { int i; device_printf(scp->dev, "MII registers: "); for (i = 0; i < 2; i++) { printf(" %d:%04x", i, xe_phy_readreg(scp, i)); } for (i = 4; i < 7; i++) { printf(" %d:%04x", i, xe_phy_readreg(scp, i)); } printf("\n"); } #if 0 void xe_reg_dump(struct xe_softc *scp) { int page, i; device_printf(scp->dev, "Common registers: "); for (i = 0; i < 8; i++) { printf(" %2.2x", XE_INB(i)); } printf("\n"); for (page = 0; page <= 8; page++) { device_printf(scp->dev, "Register page %2.2x: ", page); XE_SELECT_PAGE(page); for (i = 8; i < 16; i++) { printf(" %2.2x", XE_INB(i)); } printf("\n"); } for (page = 0x10; page < 0x5f; page++) { if ((page >= 0x11 && page <= 0x3f) || (page == 0x41) || (page >= 0x43 && page <= 0x4f) || (page >= 0x59)) continue; device_printf(scp->dev, "Register page %2.2x: ", page); XE_SELECT_PAGE(page); for (i = 8; i < 16; i++) { printf(" %2.2x", XE_INB(i)); } printf("\n"); } } #endif int xe_activate(device_t dev) { struct xe_softc *sc = device_get_softc(dev); int start, i; DEVPRINTF(2, (dev, "activate\n")); if (!sc->modem) { sc->port_rid = 0; /* 0 is managed by pccard */ sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->port_rid, 0ul, ~0ul, 16, RF_ACTIVE); } else if (sc->dingo) { /* * Find a 16 byte aligned ioport for the card. */ DEVPRINTF(1, (dev, "Finding an aligned port for RealPort\n")); sc->port_rid = 1; /* 0 is managed by pccard */ start = 0x100; do { sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->port_rid, start, 0x3ff, 16, RF_ACTIVE); if (sc->port_res == NULL) break; if ((rman_get_start(sc->port_res) & 0xf) == 0) break; bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); start = (rman_get_start(sc->port_res) + 15) & ~0xf; } while (1); DEVPRINTF(1, (dev, "RealPort port 0x%0lx, size 0x%0lx\n", bus_get_resource_start(dev, SYS_RES_IOPORT, sc->port_rid), bus_get_resource_count(dev, SYS_RES_IOPORT, sc->port_rid))); } else if (sc->ce2) { /* * Find contiguous I/O port for the Ethernet function * on CEM2 and CEM3 cards. We allocate window 0 * wherever pccard has decided it should be, then find * an available window adjacent to it for the second * function. Not sure that both windows are actually * needed. */ DEVPRINTF(1, (dev, "Finding I/O port for CEM2/CEM3\n")); sc->ce2_port_rid = 0; /* 0 is managed by pccard */ sc->ce2_port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->ce2_port_rid, 0ul, ~0ul, 8, RF_ACTIVE); if (sc->ce2_port_res == NULL) { DEVPRINTF(1, (dev, "Cannot allocate I/O port for modem\n")); xe_deactivate(dev); return (ENOMEM); } sc->port_rid = 1; start = bus_get_resource_start(dev, SYS_RES_IOPORT, sc->ce2_port_rid); for (i = 0; i < 2; i++) { start += (i == 0 ? 
8 : -24); sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->port_rid, start, start + 15, 16, RF_ACTIVE); if (sc->port_res == NULL) continue; if (bus_get_resource_start(dev, SYS_RES_IOPORT, sc->port_rid) == start) break; bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); sc->port_res = NULL; } DEVPRINTF(1, (dev, "CEM2/CEM3 port 0x%0lx, size 0x%0lx\n", bus_get_resource_start(dev, SYS_RES_IOPORT, sc->port_rid), bus_get_resource_count(dev, SYS_RES_IOPORT, sc->port_rid))); } if (!sc->port_res) { DEVPRINTF(1, (dev, "Cannot allocate ioport\n")); xe_deactivate(dev); return (ENOMEM); } sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (sc->irq_res == NULL) { DEVPRINTF(1, (dev, "Cannot allocate irq\n")); xe_deactivate(dev); return (ENOMEM); } return (0); } void xe_deactivate(device_t dev) { struct xe_softc *sc = device_get_softc(dev); DEVPRINTF(2, (dev, "deactivate\n")); if (sc->intrhand) bus_teardown_intr(dev, sc->irq_res, sc->intrhand); sc->intrhand = NULL; if (sc->port_res) bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); sc->port_res = NULL; if (sc->ce2_port_res) bus_release_resource(dev, SYS_RES_IOPORT, sc->ce2_port_rid, sc->ce2_port_res); sc->ce2_port_res = NULL; if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = NULL; if (sc->ifp) if_free(sc->ifp); sc->ifp = NULL; } Index: head/sys/dev/xen/netfront/netfront.c =================================================================== --- head/sys/dev/xen/netfront/netfront.c (revision 276749) +++ head/sys/dev/xen/netfront/netfront.c (revision 276750) @@ -1,2234 +1,2233 @@ /*- * Copyright (c) 2004-2006 Kip Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 700000 #include #include #endif #include #include #include /* for DELAY */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_if.h" /* Features supported by all backends. TSO and LRO can be negotiated */ #define XN_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE) #if __FreeBSD_version >= 700000 /* * Should the driver do LRO on the RX end * this can be toggled on the fly, but the * interface must be reset (down/up) for it * to take effect. */ static int xn_enable_lro = 1; TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro); #else #define IFCAP_TSO4 0 #define CSUM_TSO 0 #endif #ifdef CONFIG_XEN static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif /** * \brief The maximum allowed data fragments in a single transmit * request. * * This limit is imposed by the backend driver. We assume here that * we are dealing with a Linux driver domain and have set our limit * to mirror the Linux MAX_SKB_FRAGS constant. 
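 *
 * For example, with the common 4 KiB page size the formula below works
 * out to 65536 / 4096 + 2 = 18 fragments, the value MAX_SKB_FRAGS has
 * on such Linux systems.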
*/ #define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2) #define RX_COPY_THRESHOLD 256 #define net_ratelimit() 0 struct netfront_info; struct netfront_rx_info; static void xn_txeof(struct netfront_info *); static void xn_rxeof(struct netfront_info *); static void network_alloc_rx_buffers(struct netfront_info *); static void xn_tick_locked(struct netfront_info *); static void xn_tick(void *); static void xn_intr(void *); static inline int xn_count_frags(struct mbuf *m); static int xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head); static void xn_start_locked(struct ifnet *); static void xn_start(struct ifnet *); static int xn_ioctl(struct ifnet *, u_long, caddr_t); static void xn_ifinit_locked(struct netfront_info *); static void xn_ifinit(void *); static void xn_stop(struct netfront_info *); static void xn_query_features(struct netfront_info *np); static int xn_configure_features(struct netfront_info *np); #ifdef notyet static void xn_watchdog(struct ifnet *); #endif #ifdef notyet static void netfront_closing(device_t dev); #endif static void netif_free(struct netfront_info *info); static int netfront_detach(device_t dev); static int talk_to_backend(device_t dev, struct netfront_info *info); static int create_netdev(device_t dev); static void netif_disconnect_backend(struct netfront_info *info); static int setup_device(device_t dev, struct netfront_info *info); static void free_ring(int *ref, void *ring_ptr_ref); static int xn_ifmedia_upd(struct ifnet *ifp); static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); /* Xenolinux helper functions */ int network_connect(struct netfront_info *); static void xn_free_rx_ring(struct netfront_info *); static void xn_free_tx_ring(struct netfront_info *); static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons, struct mbuf **list, int *pages_flipped_p); #define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT) #define INVALID_P2M_ENTRY (~0UL) /* * Mbuf pointers. We need these to keep track of the virtual addresses * of our mbuf chains since we can only convert from virtual to physical, * not the other way around. The size must track the free index arrays. 
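 *
 * The one-way conversion this relies on looks like the sketch below
 * (illustrative only, not part of the original file):
 */
#if 0
/*
 * Hypothetical helper: recover the page frame number backing an
 * mbuf's data area.  vtophys() goes from virtual to physical; no
 * inverse exists, which is why the chain arrays must remember the
 * mbuf pointers themselves.
 */
static inline u_long
sketch_mbuf_pfn(struct mbuf *m)
{

	return (vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT);
}
#endif
/*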
*/ struct xn_chain_data { struct mbuf *xn_tx_chain[NET_TX_RING_SIZE+1]; int xn_tx_chain_cnt; struct mbuf *xn_rx_chain[NET_RX_RING_SIZE+1]; }; struct net_device_stats { u_long rx_packets; /* total packets received */ u_long tx_packets; /* total packets transmitted */ u_long rx_bytes; /* total bytes received */ u_long tx_bytes; /* total bytes transmitted */ u_long rx_errors; /* bad packets received */ u_long tx_errors; /* packet transmit problems */ u_long rx_dropped; /* no space in linux buffers */ u_long tx_dropped; /* no space available in linux */ u_long multicast; /* multicast packets received */ u_long collisions; /* detailed rx_errors: */ u_long rx_length_errors; u_long rx_over_errors; /* receiver ring buff overflow */ u_long rx_crc_errors; /* recved pkt with crc error */ u_long rx_frame_errors; /* recv'd frame alignment error */ u_long rx_fifo_errors; /* recv'r fifo overrun */ u_long rx_missed_errors; /* receiver missed packet */ /* detailed tx_errors */ u_long tx_aborted_errors; u_long tx_carrier_errors; u_long tx_fifo_errors; u_long tx_heartbeat_errors; u_long tx_window_errors; /* for cslip etc */ u_long rx_compressed; u_long tx_compressed; }; struct netfront_info { struct ifnet *xn_ifp; #if __FreeBSD_version >= 700000 struct lro_ctrl xn_lro; #endif struct net_device_stats stats; u_int tx_full; netif_tx_front_ring_t tx; netif_rx_front_ring_t rx; struct mtx tx_lock; struct mtx rx_lock; struct mtx sc_lock; xen_intr_handle_t xen_intr_handle; u_int copying_receiver; u_int carrier; u_int maxfrags; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 32 #define RX_MAX_TARGET NET_RX_RING_SIZE int rx_min_target; int rx_max_target; int rx_target; grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1]; device_t xbdev; int tx_ring_ref; int rx_ring_ref; uint8_t mac[ETHER_ADDR_LEN]; struct xn_chain_data xn_cdata; /* mbufs */ struct mbuf_head xn_rx_batch; /* head of the batch queue */ int xn_if_flags; struct callout xn_stat_ch; u_long rx_pfn_array[NET_RX_RING_SIZE]; multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1]; mmu_update_t rx_mmu[NET_RX_RING_SIZE]; struct ifmedia sc_media; }; #define rx_mbufs xn_cdata.xn_rx_chain #define tx_mbufs xn_cdata.xn_tx_chain #define XN_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \ mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF); \ mtx_init(&(_sc)->sc_lock, #_name"_sc", "netfront softc lock", MTX_DEF) #define XN_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_lock) #define XN_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_lock) #define XN_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_lock) #define XN_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_lock) #define XN_LOCK(_sc) mtx_lock(&(_sc)->sc_lock); #define XN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock); #define XN_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_lock, MA_OWNED); #define XN_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_lock, MA_OWNED); #define XN_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_lock, MA_OWNED); #define XN_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_lock); \ mtx_destroy(&(_sc)->tx_lock); \ mtx_destroy(&(_sc)->sc_lock); struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* Access macros for acquiring freeing slots in xn_free_{tx,rx}_idxs[]. 
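 * Slot 0 is reserved as the list head, so valid ids start at 1.  A
 * typical round trip with the helpers defined just below, in sketch
 * form (illustrative only, not part of the original file):
 */
#if 0
static void
sketch_freelist_roundtrip(struct netfront_info *sc, struct mbuf *m)
{
	uintptr_t id;

	/* Claim a free slot and bind an mbuf to it... */
	id = get_id_from_freelist(sc->tx_mbufs);
	sc->tx_mbufs[id] = m;

	/* ...and, once the slot is done with, give it back. */
	sc->tx_mbufs[id] = NULL;
	add_id_to_freelist(sc->tx_mbufs, id);
}
#endif
/*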
*/ static inline void add_id_to_freelist(struct mbuf **list, uintptr_t id) { KASSERT(id != 0, ("%s: the head item (0) must always be free.", __func__)); list[id] = list[0]; list[0] = (struct mbuf *)id; } static inline unsigned short get_id_from_freelist(struct mbuf **list) { uintptr_t id; id = (uintptr_t)list[0]; KASSERT(id != 0, ("%s: the head item (0) must always remain free.", __func__)); list[0] = list[id]; return (id); } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct mbuf * xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct mbuf *m; m = np->rx_mbufs[i]; np->rx_mbufs[i] = NULL; return (m); } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n")); np->grant_rx_ref[i] = GRANT_REF_INVALID; return ref; } #define IPRINTK(fmt, args...) \ printf("[XEN] " fmt, ##args) #ifdef INVARIANTS #define WPRINTK(fmt, args...) \ printf("[XEN] " fmt, ##args) #else #define WPRINTK(fmt, args...) #endif #ifdef DEBUG #define DPRINTK(fmt, args...) \ printf("[XEN] %s: " fmt, __func__, ##args) #else #define DPRINTK(fmt, args...) #endif /** * Read the 'mac' node at the given device's node in the store, and parse that * as colon-separated octets, placing the result in the given mac array. mac * must be a preallocated array of length ETHER_ADDR_LEN. * Return 0 on success, or errno on error. */ static int xen_net_read_mac(device_t dev, uint8_t mac[]) { int error, i; char *s, *e, *macstr; const char *path; path = xenbus_get_node(dev); error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr); if (error == ENOENT) { /* * Deal with missing mac XenStore nodes on devices with * HVM emulation (the 'ioemu' configuration attribute) * enabled. * * The HVM emulator may execute in a stub device model * domain which lacks the permission, only given to Dom0, * to update the guest's XenStore tree. For this reason, * the HVM emulator doesn't even attempt to write the * front-side mac node, even when operating in Dom0. * However, there should always be a mac listed in the * backend tree. Fall back to that version if our query * of the front side XenStore location doesn't find * anything. */ path = xenbus_get_otherend_path(dev); error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr); } if (error != 0) { xenbus_dev_fatal(dev, error, "parsing %s/mac", path); return (error); } s = macstr; for (i = 0; i < ETHER_ADDR_LEN; i++) { mac[i] = strtoul(s, &e, 16); if (s == e || (e[0] != ':' && e[0] != 0)) { free(macstr, M_XENBUS); return (ENOENT); } s = &e[1]; } free(macstr, M_XENBUS); return (0); } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Connected state.
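 *
 * In this file that flow is netfront_attach() -> create_netdev(),
 * then, once the backend reaches InitWait, netfront_backend_changed()
 * -> network_connect() -> talk_to_backend() -> setup_device(), after
 * which the frontend sets itself to XenbusStateConnected.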
*/ static int netfront_probe(device_t dev) { if (!strcmp(xenbus_get_type(dev), "vif")) { device_set_desc(dev, "Virtual Network Interface"); return (0); } return (ENXIO); } static int netfront_attach(device_t dev) { int err; err = create_netdev(dev); if (err) { xenbus_dev_fatal(dev, err, "creating netdev"); return (err); } #if __FreeBSD_version >= 700000 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_lro", CTLFLAG_RW, &xn_enable_lro, 0, "Large Receive Offload"); #endif return (0); } static int netfront_suspend(device_t dev) { struct netfront_info *info = device_get_softc(dev); XN_RX_LOCK(info); XN_TX_LOCK(info); netfront_carrier_off(info); XN_TX_UNLOCK(info); XN_RX_UNLOCK(info); return (0); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(device_t dev) { struct netfront_info *info = device_get_softc(dev); netif_disconnect_backend(info); return (0); } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(device_t dev, struct netfront_info *info) { const char *message; struct xs_transaction xst; const char *node = xenbus_get_node(dev); int err; err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", node); goto out; } /* Create shared ring, alloc event channel. */ err = setup_device(dev, info); if (err) goto out; again: err = xs_transaction_start(&xst); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xs_printf(xst, node, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xs_printf(xst, node, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xs_printf(xst, node, "event-channel", "%u", xen_intr_port(info->xen_intr_handle)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xs_printf(xst, node, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xs_printf(xst, node, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xs_printf(xst, node, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } #if __FreeBSD_version >= 700000 err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } #endif err = xs_transaction_end(xst, 0); if (err) { if (err == EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xs_transaction_end(xst, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netif_free(info); out: return err; } static int setup_device(device_t dev, struct netfront_info *info) { netif_tx_sring_t *txs; netif_rx_sring_t *rxs; int error; struct ifnet *ifp; ifp = info->xn_ifp; info->tx_ring_ref = GRANT_REF_INVALID; info->rx_ring_ref = GRANT_REF_INVALID; info->rx.sring = NULL; info->tx.sring = NULL; txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO); if (!txs) { error = ENOMEM; xenbus_dev_fatal(dev, error, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, 
txs, PAGE_SIZE); error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref); if (error) goto fail; rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO); if (!rxs) { error = ENOMEM; xenbus_dev_fatal(dev, error, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref); if (error) goto fail; error = xen_intr_alloc_and_bind_local_port(dev, xenbus_get_otherend_id(dev), /*filter*/NULL, xn_intr, info, INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, &info->xen_intr_handle); if (error) { xenbus_dev_fatal(dev, error, "xen_intr_alloc_and_bind_local_port failed"); goto fail; } return (0); fail: netif_free(info); return (error); } #ifdef INET /** * If this interface has an ipv4 address, send an arp for it. This * helps to get the network going again after migrating hosts. */ static void netfront_send_fake_arp(device_t dev, struct netfront_info *info) { struct ifnet *ifp; struct ifaddr *ifa; ifp = info->xn_ifp; TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family == AF_INET) { arp_ifinit(ifp, ifa); } } } #endif /** * Callback received when the backend's state changes. */ static void netfront_backend_changed(device_t dev, XenbusState newstate) { struct netfront_info *sc = device_get_softc(dev); DPRINTK("newstate=%d\n", newstate); switch (newstate) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateConnected: case XenbusStateUnknown: case XenbusStateClosed: case XenbusStateReconfigured: case XenbusStateReconfiguring: break; case XenbusStateInitWait: if (xenbus_get_state(dev) != XenbusStateInitialising) break; if (network_connect(sc) != 0) break; xenbus_set_state(dev, XenbusStateConnected); #ifdef INET netfront_send_fake_arp(dev, sc); #endif break; case XenbusStateClosing: xenbus_set_state(dev, XenbusStateClosed); break; } } static void xn_free_rx_ring(struct netfront_info *sc) { #if 0 int i; for (i = 0; i < NET_RX_RING_SIZE; i++) { if (sc->xn_cdata.rx_mbufs[i] != NULL) { m_freem(sc->rx_mbufs[i]); sc->rx_mbufs[i] = NULL; } } sc->rx.rsp_cons = 0; sc->xn_rx_if->req_prod = 0; sc->xn_rx_if->event = sc->rx.rsp_cons ; #endif } static void xn_free_tx_ring(struct netfront_info *sc) { #if 0 int i; for (i = 0; i < NET_TX_RING_SIZE; i++) { if (sc->tx_mbufs[i] != NULL) { m_freem(sc->tx_mbufs[i]); sc->xn_cdata.xn_tx_chain[i] = NULL; } } return; #endif } /** * \brief Verify that there is sufficient space in the Tx ring * buffer for a maximally sized request to be enqueued. * * A transmit request requires a transmit descriptor for each packet * fragment, plus up to 2 entries for "options" (e.g. TSO). */ static inline int xn_tx_slot_available(struct netfront_info *np) { return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2)); } static void netif_release_tx_bufs(struct netfront_info *np) { int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { struct mbuf *m; m = np->tx_mbufs[i]; /* * We assume that no kernel addresses are * less than NET_TX_RING_SIZE. Any entry * in the table that is below this number * must be an index from free-list tracking. 
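 *
 * For example, after add_id_to_freelist(np->tx_mbufs, 5) slot 5
 * holds the small integer link value disguised as a pointer, so the
 * test below skips it; a slot still bound to a live mbuf holds a
 * kernel address well above NET_TX_RING_SIZE and is reclaimed.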
*/ if (((uintptr_t)m) <= NET_TX_RING_SIZE) continue; gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); gnttab_release_grant_reference(&np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_REF_INVALID; add_id_to_freelist(np->tx_mbufs, i); np->xn_cdata.xn_tx_chain_cnt--; if (np->xn_cdata.xn_tx_chain_cnt < 0) { panic("%s: tx_chain_cnt must be >= 0", __func__); } m_free(m); } } static void network_alloc_rx_buffers(struct netfront_info *sc) { int otherend_id = xenbus_get_otherend_id(sc->xbdev); unsigned short id; struct mbuf *m_new; int i, batch_target, notify; RING_IDX req_prod; struct xen_memory_reservation reservation; grant_ref_t ref; int nr_flips; netif_rx_request_t *req; vm_offset_t vaddr; u_long pfn; req_prod = sc->rx.req_prod_pvt; if (__predict_false(sc->carrier == 0)) return; /* * Allocate mbufs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, and so should reduce the chance of failed allocation * requests both for ourselves and for other kernel subsystems. * * Here we attempt to maintain rx_target buffers in flight, counting * buffers that we have yet to process in the receive ring. */ batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons); for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) { MGETHDR(m_new, M_NOWAIT, MT_DATA); if (m_new == NULL) { printf("%s: MGETHDR failed\n", __func__); goto no_mbuf; } - m_cljget(m_new, M_NOWAIT, MJUMPAGESIZE); - if ((m_new->m_flags & M_EXT) == 0) { + if (m_cljget(m_new, M_NOWAIT, MJUMPAGESIZE) == NULL) { printf("%s: m_cljget failed\n", __func__); m_freem(m_new); no_mbuf: if (i != 0) goto refill; /* * XXX set timer */ break; } m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE; /* queue the mbufs allocated */ mbufq_tail(&sc->xn_rx_batch, m_new); } /* * If we've allocated at least half of our target number of entries, * submit them to the backend - we have enough to make the overhead * of submission worthwhile. Otherwise wait for more mbufs and * request entries to become available. */ if (i < (sc->rx_target/2)) { if (req_prod > sc->rx.sring->req_prod) goto push; return; } /* * Double the floating fill target if we risked having the backend * run out of empty buffers for receive traffic. We define "running * low" as having less than a fourth of our target buffers free * at the time we refilled the queue. */ if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) { sc->rx_target *= 2; if (sc->rx_target > sc->rx_max_target) sc->rx_target = sc->rx_max_target; } refill: for (nr_flips = i = 0; ; i++) { if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL) break; m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)( vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT); id = xennet_rxidx(req_prod + i); KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xn_rx_chain")); sc->rx_mbufs[id] = m_new; ref = gnttab_claim_grant_reference(&sc->gref_rx_head); KASSERT(ref != GNTTAB_LIST_END, ("reserved grant references exhausted")); sc->grant_rx_ref[id] = ref; vaddr = mtod(m_new, vm_offset_t); pfn = vtophys(vaddr) >> PAGE_SHIFT; req = RING_GET_REQUEST(&sc->rx, req_prod + i); if (sc->copying_receiver == 0) { gnttab_grant_foreign_transfer_ref(ref, otherend_id, pfn); sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen.
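 * In flip mode the machine frame itself is donated to the backend:
 * the P2M entry is invalidated and the virtual mapping zapped here,
 * and the page is later handed over in the batched
 * XENMEM_decrease_reservation call.  In copy mode (the else arm
 * below) the grant merely lets the backend copy into the page and
 * nothing is given away.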
*/ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(&sc->rx_mcl[i], vaddr, 0, 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, otherend_id, PFNTOMFN(pfn), 0); } req->id = id; req->gref = ref; sc->rx_pfn_array[i] = vtomach(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT; } KASSERT(i, ("no mbufs processed")); /* should have returned earlier */ KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed")); /* * We may have allocated buffers which have entries outstanding * in the page * update queue -- make sure we flush those first! */ PT_UPDATES_FLUSH(); if (nr_flips != 0) { #ifdef notyet /* Tell the balloon driver what is going on. */ balloon_update_driver_allowance(i); #endif set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array); reservation.nr_extents = i; reservation.extent_order = 0; reservation.address_bits = 0; reservation.domid = DOMID_SELF; if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ sc->rx_mcl[i].op = __HYPERVISOR_memory_op; sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation; sc->rx_mcl[i].args[1] = (u_long)&reservation; /* Zap PTEs and give away pages in one big multicall. */ (void)HYPERVISOR_multicall(sc->rx_mcl, i+1); if (__predict_false(sc->rx_mcl[i].result != i || HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i)) panic("%s: unable to reduce memory " "reservation\n", __func__); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. */ sc->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify); if (notify) xen_intr_signal(sc->xen_intr_handle); } static void xn_rxeof(struct netfront_info *np) { struct ifnet *ifp; #if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6)) struct lro_ctrl *lro = &np->xn_lro; struct lro_entry *queued; #endif struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; multicall_entry_t *mcl; struct mbuf *m; struct mbuf_head rxq, errq; int err, pages_flipped = 0, work_to_do; do { XN_RX_LOCK_ASSERT(np); if (!netfront_carrier_ok(np)) return; mbufq_init(&errq); mbufq_init(&rxq); ifp = np->xn_ifp; rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; while ((i != rp)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); m = NULL; err = xennet_get_responses(np, &rinfo, rp, &i, &m, &pages_flipped); if (__predict_false(err)) { if (m) mbufq_tail(&errq, m); np->stats.rx_errors++; continue; } m->m_pkthdr.rcvif = ifp; if ( rx->flags & NETRXF_data_validated ) { /* Tell the stack the checksums are okay */ /* * XXX this isn't necessarily the case - need to add * check */ m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m->m_pkthdr.csum_data = 0xffff; } np->stats.rx_packets++; np->stats.rx_bytes += m->m_pkthdr.len; mbufq_tail(&rxq, m); np->rx.rsp_cons = i; } if (pages_flipped) { /* Some pages are no longer absent... */ #ifdef notyet balloon_update_driver_allowance(-pages_flipped); #endif /* Do all the remapping work, and M->P updates, in one big * hypercall.
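 *
 * A single HYPERVISOR_multicall below replays the pages_flipped
 * mmu_update entries queued by xennet_get_responses(), paying for
 * one hypervisor transition instead of one per remapped page.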
*/ if (!xen_feature(XENFEAT_auto_translated_physmap)) { mcl = np->rx_mcl + pages_flipped; mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (u_long)np->rx_mmu; mcl->args[1] = pages_flipped; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; (void)HYPERVISOR_multicall(np->rx_mcl, pages_flipped + 1); } } while ((m = mbufq_dequeue(&errq))) m_freem(m); /* * Process all the mbufs after the remapping is complete. * Break the mbuf chain first though. */ while ((m = mbufq_dequeue(&rxq)) != NULL) { if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); /* * Do we really need to drop the rx lock? */ XN_RX_UNLOCK(np); #if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6)) /* Use LRO if possible */ if ((ifp->if_capenable & IFCAP_LRO) == 0 || lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) { /* * If LRO fails, pass up to the stack * directly. */ (*ifp->if_input)(ifp, m); } #else (*ifp->if_input)(ifp, m); #endif XN_RX_LOCK(np); } np->rx.rsp_cons = i; #if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6)) /* * Flush any outstanding LRO work */ while (!SLIST_EMPTY(&lro->lro_active)) { queued = SLIST_FIRST(&lro->lro_active); SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } #endif #if 0 /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. */ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; #endif network_alloc_rx_buffers(np); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do); } while (work_to_do); } static void xn_txeof(struct netfront_info *np) { RING_IDX i, prod; unsigned short id; struct ifnet *ifp; netif_tx_response_t *txr; struct mbuf *m; XN_TX_LOCK_ASSERT(np); if (!netfront_carrier_ok(np)) return; ifp = np->xn_ifp; do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. */ for (i = np->tx.rsp_cons; i != prod; i++) { txr = RING_GET_RESPONSE(&np->tx, i); if (txr->status == NETIF_RSP_NULL) continue; if (txr->status != NETIF_RSP_OKAY) { printf("%s: WARNING: response is %d!\n", __func__, txr->status); } id = txr->id; m = np->tx_mbufs[id]; KASSERT(m != NULL, ("mbuf not found in xn_tx_chain")); KASSERT((uintptr_t)m > NET_TX_RING_SIZE, ("mbuf already on the free list, but we're " "trying to free it again!")); M_ASSERTVALID(m); /* * Increment packet count if this is the last * mbuf of the chain. */ if (!m->m_next) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if (__predict_false(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { panic("%s: grant id %u still in use by the " "backend", __func__, id); } gnttab_end_foreign_access_ref( np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_REF_INVALID; np->tx_mbufs[id] = NULL; add_id_to_freelist(np->tx_mbufs, id); np->xn_cdata.xn_tx_chain_cnt--; m_free(m); /* Only mark the queue active if we've freed up at least one slot to try */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of * tx_cons. Note that it is essential to schedule a * callback, no matter how few buffers are pending. Even if * there is space in the transmit ring, higher layers may * be blocked because too much data is outstanding: in such * cases notification from Xen is likely to be the only kick * that we'll get.
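 *
 * The formula below requests the next notification at roughly the
 * halfway point of the requests still outstanding: e.g. with
 * prod == 10 and req_prod == 18 it sets rsp_event to
 * 10 + ((18 - 10) >> 1) + 1 == 15.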
*/ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while (prod != np->tx.sring->rsp_prod); if (np->tx_full && ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) { np->tx_full = 0; #if 0 if (np->user_state == UST_OPEN) netif_wake_queue(dev); #endif } } static void xn_intr(void *xsc) { struct netfront_info *np = xsc; struct ifnet *ifp = np->xn_ifp; #if 0 if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod && likely(netfront_carrier_ok(np)) && ifp->if_drv_flags & IFF_DRV_RUNNING)) return; #endif if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) { XN_TX_LOCK(np); xn_txeof(np); XN_TX_UNLOCK(np); } XN_RX_LOCK(np); xn_rxeof(np); XN_RX_UNLOCK(np); if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) xn_start(ifp); } static void xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL")); np->rx_mbufs[new] = m; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } static int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons) { struct netif_extra_info *extra; int err = 0; do { struct mbuf *m; grant_ref_t ref; if (__predict_false(*cons + 1 == rp)) { #if 0 if (net_ratelimit()) WPRINTK("Missing extra info\n"); #endif err = EINVAL; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++(*cons)); if (__predict_false(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { #if 0 if (net_ratelimit()) WPRINTK("Invalid extra type: %d\n", extra->type); #endif err = EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } m = xennet_get_rx_mbuf(np, *cons); ref = xennet_get_rx_ref(np, *cons); xennet_move_rx_slot(np, m, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons, struct mbuf **list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; struct mbuf *m, *m0, *m_prev; grant_ref_t ref = xennet_get_rx_ref(np, *cons); RING_IDX ref_cons = *cons; int frags = 1; int err = 0; u_long ret; m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons); if (rx->flags & NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp, cons); } if (m0 != NULL) { m0->m_pkthdr.len = 0; m0->m_next = NULL; } for (;;) { u_long mfn; #if 0 DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n", rx->status, rx->offset, frags); #endif if (__predict_false(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { #if 0 if (net_ratelimit()) WPRINTK("rx->offset: %x, size: %u\n", rx->offset, rx->status); #endif xennet_move_rx_slot(np, m, ref); if (m0 == m) m0 = NULL; m = NULL; err = EINVAL; goto next_skip_queue; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backend. */ if (ref == GRANT_REF_INVALID) { #if 0 if (net_ratelimit()) WPRINTK("Bad rx response id %d.\n", rx->id); #endif printf("%s: Bad rx response id %d.\n", __func__, rx->id); err = EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ...
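 *
 * gnttab_end_foreign_transfer_ref() returns 0 when the backend never
 * completed the transfer, i.e. no machine frame arrived; the slot is
 * recycled back onto the ring via xennet_move_rx_slot() below and the
 * response is treated as an ENOMEM error.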
*/ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, m, ref); err = ENOMEM; goto next; } if (!xen_feature( XENFEAT_auto_translated_physmap)) { /* Remap the page. */ void *vaddr = mtod(m, void *); uint32_t pfn; mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (u_long)vaddr, (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW | PG_V | PG_M | PG_A, 0); pfn = (uintptr_t)m->m_ext.ext_arg1; mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); KASSERT(ret, ("ret != 0")); } gnttab_release_grant_reference(&np->gref_rx_head, ref); next: if (m == NULL) break; m->m_len = rx->status; m->m_data += rx->offset; m0->m_pkthdr.len += rx->status; next_skip_queue: if (!(rx->flags & NETRXF_more_data)) break; if (*cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); err = ENOENT; printf("%s: cons %u frags %u rp %u, not enough frags\n", __func__, *cons, frags, rp); break; } /* * Note that m can be NULL, if rx->status < 0 or if * rx->offset + rx->status > PAGE_SIZE above. */ m_prev = m; rx = RING_GET_RESPONSE(&np->rx, *cons + frags); m = xennet_get_rx_mbuf(np, *cons + frags); /* * m_prev == NULL can happen if rx->status < 0 or if * rx->offset + * rx->status > PAGE_SIZE above. */ if (m_prev != NULL) m_prev->m_next = m; /* * m0 can be NULL if rx->status < 0 or if * rx->offset + * rx->status > PAGE_SIZE above. */ if (m0 == NULL) m0 = m; m->m_next = NULL; ref = xennet_get_rx_ref(np, *cons + frags); ref_cons = *cons + frags; frags++; } *list = m0; *cons += frags; *pages_flipped_p = pages_flipped; return (err); } static void xn_tick_locked(struct netfront_info *sc) { XN_RX_LOCK_ASSERT(sc); callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc); /* XXX placeholder for printing debug information */ } static void xn_tick(void *xsc) { struct netfront_info *sc; sc = xsc; XN_RX_LOCK(sc); xn_tick_locked(sc); XN_RX_UNLOCK(sc); } /** * \brief Count the number of fragments in an mbuf chain. * * Surprisingly, there isn't an M* macro for this. */ static inline int xn_count_frags(struct mbuf *m) { int nfrags; for (nfrags = 0; m != NULL; m = m->m_next) nfrags++; return (nfrags); } /** * Given an mbuf chain, make sure we have enough room and then push * it onto the transmit ring. */ static int xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head) { struct ifnet *ifp; struct mbuf *m; u_int nfrags; netif_extra_info_t *extra; int otherend_id; ifp = sc->xn_ifp; /** * Defragment the mbuf if necessary. */ nfrags = xn_count_frags(m_head); /* * Check to see whether this request is longer than netback * can handle, and try to defrag it. */ /** * It is a bit lame, but the netback driver in Linux can't * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of * the Linux network stack. */ if (nfrags > sc->maxfrags) { m = m_defrag(m_head, M_NOWAIT); if (!m) { /* * Defrag failed, so free the mbuf and * therefore drop the packet. */ m_freem(m_head); return (EMSGSIZE); } m_head = m; } /* Determine how many fragments now exist */ nfrags = xn_count_frags(m_head); /* * Check to see whether the defragmented packet has too many * segments for the Linux netback driver. */ /** * The FreeBSD TCP stack, with TSO enabled, can produce a chain * of mbufs longer than Linux can handle. 
Make sure we don't * pass a too-long chain over to the other side by dropping the * packet. It doesn't look like there is currently a way to * tell the TCP stack to generate a shorter chain of packets. */ if (nfrags > MAX_TX_REQ_FRAGS) { #ifdef DEBUG printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback " "won't be able to handle it, dropping\n", __func__, nfrags, MAX_TX_REQ_FRAGS); #endif m_freem(m_head); return (EMSGSIZE); } /* * This check should be redundant. We've already verified that we * have enough slots in the ring to handle a packet of maximum * size, and that our packet is less than the maximum size. Keep * it in here as an assert for now just to make certain that * xn_tx_chain_cnt is accurate. */ KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE, ("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE " "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt, (int) nfrags, (int) NET_TX_RING_SIZE)); /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ m = m_head; extra = NULL; otherend_id = xenbus_get_otherend_id(sc->xbdev); for (m = m_head; m; m = m->m_next) { netif_tx_request_t *tx; uintptr_t id; grant_ref_t ref; u_long mfn; /* XXX Wrong type? */ tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt); id = get_id_from_freelist(sc->tx_mbufs); if (id == 0) panic("%s: was allocated the freelist head!\n", __func__); sc->xn_cdata.xn_tx_chain_cnt++; if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE) panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n", __func__); sc->tx_mbufs[id] = m; tx->id = id; ref = gnttab_claim_grant_reference(&sc->gref_tx_head); KASSERT((short)ref >= 0, ("Negative ref")); mfn = virt_to_mfn(mtod(m, vm_offset_t)); gnttab_grant_foreign_access_ref(ref, otherend_id, mfn, GNTMAP_readonly); tx->gref = sc->grant_tx_ref[id] = ref; tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1); tx->flags = 0; if (m == m_head) { /* * The first fragment has the entire packet * size, subsequent fragments have just the * fragment size. The backend works out the * true size of the first fragment by * subtracting the sizes of the other * fragments. */ tx->size = m->m_pkthdr.len; /* * The first fragment contains the checksum flags * and is optionally followed by extra data for * TSO etc. */ /** * CSUM_TSO requires checksum offloading. * Some versions of FreeBSD fail to * set CSUM_TCP in the CSUM_TSO case, * so we have to test for CSUM_TSO * explicitly. 
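 *
 * Hence the combined (CSUM_DELAY_DATA | CSUM_TSO) test below: either
 * flag means the payload checksum has not been computed yet, so the
 * request is marked NETTXF_csum_blank for the backend to finish.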
*/ if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) { tx->flags |= (NETTXF_csum_blank | NETTXF_data_validated); } #if __FreeBSD_version >= 700000 if (m->m_pkthdr.csum_flags & CSUM_TSO) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&sc->tx, ++sc->tx.req_prod_pvt); tx->flags |= NETTXF_extra_info; gso->u.gso.size = m->m_pkthdr.tso_segsz; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; } #endif } else { tx->size = m->m_len; } if (m->m_next) tx->flags |= NETTXF_more_data; sc->tx.req_prod_pvt++; } BPF_MTAP(ifp, m_head); sc->stats.tx_bytes += m_head->m_pkthdr.len; sc->stats.tx_packets++; return (0); } static void xn_start_locked(struct ifnet *ifp) { struct netfront_info *sc; struct mbuf *m_head; int notify; sc = ifp->if_softc; if (!netfront_carrier_ok(sc)) return; /* * While we have enough transmit slots available for at least one * maximum-sized packet, pull mbufs off the queue and put them on * the transmit ring. */ while (xn_tx_slot_available(sc)) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (xn_assemble_tx_request(sc, m_head) != 0) break; } RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify); if (notify) xen_intr_signal(sc->xen_intr_handle); if (RING_FULL(&sc->tx)) { sc->tx_full = 1; #if 0 netif_stop_queue(dev); #endif } } static void xn_start(struct ifnet *ifp) { struct netfront_info *sc; sc = ifp->if_softc; XN_TX_LOCK(sc); xn_start_locked(ifp); XN_TX_UNLOCK(sc); } /* equivalent of network_open() in Linux */ static void xn_ifinit_locked(struct netfront_info *sc) { struct ifnet *ifp; XN_LOCK_ASSERT(sc); ifp = sc->xn_ifp; if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; xn_stop(sc); network_alloc_rx_buffers(sc); sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if_link_state_change(ifp, LINK_STATE_UP); callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc); } static void xn_ifinit(void *xsc) { struct netfront_info *sc = xsc; XN_LOCK(sc); xn_ifinit_locked(sc); XN_UNLOCK(sc); } static int xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct netfront_info *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; #ifdef INET struct ifaddr *ifa = (struct ifaddr *)data; #endif int mask, error = 0; switch(cmd) { case SIOCSIFADDR: case SIOCGIFADDR: #ifdef INET XN_LOCK(sc); if (ifa->ifa_addr->sa_family == AF_INET) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) xn_ifinit_locked(sc); arp_ifinit(ifp, ifa); XN_UNLOCK(sc); } else { XN_UNLOCK(sc); #endif error = ether_ioctl(ifp, cmd, data); #ifdef INET } #endif break; case SIOCSIFMTU: /* XXX can we alter the MTU on a VN ?*/ #ifdef notyet if (ifr->ifr_mtu > XN_JUMBO_MTU) error = EINVAL; else #endif { ifp->if_mtu = ifr->ifr_mtu; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xn_ifinit(sc); } break; case SIOCSIFFLAGS: XN_LOCK(sc); if (ifp->if_flags & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. 
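 *
 * The fast path is stubbed out under "notyet" below (there is no
 * promiscuous mode to toggle on the Xen virtual interface), so in
 * practice every SIOCSIFFLAGS that leaves the interface up falls
 * through to xn_ifinit_locked().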
*/ #ifdef notyet /* No promiscuous mode with Xen */ if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->xn_if_flags & IFF_PROMISC)) { XN_SETBIT(sc, XN_RX_MODE, XN_RXMODE_RX_PROMISC); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->xn_if_flags & IFF_PROMISC) { XN_CLRBIT(sc, XN_RX_MODE, XN_RXMODE_RX_PROMISC); } else #endif xn_ifinit_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { xn_stop(sc); } } sc->xn_if_flags = ifp->if_flags; XN_UNLOCK(sc); error = 0; break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { if (IFCAP_TXCSUM & ifp->if_capenable) { ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4); ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO); } else { ifp->if_capenable |= IFCAP_TXCSUM; ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); } } if (mask & IFCAP_RXCSUM) { ifp->if_capenable ^= IFCAP_RXCSUM; } #if __FreeBSD_version >= 700000 if (mask & IFCAP_TSO4) { if (IFCAP_TSO4 & ifp->if_capenable) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } else if (IFCAP_TXCSUM & ifp->if_capenable) { ifp->if_capenable |= IFCAP_TSO4; ifp->if_hwassist |= CSUM_TSO; } else { IPRINTK("Xen requires tx checksum offload" " be enabled to use TSO\n"); error = EINVAL; } } if (mask & IFCAP_LRO) { ifp->if_capenable ^= IFCAP_LRO; } #endif error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: #ifdef notyet if (ifp->if_drv_flags & IFF_DRV_RUNNING) { XN_LOCK(sc); xn_setmulti(sc); XN_UNLOCK(sc); error = 0; } #endif /* FALLTHROUGH */ case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); } return (error); } static void xn_stop(struct netfront_info *sc) { struct ifnet *ifp; XN_LOCK_ASSERT(sc); ifp = sc->xn_ifp; callout_stop(&sc->xn_stat_ch); xn_free_rx_ring(sc); xn_free_tx_ring(sc); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if_link_state_change(ifp, LINK_STATE_DOWN); } /* START of Xenolinux helper functions adapted to FreeBSD */ int network_connect(struct netfront_info *np) { int i, requeue_idx, error; grant_ref_t ref; netif_rx_request_t *req; u_int feature_rx_copy, feature_rx_flip; error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), "feature-rx-copy", NULL, "%u", &feature_rx_copy); if (error) feature_rx_copy = 0; error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), "feature-rx-flip", NULL, "%u", &feature_rx_flip); if (error) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); /* Recovery procedure: */ error = talk_to_backend(np->xbdev, np); if (error) return (error); /* Step 1: Reinitialise variables. */ xn_query_features(np); xn_configure_features(np); netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. 
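 *
 * Surviving mbufs are compacted to the front of the ring: each one is
 * re-granted to the (possibly new) backend and queued at the next
 * dense index, and requeue_idx becomes the new req_prod_pvt.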
*/ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { struct mbuf *m; u_long pfn; if (np->rx_mbufs[i] == NULL) continue; m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT; if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, xenbus_get_otherend_id(np->xbdev), pfn); } else { gnttab_grant_foreign_access_ref(ref, xenbus_get_otherend_id(np->xbdev), PFNTOMFN(pfn), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netfront_carrier_on(np); xen_intr_signal(np->xen_intr_handle); XN_TX_LOCK(np); xn_txeof(np); XN_TX_UNLOCK(np); network_alloc_rx_buffers(np); return (0); } static void xn_query_features(struct netfront_info *np) { int val; device_printf(np->xbdev, "backend features:"); if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), "feature-sg", NULL, "%d", &val) < 0) val = 0; np->maxfrags = 1; if (val) { np->maxfrags = MAX_TX_REQ_FRAGS; printf(" feature-sg"); } if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), "feature-gso-tcpv4", NULL, "%d", &val) < 0) val = 0; np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO); if (val) { np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO; printf(" feature-gso-tcp4"); } printf("\n"); } static int xn_configure_features(struct netfront_info *np) { int err; err = 0; #if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6)) if ((np->xn_ifp->if_capenable & IFCAP_LRO) != 0) tcp_lro_free(&np->xn_lro); #endif np->xn_ifp->if_capenable = np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4); np->xn_ifp->if_hwassist &= ~CSUM_TSO; #if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6)) if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) != 0) { err = tcp_lro_init(&np->xn_lro); if (err) { device_printf(np->xbdev, "LRO initialization failed\n"); } else { np->xn_lro.ifp = np->xn_ifp; np->xn_ifp->if_capenable |= IFCAP_LRO; } } if ((np->xn_ifp->if_capabilities & IFCAP_TSO4) != 0) { np->xn_ifp->if_capenable |= IFCAP_TSO4; np->xn_ifp->if_hwassist |= CSUM_TSO; } #endif return (err); } /** * Create a network device. * @param dev Newbus device representing this virtual NIC. */ int create_netdev(device_t dev) { int i; struct netfront_info *np; int err; struct ifnet *ifp; np = device_get_softc(dev); np->xbdev = dev; XN_LOCK_INIT(np, xennetif); ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts); ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL); np->rx_target = RX_MIN_TARGET; np->rx_min_target = RX_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; /* Initialise {tx,rx}_skbs to be a free chain containing every entry. 
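 *
 * After the first loop below the tx array encodes the chain
 * 0 -> 1 -> 2 -> ... -> NET_TX_RING_SIZE (slot i stores i + 1); the
 * final slot is then cleared to 0 to terminate the list, so
 * get_id_from_freelist() hands out id 1 first.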
*/ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_mbufs[i] = (void *) ((u_long) i+1); np->grant_tx_ref[i] = GRANT_REF_INVALID; } np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0; for (i = 0; i <= NET_RX_RING_SIZE; i++) { np->rx_mbufs[i] = NULL; np->grant_rx_ref[i] = GRANT_REF_INVALID; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, &np->gref_tx_head) != 0) { IPRINTK("#### netfront can't alloc tx grant refs\n"); err = ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) != 0) { WPRINTK("#### netfront can't alloc rx grant refs\n"); gnttab_free_grant_references(np->gref_tx_head); err = ENOMEM; goto exit; } err = xen_net_read_mac(dev, np->mac); if (err) goto out; /* Set up ifnet structure */ ifp = np->xn_ifp = if_alloc(IFT_ETHER); ifp->if_softc = np; if_initname(ifp, "xn", device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = xn_ioctl; ifp->if_output = ether_output; ifp->if_start = xn_start; #ifdef notyet ifp->if_watchdog = xn_watchdog; #endif ifp->if_init = xn_ifinit; ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1; ifp->if_hwassist = XN_CSUM_FEATURES; ifp->if_capabilities = IFCAP_HWCSUM; ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS; ifp->if_hw_tsomaxsegsize = PAGE_SIZE; ether_ifattach(ifp, np->mac); callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE); netfront_carrier_off(np); return (0); exit: gnttab_free_grant_references(np->gref_tx_head); out: return (err); } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once this is done, we can switch to Closed in * acknowledgement. */ #if 0 static void netfront_closing(device_t dev) { #if 0 struct netfront_info *info = dev->dev_driver_data; DPRINTK("netfront_closing: %s removed\n", dev->nodename); close_netdev(info); #endif xenbus_switch_state(dev, XenbusStateClosed); } #endif static int netfront_detach(device_t dev) { struct netfront_info *info = device_get_softc(dev); DPRINTK("%s\n", xenbus_get_node(dev)); netif_free(info); return 0; } static void netif_free(struct netfront_info *info) { XN_LOCK(info); xn_stop(info); XN_UNLOCK(info); callout_drain(&info->xn_stat_ch); netif_disconnect_backend(info); if (info->xn_ifp != NULL) { ether_ifdetach(info->xn_ifp); if_free(info->xn_ifp); info->xn_ifp = NULL; } ifmedia_removeall(&info->sc_media); } static void netif_disconnect_backend(struct netfront_info *info) { XN_RX_LOCK(info); XN_TX_LOCK(info); netfront_carrier_off(info); XN_TX_UNLOCK(info); XN_RX_UNLOCK(info); free_ring(&info->tx_ring_ref, &info->tx.sring); free_ring(&info->rx_ring_ref, &info->rx.sring); xen_intr_unbind(&info->xen_intr_handle); } static void free_ring(int *ref, void *ring_ptr_ref) { void **ring_ptr_ptr = ring_ptr_ref; if (*ref != GRANT_REF_INVALID) { /* This API frees the associated storage.
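 *
 * That is, gnttab_end_foreign_access() both revokes the grant and
 * frees the ring page passed in, so the caller only has to clear its
 * own pointer afterwards.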
*/ gnttab_end_foreign_access(*ref, *ring_ptr_ptr); *ref = GRANT_REF_INVALID; } *ring_ptr_ptr = NULL; } static int xn_ifmedia_upd(struct ifnet *ifp) { return (0); } static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE; ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; } /* ** Driver registration ** */ static device_method_t netfront_methods[] = { /* Device interface */ DEVMETHOD(device_probe, netfront_probe), DEVMETHOD(device_attach, netfront_attach), DEVMETHOD(device_detach, netfront_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, netfront_suspend), DEVMETHOD(device_resume, netfront_resume), /* Xenbus interface */ DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed), DEVMETHOD_END }; static driver_t netfront_driver = { "xn", netfront_methods, sizeof(struct netfront_info), }; devclass_t netfront_devclass; DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, NULL, NULL); Index: head/sys/mips/adm5120/if_admsw.c =================================================================== --- head/sys/mips/adm5120/if_admsw.c (revision 276749) +++ head/sys/mips/adm5120/if_admsw.c (revision 276750) @@ -1,1354 +1,1352 @@ /* $NetBSD: if_admsw.c,v 1.3 2007/04/22 19:26:25 dyoung Exp $ */ /*- * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko. * All rights reserved. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * 3. The names of the authors may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. */ /* * Copyright (c) 2001 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Device driver for Alchemy Semiconductor Au1x00 Ethernet Media * Access Controller. * * TODO: * * Better Rx buffer management; we want to get new Rx buffers * to the chip more quickly than we currently do. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include #include /* TODO: add locking */ #define ADMSW_LOCK(sc) do {} while(0); #define ADMSW_UNLOCK(sc) do {} while(0); static uint8_t vlan_matrix[SW_DEVS] = { (1 << 6) | (1 << 0), /* CPU + port0 */ (1 << 6) | (1 << 1), /* CPU + port1 */ (1 << 6) | (1 << 2), /* CPU + port2 */ (1 << 6) | (1 << 3), /* CPU + port3 */ (1 << 6) | (1 << 4), /* CPU + port4 */ (1 << 6) | (1 << 5), /* CPU + port5 */ }; /* ifnet entry points */ static void admsw_start(struct ifnet *); static void admsw_watchdog(void *); static int admsw_ioctl(struct ifnet *, u_long, caddr_t); static void admsw_init(void *); static void admsw_stop(struct ifnet *, int); static void admsw_reset(struct admsw_softc *); static void admsw_set_filter(struct admsw_softc *); static void admsw_txintr(struct admsw_softc *, int); static void admsw_rxintr(struct admsw_softc *, int); static int admsw_add_rxbuf(struct admsw_softc *, int, int); #define admsw_add_rxhbuf(sc, idx) admsw_add_rxbuf(sc, idx, 1) #define admsw_add_rxlbuf(sc, idx) admsw_add_rxbuf(sc, idx, 0) static int admsw_mediachange(struct ifnet *); static void admsw_mediastatus(struct ifnet *, struct ifmediareq *); static int admsw_intr(void *); /* bus entry points */ static int admsw_probe(device_t dev); static int admsw_attach(device_t dev); static int admsw_detach(device_t dev); static int admsw_shutdown(device_t dev); static void admsw_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { uint32_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static void admsw_rxbuf_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct admsw_descsoft *ds; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); ds = arg; ds->ds_nsegs = nseg; ds->ds_addr[0] = segs[0].ds_addr; ds->ds_len[0] = segs[0].ds_len; } static void admsw_mbuf_map_addr(void *arg, 
bus_dma_segment_t *segs, int nseg, bus_size_t mapsize, int error) { struct admsw_descsoft *ds; if (error) return; ds = arg; if((nseg != 1) && (nseg != 2)) panic("%s: nseg == %d\n", __func__, nseg); ds->ds_nsegs = nseg; ds->ds_addr[0] = segs[0].ds_addr; ds->ds_len[0] = segs[0].ds_len; if(nseg > 1) { ds->ds_addr[1] = segs[1].ds_addr; ds->ds_len[1] = segs[1].ds_len; } } static int admsw_probe(device_t dev) { device_set_desc(dev, "ADM5120 Switch Engine"); return (0); } #define REG_READ(o) bus_read_4((sc)->mem_res, (o)) #define REG_WRITE(o,v) bus_write_4((sc)->mem_res, (o),(v)) static void admsw_init_bufs(struct admsw_softc *sc) { int i; struct admsw_desc *desc; for (i = 0; i < ADMSW_NTXHDESC; i++) { if (sc->sc_txhsoft[i].ds_mbuf != NULL) { m_freem(sc->sc_txhsoft[i].ds_mbuf); sc->sc_txhsoft[i].ds_mbuf = NULL; } desc = &sc->sc_txhdescs[i]; desc->data = 0; desc->cntl = 0; desc->len = MAC_BUFLEN; desc->status = 0; ADMSW_CDTXHSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND; ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); for (i = 0; i < ADMSW_NRXHDESC; i++) { if (sc->sc_rxhsoft[i].ds_mbuf == NULL) { if (admsw_add_rxhbuf(sc, i) != 0) panic("admsw_init_bufs\n"); } else ADMSW_INIT_RXHDESC(sc, i); } for (i = 0; i < ADMSW_NTXLDESC; i++) { if (sc->sc_txlsoft[i].ds_mbuf != NULL) { m_freem(sc->sc_txlsoft[i].ds_mbuf); sc->sc_txlsoft[i].ds_mbuf = NULL; } desc = &sc->sc_txldescs[i]; desc->data = 0; desc->cntl = 0; desc->len = MAC_BUFLEN; desc->status = 0; ADMSW_CDTXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND; ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); for (i = 0; i < ADMSW_NRXLDESC; i++) { if (sc->sc_rxlsoft[i].ds_mbuf == NULL) { if (admsw_add_rxlbuf(sc, i) != 0) panic("admsw_init_bufs\n"); } else ADMSW_INIT_RXLDESC(sc, i); } REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0)); REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0)); REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0)); REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0)); sc->sc_txfree = ADMSW_NTXLDESC; sc->sc_txnext = 0; sc->sc_txdirty = 0; sc->sc_rxptr = 0; } static void admsw_setvlan(struct admsw_softc *sc, char matrix[6]) { uint32_t i; i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) + (matrix[3] << 24); REG_WRITE(VLAN_G1_REG, i); i = matrix[4] + (matrix[5] << 8); REG_WRITE(VLAN_G2_REG, i); } static void admsw_reset(struct admsw_softc *sc) { uint32_t wdog1; int i; REG_WRITE(PORT_CONF0_REG, REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK); REG_WRITE(CPUP_CONF_REG, REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP); /* Wait for DMA to complete. Overkill. In 3ms, we can * send at least two entire 1500-byte packets at 10 Mb/s. */ DELAY(3000); /* The datasheet recommends that we move all PHYs to reset * state prior to software reset. */ REG_WRITE(PHY_CNTL2_REG, REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK); /* Reset the switch. 
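 *
 * The sequence below pulses the soft-reset register, gives the
 * hardware 100 ms (DELAY(100 * 1000)) to come back, then sets the
 * boot-done bit before reprogramming the port, PHY, and interrupt
 * registers.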
*/ REG_WRITE(ADMSW_SW_RES, 0x1); DELAY(100 * 1000); REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO); /* begin old code */ REG_WRITE(CPUP_CONF_REG, CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK); REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK); REG_WRITE(PHY_CNTL2_REG, REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK | PHY_CNTL2_PHYR_MASK | PHY_CNTL2_AMDIX_MASK); REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT); REG_WRITE(ADMSW_INT_MASK, INT_MASK); REG_WRITE(ADMSW_INT_ST, INT_MASK); /* * While in DDB, we stop servicing interrupts, RX ring * fills up and when free block counter falls behind FC * threshold, the switch starts to emit 802.3x PAUSE * frames. This can upset peer switches. * * Stop this from happening by disabling FC and D2 * thresholds. */ REG_WRITE(FC_TH_REG, REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK)); admsw_setvlan(sc, vlan_matrix); for (i = 0; i < SW_DEVS; i++) { REG_WRITE(MAC_WT1_REG, sc->sc_enaddr[2] | (sc->sc_enaddr[3]<<8) | (sc->sc_enaddr[4]<<16) | ((sc->sc_enaddr[5]+i)<<24)); REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) | (sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) | MAC_WT0_WRITE | MAC_WT0_VLANID_EN); while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE)); } wdog1 = REG_READ(ADM5120_WDOG1); REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE); } static int admsw_attach(device_t dev) { uint8_t enaddr[ETHER_ADDR_LEN]; struct admsw_softc *sc = (struct admsw_softc *) device_get_softc(dev); struct ifnet *ifp; int error, i, rid; sc->sc_dev = dev; device_printf(dev, "ADM5120 Switch Engine, %d ports\n", SW_DEVS); sc->ndevs = 0; /* XXXMIPS: fix it */ enaddr[0] = 0x00; enaddr[1] = 0x0C; enaddr[2] = 0x42; enaddr[3] = 0x07; enaddr[4] = 0xB2; enaddr[5] = 0x4E; memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr)); device_printf(sc->sc_dev, "base Ethernet address %s\n", ether_sprintf(enaddr)); callout_init(&sc->sc_watchdog, 1); rid = 0; if ((sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate memory resource\n"); return (ENXIO); } /* Hook up the interrupt handler. */ rid = 0; if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return (ENXIO); } if ((error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET, admsw_intr, NULL, sc, &sc->sc_ih)) != 0) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return (error); } /* * Allocate the control data structures, and create and load the * DMA map for it. */ if ((error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct admsw_control_data), 1, sizeof(struct admsw_control_data), 0, NULL, NULL, &sc->sc_control_dmat)) != 0) { device_printf(sc->sc_dev, "unable to create control data DMA map, error = %d\n", error); return (error); } if ((error = bus_dmamem_alloc(sc->sc_control_dmat, (void **)&sc->sc_control_data, BUS_DMA_NOWAIT, &sc->sc_cddmamap)) != 0) { device_printf(sc->sc_dev, "unable to allocate control data, error = %d\n", error); return (error); } if ((error = bus_dmamap_load(sc->sc_control_dmat, sc->sc_cddmamap, sc->sc_control_data, sizeof(struct admsw_control_data), admsw_dma_map_addr, &sc->sc_cddma, 0)) != 0) { device_printf(sc->sc_dev, "unable to load control data DMA map, error = %d\n", error); return (error); } /* * Create the transmit buffer DMA maps.
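
admsw_attach() above uses the canonical three-step busdma recipe for the shared control/descriptor area: create a tag describing the constraints, allocate wired memory against it, then load the map and capture the bus address through a callback. A condensed sketch under the same constraints; alloc_shared() and load_cb() are illustrative names and error unwinding is omitted:

static void
load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error == 0)
		*(bus_addr_t *)arg = segs[0].ds_addr;	/* one segment */
}

static int
alloc_shared(bus_size_t sz, bus_dma_tag_t *tag, bus_dmamap_t *map,
    void **va, bus_addr_t *pa)
{
	if (bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, sz, 1, sz, 0, NULL, NULL,
	    tag) != 0)
		return (ENXIO);
	if (bus_dmamem_alloc(*tag, va, BUS_DMA_NOWAIT, map) != 0)
		return (ENOMEM);
	return (bus_dmamap_load(*tag, *map, *va, sz, load_cb, pa, 0));
}
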
*/ if ((error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->sc_bufs_dmat)) != 0) { device_printf(sc->sc_dev, "unable to create Tx buffer DMA tag, error = %d\n", error); return (error); } for (i = 0; i < ADMSW_NTXHDESC; i++) { if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0, &sc->sc_txhsoft[i].ds_dmamap)) != 0) { device_printf(sc->sc_dev, "unable to create txh DMA map %d, error = %d\n", i, error); return (error); } sc->sc_txhsoft[i].ds_mbuf = NULL; } for (i = 0; i < ADMSW_NTXLDESC; i++) { if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0, &sc->sc_txlsoft[i].ds_dmamap)) != 0) { device_printf(sc->sc_dev, "unable to create txl DMA map %d, error = %d\n", i, error); return (error); } sc->sc_txlsoft[i].ds_mbuf = NULL; } /* * Create the receive buffer DMA maps. */ for (i = 0; i < ADMSW_NRXHDESC; i++) { if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0, &sc->sc_rxhsoft[i].ds_dmamap)) != 0) { device_printf(sc->sc_dev, "unable to create rxh DMA map %d, error = %d\n", i, error); return (error); } sc->sc_rxhsoft[i].ds_mbuf = NULL; } for (i = 0; i < ADMSW_NRXLDESC; i++) { if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0, &sc->sc_rxlsoft[i].ds_dmamap)) != 0) { device_printf(sc->sc_dev, "unable to create rxl DMA map %d, error = %d\n", i, error); return (error); } sc->sc_rxlsoft[i].ds_mbuf = NULL; } admsw_init_bufs(sc); admsw_reset(sc); for (i = 0; i < SW_DEVS; i++) { ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, admsw_mediastatus); ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL); ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL); ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO); ifp = sc->sc_ifnet[i] = if_alloc(IFT_ETHER); /* Setup interface parameters */ ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), i); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = admsw_ioctl; ifp->if_output = ether_output; ifp->if_start = admsw_start; ifp->if_init = admsw_init; ifp->if_mtu = ETHERMTU; ifp->if_baudrate = IF_Mbps(100); IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, ifqmaxlen)); ifp->if_snd.ifq_drv_maxlen = max(ADMSW_NTXLDESC, ifqmaxlen); IFQ_SET_READY(&ifp->if_snd); ifp->if_capabilities |= IFCAP_VLAN_MTU; /* Attach the interface. */ ether_ifattach(ifp, enaddr); enaddr[5]++; } /* XXX: admwdog_attach(sc); */ /* leave interrupts and cpu port disabled */ return (0); } static int admsw_detach(device_t dev) { printf("TODO: DETACH\n"); return (0); } /* * admsw_shutdown: * * Make sure the interface is stopped at reboot time. */ static int admsw_shutdown(device_t dev) { struct admsw_softc *sc; int i; sc = device_get_softc(dev); for (i = 0; i < SW_DEVS; i++) admsw_stop(sc->sc_ifnet[i], 1); return (0); } /* * admsw_start: [ifnet interface function] * * Start packet transmission on the interface. */ static void admsw_start(struct ifnet *ifp) { struct admsw_softc *sc = ifp->if_softc; struct mbuf *m0, *m; struct admsw_descsoft *ds; struct admsw_desc *desc; bus_dmamap_t dmamap; struct ether_header *eh; int error, nexttx, len, i; static int vlan = 0; /* * Loop through the send queues, setting up transmit descriptors * until we drain the queues, or use up all available transmit * descriptors.
*/ for (;;) { vlan++; if (vlan == SW_DEVS) vlan = 0; i = vlan; for (;;) { ifp = sc->sc_ifnet[i]; if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) == IFF_DRV_RUNNING) { /* Grab a packet off the queue. */ IF_DEQUEUE(&ifp->if_snd, m0); if (m0 != NULL) break; } i++; if (i == SW_DEVS) i = 0; if (i == vlan) return; } vlan = i; m = NULL; /* Get a spare descriptor. */ if (sc->sc_txfree == 0) { /* No more slots left; notify upper layer. */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } nexttx = sc->sc_txnext; desc = &sc->sc_txldescs[nexttx]; ds = &sc->sc_txlsoft[nexttx]; dmamap = ds->ds_dmamap; /* * Load the DMA map. If this fails, the packet either * didn't fit in the allotted number of segments, or we * were short on resources. In this case, we'll copy * and try again. */ if (m0->m_pkthdr.len < ETHER_MIN_LEN || bus_dmamap_load_mbuf(sc->sc_bufs_dmat, dmamap, m0, admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT) != 0) { MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { device_printf(sc->sc_dev, "unable to allocate Tx mbuf\n"); break; } if (m0->m_pkthdr.len > MHLEN) { - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { device_printf(sc->sc_dev, "unable to allocate Tx cluster\n"); m_freem(m); break; } } m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags; m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; if (m->m_pkthdr.len < ETHER_MIN_LEN) { if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len) panic("admsw_start: M_TRAILINGSPACE\n"); memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0, ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len); m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN; } error = bus_dmamap_load_mbuf(sc->sc_bufs_dmat, dmamap, m, admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "unable to load Tx buffer, error = %d\n", error); break; } } if (m != NULL) { m_freem(m0); m0 = m; } /* * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */ /* Sync the DMA map. */ bus_dmamap_sync(sc->sc_bufs_dmat, dmamap, BUS_DMASYNC_PREWRITE); if (ds->ds_nsegs != 1 && ds->ds_nsegs != 2) panic("admsw_start: nsegs == %d\n", ds->ds_nsegs); desc->data = ds->ds_addr[0]; desc->len = len = ds->ds_len[0]; if (ds->ds_nsegs > 1) { len += ds->ds_len[1]; desc->cntl = ds->ds_addr[1] | ADM5120_DMA_BUF2ENABLE; } else desc->cntl = 0; desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan); eh = mtod(m0, struct ether_header *); if (ntohs(eh->ether_type) == ETHERTYPE_IP && m0->m_pkthdr.csum_flags & CSUM_IP) desc->status |= ADM5120_DMA_CSUM; if (nexttx == ADMSW_NTXLDESC - 1) desc->data |= ADM5120_DMA_RINGEND; desc->data |= ADM5120_DMA_OWN; /* Sync the descriptor. */ ADMSW_CDTXLSYNC(sc, nexttx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); REG_WRITE(SEND_TRIG_REG, 1); /* printf("send slot %d\n",nexttx); */ /* * Store a pointer to the packet so we can free it later. */ ds->ds_mbuf = m0; /* Advance the Tx pointer. */ sc->sc_txfree--; sc->sc_txnext = ADMSW_NEXTTXL(nexttx); /* Pass the packet to any BPF listeners. */ BPF_MTAP(ifp, m0); /* Set a watchdog timer in case the chip flakes out. */ sc->sc_timer = 5; } } /* * admsw_watchdog: [ifnet interface function] * * Watchdog timer handler. */ static void admsw_watchdog(void *arg) { struct admsw_softc *sc = arg; struct ifnet *ifp; int vlan; callout_reset(&sc->sc_watchdog, hz, admsw_watchdog, sc); if (sc->sc_timer == 0 || --sc->sc_timer > 0) return; /* Check if an interrupt was lost.
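
The hunk above is the pattern this revision applies throughout the diff: MCLGET() now yields a testable value, non-zero when a cluster was attached, so the old call-then-probe-M_EXT sequence collapses into a single test. Side by side, as a minimal sketch:

static int
attach_cluster(struct mbuf *m)
{
	/*
	 * Old idiom, before this change -- MCLGET() returned nothing
	 * and success had to be probed through the M_EXT flag:
	 *
	 *	MCLGET(m, M_NOWAIT);
	 *	if ((m->m_flags & M_EXT) == 0)
	 *		return (ENOBUFS);
	 */

	/* New idiom -- the macro itself reports success: */
	if (!(MCLGET(m, M_NOWAIT)))
		return (ENOBUFS);
	return (0);
}
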
*/ if (sc->sc_txfree == ADMSW_NTXLDESC) { device_printf(sc->sc_dev, "watchdog false alarm\n"); return; } if (sc->sc_timer != 0) device_printf(sc->sc_dev, "watchdog timer is %d!\n", sc->sc_timer); admsw_txintr(sc, 0); if (sc->sc_txfree == ADMSW_NTXLDESC) { device_printf(sc->sc_dev, "tx IRQ lost (queue empty)\n"); return; } if (sc->sc_timer != 0) { device_printf(sc->sc_dev, "tx IRQ lost (timer recharged)\n"); return; } device_printf(sc->sc_dev, "device timeout, txfree = %d\n", sc->sc_txfree); for (vlan = 0; vlan < SW_DEVS; vlan++) admsw_stop(sc->sc_ifnet[vlan], 0); admsw_init(sc); ifp = sc->sc_ifnet[0]; /* Try to get more packets going. */ admsw_start(ifp); } /* * admsw_ioctl: [ifnet interface function] * * Handle control requests from the operator. */ static int admsw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct admsw_softc *sc = ifp->if_softc; struct ifdrv *ifd; int error, port; ADMSW_LOCK(sc); switch (cmd) { case SIOCSIFMEDIA: case SIOCGIFMEDIA: port = 0; while(port < SW_DEVS) if(ifp == sc->sc_ifnet[port]) break; else port++; if (port >= SW_DEVS) error = EOPNOTSUPP; else error = ifmedia_ioctl(ifp, (struct ifreq *)data, &sc->sc_ifmedia[port], cmd); break; case SIOCGDRVSPEC: case SIOCSDRVSPEC: ifd = (struct ifdrv *) data; if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) { error = EINVAL; break; } if (cmd == SIOCGDRVSPEC) { error = copyout(vlan_matrix, ifd->ifd_data, sizeof(vlan_matrix)); } else { error = copyin(ifd->ifd_data, vlan_matrix, sizeof(vlan_matrix)); admsw_setvlan(sc, vlan_matrix); } break; default: error = ether_ioctl(ifp, cmd, data); if (error == ENETRESET) { /* * Multicast list has changed; set the hardware filter * accordingly. */ admsw_set_filter(sc); error = 0; } break; } /* Try to get more packets going. */ admsw_start(ifp); ADMSW_UNLOCK(sc); return (error); } /* * admsw_intr: * * Interrupt service routine. */ static int admsw_intr(void *arg) { struct admsw_softc *sc = arg; uint32_t pending; pending = REG_READ(ADMSW_INT_ST); REG_WRITE(ADMSW_INT_ST, pending); if (sc->ndevs == 0) return (FILTER_STRAY); if ((pending & ADMSW_INTR_RHD) != 0) admsw_rxintr(sc, 1); if ((pending & ADMSW_INTR_RLD) != 0) admsw_rxintr(sc, 0); if ((pending & ADMSW_INTR_SHD) != 0) admsw_txintr(sc, 1); if ((pending & ADMSW_INTR_SLD) != 0) admsw_txintr(sc, 0); return (FILTER_HANDLED); } /* * admsw_txintr: * * Helper; handle transmit interrupts. */ static void admsw_txintr(struct admsw_softc *sc, int prio) { struct ifnet *ifp; struct admsw_desc *desc; struct admsw_descsoft *ds; int i, vlan; int gotone = 0; /* printf("txintr: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */ for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC; i = ADMSW_NEXTTXL(i)) { ADMSW_CDTXLSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); desc = &sc->sc_txldescs[i]; ds = &sc->sc_txlsoft[i]; if (desc->data & ADM5120_DMA_OWN) { ADMSW_CDTXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); break; } bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_bufs_dmat, ds->ds_dmamap); m_freem(ds->ds_mbuf); ds->ds_mbuf = NULL; vlan = ffs(desc->status & 0x3f) - 1; if (vlan < 0 || vlan >= SW_DEVS) panic("admsw_txintr: bad vlan\n"); ifp = sc->sc_ifnet[vlan]; gotone = 1; /* printf("clear tx slot %d\n",i); */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); sc->sc_txfree++; } if (gotone) { sc->sc_txdirty = i; for (vlan = 0; vlan < SW_DEVS; vlan++) sc->sc_ifnet[vlan]->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp = sc->sc_ifnet[0]; /* Try to queue more packets. 
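
admsw_intr() above has the usual shape of an interrupt filter: snapshot the status register, acknowledge by writing the pending bits back (the driver's "clear all pending interrupts" writes elsewhere suggest the register is write-to-clear), then dispatch per cause. Condensed into a sketch (filter_sketch() is an illustrative restatement, not driver code):

static int
filter_sketch(void *arg)
{
	struct admsw_softc *sc = arg;
	uint32_t pending;

	pending = REG_READ(ADMSW_INT_ST);	/* what fired?  */
	REG_WRITE(ADMSW_INT_ST, pending);	/* acknowledge  */
	if (sc->ndevs == 0)			/* no ifnet up  */
		return (FILTER_STRAY);
	if (pending & ADMSW_INTR_RLD)
		admsw_rxintr(sc, 0);		/* low-prio RX  */
	if (pending & ADMSW_INTR_SLD)
		admsw_txintr(sc, 0);		/* low-prio TX  */
	return (FILTER_HANDLED);
}
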
*/ admsw_start(ifp); /* * If there are no more pending transmissions, * cancel the watchdog timer. */ if (sc->sc_txfree == ADMSW_NTXLDESC) sc->sc_timer = 0; } /* printf("txintr end: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */ } /* * admsw_rxintr: * * Helper; handle receive interrupts. */ static void admsw_rxintr(struct admsw_softc *sc, int high) { struct ifnet *ifp; struct admsw_descsoft *ds; struct mbuf *m; uint32_t stat; int i, len, port, vlan; /* printf("rxintr\n"); */ if (high) panic("admsw_rxintr: high priority packet\n"); #if 1 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0) ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); else { i = sc->sc_rxptr; do { ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); i = ADMSW_NEXTRXL(i); /* the ring is empty, just return. */ if (i == sc->sc_rxptr) return; ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); } while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN); ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0) ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); else { ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); /* We've fallen behind the chip: catch it. */ #if 0 device_printf(sc->sc_dev, "RX ring resync, base=%x, work=%x, %d -> %d\n", REG_READ(RECV_LBADDR_REG), REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i); #endif sc->sc_rxptr = i; /* ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync); */ } } #endif for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) { ds = &sc->sc_rxlsoft[i]; ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) { ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); break; } /* printf("process slot %d\n",i); */ bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap, BUS_DMASYNC_POSTREAD); stat = sc->sc_rxldescs[i].status; len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT; len -= ETHER_CRC_LEN; port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT; for (vlan = 0; vlan < SW_DEVS; vlan++) if ((1 << port) & vlan_matrix[vlan]) break; if (vlan == SW_DEVS) vlan = 0; ifp = sc->sc_ifnet[vlan]; m = ds->ds_mbuf; if (admsw_add_rxlbuf(sc, i) != 0) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); ADMSW_INIT_RXLDESC(sc, i); bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap, BUS_DMASYNC_PREREAD); continue; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(stat & ADM5120_DMA_CSUMFAIL)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } BPF_MTAP(ifp, m); /* Pass it on. */ (*ifp->if_input)(ifp, m); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); } /* Update the receive pointer. */ sc->sc_rxptr = i; } /* * admsw_init: [ifnet interface function] * * Initialize the interface. 
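
Both admsw_txintr() and admsw_rxintr() above lean on the same ownership handshake: the descriptor's ADM5120_DMA_OWN bit is the only synchronization with the DMA engine, and every peek is bracketed by syncs so the CPU first observes the hardware's writes (POSTREAD|POSTWRITE) and, if the slot is still owned by hardware, hands visibility back (PREREAD|PREWRITE) before stopping. As an isolated helper (slot_done() is an illustrative name):

static int
slot_done(struct admsw_softc *sc, int i)
{
	ADMSW_CDTXLSYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	if (sc->sc_txldescs[i].data & ADM5120_DMA_OWN) {
		/* Hardware still owns the slot: restore its view, stop. */
		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		return (0);
	}
	return (1);	/* CPU owns it; the mbuf may be reclaimed */
}
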
*/ static void admsw_init(void *xsc) { struct admsw_softc *sc = xsc; struct ifnet *ifp; int i; for (i = 0; i < SW_DEVS; i++) { ifp = sc->sc_ifnet[i]; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { if (sc->ndevs == 0) { admsw_init_bufs(sc); admsw_reset(sc); REG_WRITE(CPUP_CONF_REG, CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK); /* clear all pending interrupts */ REG_WRITE(ADMSW_INT_ST, INT_MASK); /* enable needed interrupts */ REG_WRITE(ADMSW_INT_MASK, REG_READ(ADMSW_INT_MASK) & ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD | ADMSW_INTR_RHD | ADMSW_INTR_RLD | ADMSW_INTR_HDF | ADMSW_INTR_LDF)); callout_reset(&sc->sc_watchdog, hz, admsw_watchdog, sc); } sc->ndevs++; } /* mark iface as running */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } /* Set the receive filter. */ admsw_set_filter(sc); } /* * admsw_stop: [ifnet interface function] * * Stop transmission on the interface. */ static void admsw_stop(struct ifnet *ifp, int disable) { struct admsw_softc *sc = ifp->if_softc; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; if (--sc->ndevs == 0) { /* printf("debug: de-initializing hardware\n"); */ /* disable cpu port */ REG_WRITE(CPUP_CONF_REG, CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK); /* XXX We should disable, then clear? --dyoung */ /* clear all pending interrupts */ REG_WRITE(ADMSW_INT_ST, INT_MASK); /* disable interrupts */ REG_WRITE(ADMSW_INT_MASK, INT_MASK); /* Cancel the watchdog timer. */ sc->sc_timer = 0; callout_stop(&sc->sc_watchdog); } /* Mark the interface as down. */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); return; } /* * admsw_set_filter: * * Set up the receive filter. */ static void admsw_set_filter(struct admsw_softc *sc) { int i; uint32_t allmc, anymc, conf, promisc; struct ifnet *ifp; struct ifmultiaddr *ifma; /* Find which ports should be operated in promisc mode. */ allmc = anymc = promisc = 0; for (i = 0; i < SW_DEVS; i++) { ifp = sc->sc_ifnet[i]; if (ifp->if_flags & IFF_PROMISC) promisc |= vlan_matrix[i]; ifp->if_flags &= ~IFF_ALLMULTI; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; anymc |= vlan_matrix[i]; } if_maddr_runlock(ifp); } conf = REG_READ(CPUP_CONF_REG); /* 1 Disable forwarding of unknown & multicast packets to * CPU on all ports. * 2 Enable forwarding of unknown & multicast packets to * CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set. */ conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK; /* Enable forwarding of unknown packets to CPU on selected ports. */ conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK); conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK); conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK); REG_WRITE(CPUP_CONF_REG, conf); } /* * admsw_add_rxbuf: * * Add a receive buffer to the indicated descriptor. 
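
Each vlan_matrix[] entry is a bitmask of physical ports, bit 6 being the CPU port (see the table near the top of the file); that is what lets admsw_set_filter() above OR entries together into per-port forwarding masks, and what lets admsw_rxintr() map an ingress port to an ifnet. The latter lookup, pulled out for clarity (port_to_ifnet() is an illustrative name):

static struct ifnet *
port_to_ifnet(struct admsw_softc *sc, int port)
{
	int vlan;

	for (vlan = 0; vlan < SW_DEVS; vlan++)
		if ((1 << port) & vlan_matrix[vlan])
			return (sc->sc_ifnet[vlan]);
	return (sc->sc_ifnet[0]);	/* unmatched ports fall back to 0 */
}
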
*/ int admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high) { struct admsw_descsoft *ds; struct mbuf *m; int error; if (high) ds = &sc->sc_rxhsoft[idx]; else ds = &sc->sc_rxlsoft[idx]; MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return (ENOBUFS); - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); return (ENOBUFS); } if (ds->ds_mbuf != NULL) bus_dmamap_unload(sc->sc_bufs_dmat, ds->ds_dmamap); ds->ds_mbuf = m; error = bus_dmamap_load(sc->sc_bufs_dmat, ds->ds_dmamap, m->m_ext.ext_buf, m->m_ext.ext_size, admsw_rxbuf_map_addr, ds, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "can't load rx DMA map %d, error = %d\n", idx, error); panic("admsw_add_rxbuf"); /* XXX */ } bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap, BUS_DMASYNC_PREREAD); if (high) ADMSW_INIT_RXHDESC(sc, idx); else ADMSW_INIT_RXLDESC(sc, idx); return (0); } int admsw_mediachange(struct ifnet *ifp) { struct admsw_softc *sc = ifp->if_softc; int port = 0; struct ifmedia *ifm; int old, new, val; while(port < SW_DEVS) { if(ifp == sc->sc_ifnet[port]) break; else port++; } ifm = &sc->sc_ifmedia[port]; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { val = PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX; } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) val = PHY_CNTL2_100M|PHY_CNTL2_FDX; else val = PHY_CNTL2_100M; } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) { if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) val = PHY_CNTL2_FDX; else val = 0; } else return (EINVAL); old = REG_READ(PHY_CNTL2_REG); new = old & ~((PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX) << port); new |= (val << port); if (new != old) REG_WRITE(PHY_CNTL2_REG, new); return (0); } void admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct admsw_softc *sc = ifp->if_softc; int port = 0; int status; while(port < SW_DEVS) { if(ifp == sc->sc_ifnet[port]) break; else port++; } ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; status = REG_READ(PHY_ST_REG) >> port; if ((status & PHY_ST_LINKUP) == 0) { ifmr->ifm_active |= IFM_NONE; return; } ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T; if (status & PHY_ST_FDX) ifmr->ifm_active |= IFM_FDX; } static device_method_t admsw_methods[] = { /* Device interface */ DEVMETHOD(device_probe, admsw_probe), DEVMETHOD(device_attach, admsw_attach), DEVMETHOD(device_detach, admsw_detach), DEVMETHOD(device_shutdown, admsw_shutdown), { 0, 0 } }; static devclass_t admsw_devclass; static driver_t admsw_driver = { "admsw", admsw_methods, sizeof(struct admsw_softc), }; DRIVER_MODULE(admsw, obio, admsw_driver, admsw_devclass, 0, 0); MODULE_DEPEND(admsw, ether, 1, 1, 1); Index: head/sys/netgraph/atm/ngatmbase.c =================================================================== --- head/sys/netgraph/atm/ngatmbase.c (revision 276749) +++ head/sys/netgraph/atm/ngatmbase.c (revision 276750) @@ -1,501 +1,499 @@ /*- * Copyright (c) 2001-2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Author: Hartmut Brandt * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * In-kernel UNI stack message functions. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #define NGATMBASE_VERSION 1 static int ngatm_handler(module_t, int, void *); static moduledata_t ngatm_data = { "ngatmbase", ngatm_handler, 0 }; MODULE_VERSION(ngatmbase, NGATMBASE_VERSION); DECLARE_MODULE(ngatmbase, ngatm_data, SI_SUB_EXEC, SI_ORDER_ANY); /*********************************************************************/ /* * UNI Stack message handling functions */ static MALLOC_DEFINE(M_UNIMSG, "unimsg", "uni message buffers"); static MALLOC_DEFINE(M_UNIMSGHDR, "unimsghdr", "uni message headers"); #define EXTRA 128 /* mutex to protect the free list (and the used list if debugging) */ static struct mtx ngatm_unilist_mtx; /* * Initialize UNI message subsystem */ static void uni_msg_init(void) { mtx_init(&ngatm_unilist_mtx, "netgraph UNI msg header lists", NULL, MTX_DEF); } /* * Ensure that the message can be extended by at least s bytes. * Re-allocate the message (not the header). If that fails, * free the entire message and return ENOMEM. Free space at the start of * the message is retained. */ int uni_msg_extend(struct uni_msg *m, size_t s) { u_char *b; size_t len, lead; lead = uni_msg_leading(m); len = uni_msg_len(m); s += lead + len + EXTRA; if ((b = malloc(s, M_UNIMSG, M_NOWAIT)) == NULL) { uni_msg_destroy(m); return (ENOMEM); } bcopy(m->b_rptr, b + lead, len); free(m->b_buf, M_UNIMSG); m->b_buf = b; m->b_rptr = m->b_buf + lead; m->b_wptr = m->b_rptr + len; m->b_lim = m->b_buf + s; return (0); } /* * Append a buffer to the message, making space if needed. * If reallocation fails, ENOMEM is returned and the message is freed. */ int uni_msg_append(struct uni_msg *m, void *buf, size_t size) { int error; if ((error = uni_msg_ensure(m, size))) return (error); bcopy(buf, m->b_wptr, size); m->b_wptr += size; return (0); } /* * Pack/unpack data from/into mbufs. Assume that the (optional) header * fits into the first mbuf, i.e., hdrlen < MHLEN. Note that the message * can be NULL, but hdrlen should not be 0 in this case.
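
uni_msg_extend() and uni_msg_append() above are easiest to follow against the four-pointer buffer layout they maintain. The picture and accessors below are reconstructed from their usage here (the authoritative definitions live in the netnatm message headers, so treat these as a sketch, not the canonical macros):

/*
 *   b_buf        b_rptr            b_wptr            b_lim
 *     |<-leading-> |<----- len ----->|<---- space ---->|
 */
#define	uni_msg_leading(m)	((size_t)((m)->b_rptr - (m)->b_buf))
#define	uni_msg_len(m)		((size_t)((m)->b_wptr - (m)->b_rptr))
#define	uni_msg_space(m)	((size_t)((m)->b_lim - (m)->b_wptr))
/* uni_msg_ensure(m, s): grow via uni_msg_extend() only if space < s. */
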
*/ struct mbuf * uni_msg_pack_mbuf(struct uni_msg *msg, void *hdr, size_t hdrlen) { struct mbuf *m, *m0, *last; size_t n; MGETHDR(m0, M_NOWAIT, MT_DATA); if (m0 == NULL) return (NULL); KASSERT(hdrlen <= MHLEN, ("uni_msg_pack_mbuf: hdrlen > MHLEN")); if (hdrlen != 0) { bcopy(hdr, m0->m_data, hdrlen); m0->m_len = hdrlen; m0->m_pkthdr.len = hdrlen; } else { if ((n = uni_msg_len(msg)) > MHLEN) { - MCLGET(m0, M_NOWAIT); - if (!(m0->m_flags & M_EXT)) + if (!(MCLGET(m0, M_NOWAIT))) goto drop; if (n > MCLBYTES) n = MCLBYTES; } bcopy(msg->b_rptr, m0->m_data, n); msg->b_rptr += n; m0->m_len = n; m0->m_pkthdr.len = n; } last = m0; while (msg != NULL && (n = uni_msg_len(msg)) != 0) { MGET(m, M_NOWAIT, MT_DATA); if (m == NULL) goto drop; last->m_next = m; last = m; if (n > MLEN) { - MCLGET(m, M_NOWAIT); - if (!(m->m_flags & M_EXT)) + if (!(MCLGET(m, M_NOWAIT))) goto drop; if (n > MCLBYTES) n = MCLBYTES; } bcopy(msg->b_rptr, m->m_data, n); msg->b_rptr += n; m->m_len = n; m0->m_pkthdr.len += n; } return (m0); drop: m_freem(m0); return (NULL); } #ifdef NGATM_DEBUG /* * Prepend a debugging header to each message */ struct ngatm_msg { LIST_ENTRY(ngatm_msg) link; const char *file; int line; struct uni_msg msg; }; /* * These are the lists of free and used message headers. */ static LIST_HEAD(, ngatm_msg) ngatm_freeuni = LIST_HEAD_INITIALIZER(ngatm_freeuni); static LIST_HEAD(, ngatm_msg) ngatm_useduni = LIST_HEAD_INITIALIZER(ngatm_useduni); /* * Clean-up UNI message subsystem */ static void uni_msg_fini(void) { struct ngatm_msg *h; /* free all free message headers */ while ((h = LIST_FIRST(&ngatm_freeuni)) != NULL) { LIST_REMOVE(h, link); free(h, M_UNIMSGHDR); } /* forget about still used messages */ LIST_FOREACH(h, &ngatm_useduni, link) printf("unimsg header in use: %p (%s, %d)\n", &h->msg, h->file, h->line); mtx_destroy(&ngatm_unilist_mtx); } /* * Allocate a message that can hold at least s bytes. */ struct uni_msg * _uni_msg_alloc(size_t s, const char *file, int line) { struct ngatm_msg *m; mtx_lock(&ngatm_unilist_mtx); if ((m = LIST_FIRST(&ngatm_freeuni)) != NULL) LIST_REMOVE(m, link); mtx_unlock(&ngatm_unilist_mtx); if (m == NULL && (m = malloc(sizeof(*m), M_UNIMSGHDR, M_NOWAIT)) == NULL) return (NULL); s += EXTRA; if((m->msg.b_buf = malloc(s, M_UNIMSG, M_NOWAIT | M_ZERO)) == NULL) { mtx_lock(&ngatm_unilist_mtx); LIST_INSERT_HEAD(&ngatm_freeuni, m, link); mtx_unlock(&ngatm_unilist_mtx); return (NULL); } m->msg.b_rptr = m->msg.b_wptr = m->msg.b_buf; m->msg.b_lim = m->msg.b_buf + s; m->file = file; m->line = line; mtx_lock(&ngatm_unilist_mtx); LIST_INSERT_HEAD(&ngatm_useduni, m, link); mtx_unlock(&ngatm_unilist_mtx); return (&m->msg); } /* * Destroy a UNI message. * The header is inserted into the free header list. */ void _uni_msg_destroy(struct uni_msg *m, const char *file, int line) { struct ngatm_msg *h, *d; d = (struct ngatm_msg *)((char *)m - offsetof(struct ngatm_msg, msg)); mtx_lock(&ngatm_unilist_mtx); LIST_FOREACH(h, &ngatm_useduni, link) if (h == d) break; if (h == NULL) { /* * Not on used list. Oops.
*/ LIST_FOREACH(h, &ngatm_freeuni, link) if (h == d) break; if (h == NULL) printf("uni_msg %p was never allocated; found " "in %s:%u\n", m, file, line); else printf("uni_msg %p was already destroyed in %s,%d; " "found in %s:%u\n", m, h->file, h->line, file, line); } else { free(m->b_buf, M_UNIMSG); LIST_REMOVE(d, link); LIST_INSERT_HEAD(&ngatm_freeuni, d, link); d->file = file; d->line = line; } mtx_unlock(&ngatm_unilist_mtx); } #else /* !NGATM_DEBUG */ /* * This assumes that sizeof(struct uni_msg) >= sizeof(struct ngatm_msg) * and that the alignment requirements are the same. */ struct ngatm_msg { LIST_ENTRY(ngatm_msg) link; }; /* Lists of free message headers. */ static LIST_HEAD(, ngatm_msg) ngatm_freeuni = LIST_HEAD_INITIALIZER(ngatm_freeuni); /* * Clean-up UNI message subsystem */ static void uni_msg_fini(void) { struct ngatm_msg *h; /* free all free message headers */ while ((h = LIST_FIRST(&ngatm_freeuni)) != NULL) { LIST_REMOVE(h, link); free(h, M_UNIMSGHDR); } mtx_destroy(&ngatm_unilist_mtx); } /* * Allocate a message that can hold at least s bytes. */ struct uni_msg * uni_msg_alloc(size_t s) { struct ngatm_msg *a; struct uni_msg *m; mtx_lock(&ngatm_unilist_mtx); if ((a = LIST_FIRST(&ngatm_freeuni)) != NULL) LIST_REMOVE(a, link); mtx_unlock(&ngatm_unilist_mtx); if (a == NULL) { if ((m = malloc(sizeof(*m), M_UNIMSGHDR, M_NOWAIT)) == NULL) return (NULL); a = (struct ngatm_msg *)m; } else m = (struct uni_msg *)a; s += EXTRA; if((m->b_buf = malloc(s, M_UNIMSG, M_NOWAIT | M_ZERO)) == NULL) { mtx_lock(&ngatm_unilist_mtx); LIST_INSERT_HEAD(&ngatm_freeuni, a, link); mtx_unlock(&ngatm_unilist_mtx); return (NULL); } m->b_rptr = m->b_wptr = m->b_buf; m->b_lim = m->b_buf + s; return (m); } /* * Destroy a UNI message. * The header is inserted into the free header list. */ void uni_msg_destroy(struct uni_msg *m) { struct ngatm_msg *a; a = (struct ngatm_msg *)m; free(m->b_buf, M_UNIMSG); mtx_lock(&ngatm_unilist_mtx); LIST_INSERT_HEAD(&ngatm_freeuni, a, link); mtx_unlock(&ngatm_unilist_mtx); } #endif /* * Build a message from a number of buffers. Arguments are pairs * of (void *, size_t) ending with a NULL pointer. */ #ifdef NGATM_DEBUG struct uni_msg * _uni_msg_build(const char *file, int line, void *ptr, ...) #else struct uni_msg * uni_msg_build(void *ptr, ...) #endif { va_list ap; struct uni_msg *m; size_t len, n; void *p1; len = 0; va_start(ap, ptr); p1 = ptr; while (p1 != NULL) { n = va_arg(ap, size_t); len += n; p1 = va_arg(ap, void *); } va_end(ap); #ifdef NGATM_DEBUG if ((m = _uni_msg_alloc(len, file, line)) == NULL) #else if ((m = uni_msg_alloc(len)) == NULL) #endif return (NULL); va_start(ap, ptr); p1 = ptr; while (p1 != NULL) { n = va_arg(ap, size_t); bcopy(p1, m->b_wptr, n); m->b_wptr += n; p1 = va_arg(ap, void *); } va_end(ap); return (m); } /* * Unpack an mbuf chain into a uni_msg buffer.
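
Both variants of the allocator above recycle the fixed-size header wrappers through a mutex-protected free list before falling back to malloc(); only the variable-size data buffer is always freshly allocated. The get side of that cache, condensed (hdr_get() is an illustrative name):

static struct ngatm_msg *
hdr_get(void)
{
	struct ngatm_msg *a;

	mtx_lock(&ngatm_unilist_mtx);
	if ((a = LIST_FIRST(&ngatm_freeuni)) != NULL)
		LIST_REMOVE(a, link);		/* reuse a parked header */
	mtx_unlock(&ngatm_unilist_mtx);
	if (a == NULL)
		a = malloc(sizeof(*a), M_UNIMSGHDR, M_NOWAIT);
	return (a);
}
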
*/ #ifdef NGATM_DEBUG int _uni_msg_unpack_mbuf(struct mbuf *m, struct uni_msg **pmsg, const char *file, int line) #else int uni_msg_unpack_mbuf(struct mbuf *m, struct uni_msg **pmsg) #endif { if (!(m->m_flags & M_PKTHDR)) { printf("%s: bogus packet %p\n", __func__, m); return (EINVAL); } #ifdef NGATM_DEBUG if ((*pmsg = _uni_msg_alloc(m->m_pkthdr.len, file, line)) == NULL) #else if ((*pmsg = uni_msg_alloc(m->m_pkthdr.len)) == NULL) #endif return (ENOMEM); m_copydata(m, 0, m->m_pkthdr.len, (*pmsg)->b_wptr); (*pmsg)->b_wptr += m->m_pkthdr.len; return (0); } /*********************************************************************/ static int ngatm_handler(module_t mod, int what, void *arg) { int error = 0; switch (what) { case MOD_LOAD: uni_msg_init(); break; case MOD_UNLOAD: uni_msg_fini(); break; default: error = EOPNOTSUPP; break; } return (error); } Index: head/sys/netgraph/atm/sscop/ng_sscop_cust.h =================================================================== --- head/sys/netgraph/atm/sscop/ng_sscop_cust.h (revision 276749) +++ head/sys/netgraph/atm/sscop/ng_sscop_cust.h (revision 276750) @@ -1,344 +1,343 @@ /*- * Copyright (c) 2001-2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Author: Harti Brandt * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * * Customisation of the SSCOP code to ng_sscop. */ #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Allocate zeroed or non-zeroed memory of some size and cast it. * Return NULL on failure. 
*/ #ifndef SSCOP_DEBUG #define MEMINIT() \ MALLOC_DECLARE(M_NG_SSCOP); \ DECL_MSGQ_GET \ DECL_SIGQ_GET \ DECL_MBUF_ALLOC #define MEMZALLOC(PTR, CAST, SIZE) \ ((PTR) = (CAST)malloc((SIZE), M_NG_SSCOP, M_NOWAIT | M_ZERO)) #define MEMFREE(PTR) \ free((PTR), M_NG_SSCOP) #define MSG_ALLOC(PTR) \ MEMZALLOC(PTR, struct sscop_msg *, sizeof(struct sscop_msg)) #define MSG_FREE(PTR) \ MEMFREE(PTR) #define SIG_ALLOC(PTR) \ MEMZALLOC(PTR, struct sscop_sig *, sizeof(struct sscop_sig)) #define SIG_FREE(PTR) \ MEMFREE(PTR) #else #define MEMINIT() \ MALLOC_DEFINE(M_NG_SSCOP_INS, "sscop_ins", "SSCOP instances"); \ MALLOC_DEFINE(M_NG_SSCOP_MSG, "sscop_msg", "SSCOP buffers"); \ MALLOC_DEFINE(M_NG_SSCOP_SIG, "sscop_sig", "SSCOP signals"); \ DECL_MSGQ_GET \ DECL_SIGQ_GET \ DECL_MBUF_ALLOC #define MEMZALLOC(PTR, CAST, SIZE) \ ((PTR) = (CAST)malloc((SIZE), M_NG_SSCOP_INS, M_NOWAIT | M_ZERO)) #define MEMFREE(PTR) \ free((PTR), M_NG_SSCOP_INS) #define MSG_ALLOC(PTR) \ ((PTR) = malloc(sizeof(struct sscop_msg), \ M_NG_SSCOP_MSG, M_NOWAIT | M_ZERO)) #define MSG_FREE(PTR) \ free((PTR), M_NG_SSCOP_MSG) #define SIG_ALLOC(PTR) \ ((PTR) = malloc(sizeof(struct sscop_sig), \ M_NG_SSCOP_SIG, M_NOWAIT | M_ZERO)) #define SIG_FREE(PTR) \ free((PTR), M_NG_SSCOP_SIG) #endif /* * Timer support. */ typedef struct callout sscop_timer_t; #define TIMER_INIT(S, T) ng_callout_init(&(S)->t_##T) #define TIMER_STOP(S,T) do { \ ng_uncallout(&(S)->t_##T, (S)->aarg); \ } while (0) #define TIMER_RESTART(S, T) do { \ TIMER_STOP(S, T); \ ng_callout(&(S)->t_##T, (S)->aarg, NULL, \ hz * (S)->timer##T / 1000, T##_func, (S), 0); \ } while (0) #define TIMER_ISACT(S, T) ((S)->t_##T.c_flags & (CALLOUT_PENDING)) /* * This assumes, that the user argument is the node pointer. */ #define TIMER_FUNC(T,N) \ static void \ T##_func(node_p node, hook_p hook, void *arg1, int arg2) \ { \ struct sscop *sscop = arg1; \ \ VERBOSE(sscop, SSCOP_DBG_TIMER, (sscop, sscop->aarg, \ "timer_" #T " expired")); \ sscop_signal(sscop, SIG_T_##N, NULL); \ } /* * Message queues */ typedef TAILQ_ENTRY(sscop_msg) sscop_msgq_link_t; typedef TAILQ_HEAD(sscop_msgq, sscop_msg) sscop_msgq_head_t; #define MSGQ_EMPTY(Q) TAILQ_EMPTY(Q) #define MSGQ_INIT(Q) TAILQ_INIT(Q) #define MSGQ_FOREACH(P, Q) TAILQ_FOREACH(P, Q, link) #define MSGQ_REMOVE(Q, M) TAILQ_REMOVE(Q, M, link) #define MSGQ_INSERT_BEFORE(B, M) TAILQ_INSERT_BEFORE(B, M, link) #define MSGQ_APPEND(Q, M) TAILQ_INSERT_TAIL(Q, M, link) #define MSGQ_PEEK(Q) TAILQ_FIRST((Q)) #define MSGQ_GET(Q) ng_sscop_msgq_get((Q)) #define DECL_MSGQ_GET \ static __inline struct sscop_msg * \ ng_sscop_msgq_get(struct sscop_msgq *q) \ { \ struct sscop_msg *m; \ \ m = TAILQ_FIRST(q); \ if (m != NULL) \ TAILQ_REMOVE(q, m, link); \ return (m); \ } #define MSGQ_CLEAR(Q) \ do { \ struct sscop_msg *_m1, *_m2; \ \ _m1 = TAILQ_FIRST(Q); \ while (_m1 != NULL) { \ _m2 = TAILQ_NEXT(_m1, link); \ SSCOP_MSG_FREE(_m1); \ _m1 = _m2; \ } \ TAILQ_INIT((Q)); \ } while (0) /* * Signal queues */ typedef TAILQ_ENTRY(sscop_sig) sscop_sigq_link_t; typedef TAILQ_HEAD(sscop_sigq, sscop_sig) sscop_sigq_head_t; #define SIGQ_INIT(Q) TAILQ_INIT(Q) #define SIGQ_APPEND(Q, S) TAILQ_INSERT_TAIL(Q, S, link) #define SIGQ_EMPTY(Q) TAILQ_EMPTY(Q) #define SIGQ_GET(Q) ng_sscop_sigq_get((Q)) #define DECL_SIGQ_GET \ static __inline struct sscop_sig * \ ng_sscop_sigq_get(struct sscop_sigq *q) \ { \ struct sscop_sig *s; \ \ s = TAILQ_FIRST(q); \ if (s != NULL) \ TAILQ_REMOVE(q, s, link); \ return (s); \ } #define SIGQ_MOVE(F, T) \ do { \ struct sscop_sig *_s; \ \ while (!TAILQ_EMPTY(F)) { \ _s = 
TAILQ_FIRST(F); \ TAILQ_REMOVE(F, _s, link); \ TAILQ_INSERT_TAIL(T, _s, link); \ } \ } while (0) #define SIGQ_PREPEND(F, T) \ do { \ struct sscop_sig *_s; \ \ while (!TAILQ_EMPTY(F)) { \ _s = TAILQ_LAST(F, sscop_sigq); \ TAILQ_REMOVE(F, _s, link); \ TAILQ_INSERT_HEAD(T, _s, link); \ } \ } while (0) #define SIGQ_CLEAR(Q) \ do { \ struct sscop_sig *_s1, *_s2; \ \ _s1 = TAILQ_FIRST(Q); \ while (_s1 != NULL) { \ _s2 = TAILQ_NEXT(_s1, link); \ SSCOP_MSG_FREE(_s1->msg); \ SIG_FREE(_s1); \ _s1 = _s2; \ } \ TAILQ_INIT(Q); \ } while (0) /* * Message buffers */ #define MBUF_FREE(M) do { if ((M)) m_freem((M)); } while(0) #define MBUF_DUP(M) m_copypacket((M), M_NOWAIT) #define MBUF_LEN(M) ((size_t)(M)->m_pkthdr.len) /* * Return the i-th word counted from the end of the buffer. * i=-1 will return the last 32bit word, i=-2 the 2nd last. * Assumes that there is enough space. */ #define MBUF_TRAIL32(M ,I) ng_sscop_mbuf_trail32((M), (I)) static uint32_t __inline ng_sscop_mbuf_trail32(const struct mbuf *m, int i) { uint32_t w; m_copydata(m, m->m_pkthdr.len + 4 * i, 4, (caddr_t)&w); return (ntohl(w)); } /* * Strip 32bit value from the end */ #define MBUF_STRIP32(M) ng_sscop_mbuf_strip32((M)) static uint32_t __inline ng_sscop_mbuf_strip32(struct mbuf *m) { uint32_t w; m_copydata(m, m->m_pkthdr.len - 4, 4, (caddr_t)&w); m_adj(m, -4); return (ntohl(w)); } #define MBUF_GET32(M) ng_sscop_mbuf_get32((M)) static uint32_t __inline ng_sscop_mbuf_get32(struct mbuf *m) { uint32_t w; m_copydata(m, 0, 4, (caddr_t)&w); m_adj(m, 4); return (ntohl(w)); } /* * Append a 32bit value to an mbuf. Failures are ignored. */ #define MBUF_APPEND32(M, W) \ do { \ uint32_t _w = (W); \ \ _w = htonl(_w); \ m_copyback((M), (M)->m_pkthdr.len, 4, (caddr_t)&_w); \ } while (0) /* * Pad a message to a multiple of four bytes and return the amount of padding. * Failures are ignored. */ #define MBUF_PAD4(M) ng_sscop_mbuf_pad4((M)) static u_int __inline ng_sscop_mbuf_pad4(struct mbuf *m) { static u_char pad[4] = { 0, 0, 0, 0 }; int len = m->m_pkthdr.len; int npad = 3 - ((len + 3) & 3); if (npad != 0) m_copyback(m, len, npad, (caddr_t)pad); return (npad); } #define MBUF_UNPAD(M, P) do { if( (P) > 0) m_adj((M), -(P)); } while (0) /* * Allocate a message that will probably hold N bytes. */ #define MBUF_ALLOC(N) ng_sscop_mbuf_alloc((N)) #define DECL_MBUF_ALLOC \ static __inline struct mbuf * \ ng_sscop_mbuf_alloc(size_t n) \ { \ struct mbuf *m; \ \ MGETHDR(m, M_NOWAIT, MT_DATA); \ if (m != NULL) { \ m->m_len = 0; \ m->m_pkthdr.len = 0; \ if (n > MHLEN) { \ - MCLGET(m, M_NOWAIT); \ - if (!(m->m_flags & M_EXT)){ \ + if (!(MCLGET(m, M_NOWAIT))){ \ m_free(m); \ m = NULL; \ } \ } \ } \ return (m); \ } #ifdef SSCOP_DEBUG #define ASSERT(X) KASSERT(X, (#X)) #else #define ASSERT(X) #endif Index: head/sys/netgraph/bluetooth/drivers/bt3c/ng_bt3c_pccard.c =================================================================== --- head/sys/netgraph/bluetooth/drivers/bt3c/ng_bt3c_pccard.c (revision 276749) +++ head/sys/netgraph/bluetooth/drivers/bt3c/ng_bt3c_pccard.c (revision 276750) @@ -1,1226 +1,1225 @@ /* * ng_bt3c_pccard.c */ /*- * Copyright (c) 2001-2002 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: ng_bt3c_pccard.c,v 1.5 2003/04/01 18:15:21 max Exp $ * $FreeBSD$ * * XXX XXX XX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX * * Based on information obtained from: Jose Orlando Pereira * and disassembled w2k driver. * * XXX XXX XX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pccarddevs.h" #include #include #include #include #include #include #include /* Netgraph methods */ static ng_constructor_t ng_bt3c_constructor; static ng_shutdown_t ng_bt3c_shutdown; static ng_newhook_t ng_bt3c_newhook; static ng_connect_t ng_bt3c_connect; static ng_disconnect_t ng_bt3c_disconnect; static ng_rcvmsg_t ng_bt3c_rcvmsg; static ng_rcvdata_t ng_bt3c_rcvdata; /* PCMCIA driver methods */ static int bt3c_pccard_probe (device_t); static int bt3c_pccard_attach (device_t); static int bt3c_pccard_detach (device_t); static void bt3c_intr (void *); static void bt3c_receive (bt3c_softc_p); static void bt3c_swi_intr (void *); static void bt3c_forward (node_p, hook_p, void *, int); static void bt3c_send (node_p, hook_p, void *, int); static void bt3c_download_firmware (bt3c_softc_p, char const *, int); #define bt3c_set_address(sc, address) \ do { \ bus_space_write_1((sc)->iot, (sc)->ioh, BT3C_ADDR_L, ((address) & 0xff)); \ bus_space_write_1((sc)->iot, (sc)->ioh, BT3C_ADDR_H, (((address) >> 8) & 0xff)); \ } while (0) #define bt3c_read_data(sc, data) \ do { \ (data) = bus_space_read_1((sc)->iot, (sc)->ioh, BT3C_DATA_L); \ (data) |= ((bus_space_read_1((sc)->iot, (sc)->ioh, BT3C_DATA_H) & 0xff) << 8); \ } while (0) #define bt3c_write_data(sc, data) \ do { \ bus_space_write_1((sc)->iot, (sc)->ioh, BT3C_DATA_L, ((data) & 0xff)); \ bus_space_write_1((sc)->iot, (sc)->ioh, BT3C_DATA_H, (((data) >> 8) & 0xff)); \ } while (0) #define bt3c_read_control(sc, data) \ do { \ (data) = bus_space_read_1((sc)->iot, (sc)->ioh, BT3C_CONTROL); \ } while (0) #define bt3c_write_control(sc, data) \ do { \ bus_space_write_1((sc)->iot, (sc)->ioh, BT3C_CONTROL, (data)); \ } while (0) #define bt3c_read(sc, address, data) \ do { \ bt3c_set_address((sc), (address)); \ bt3c_read_data((sc), (data)); \ } while(0) #define bt3c_write(sc, address, data) \ do { \ bt3c_set_address((sc), (address)); \ bt3c_write_data((sc), (data)); \ } while(0) static MALLOC_DEFINE(M_BT3C, "bt3c", "bt3c data structures"); /****************************************************************************
**************************************************************************** ** Netgraph specific **************************************************************************** ****************************************************************************/ /* * Netgraph node type */ /* Queue length */ static const struct ng_parse_struct_field ng_bt3c_node_qlen_type_fields[] = { { "queue", &ng_parse_int32_type, }, { "qlen", &ng_parse_int32_type, }, { NULL, } }; static const struct ng_parse_type ng_bt3c_node_qlen_type = { &ng_parse_struct_type, &ng_bt3c_node_qlen_type_fields }; /* Stat info */ static const struct ng_parse_struct_field ng_bt3c_node_stat_type_fields[] = { { "pckts_recv", &ng_parse_uint32_type, }, { "bytes_recv", &ng_parse_uint32_type, }, { "pckts_sent", &ng_parse_uint32_type, }, { "bytes_sent", &ng_parse_uint32_type, }, { "oerrors", &ng_parse_uint32_type, }, { "ierrors", &ng_parse_uint32_type, }, { NULL, } }; static const struct ng_parse_type ng_bt3c_node_stat_type = { &ng_parse_struct_type, &ng_bt3c_node_stat_type_fields }; static const struct ng_cmdlist ng_bt3c_cmdlist[] = { { NGM_BT3C_COOKIE, NGM_BT3C_NODE_GET_STATE, "get_state", NULL, &ng_parse_uint16_type }, { NGM_BT3C_COOKIE, NGM_BT3C_NODE_SET_DEBUG, "set_debug", &ng_parse_uint16_type, NULL }, { NGM_BT3C_COOKIE, NGM_BT3C_NODE_GET_DEBUG, "get_debug", NULL, &ng_parse_uint16_type }, { NGM_BT3C_COOKIE, NGM_BT3C_NODE_GET_QLEN, "get_qlen", NULL, &ng_bt3c_node_qlen_type }, { NGM_BT3C_COOKIE, NGM_BT3C_NODE_SET_QLEN, "set_qlen", &ng_bt3c_node_qlen_type, NULL }, { NGM_BT3C_COOKIE, NGM_BT3C_NODE_GET_STAT, "get_stat", NULL, &ng_bt3c_node_stat_type }, { NGM_BT3C_COOKIE, NGM_BT3C_NODE_RESET_STAT, "reset_stat", NULL, NULL }, { 0, } }; static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_BT3C_NODE_TYPE, .constructor = ng_bt3c_constructor, .rcvmsg = ng_bt3c_rcvmsg, .shutdown = ng_bt3c_shutdown, .newhook = ng_bt3c_newhook, .connect = ng_bt3c_connect, .rcvdata = ng_bt3c_rcvdata, .disconnect = ng_bt3c_disconnect, .cmdlist = ng_bt3c_cmdlist }; /* * Netgraph node constructor. Do not allow to create node of this type. */ static int ng_bt3c_constructor(node_p node) { return (EINVAL); } /* ng_bt3c_constructor */ /* * Netgraph node destructor. Destroy node only when device has been detached */ static int ng_bt3c_shutdown(node_p node) { bt3c_softc_p sc = (bt3c_softc_p) NG_NODE_PRIVATE(node); /* Let old node go */ NG_NODE_SET_PRIVATE(node, NULL); NG_NODE_UNREF(node); /* Create new fresh one if we are not going down */ if (sc == NULL) goto out; /* Create new Netgraph node */ if (ng_make_node_common(&typestruct, &sc->node) != 0) { device_printf(sc->dev, "Could not create Netgraph node\n"); sc->node = NULL; goto out; } /* Name new Netgraph node */ if (ng_name_node(sc->node, device_get_nameunit(sc->dev)) != 0) { device_printf(sc->dev, "Could not name Netgraph node\n"); NG_NODE_UNREF(sc->node); sc->node = NULL; goto out; } NG_NODE_SET_PRIVATE(sc->node, sc); out: return (0); } /* ng_bt3c_shutdown */ /* * Create new hook. There can only be one. */ static int ng_bt3c_newhook(node_p node, hook_p hook, char const *name) { bt3c_softc_p sc = (bt3c_softc_p) NG_NODE_PRIVATE(node); if (strcmp(name, NG_BT3C_HOOK) != 0) return (EINVAL); if (sc->hook != NULL) return (EISCONN); sc->hook = hook; return (0); } /* ng_bt3c_newhook */ /* * Connect hook. Say YEP, that's OK with me. 
*/ static int ng_bt3c_connect(hook_p hook) { bt3c_softc_p sc = (bt3c_softc_p) NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); if (hook != sc->hook) { sc->hook = NULL; return (EINVAL); } /* set the hook into queueing mode (for incoming (from wire) packets) */ NG_HOOK_FORCE_QUEUE(NG_HOOK_PEER(hook)); return (0); } /* ng_bt3c_connect */ /* * Disconnect hook */ static int ng_bt3c_disconnect(hook_p hook) { bt3c_softc_p sc = (bt3c_softc_p) NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); /* * We need to check for sc != NULL because we can be called from * bt3c_pccard_detach() via ng_rmnode_self() */ if (sc != NULL) { if (hook != sc->hook) return (EINVAL); IF_DRAIN(&sc->inq); IF_DRAIN(&sc->outq); sc->hook = NULL; } return (0); } /* ng_bt3c_disconnect */ /* * Process control message */ static int ng_bt3c_rcvmsg(node_p node, item_p item, hook_p lasthook) { bt3c_softc_p sc = (bt3c_softc_p) NG_NODE_PRIVATE(node); struct ng_mesg *msg = NULL, *rsp = NULL; int error = 0; if (sc == NULL) { NG_FREE_ITEM(item); return (EHOSTDOWN); } NGI_GET_MSG(item, msg); switch (msg->header.typecookie) { case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { case NGM_TEXT_STATUS: NG_MKRESPONSE(rsp, msg, NG_TEXTRESPONSE, M_NOWAIT); if (rsp == NULL) error = ENOMEM; else snprintf(rsp->data, NG_TEXTRESPONSE, "Hook: %s\n" \ "Flags: %#x\n" \ "Debug: %d\n" \ "State: %d\n" \ "IncmQ: [len:%d,max:%d]\n" \ "OutgQ: [len:%d,max:%d]\n", (sc->hook != NULL)? NG_BT3C_HOOK : "", sc->flags, sc->debug, sc->state, _IF_QLEN(&sc->inq), /* XXX */ sc->inq.ifq_maxlen, /* XXX */ _IF_QLEN(&sc->outq), /* XXX */ sc->outq.ifq_maxlen /* XXX */ ); break; default: error = EINVAL; break; } break; case NGM_BT3C_COOKIE: switch (msg->header.cmd) { case NGM_BT3C_NODE_GET_STATE: NG_MKRESPONSE(rsp, msg, sizeof(ng_bt3c_node_state_ep), M_NOWAIT); if (rsp == NULL) error = ENOMEM; else *((ng_bt3c_node_state_ep *)(rsp->data)) = sc->state; break; case NGM_BT3C_NODE_SET_DEBUG: if (msg->header.arglen != sizeof(ng_bt3c_node_debug_ep)) error = EMSGSIZE; else sc->debug = *((ng_bt3c_node_debug_ep *)(msg->data)); break; case NGM_BT3C_NODE_GET_DEBUG: NG_MKRESPONSE(rsp, msg, sizeof(ng_bt3c_node_debug_ep), M_NOWAIT); if (rsp == NULL) error = ENOMEM; else *((ng_bt3c_node_debug_ep *)(rsp->data)) = sc->debug; break; case NGM_BT3C_NODE_GET_QLEN: NG_MKRESPONSE(rsp, msg, sizeof(ng_bt3c_node_qlen_ep), M_NOWAIT); if (rsp == NULL) { error = ENOMEM; break; } switch (((ng_bt3c_node_qlen_ep *)(msg->data))->queue) { case NGM_BT3C_NODE_IN_QUEUE: ((ng_bt3c_node_qlen_ep *)(rsp->data))->queue = NGM_BT3C_NODE_IN_QUEUE; ((ng_bt3c_node_qlen_ep *)(rsp->data))->qlen = sc->inq.ifq_maxlen; break; case NGM_BT3C_NODE_OUT_QUEUE: ((ng_bt3c_node_qlen_ep *)(rsp->data))->queue = NGM_BT3C_NODE_OUT_QUEUE; ((ng_bt3c_node_qlen_ep *)(rsp->data))->qlen = sc->outq.ifq_maxlen; break; default: NG_FREE_MSG(rsp); error = EINVAL; break; } break; case NGM_BT3C_NODE_SET_QLEN: if (msg->header.arglen != sizeof(ng_bt3c_node_qlen_ep)){ error = EMSGSIZE; break; } if (((ng_bt3c_node_qlen_ep *)(msg->data))->qlen <= 0) { error = EINVAL; break; } switch (((ng_bt3c_node_qlen_ep *)(msg->data))->queue) { case NGM_BT3C_NODE_IN_QUEUE: sc->inq.ifq_maxlen = ((ng_bt3c_node_qlen_ep *) (msg->data))->qlen; /* XXX */ break; case NGM_BT3C_NODE_OUT_QUEUE: sc->outq.ifq_maxlen = ((ng_bt3c_node_qlen_ep *) (msg->data))->qlen; /* XXX */ break; default: error = EINVAL; break; } break; case NGM_BT3C_NODE_GET_STAT: NG_MKRESPONSE(rsp, msg, sizeof(ng_bt3c_node_stat_ep), M_NOWAIT); if (rsp == NULL) error = ENOMEM; else bcopy(&sc->stat, rsp->data, sizeof(ng_bt3c_node_stat_ep)); 
break; case NGM_BT3C_NODE_RESET_STAT: NG_BT3C_STAT_RESET(sc->stat); break; case NGM_BT3C_NODE_DOWNLOAD_FIRMWARE: if (msg->header.arglen < sizeof(ng_bt3c_firmware_block_ep)) error = EMSGSIZE; else bt3c_download_firmware(sc, msg->data, msg->header.arglen); break; default: error = EINVAL; break; } break; default: error = EINVAL; break; } NG_RESPOND_MSG(error, node, item, rsp); NG_FREE_MSG(msg); return (error); } /* ng_bt3c_rcvmsg */ /* * Process data */ static int ng_bt3c_rcvdata(hook_p hook, item_p item) { bt3c_softc_p sc = (bt3c_softc_p)NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); struct mbuf *m = NULL; int error = 0; if (sc == NULL) { error = EHOSTDOWN; goto out; } if (hook != sc->hook) { error = EINVAL; goto out; } NGI_GET_M(item, m); IF_LOCK(&sc->outq); if (_IF_QFULL(&sc->outq)) { NG_BT3C_ERR(sc->dev, "Outgoing queue is full. Dropping mbuf, len=%d\n", m->m_pkthdr.len); NG_BT3C_STAT_OERROR(sc->stat); NG_FREE_M(m); } else _IF_ENQUEUE(&sc->outq, m); IF_UNLOCK(&sc->outq); error = ng_send_fn(sc->node, NULL, bt3c_send, NULL, 0 /* new send */); out: NG_FREE_ITEM(item); return (error); } /* ng_bt3c_rcvdata */ /**************************************************************************** **************************************************************************** ** PCMCIA driver specific **************************************************************************** ****************************************************************************/ /* * PC Card (PCMCIA) probe routine */ static int bt3c_pccard_probe(device_t dev) { static struct pccard_product const bt3c_pccard_products[] = { PCMCIA_CARD(3COM, 3CRWB609), { NULL, } }; struct pccard_product const *pp = NULL; pp = pccard_product_lookup(dev, bt3c_pccard_products, sizeof(bt3c_pccard_products[0]), NULL); if (pp == NULL) return (ENXIO); device_set_desc(dev, pp->pp_name); return (0); } /* bt3c_pccard_probe */ /* * PC Card (PCMCIA) attach routine */ static int bt3c_pccard_attach(device_t dev) { bt3c_softc_p sc = (bt3c_softc_p) device_get_softc(dev); /* Allocate I/O ports */ sc->iobase_rid = 0; sc->iobase = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->iobase_rid, 0, ~0, 8, RF_ACTIVE); if (sc->iobase == NULL) { device_printf(dev, "Could not allocate I/O ports\n"); goto bad; } sc->iot = rman_get_bustag(sc->iobase); sc->ioh = rman_get_bushandle(sc->iobase); /* Allocate IRQ */ sc->irq_rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (sc->irq == NULL) { device_printf(dev, "Could not allocate IRQ\n"); goto bad; } sc->irq_cookie = NULL; if (bus_setup_intr(dev, sc->irq, INTR_TYPE_TTY, NULL, bt3c_intr, sc, &sc->irq_cookie) != 0) { device_printf(dev, "Could not setup ISR\n"); goto bad; } /* Attach handler to TTY SWI thread */ sc->ith = NULL; if (swi_add(&tty_intr_event, device_get_nameunit(dev), bt3c_swi_intr, sc, SWI_TTY, 0, &sc->ith) < 0) { device_printf(dev, "Could not setup SWI ISR\n"); goto bad; } /* Create Netgraph node */ if (ng_make_node_common(&typestruct, &sc->node) != 0) { device_printf(dev, "Could not create Netgraph node\n"); sc->node = NULL; goto bad; } /* Name Netgraph node */ if (ng_name_node(sc->node, device_get_nameunit(dev)) != 0) { device_printf(dev, "Could not name Netgraph node\n"); NG_NODE_UNREF(sc->node); sc->node = NULL; goto bad; } sc->dev = dev; sc->debug = NG_BT3C_WARN_LEVEL; sc->inq.ifq_maxlen = sc->outq.ifq_maxlen = BT3C_DEFAULTQLEN; mtx_init(&sc->inq.ifq_mtx, "BT3C inq", NULL, MTX_DEF); mtx_init(&sc->outq.ifq_mtx, "BT3C outq", NULL, MTX_DEF); sc->state = NG_BT3C_W4_PKT_IND; sc->want = 1; 
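
The bad: path just below follows the usual attach unwind discipline: release resources strictly in the reverse order of acquisition and clear each pointer so a subsequent detach cannot free them twice. As a generic skeleton (grab_*/release_* and res_a/res_b are hypothetical helpers and variables, not driver code):

static void *res_a, *res_b;	/* stand-ins for softc members */

static int
attach_skeleton(device_t dev)
{
	if ((res_a = grab_a(dev)) == NULL)
		goto bad;
	if ((res_b = grab_b(dev)) == NULL)
		goto bad;
	return (0);
bad:
	if (res_b != NULL) {	/* reverse order of acquisition */
		release_b(dev, res_b);
		res_b = NULL;
	}
	if (res_a != NULL) {
		release_a(dev, res_a);
		res_a = NULL;
	}
	return (ENXIO);
}
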
NG_NODE_SET_PRIVATE(sc->node, sc); return (0); bad: if (sc->ith != NULL) { swi_remove(sc->ith); sc->ith = NULL; } if (sc->irq != NULL) { if (sc->irq_cookie != NULL) bus_teardown_intr(dev, sc->irq, sc->irq_cookie); bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); sc->irq = NULL; sc->irq_rid = 0; } if (sc->iobase != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, sc->iobase_rid, sc->iobase); sc->iobase = NULL; sc->iobase_rid = 0; } return (ENXIO); } /* bt3c_pccard_attach */ /* * PC Card (PCMCIA) detach routine */ static int bt3c_pccard_detach(device_t dev) { bt3c_softc_p sc = (bt3c_softc_p) device_get_softc(dev); if (sc == NULL) return (0); swi_remove(sc->ith); sc->ith = NULL; bus_teardown_intr(dev, sc->irq, sc->irq_cookie); bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); sc->irq_cookie = NULL; sc->irq = NULL; sc->irq_rid = 0; bus_release_resource(dev, SYS_RES_IOPORT, sc->iobase_rid, sc->iobase); sc->iobase = NULL; sc->iobase_rid = 0; if (sc->node != NULL) { NG_NODE_SET_PRIVATE(sc->node, NULL); ng_rmnode_self(sc->node); sc->node = NULL; } NG_FREE_M(sc->m); IF_DRAIN(&sc->inq); IF_DRAIN(&sc->outq); mtx_destroy(&sc->inq.ifq_mtx); mtx_destroy(&sc->outq.ifq_mtx); return (0); } /* bt3c_pccard_detach */ /* * Interrupt service routines */ static void bt3c_intr(void *context) { bt3c_softc_p sc = (bt3c_softc_p) context; u_int16_t control, status; if (sc == NULL || sc->ith == NULL) { printf("%s: bogus interrupt\n", NG_BT3C_NODE_TYPE); return; } bt3c_read_control(sc, control); if ((control & 0x80) == 0) return; bt3c_read(sc, 0x7001, status); NG_BT3C_INFO(sc->dev, "control=%#x, status=%#x\n", control, status); if ((status & 0xff) == 0x7f || (status & 0xff) == 0xff) { NG_BT3C_WARN(sc->dev, "Strange status=%#x\n", status); return; } /* Receive complete */ if (status & 0x0001) bt3c_receive(sc); /* Record status and schedule SWI */ sc->status |= status; swi_sched(sc->ith, 0); /* Complete interrupt */ bt3c_write(sc, 0x7001, 0x0000); bt3c_write_control(sc, control); } /* bt3c_intr */ /* * Receive data */ static void bt3c_receive(bt3c_softc_p sc) { u_int16_t i, count, c; /* Receive data from the card */ bt3c_read(sc, 0x7006, count); NG_BT3C_INFO(sc->dev, "The card has %d characters\n", count); bt3c_set_address(sc, 0x7480); for (i = 0; i < count; i++) { /* Allocate new mbuf if needed */ if (sc->m == NULL) { sc->state = NG_BT3C_W4_PKT_IND; sc->want = 1; MGETHDR(sc->m, M_NOWAIT, MT_DATA); if (sc->m == NULL) { NG_BT3C_ERR(sc->dev, "Could not get mbuf\n"); NG_BT3C_STAT_IERROR(sc->stat); break; /* XXX loss of sync */ } - MCLGET(sc->m, M_NOWAIT); - if (!(sc->m->m_flags & M_EXT)) { + if (!(MCLGET(sc->m, M_NOWAIT))) { NG_FREE_M(sc->m); NG_BT3C_ERR(sc->dev, "Could not get cluster\n"); NG_BT3C_STAT_IERROR(sc->stat); break; /* XXX loss of sync */ } sc->m->m_len = sc->m->m_pkthdr.len = 0; } /* Read and append character to mbuf */ bt3c_read_data(sc, c); if (sc->m->m_pkthdr.len >= MCLBYTES) { NG_BT3C_ERR(sc->dev, "Oversized frame\n"); NG_FREE_M(sc->m); sc->state = NG_BT3C_W4_PKT_IND; sc->want = 1; break; /* XXX loss of sync */ } mtod(sc->m, u_int8_t *)[sc->m->m_len ++] = (u_int8_t) c; sc->m->m_pkthdr.len ++; NG_BT3C_INFO(sc->dev, "Got char %#x, want=%d, got=%d\n", c, sc->want, sc->m->m_pkthdr.len); if (sc->m->m_pkthdr.len < sc->want) continue; /* wait for more */ switch (sc->state) { /* Got packet indicator */ case NG_BT3C_W4_PKT_IND: NG_BT3C_INFO(sc->dev, "Got packet indicator %#x\n", *mtod(sc->m, u_int8_t *)); sc->state = NG_BT3C_W4_PKT_HDR; /* * Since packet indicator included in the packet *
header, just set sc->want to sizeof(packet header). */ switch (*mtod(sc->m, u_int8_t *)) { case NG_HCI_ACL_DATA_PKT: sc->want = sizeof(ng_hci_acldata_pkt_t); break; case NG_HCI_SCO_DATA_PKT: sc->want = sizeof(ng_hci_scodata_pkt_t); break; case NG_HCI_EVENT_PKT: sc->want = sizeof(ng_hci_event_pkt_t); break; default: NG_BT3C_ERR(sc->dev, "Ignoring unknown packet type=%#x\n", *mtod(sc->m, u_int8_t *)); NG_BT3C_STAT_IERROR(sc->stat); NG_FREE_M(sc->m); sc->state = NG_BT3C_W4_PKT_IND; sc->want = 1; break; } break; /* Got packet header */ case NG_BT3C_W4_PKT_HDR: sc->state = NG_BT3C_W4_PKT_DATA; switch (*mtod(sc->m, u_int8_t *)) { case NG_HCI_ACL_DATA_PKT: c = le16toh(mtod(sc->m, ng_hci_acldata_pkt_t *)->length); break; case NG_HCI_SCO_DATA_PKT: c = mtod(sc->m, ng_hci_scodata_pkt_t*)->length; break; case NG_HCI_EVENT_PKT: c = mtod(sc->m, ng_hci_event_pkt_t *)->length; break; default: KASSERT(0, ("Invalid packet type=%#x\n", *mtod(sc->m, u_int8_t *))); break; } NG_BT3C_INFO(sc->dev, "Got packet header, packet type=%#x, got so far %d, payload size=%d\n", *mtod(sc->m, u_int8_t *), sc->m->m_pkthdr.len, c); if (c > 0) { sc->want += c; break; } /* else FALLTHROUGH and deliver frame */ /* XXX is this true? should we deliver empty frame? */ /* Got packet data */ case NG_BT3C_W4_PKT_DATA: NG_BT3C_INFO(sc->dev, "Got full packet, packet type=%#x, packet size=%d\n", *mtod(sc->m, u_int8_t *), sc->m->m_pkthdr.len); NG_BT3C_STAT_BYTES_RECV(sc->stat, sc->m->m_pkthdr.len); NG_BT3C_STAT_PCKTS_RECV(sc->stat); IF_LOCK(&sc->inq); if (_IF_QFULL(&sc->inq)) { NG_BT3C_ERR(sc->dev, "Incoming queue is full. Dropping mbuf, len=%d\n", sc->m->m_pkthdr.len); NG_BT3C_STAT_IERROR(sc->stat); NG_FREE_M(sc->m); } else { _IF_ENQUEUE(&sc->inq, sc->m); sc->m = NULL; } IF_UNLOCK(&sc->inq); sc->state = NG_BT3C_W4_PKT_IND; sc->want = 1; break; default: KASSERT(0, ("Invalid node state=%d", sc->state)); break; } } bt3c_write(sc, 0x7006, 0x0000); } /* bt3c_receive */ /* * SWI interrupt handler * Netgraph part is handled via ng_send_fn() to avoid race with hook * connection/disconnection */ static void bt3c_swi_intr(void *context) { bt3c_softc_p sc = (bt3c_softc_p) context; u_int16_t data; /* Receive complete */ if (sc->status & 0x0001) { sc->status &= ~0x0001; /* XXX is it safe? */ if (ng_send_fn(sc->node, NULL, &bt3c_forward, NULL, 0) != 0) NG_BT3C_ALERT(sc->dev, "Could not forward frames!\n"); } /* Send complete */ if (sc->status & 0x0002) { sc->status &= ~0x0002; /* XXX is it safe */ if (ng_send_fn(sc->node, NULL, &bt3c_send, NULL, 1) != 0) NG_BT3C_ALERT(sc->dev, "Could not send frames!\n"); } /* Antenna position */ if (sc->status & 0x0020) { sc->status &= ~0x0020; /* XXX is it safe */ bt3c_read(sc, 0x7002, data); data &= 0x10; if (data) sc->flags |= BT3C_ANTENNA_OUT; else sc->flags &= ~BT3C_ANTENNA_OUT; NG_BT3C_INFO(sc->dev, "Antenna %s\n", data? "OUT" : "IN"); } } /* bt3c_swi_intr */ /* * Send all incoming frames to the upper layer */ static void bt3c_forward(node_p node, hook_p hook, void *arg1, int arg2) { bt3c_softc_p sc = (bt3c_softc_p) NG_NODE_PRIVATE(node); struct mbuf *m = NULL; int error; if (sc == NULL) return; if (sc->hook != NULL && NG_HOOK_IS_VALID(sc->hook)) { for (;;) { IF_DEQUEUE(&sc->inq, m); if (m == NULL) break; NG_SEND_DATA_ONLY(error, sc->hook, m); if (error != 0) NG_BT3C_STAT_IERROR(sc->stat); } } else { IF_LOCK(&sc->inq); for (;;) { _IF_DEQUEUE(&sc->inq, m); if (m == NULL) break; NG_BT3C_STAT_IERROR(sc->stat); NG_FREE_M(m); } IF_UNLOCK(&sc->inq); } } /* bt3c_forward */ /* * Send more data to the device.
Must be called when the node is locked */ static void bt3c_send(node_p node, hook_p hook, void *arg, int completed) { bt3c_softc_p sc = (bt3c_softc_p) NG_NODE_PRIVATE(node); struct mbuf *m = NULL; int i, wrote, len; if (sc == NULL) return; if (completed) sc->flags &= ~BT3C_XMIT; if (sc->flags & BT3C_XMIT) return; bt3c_set_address(sc, 0x7080); for (wrote = 0; wrote < BT3C_FIFO_SIZE; ) { IF_DEQUEUE(&sc->outq, m); if (m == NULL) break; while (m != NULL) { len = min((BT3C_FIFO_SIZE - wrote), m->m_len); for (i = 0; i < len; i++) bt3c_write_data(sc, m->m_data[i]); wrote += len; m->m_data += len; m->m_len -= len; if (m->m_len > 0) break; m = m_free(m); } if (m != NULL) { IF_PREPEND(&sc->outq, m); break; } NG_BT3C_STAT_PCKTS_SENT(sc->stat); } if (wrote > 0) { NG_BT3C_INFO(sc->dev, "Wrote %d bytes\n", wrote); NG_BT3C_STAT_BYTES_SENT(sc->stat, wrote); bt3c_write(sc, 0x7005, wrote); sc->flags |= BT3C_XMIT; } } /* bt3c_send */ /* * Download chip firmware */ static void bt3c_download_firmware(bt3c_softc_p sc, char const *firmware, int firmware_size) { ng_bt3c_firmware_block_ep const *block = NULL; u_int16_t const *data = NULL; int i, size; u_int8_t c; /* Reset */ device_printf(sc->dev, "Resetting the card...\n"); bt3c_write(sc, 0x8040, 0x0404); bt3c_write(sc, 0x8040, 0x0400); DELAY(1); bt3c_write(sc, 0x8040, 0x0404); DELAY(17); /* Download firmware */ device_printf(sc->dev, "Starting firmware download process...\n"); for (size = 0; size < firmware_size; ) { block = (ng_bt3c_firmware_block_ep const *)(firmware + size); data = (u_int16_t const *)(block + 1); if (bootverbose) device_printf(sc->dev, "Download firmware block, " \ "address=%#08x, size=%d words, alignment=%d\n", block->block_address, block->block_size, block->block_alignment); bt3c_set_address(sc, block->block_address); for (i = 0; i < block->block_size; i++) bt3c_write_data(sc, data[i]); size += (sizeof(*block) + (block->block_size * 2) + block->block_alignment); } DELAY(17); device_printf(sc->dev, "Firmware download process complete\n"); /* Boot */ device_printf(sc->dev, "Starting the card...\n"); bt3c_set_address(sc, 0x3000); bt3c_read_control(sc, c); bt3c_write_control(sc, (c | 0x40)); DELAY(17); /* Clear registers */ device_printf(sc->dev, "Clearing card registers...\n"); bt3c_write(sc, 0x7006, 0x0000); bt3c_write(sc, 0x7005, 0x0000); bt3c_write(sc, 0x7001, 0x0000); DELAY(1000); } /* bt3c_download_firmware */ /**************************************************************************** **************************************************************************** ** Driver module **************************************************************************** ****************************************************************************/ /* * PC Card (PCMCIA) driver */ static device_method_t bt3c_pccard_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bt3c_pccard_probe), DEVMETHOD(device_attach, bt3c_pccard_attach), DEVMETHOD(device_detach, bt3c_pccard_detach), { 0, 0 } }; static driver_t bt3c_pccard_driver = { NG_BT3C_NODE_TYPE, bt3c_pccard_methods, sizeof(bt3c_softc_t) }; static devclass_t bt3c_devclass; /* * Load/Unload the driver module */ static int bt3c_modevent(module_t mod, int event, void *data) { int error; switch (event) { case MOD_LOAD: error = ng_newtype(&typestruct); if (error != 0) printf("%s: Could not register Netgraph node type, " \ "error=%d\n", NG_BT3C_NODE_TYPE, error); break; case MOD_UNLOAD: error = ng_rmtype(&typestruct); break; default: error = EOPNOTSUPP; break; } return (error); } /* bt3c_modevent */
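/*
 * Illustrative sketch (not part of the committed change; the helper
 * name is hypothetical): the mbuf-plus-cluster idiom the hunks in this
 * diff converge on, now that MCLGET() reports success through its
 * return value instead of requiring a separate M_EXT flag test.
 *
 *	static struct mbuf *
 *	example_gethdr_cluster(void)
 *	{
 *		struct mbuf *m;
 *
 *		MGETHDR(m, M_NOWAIT, MT_DATA);
 *		if (m == NULL)
 *			return (NULL);
 *		if (!(MCLGET(m, M_NOWAIT))) {
 *			m_freem(m);
 *			return (NULL);
 *		}
 *		m->m_len = m->m_pkthdr.len = 0;
 *		return (m);
 *	}
 *
 * The old two-step form was: MCLGET(m, M_NOWAIT); followed by
 * if (!(m->m_flags & M_EXT)) { ... }.
 */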
DRIVER_MODULE(bt3c, pccard, bt3c_pccard_driver, bt3c_devclass, bt3c_modevent, 0); MODULE_VERSION(ng_bt3c, NG_BLUETOOTH_VERSION); MODULE_DEPEND(ng_bt3c, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); Index: head/sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c =================================================================== --- head/sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c (revision 276749) +++ head/sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c (revision 276750) @@ -1,1878 +1,1875 @@ /* * ng_ubt.c */ /*- * Copyright (c) 2001-2009 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: ng_ubt.c,v 1.16 2003/10/10 19:15:06 max Exp $ * $FreeBSD$ */ /* * NOTE: ng_ubt2 driver has a split personality. On one side it is * a USB device driver and on the other it is a Netgraph node. This * driver will *NOT* create traditional /dev/ entries, only Netgraph * node. * * NOTE ON LOCKS USED: the ng_ubt2 driver uses 2 locks (mutexes) * * 1) sc_if_mtx - lock for device's interface #0 and #1. This lock is used * by USB for any USB request going over device's interface #0 and #1, * i.e. interrupt, control, bulk and isoc. transfers. * * 2) sc_ng_mtx - this lock is used to protect shared (between USB, Netgraph * and Taskqueue) data, such as outgoing mbuf queues, task flags and hook * pointer. This lock *SHOULD NOT* be grabbed for a long time. In fact, * think of it as a spin lock. * * NOTE ON LOCKING STRATEGY: ng_ubt2 driver operates in 3 different contexts. * * 1) USB context. This is where all the USB related stuff happens. All * callbacks run in this context. All callbacks are called (by USB) with * appropriate interface lock held. It is (generally) allowed to grab * any additional locks. * * 2) Netgraph context. This is where all the Netgraph related stuff happens. * Since we mark node as WRITER, the Netgraph node will be "locked" (from * Netgraph point of view). Any variable that is only modified from the * Netgraph context does not require any additional locking. It is generally * *NOT* allowed to grab *ANY* additional locks. Whatever you do, *DO NOT* * grab any lock in the Netgraph context that could cause de-scheduling of * the Netgraph thread for a significant amount of time. In fact, the only * lock that is allowed in the Netgraph context is the sc_ng_mtx lock.
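 *
 * For example, the only locking a Netgraph-context path should ever do
 * is the short critical section ng_ubt_rcvdata() below uses to hand
 * work over (enqueue, then schedule the task, all under sc_ng_mtx):
 *
 *	UBT_NG_LOCK(sc);
 *	NG_BT_MBUFQ_ENQUEUE(q, m);
 *	ubt_task_schedule(sc, action);
 *	UBT_NG_UNLOCK(sc);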
 * Also make sure that any code that is called from the Netgraph context * follows the rule above. * * 3) Taskqueue context. This is where ubt_task runs. Since we are generally * NOT allowed to grab any lock that could cause de-scheduling in the * Netgraph context, and, USB requires us to grab interface lock before * doing things with transfers, it is safer to transition from the Netgraph * context to the Taskqueue context before we can call into USB subsystem. * * So, to put everything together, the rules are as follows. * It is OK to call from the USB context or the Taskqueue context into * the Netgraph context (i.e. call NG_SEND_xxx functions). In other words * it is allowed to call into the Netgraph context with locks held. * It is *NOT* OK to call from the Netgraph context into the USB context, * because USB requires us to grab interface locks, and, it is safer to * avoid it. So, to make things safer we set task flags to indicate which * actions we want to perform and schedule ubt_task which would run in the * Taskqueue context. * It is OK to call from the Taskqueue context into the USB context, * and, ubt_task does just that (i.e. grabs appropriate interface locks * before calling into USB). * Access to the outgoing queues, task flags and hook pointer is * controlled by the sc_ng_mtx lock. It is an unavoidable evil. Again, * sc_ng_mtx should really be a spin lock (and it is very likely to be an * equivalent of a spin lock due to the adaptive nature of FreeBSD mutexes). * All USB callbacks accept the softc pointer as private data. USB ensures * that this pointer is valid. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #include #include #include #define USB_DEBUG_VAR usb_debug #include #include #include #include #include #include #include #include #include #include #include static int ubt_modevent(module_t, int, void *); static device_probe_t ubt_probe; static device_attach_t ubt_attach; static device_detach_t ubt_detach; static void ubt_task_schedule(ubt_softc_p, int); static task_fn_t ubt_task; #define ubt_xfer_start(sc, i) usbd_transfer_start((sc)->sc_xfer[(i)]) /* Netgraph methods */ static ng_constructor_t ng_ubt_constructor; static ng_shutdown_t ng_ubt_shutdown; static ng_newhook_t ng_ubt_newhook; static ng_connect_t ng_ubt_connect; static ng_disconnect_t ng_ubt_disconnect; static ng_rcvmsg_t ng_ubt_rcvmsg; static ng_rcvdata_t ng_ubt_rcvdata; /* Queue length */ static const struct ng_parse_struct_field ng_ubt_node_qlen_type_fields[] = { { "queue", &ng_parse_int32_type, }, { "qlen", &ng_parse_int32_type, }, { NULL, } }; static const struct ng_parse_type ng_ubt_node_qlen_type = { &ng_parse_struct_type, &ng_ubt_node_qlen_type_fields }; /* Stat info */ static const struct ng_parse_struct_field ng_ubt_node_stat_type_fields[] = { { "pckts_recv", &ng_parse_uint32_type, }, { "bytes_recv", &ng_parse_uint32_type, }, { "pckts_sent", &ng_parse_uint32_type, }, { "bytes_sent", &ng_parse_uint32_type, }, { "oerrors", &ng_parse_uint32_type, }, { "ierrors", &ng_parse_uint32_type, }, { NULL, } }; static const struct ng_parse_type ng_ubt_node_stat_type = { &ng_parse_struct_type, &ng_ubt_node_stat_type_fields }; /* Netgraph node command list */ static const struct ng_cmdlist ng_ubt_cmdlist[] = { { NGM_UBT_COOKIE, NGM_UBT_NODE_SET_DEBUG, "set_debug", &ng_parse_uint16_type, NULL }, { NGM_UBT_COOKIE, NGM_UBT_NODE_GET_DEBUG, "get_debug", NULL, &ng_parse_uint16_type }, {
NGM_UBT_COOKIE, NGM_UBT_NODE_SET_QLEN, "set_qlen", &ng_ubt_node_qlen_type, NULL }, { NGM_UBT_COOKIE, NGM_UBT_NODE_GET_QLEN, "get_qlen", &ng_ubt_node_qlen_type, &ng_ubt_node_qlen_type }, { NGM_UBT_COOKIE, NGM_UBT_NODE_GET_STAT, "get_stat", NULL, &ng_ubt_node_stat_type }, { NGM_UBT_COOKIE, NGM_UBT_NODE_RESET_STAT, "reset_stat", NULL, NULL }, { 0, } }; /* Netgraph node type */ static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_UBT_NODE_TYPE, .constructor = ng_ubt_constructor, .rcvmsg = ng_ubt_rcvmsg, .shutdown = ng_ubt_shutdown, .newhook = ng_ubt_newhook, .connect = ng_ubt_connect, .rcvdata = ng_ubt_rcvdata, .disconnect = ng_ubt_disconnect, .cmdlist = ng_ubt_cmdlist }; /**************************************************************************** **************************************************************************** ** USB specific **************************************************************************** ****************************************************************************/ /* USB methods */ static usb_callback_t ubt_ctrl_write_callback; static usb_callback_t ubt_intr_read_callback; static usb_callback_t ubt_bulk_read_callback; static usb_callback_t ubt_bulk_write_callback; static usb_callback_t ubt_isoc_read_callback; static usb_callback_t ubt_isoc_write_callback; static int ubt_fwd_mbuf_up(ubt_softc_p, struct mbuf **); static int ubt_isoc_read_one_frame(struct usb_xfer *, int); /* * USB config * * The following describes USB transfers that could be submitted on the USB device. * * Interface 0 on the USB device must present the following endpoints * 1) Interrupt endpoint to receive HCI events * 2) Bulk IN endpoint to receive ACL data * 3) Bulk OUT endpoint to send ACL data * * Interface 1 on the USB device must present the following endpoints * 1) Isochronous IN endpoint to receive SCO data * 2) Isochronous OUT endpoint to send SCO data */ static const struct usb_config ubt_config[UBT_N_TRANSFER] = { /* * Interface #0 */ /* Outgoing bulk transfer - ACL packets */ [UBT_IF_0_BULK_DT_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .if_index = 0, .bufsize = UBT_BULK_WRITE_BUFFER_SIZE, .flags = { .pipe_bof = 1, .force_short_xfer = 1, }, .callback = &ubt_bulk_write_callback, }, /* Incoming bulk transfer - ACL packets */ [UBT_IF_0_BULK_DT_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .if_index = 0, .bufsize = UBT_BULK_READ_BUFFER_SIZE, .flags = { .pipe_bof = 1, .short_xfer_ok = 1, }, .callback = &ubt_bulk_read_callback, }, /* Incoming interrupt transfer - HCI events */ [UBT_IF_0_INTR_DT_RD] = { .type = UE_INTERRUPT, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .if_index = 0, .flags = { .pipe_bof = 1, .short_xfer_ok = 1, }, .bufsize = UBT_INTR_BUFFER_SIZE, .callback = &ubt_intr_read_callback, }, /* Outgoing control transfer - HCI commands */ [UBT_IF_0_CTRL_DT_WR] = { .type = UE_CONTROL, .endpoint = 0x00, /* control pipe */ .direction = UE_DIR_ANY, .if_index = 0, .bufsize = UBT_CTRL_BUFFER_SIZE, .callback = &ubt_ctrl_write_callback, .timeout = 5000, /* 5 seconds */ }, /* * Interface #1 */ /* Incoming isochronous transfer #1 - SCO packets */ [UBT_IF_1_ISOC_DT_RD1] = { .type = UE_ISOCHRONOUS, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .if_index = 1, .bufsize = 0, /* use "wMaxPacketSize * frames" */ .frames = UBT_ISOC_NFRAMES, .flags = { .short_xfer_ok = 1, }, .callback = &ubt_isoc_read_callback, }, /* Incoming isochronous transfer #2 - SCO packets */ [UBT_IF_1_ISOC_DT_RD2] = { .type = UE_ISOCHRONOUS,
.endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .if_index = 1, .bufsize = 0, /* use "wMaxPacketSize * frames" */ .frames = UBT_ISOC_NFRAMES, .flags = { .short_xfer_ok = 1, }, .callback = &ubt_isoc_read_callback, }, /* Outgoing isochronous transfer #1 - SCO packets */ [UBT_IF_1_ISOC_DT_WR1] = { .type = UE_ISOCHRONOUS, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .if_index = 1, .bufsize = 0, /* use "wMaxPacketSize * frames" */ .frames = UBT_ISOC_NFRAMES, .flags = { .short_xfer_ok = 1, }, .callback = &ubt_isoc_write_callback, }, /* Outgoing isochronous transfer #2 - SCO packets */ [UBT_IF_1_ISOC_DT_WR2] = { .type = UE_ISOCHRONOUS, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .if_index = 1, .bufsize = 0, /* use "wMaxPacketSize * frames" */ .frames = UBT_ISOC_NFRAMES, .flags = { .short_xfer_ok = 1, }, .callback = &ubt_isoc_write_callback, }, }; /* * If for some reason device should not be attached then put * VendorID/ProductID pair into the list below. The format is * as follows: * * { USB_VPI(VENDOR_ID, PRODUCT_ID, 0) }, * * where VENDOR_ID and PRODUCT_ID are hex numbers. */ static const STRUCT_USB_HOST_ID ubt_ignore_devs[] = { /* AVM USB Bluetooth-Adapter BlueFritz! v1.0 */ { USB_VPI(USB_VENDOR_AVM, 0x2200, 0) }, /* Atheros 3011 with sflash firmware */ { USB_VPI(0x0cf3, 0x3002, 0) }, { USB_VPI(0x0cf3, 0xe019, 0) }, { USB_VPI(0x13d3, 0x3304, 0) }, { USB_VPI(0x0930, 0x0215, 0) }, { USB_VPI(0x0489, 0xe03d, 0) }, { USB_VPI(0x0489, 0xe027, 0) }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_VPI(0x03f0, 0x311d, 0) }, /* Atheros 3012 with sflash firmware */ { USB_VPI(0x0cf3, 0x3004, 0), USB_DEV_BCD_LTEQ(1) }, { USB_VPI(0x0cf3, 0x311d, 0), USB_DEV_BCD_LTEQ(1) }, { USB_VPI(0x13d3, 0x3375, 0), USB_DEV_BCD_LTEQ(1) }, { USB_VPI(0x04ca, 0x3005, 0), USB_DEV_BCD_LTEQ(1) }, { USB_VPI(0x04ca, 0x3006, 0), USB_DEV_BCD_LTEQ(1) }, { USB_VPI(0x04ca, 0x3008, 0), USB_DEV_BCD_LTEQ(1) }, { USB_VPI(0x13d3, 0x3362, 0), USB_DEV_BCD_LTEQ(1) }, { USB_VPI(0x0cf3, 0xe004, 0), USB_DEV_BCD_LTEQ(1) }, { USB_VPI(0x0930, 0x0219, 0), USB_DEV_BCD_LTEQ(1) }, { USB_VPI(0x0489, 0xe057, 0), USB_DEV_BCD_LTEQ(1) }, { USB_VPI(0x13d3, 0x3393, 0), USB_DEV_BCD_LTEQ(1) }, { USB_VPI(0x0489, 0xe04e, 0), USB_DEV_BCD_LTEQ(1) }, { USB_VPI(0x0489, 0xe056, 0), USB_DEV_BCD_LTEQ(1) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_VPI(0x0489, 0xe02c, 0), USB_DEV_BCD_LTEQ(1) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_VPI(0x0489, 0xe03c, 0), USB_DEV_BCD_LTEQ(1) }, { USB_VPI(0x0489, 0xe036, 0), USB_DEV_BCD_LTEQ(1) }, }; /* List of supported bluetooth devices */ static const STRUCT_USB_HOST_ID ubt_devs[] = { /* Generic Bluetooth class devices */ { USB_IFACE_CLASS(UDCLASS_WIRELESS), USB_IFACE_SUBCLASS(UDSUBCLASS_RF), USB_IFACE_PROTOCOL(UDPROTO_BLUETOOTH) }, /* AVM USB Bluetooth-Adapter BlueFritz! 
v2.0 */ { USB_VPI(USB_VENDOR_AVM, 0x3800, 0) }, /* Broadcom USB dongles, mostly BCM20702 and BCM20702A0 */ { USB_VENDOR(USB_VENDOR_BROADCOM), USB_IFACE_CLASS(UICLASS_VENDOR), USB_IFACE_SUBCLASS(UDSUBCLASS_RF), USB_IFACE_PROTOCOL(UDPROTO_BLUETOOTH) }, /* Apple-specific (Broadcom) devices */ { USB_VENDOR(USB_VENDOR_APPLE), USB_IFACE_CLASS(UICLASS_VENDOR), USB_IFACE_SUBCLASS(UDSUBCLASS_RF), USB_IFACE_PROTOCOL(UDPROTO_BLUETOOTH) }, /* Foxconn - Hon Hai */ { USB_VENDOR(USB_VENDOR_FOXCONN), USB_IFACE_CLASS(UICLASS_VENDOR), USB_IFACE_SUBCLASS(UDSUBCLASS_RF), USB_IFACE_PROTOCOL(UDPROTO_BLUETOOTH) }, /* MediaTek MT76x0E */ { USB_VPI(USB_VENDOR_MEDIATEK, 0x763f, 0) }, /* Broadcom SoftSailing reporting vendor specific */ { USB_VPI(USB_VENDOR_BROADCOM, 0x21e1, 0) }, /* Apple MacBookPro 7,1 */ { USB_VPI(USB_VENDOR_APPLE, 0x8213, 0) }, /* Apple iMac11,1 */ { USB_VPI(USB_VENDOR_APPLE, 0x8215, 0) }, /* Apple MacBookPro6,2 */ { USB_VPI(USB_VENDOR_APPLE, 0x8218, 0) }, /* Apple MacBookAir3,1, MacBookAir3,2 */ { USB_VPI(USB_VENDOR_APPLE, 0x821b, 0) }, /* Apple MacBookAir4,1 */ { USB_VPI(USB_VENDOR_APPLE, 0x821f, 0) }, /* MacBookAir6,1 */ { USB_VPI(USB_VENDOR_APPLE, 0x828f, 0) }, /* Apple MacBookPro8,2 */ { USB_VPI(USB_VENDOR_APPLE, 0x821a, 0) }, /* Apple MacMini5,1 */ { USB_VPI(USB_VENDOR_APPLE, 0x8281, 0) }, /* Bluetooth Ultraport Module from IBM */ { USB_VPI(USB_VENDOR_TDK, 0x030a, 0) }, /* ALPS Modules with non-standard ID */ { USB_VPI(USB_VENDOR_ALPS, 0x3001, 0) }, { USB_VPI(USB_VENDOR_ALPS, 0x3002, 0) }, { USB_VPI(USB_VENDOR_ERICSSON2, 0x1002, 0) }, /* Canyon CN-BTU1 with HID interfaces */ { USB_VPI(USB_VENDOR_CANYON, 0x0000, 0) }, /* Broadcom BCM20702A0 */ { USB_VPI(USB_VENDOR_ASUS, 0x17b5, 0) }, { USB_VPI(USB_VENDOR_ASUS, 0x17cb, 0) }, { USB_VPI(USB_VENDOR_LITEON, 0x2003, 0) }, { USB_VPI(USB_VENDOR_FOXCONN, 0xe042, 0) }, { USB_VPI(USB_VENDOR_DELL, 0x8197, 0) }, }; /* * Probe for a USB Bluetooth device. * USB context. */ static int ubt_probe(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); int error; if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bIfaceIndex != 0) return (ENXIO); if (usbd_lookup_id_by_uaa(ubt_ignore_devs, sizeof(ubt_ignore_devs), uaa) == 0) return (ENXIO); error = usbd_lookup_id_by_uaa(ubt_devs, sizeof(ubt_devs), uaa); if (error == 0) return (BUS_PROBE_GENERIC); return (error); } /* ubt_probe */ /* * Attach the device. * USB context. 
*/ static int ubt_attach(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); struct ubt_softc *sc = device_get_softc(dev); struct usb_endpoint_descriptor *ed; struct usb_interface_descriptor *id; struct usb_interface *iface; uint16_t wMaxPacketSize; uint8_t alt_index, i, j; uint8_t iface_index[2] = { 0, 1 }; device_set_usb_desc(dev); sc->sc_dev = dev; sc->sc_debug = NG_UBT_WARN_LEVEL; /* * Create Netgraph node */ if (ng_make_node_common(&typestruct, &sc->sc_node) != 0) { UBT_ALERT(sc, "could not create Netgraph node\n"); return (ENXIO); } /* Name Netgraph node */ if (ng_name_node(sc->sc_node, device_get_nameunit(dev)) != 0) { UBT_ALERT(sc, "could not name Netgraph node\n"); NG_NODE_UNREF(sc->sc_node); return (ENXIO); } NG_NODE_SET_PRIVATE(sc->sc_node, sc); NG_NODE_FORCE_WRITER(sc->sc_node); /* * Initialize device softc structure */ /* initialize locks */ mtx_init(&sc->sc_ng_mtx, "ubt ng", NULL, MTX_DEF); mtx_init(&sc->sc_if_mtx, "ubt if", NULL, MTX_DEF | MTX_RECURSE); /* initialize packet queues */ NG_BT_MBUFQ_INIT(&sc->sc_cmdq, UBT_DEFAULT_QLEN); NG_BT_MBUFQ_INIT(&sc->sc_aclq, UBT_DEFAULT_QLEN); NG_BT_MBUFQ_INIT(&sc->sc_scoq, UBT_DEFAULT_QLEN); /* initialize glue task */ TASK_INIT(&sc->sc_task, 0, ubt_task, sc); /* * Configure Bluetooth USB device. Discover all required USB * interfaces and endpoints. * * USB device must present two interfaces: * 1) Interface 0 that has 3 endpoints * 1) Interrupt endpoint to receive HCI events * 2) Bulk IN endpoint to receive ACL data * 3) Bulk OUT endpoint to send ACL data * * 2) Interface 1 then has 2 endpoints * 1) Isochronous IN endpoint to receive SCO data * 2) Isochronous OUT endpoint to send SCO data * * Interface 1 (with isochronous endpoints) has several alternate * configurations with different packet size. 
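 *
 * (For example: under the Bluetooth HCI USB transport layer, a typical
 * controller offers alternate settings 0-5 on interface 1 with isoc.
 * wMaxPacketSize values of roughly 0, 9, 17, 25, 33 and 49 bytes; the
 * scan below simply keeps the largest value the device advertises.)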
*/ /* * For interface #1 search alternate settings, and find * the descriptor with the largest wMaxPacketSize */ wMaxPacketSize = 0; alt_index = 0; i = 0; j = 0; ed = NULL; /* * Search through all the descriptors looking for the largest * packet size: */ while ((ed = (struct usb_endpoint_descriptor *)usb_desc_foreach( usbd_get_config_descriptor(uaa->device), (struct usb_descriptor *)ed))) { if ((ed->bDescriptorType == UDESC_INTERFACE) && (ed->bLength >= sizeof(*id))) { id = (struct usb_interface_descriptor *)ed; i = id->bInterfaceNumber; j = id->bAlternateSetting; } if ((ed->bDescriptorType == UDESC_ENDPOINT) && (ed->bLength >= sizeof(*ed)) && (i == 1)) { uint16_t temp; temp = UGETW(ed->wMaxPacketSize); if (temp > wMaxPacketSize) { wMaxPacketSize = temp; alt_index = j; } } } /* Set alt configuration on interface #1 only if we found it */ if (wMaxPacketSize > 0 && usbd_set_alt_interface_index(uaa->device, 1, alt_index)) { UBT_ALERT(sc, "could not set alternate setting %d " \ "for interface 1!\n", alt_index); goto detach; } /* Setup transfers for both interfaces */ if (usbd_transfer_setup(uaa->device, iface_index, sc->sc_xfer, ubt_config, UBT_N_TRANSFER, sc, &sc->sc_if_mtx)) { UBT_ALERT(sc, "could not allocate transfers\n"); goto detach; } /* Claim all interfaces belonging to the Bluetooth part */ for (i = 1;; i++) { iface = usbd_get_iface(uaa->device, i); if (iface == NULL) break; id = usbd_get_interface_descriptor(iface); if ((id != NULL) && (id->bInterfaceClass == UICLASS_WIRELESS) && (id->bInterfaceSubClass == UISUBCLASS_RF) && (id->bInterfaceProtocol == UIPROTO_BLUETOOTH)) { usbd_set_parent_iface(uaa->device, i, uaa->info.bIfaceIndex); } } return (0); /* success */ detach: ubt_detach(dev); return (ENXIO); } /* ubt_attach */ /* * Detach the device. * USB context. */ int ubt_detach(device_t dev) { struct ubt_softc *sc = device_get_softc(dev); node_p node = sc->sc_node; /* Destroy Netgraph node */ if (node != NULL) { sc->sc_node = NULL; NG_NODE_REALLY_DIE(node); ng_rmnode_self(node); } /* Make sure ubt_task is gone */ taskqueue_drain(taskqueue_swi, &sc->sc_task); /* Free USB transfers, if any */ usbd_transfer_unsetup(sc->sc_xfer, UBT_N_TRANSFER); /* Destroy queues */ UBT_NG_LOCK(sc); NG_BT_MBUFQ_DESTROY(&sc->sc_cmdq); NG_BT_MBUFQ_DESTROY(&sc->sc_aclq); NG_BT_MBUFQ_DESTROY(&sc->sc_scoq); UBT_NG_UNLOCK(sc); mtx_destroy(&sc->sc_if_mtx); mtx_destroy(&sc->sc_ng_mtx); return (0); } /* ubt_detach */ /* * Called when outgoing control request (HCI command) has completed, i.e. * HCI command was sent to the device. * USB context.
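 *
 * (Sketch of the setup packet assembled below; commands go out as a
 * class-specific request, with wValue and wIndex left zero by the
 * bzero() and only the request type and length filled in:
 *
 *	req.bmRequestType = UBT_HCI_REQUEST;
 *	USETW(req.wLength, m->m_pkthdr.len);
 *
 * The HCI command itself follows as the data stage.)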
*/ static void ubt_ctrl_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct ubt_softc *sc = usbd_xfer_softc(xfer); struct usb_device_request req; struct mbuf *m; struct usb_page_cache *pc; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: UBT_INFO(sc, "sent %d bytes to control pipe\n", actlen); UBT_STAT_BYTES_SENT(sc, actlen); UBT_STAT_PCKTS_SENT(sc); /* FALLTHROUGH */ case USB_ST_SETUP: send_next: /* Get next command mbuf, if any */ UBT_NG_LOCK(sc); NG_BT_MBUFQ_DEQUEUE(&sc->sc_cmdq, m); UBT_NG_UNLOCK(sc); if (m == NULL) { UBT_INFO(sc, "HCI command queue is empty\n"); break; /* transfer complete */ } /* Initialize a USB control request and then schedule it */ bzero(&req, sizeof(req)); req.bmRequestType = UBT_HCI_REQUEST; USETW(req.wLength, m->m_pkthdr.len); UBT_INFO(sc, "Sending control request, " \ "bmRequestType=0x%02x, wLength=%d\n", req.bmRequestType, UGETW(req.wLength)); pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &req, sizeof(req)); pc = usbd_xfer_get_frame(xfer, 1); usbd_m_copy_in(pc, 0, m, 0, m->m_pkthdr.len); usbd_xfer_set_frame_len(xfer, 0, sizeof(req)); usbd_xfer_set_frame_len(xfer, 1, m->m_pkthdr.len); usbd_xfer_set_frames(xfer, 2); NG_FREE_M(m); usbd_transfer_submit(xfer); break; default: /* Error */ if (error != USB_ERR_CANCELLED) { UBT_WARN(sc, "control transfer failed: %s\n", usbd_errstr(error)); UBT_STAT_OERROR(sc); goto send_next; } /* transfer cancelled */ break; } } /* ubt_ctrl_write_callback */ /* * Called when incoming interrupt transfer (HCI event) has completed, i.e. * HCI event was received from the device. * USB context. */ static void ubt_intr_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct ubt_softc *sc = usbd_xfer_softc(xfer); struct mbuf *m; ng_hci_event_pkt_t *hdr; struct usb_page_cache *pc; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); m = NULL; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: /* Allocate a new mbuf */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { UBT_STAT_IERROR(sc); goto submit_next; } - MCLGET(m, M_NOWAIT); - if (!(m->m_flags & M_EXT)) { + if (!(MCLGET(m, M_NOWAIT))) { UBT_STAT_IERROR(sc); goto submit_next; } /* Add HCI packet type */ *mtod(m, uint8_t *)= NG_HCI_EVENT_PKT; m->m_pkthdr.len = m->m_len = 1; if (actlen > MCLBYTES - 1) actlen = MCLBYTES - 1; pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, mtod(m, uint8_t *) + 1, actlen); m->m_pkthdr.len += actlen; m->m_len += actlen; UBT_INFO(sc, "got %d bytes from interrupt pipe\n", actlen); /* Validate packet and send it up the stack */ if (m->m_pkthdr.len < (int)sizeof(*hdr)) { UBT_INFO(sc, "HCI event packet is too short\n"); UBT_STAT_IERROR(sc); goto submit_next; } hdr = mtod(m, ng_hci_event_pkt_t *); if (hdr->length != (m->m_pkthdr.len - sizeof(*hdr))) { UBT_ERR(sc, "Invalid HCI event packet size, " \ "length=%d, pktlen=%d\n", hdr->length, m->m_pkthdr.len); UBT_STAT_IERROR(sc); goto submit_next; } UBT_INFO(sc, "got complete HCI event frame, pktlen=%d, " \ "length=%d\n", m->m_pkthdr.len, hdr->length); UBT_STAT_PCKTS_RECV(sc); UBT_STAT_BYTES_RECV(sc, m->m_pkthdr.len); ubt_fwd_mbuf_up(sc, &m); /* m == NULL at this point */ /* FALLTHROUGH */ case USB_ST_SETUP: submit_next: NG_FREE_M(m); /* checks for m != NULL */ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); break; default: /* Error */ if (error != USB_ERR_CANCELLED) { UBT_WARN(sc, "interrupt transfer failed: %s\n", usbd_errstr(error)); /* Try to clear stall 
first */ usbd_xfer_set_stall(xfer); goto submit_next; } /* transfer cancelled */ break; } } /* ubt_intr_read_callback */ /* * Called when incoming bulk transfer (ACL packet) has completed, i.e. * ACL packet was received from the device. * USB context. */ static void ubt_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct ubt_softc *sc = usbd_xfer_softc(xfer); struct mbuf *m; ng_hci_acldata_pkt_t *hdr; struct usb_page_cache *pc; int len; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); m = NULL; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: /* Allocate new mbuf */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { UBT_STAT_IERROR(sc); goto submit_next; } - MCLGET(m, M_NOWAIT); - if (!(m->m_flags & M_EXT)) { + if (!(MCLGET(m, M_NOWAIT))) { UBT_STAT_IERROR(sc); goto submit_next; } /* Add HCI packet type */ *mtod(m, uint8_t *)= NG_HCI_ACL_DATA_PKT; m->m_pkthdr.len = m->m_len = 1; if (actlen > MCLBYTES - 1) actlen = MCLBYTES - 1; pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, mtod(m, uint8_t *) + 1, actlen); m->m_pkthdr.len += actlen; m->m_len += actlen; UBT_INFO(sc, "got %d bytes from bulk-in pipe\n", actlen); /* Validate packet and send it up the stack */ if (m->m_pkthdr.len < (int)sizeof(*hdr)) { UBT_INFO(sc, "HCI ACL packet is too short\n"); UBT_STAT_IERROR(sc); goto submit_next; } hdr = mtod(m, ng_hci_acldata_pkt_t *); len = le16toh(hdr->length); if (len != (int)(m->m_pkthdr.len - sizeof(*hdr))) { UBT_ERR(sc, "Invalid ACL packet size, length=%d, " \ "pktlen=%d\n", len, m->m_pkthdr.len); UBT_STAT_IERROR(sc); goto submit_next; } UBT_INFO(sc, "got complete ACL data packet, pktlen=%d, " \ "length=%d\n", m->m_pkthdr.len, len); UBT_STAT_PCKTS_RECV(sc); UBT_STAT_BYTES_RECV(sc, m->m_pkthdr.len); ubt_fwd_mbuf_up(sc, &m); /* m == NULL at this point */ /* FALLTHROUGH */ case USB_ST_SETUP: submit_next: NG_FREE_M(m); /* checks for m != NULL */ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); break; default: /* Error */ if (error != USB_ERR_CANCELLED) { UBT_WARN(sc, "bulk-in transfer failed: %s\n", usbd_errstr(error)); /* Try to clear stall first */ usbd_xfer_set_stall(xfer); goto submit_next; } /* transfer cancelled */ break; } } /* ubt_bulk_read_callback */ /* * Called when outgoing bulk transfer (ACL packet) has completed, i.e. * ACL packet was sent to the device. * USB context.
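 *
 * (The callback below doubles as the transmit pump: on setup or after
 * each completion it dequeues the next ACL frame and re-submits, e.g.
 *
 *	NG_BT_MBUFQ_DEQUEUE(&sc->sc_aclq, m);
 *	usbd_m_copy_in(pc, 0, m, 0, m->m_pkthdr.len);
 *	usbd_transfer_submit(xfer);
 *
 * so the pipe keeps draining as long as the queue is non-empty.)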
*/ static void ubt_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct ubt_softc *sc = usbd_xfer_softc(xfer); struct mbuf *m; struct usb_page_cache *pc; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: UBT_INFO(sc, "sent %d bytes to bulk-out pipe\n", actlen); UBT_STAT_BYTES_SENT(sc, actlen); UBT_STAT_PCKTS_SENT(sc); /* FALLTHROUGH */ case USB_ST_SETUP: send_next: /* Get next mbuf, if any */ UBT_NG_LOCK(sc); NG_BT_MBUFQ_DEQUEUE(&sc->sc_aclq, m); UBT_NG_UNLOCK(sc); if (m == NULL) { UBT_INFO(sc, "ACL data queue is empty\n"); break; /* transfer completed */ } /* * Copy ACL data frame back to a linear USB transfer buffer * and schedule transfer */ pc = usbd_xfer_get_frame(xfer, 0); usbd_m_copy_in(pc, 0, m, 0, m->m_pkthdr.len); usbd_xfer_set_frame_len(xfer, 0, m->m_pkthdr.len); UBT_INFO(sc, "bulk-out transfer has been started, len=%d\n", m->m_pkthdr.len); NG_FREE_M(m); usbd_transfer_submit(xfer); break; default: /* Error */ if (error != USB_ERR_CANCELLED) { UBT_WARN(sc, "bulk-out transfer failed: %s\n", usbd_errstr(error)); UBT_STAT_OERROR(sc); /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto send_next; } /* transfer cancelled */ break; } } /* ubt_bulk_write_callback */ /* * Called when incoming isoc transfer (SCO packet) has completed, i.e. * SCO packet was received from the device. * USB context. */ static void ubt_isoc_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct ubt_softc *sc = usbd_xfer_softc(xfer); int n; int actlen, nframes; usbd_xfer_status(xfer, &actlen, NULL, NULL, &nframes); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: for (n = 0; n < nframes; n ++) if (ubt_isoc_read_one_frame(xfer, n) < 0) break; /* FALLTHROUGH */ case USB_ST_SETUP: read_next: for (n = 0; n < nframes; n ++) usbd_xfer_set_frame_len(xfer, n, usbd_xfer_max_framelen(xfer)); usbd_transfer_submit(xfer); break; default: /* Error */ if (error != USB_ERR_CANCELLED) { UBT_STAT_IERROR(sc); goto read_next; } /* transfer cancelled */ break; } } /* ubt_isoc_read_callback */ /* * Helper function. Called from ubt_isoc_read_callback() to read * SCO data from one frame. * USB context. */ static int ubt_isoc_read_one_frame(struct usb_xfer *xfer, int frame_no) { struct ubt_softc *sc = usbd_xfer_softc(xfer); struct usb_page_cache *pc; struct mbuf *m; int len, want, got, total; /* Get existing SCO reassembly buffer */ pc = usbd_xfer_get_frame(xfer, 0); m = sc->sc_isoc_in_buffer; total = usbd_xfer_frame_len(xfer, frame_no); /* While we have data in the frame */ while (total > 0) { if (m == NULL) { /* Start new reassembly buffer */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { UBT_STAT_IERROR(sc); return (-1); /* XXX out of sync! */ } - MCLGET(m, M_NOWAIT); - if (!(m->m_flags & M_EXT)) { + if (!(MCLGET(m, M_NOWAIT))) { UBT_STAT_IERROR(sc); NG_FREE_M(m); return (-1); /* XXX out of sync! 
*/ } /* Expect SCO header */ *mtod(m, uint8_t *) = NG_HCI_SCO_DATA_PKT; m->m_pkthdr.len = m->m_len = got = 1; want = sizeof(ng_hci_scodata_pkt_t); } else { /* * Check if we have SCO header and if so * adjust amount of data we want */ got = m->m_pkthdr.len; want = sizeof(ng_hci_scodata_pkt_t); if (got >= want) want += mtod(m, ng_hci_scodata_pkt_t *)->length; } /* Append frame data to the SCO reassembly buffer */ len = total; if (got + len > want) len = want - got; usbd_copy_out(pc, frame_no * usbd_xfer_max_framelen(xfer), mtod(m, uint8_t *) + m->m_pkthdr.len, len); m->m_pkthdr.len += len; m->m_len += len; total -= len; /* Check if we got everything we wanted, if not - continue */ if (got != want) continue; /* If we got here then we got a complete SCO frame */ UBT_INFO(sc, "got complete SCO data frame, pktlen=%d, " \ "length=%d\n", m->m_pkthdr.len, mtod(m, ng_hci_scodata_pkt_t *)->length); UBT_STAT_PCKTS_RECV(sc); UBT_STAT_BYTES_RECV(sc, m->m_pkthdr.len); ubt_fwd_mbuf_up(sc, &m); /* m == NULL at this point */ } /* Put SCO reassembly buffer back */ sc->sc_isoc_in_buffer = m; return (0); } /* ubt_isoc_read_one_frame */ /* * Called when outgoing isoc transfer (SCO packet) has completed, i.e. * SCO packet was sent to the device. * USB context. */ static void ubt_isoc_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct ubt_softc *sc = usbd_xfer_softc(xfer); struct usb_page_cache *pc; struct mbuf *m; int n, space, offset; int actlen, nframes; usbd_xfer_status(xfer, &actlen, NULL, NULL, &nframes); pc = usbd_xfer_get_frame(xfer, 0); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: UBT_INFO(sc, "sent %d bytes to isoc-out pipe\n", actlen); UBT_STAT_BYTES_SENT(sc, actlen); UBT_STAT_PCKTS_SENT(sc); /* FALLTHROUGH */ case USB_ST_SETUP: send_next: offset = 0; space = usbd_xfer_max_framelen(xfer) * nframes; m = NULL; while (space > 0) { if (m == NULL) { UBT_NG_LOCK(sc); NG_BT_MBUFQ_DEQUEUE(&sc->sc_scoq, m); UBT_NG_UNLOCK(sc); if (m == NULL) break; } n = min(space, m->m_pkthdr.len); if (n > 0) { usbd_m_copy_in(pc, offset, m, 0, n); m_adj(m, n); offset += n; space -= n; } if (m->m_pkthdr.len == 0) NG_FREE_M(m); /* sets m = NULL */ } /* Put whatever is left from mbuf back on queue */ if (m != NULL) { UBT_NG_LOCK(sc); NG_BT_MBUFQ_PREPEND(&sc->sc_scoq, m); UBT_NG_UNLOCK(sc); } /* * Calculate sizes for isoc frames. * Note that offset could be 0 at this point (i.e. we have * nothing to send). That is fine, as we have isoc. transfers * going in both directions all the time. In this case it * would be just empty isoc. transfer. */ for (n = 0; n < nframes; n ++) { usbd_xfer_set_frame_len(xfer, n, min(offset, usbd_xfer_max_framelen(xfer))); offset -= usbd_xfer_frame_len(xfer, n); } usbd_transfer_submit(xfer); break; default: /* Error */ if (error != USB_ERR_CANCELLED) { UBT_STAT_OERROR(sc); goto send_next; } /* transfer cancelled */ break; } } /* * Utility function to forward provided mbuf upstream (i.e. up the stack). * Modifies value of the mbuf pointer (sets it to NULL). * Safe to call from any context. */ static int ubt_fwd_mbuf_up(ubt_softc_p sc, struct mbuf **m) { hook_p hook; int error; /* * Close the race with Netgraph hook newhook/disconnect methods. * Save the hook pointer atomically. Two cases are possible: * * 1) The hook pointer is NULL. It means disconnect method got * there first. In this case we are done. * * 2) The hook pointer is not NULL. It means that hook pointer * could be either in valid or invalid (i.e. in the process * of disconnect) state.
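 *
 * (Sketch of that save-and-reference step, as done below:
 *
 *	UBT_NG_LOCK(sc);
 *	if ((hook = sc->sc_hook) != NULL)
 *		NG_HOOK_REF(hook);
 *	UBT_NG_UNLOCK(sc);
 *
 * which pins the hook pointer for the duration of the send.)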
In any case grab an extra reference * to protect the hook pointer. * * It is ok to pass hook in invalid state to NG_SEND_DATA_ONLY() as * it checks for it. Drop extra reference after NG_SEND_DATA_ONLY(). */ UBT_NG_LOCK(sc); if ((hook = sc->sc_hook) != NULL) NG_HOOK_REF(hook); UBT_NG_UNLOCK(sc); if (hook == NULL) { NG_FREE_M(*m); return (ENETDOWN); } NG_SEND_DATA_ONLY(error, hook, *m); NG_HOOK_UNREF(hook); if (error != 0) UBT_STAT_IERROR(sc); return (error); } /* ubt_fwd_mbuf_up */ /**************************************************************************** **************************************************************************** ** Glue **************************************************************************** ****************************************************************************/ /* * Schedule glue task. Should be called with sc_ng_mtx held. * Netgraph context. */ static void ubt_task_schedule(ubt_softc_p sc, int action) { mtx_assert(&sc->sc_ng_mtx, MA_OWNED); /* * Try to handle corner case when "start all" and "stop all" * actions can both be set before task is executed. * * The rules are * * sc_task_flags action new sc_task_flags * ------------------------------------------------------ * 0 start start * 0 stop stop * start start start * start stop stop * stop start stop|start * stop stop stop * stop|start start stop|start * stop|start stop stop */ if (action != 0) { if ((action & UBT_FLAG_T_STOP_ALL) != 0) sc->sc_task_flags &= ~UBT_FLAG_T_START_ALL; sc->sc_task_flags |= action; } if (sc->sc_task_flags & UBT_FLAG_T_PENDING) return; if (taskqueue_enqueue(taskqueue_swi, &sc->sc_task) == 0) { sc->sc_task_flags |= UBT_FLAG_T_PENDING; return; } /* XXX: i think this should never happen */ } /* ubt_task_schedule */ /* * Glue task. Examines sc_task_flags and does things depending on it. * Taskqueue context. */ static void ubt_task(void *context, int pending) { ubt_softc_p sc = context; int task_flags, i; UBT_NG_LOCK(sc); task_flags = sc->sc_task_flags; sc->sc_task_flags = 0; UBT_NG_UNLOCK(sc); /* * Stop all USB transfers synchronously. * Stop interface #0 and #1 transfers at the same time and in the * same loop. usbd_transfer_drain() will do appropriate locking. */ if (task_flags & UBT_FLAG_T_STOP_ALL) for (i = 0; i < UBT_N_TRANSFER; i ++) usbd_transfer_drain(sc->sc_xfer[i]); /* Start incoming interrupt and bulk, and all isoc. USB transfers */ if (task_flags & UBT_FLAG_T_START_ALL) { /* * Interface #0 */ mtx_lock(&sc->sc_if_mtx); ubt_xfer_start(sc, UBT_IF_0_INTR_DT_RD); ubt_xfer_start(sc, UBT_IF_0_BULK_DT_RD); /* * Interface #1 * Start both read and write isoc. transfers by default. * Get them going all the time even if we have nothing * to send to avoid any delays. 
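 *
 * (Two transfers per direction give simple double buffering: the USB
 * stack can work on one isoc. transfer while the other is already
 * queued, so no isoc. interval is wasted between completions.)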
*/ ubt_xfer_start(sc, UBT_IF_1_ISOC_DT_RD1); ubt_xfer_start(sc, UBT_IF_1_ISOC_DT_RD2); ubt_xfer_start(sc, UBT_IF_1_ISOC_DT_WR1); ubt_xfer_start(sc, UBT_IF_1_ISOC_DT_WR2); mtx_unlock(&sc->sc_if_mtx); } /* Start outgoing control transfer */ if (task_flags & UBT_FLAG_T_START_CTRL) { mtx_lock(&sc->sc_if_mtx); ubt_xfer_start(sc, UBT_IF_0_CTRL_DT_WR); mtx_unlock(&sc->sc_if_mtx); } /* Start outgoing bulk transfer */ if (task_flags & UBT_FLAG_T_START_BULK) { mtx_lock(&sc->sc_if_mtx); ubt_xfer_start(sc, UBT_IF_0_BULK_DT_WR); mtx_unlock(&sc->sc_if_mtx); } } /* ubt_task */ /**************************************************************************** **************************************************************************** ** Netgraph specific **************************************************************************** ****************************************************************************/ /* * Netgraph node constructor. Do not allow creating nodes of this type. * Netgraph context. */ static int ng_ubt_constructor(node_p node) { return (EINVAL); } /* ng_ubt_constructor */ /* * Netgraph node destructor. Destroy node only when device has been detached. * Netgraph context. */ static int ng_ubt_shutdown(node_p node) { if (node->nd_flags & NGF_REALLY_DIE) { /* * We came here because the USB device is being * detached, so stop being persistent. */ NG_NODE_SET_PRIVATE(node, NULL); NG_NODE_UNREF(node); } else NG_NODE_REVIVE(node); /* tell ng_rmnode we are persistent */ return (0); } /* ng_ubt_shutdown */ /* * Create new hook. There can only be one. * Netgraph context. */ static int ng_ubt_newhook(node_p node, hook_p hook, char const *name) { struct ubt_softc *sc = NG_NODE_PRIVATE(node); if (strcmp(name, NG_UBT_HOOK) != 0) return (EINVAL); UBT_NG_LOCK(sc); if (sc->sc_hook != NULL) { UBT_NG_UNLOCK(sc); return (EISCONN); } sc->sc_hook = hook; UBT_NG_UNLOCK(sc); return (0); } /* ng_ubt_newhook */ /* * Connect hook. Start incoming USB transfers. * Netgraph context. */ static int ng_ubt_connect(hook_p hook) { struct ubt_softc *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); NG_HOOK_FORCE_QUEUE(NG_HOOK_PEER(hook)); UBT_NG_LOCK(sc); ubt_task_schedule(sc, UBT_FLAG_T_START_ALL); UBT_NG_UNLOCK(sc); return (0); } /* ng_ubt_connect */ /* * Disconnect hook. * Netgraph context. */ static int ng_ubt_disconnect(hook_p hook) { struct ubt_softc *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); UBT_NG_LOCK(sc); if (hook != sc->sc_hook) { UBT_NG_UNLOCK(sc); return (EINVAL); } sc->sc_hook = NULL; /* Kick off task to stop all USB xfers */ ubt_task_schedule(sc, UBT_FLAG_T_STOP_ALL); /* Drain queues */ NG_BT_MBUFQ_DRAIN(&sc->sc_cmdq); NG_BT_MBUFQ_DRAIN(&sc->sc_aclq); NG_BT_MBUFQ_DRAIN(&sc->sc_scoq); UBT_NG_UNLOCK(sc); return (0); } /* ng_ubt_disconnect */ /* * Process control message. * Netgraph context. */ static int ng_ubt_rcvmsg(node_p node, item_p item, hook_p lasthook) { struct ubt_softc *sc = NG_NODE_PRIVATE(node); struct ng_mesg *msg, *rsp = NULL; struct ng_bt_mbufq *q; int error = 0, queue, qlen; NGI_GET_MSG(item, msg); switch (msg->header.typecookie) { case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { case NGM_TEXT_STATUS: NG_MKRESPONSE(rsp, msg, NG_TEXTRESPONSE, M_NOWAIT); if (rsp == NULL) { error = ENOMEM; break; } snprintf(rsp->data, NG_TEXTRESPONSE, "Hook: %s\n" \ "Task flags: %#x\n" \ "Debug: %d\n" \ "CMD queue: [have:%d,max:%d]\n" \ "ACL queue: [have:%d,max:%d]\n" \ "SCO queue: [have:%d,max:%d]", (sc->sc_hook != NULL) ?
NG_UBT_HOOK : "", sc->sc_task_flags, sc->sc_debug, sc->sc_cmdq.len, sc->sc_cmdq.maxlen, sc->sc_aclq.len, sc->sc_aclq.maxlen, sc->sc_scoq.len, sc->sc_scoq.maxlen); break; default: error = EINVAL; break; } break; case NGM_UBT_COOKIE: switch (msg->header.cmd) { case NGM_UBT_NODE_SET_DEBUG: if (msg->header.arglen != sizeof(ng_ubt_node_debug_ep)){ error = EMSGSIZE; break; } sc->sc_debug = *((ng_ubt_node_debug_ep *) (msg->data)); break; case NGM_UBT_NODE_GET_DEBUG: NG_MKRESPONSE(rsp, msg, sizeof(ng_ubt_node_debug_ep), M_NOWAIT); if (rsp == NULL) { error = ENOMEM; break; } *((ng_ubt_node_debug_ep *) (rsp->data)) = sc->sc_debug; break; case NGM_UBT_NODE_SET_QLEN: if (msg->header.arglen != sizeof(ng_ubt_node_qlen_ep)) { error = EMSGSIZE; break; } queue = ((ng_ubt_node_qlen_ep *) (msg->data))->queue; qlen = ((ng_ubt_node_qlen_ep *) (msg->data))->qlen; switch (queue) { case NGM_UBT_NODE_QUEUE_CMD: q = &sc->sc_cmdq; break; case NGM_UBT_NODE_QUEUE_ACL: q = &sc->sc_aclq; break; case NGM_UBT_NODE_QUEUE_SCO: q = &sc->sc_scoq; break; default: error = EINVAL; goto done; /* NOT REACHED */ } q->maxlen = qlen; break; case NGM_UBT_NODE_GET_QLEN: if (msg->header.arglen != sizeof(ng_ubt_node_qlen_ep)) { error = EMSGSIZE; break; } queue = ((ng_ubt_node_qlen_ep *) (msg->data))->queue; switch (queue) { case NGM_UBT_NODE_QUEUE_CMD: q = &sc->sc_cmdq; break; case NGM_UBT_NODE_QUEUE_ACL: q = &sc->sc_aclq; break; case NGM_UBT_NODE_QUEUE_SCO: q = &sc->sc_scoq; break; default: error = EINVAL; goto done; /* NOT REACHED */ } NG_MKRESPONSE(rsp, msg, sizeof(ng_ubt_node_qlen_ep), M_NOWAIT); if (rsp == NULL) { error = ENOMEM; break; } ((ng_ubt_node_qlen_ep *) (rsp->data))->queue = queue; ((ng_ubt_node_qlen_ep *) (rsp->data))->qlen = q->maxlen; break; case NGM_UBT_NODE_GET_STAT: NG_MKRESPONSE(rsp, msg, sizeof(ng_ubt_node_stat_ep), M_NOWAIT); if (rsp == NULL) { error = ENOMEM; break; } bcopy(&sc->sc_stat, rsp->data, sizeof(ng_ubt_node_stat_ep)); break; case NGM_UBT_NODE_RESET_STAT: UBT_STAT_RESET(sc); break; default: error = EINVAL; break; } break; default: error = EINVAL; break; } done: NG_RESPOND_MSG(error, node, item, rsp); NG_FREE_MSG(msg); return (error); } /* ng_ubt_rcvmsg */ /* * Process data. * Netgraph context. */ static int ng_ubt_rcvdata(hook_p hook, item_p item) { struct ubt_softc *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); struct mbuf *m; struct ng_bt_mbufq *q; int action, error = 0; if (hook != sc->sc_hook) { error = EINVAL; goto done; } /* Deatch mbuf and get HCI frame type */ NGI_GET_M(item, m); /* * Minimal size of the HCI frame is 4 bytes: 1 byte frame type, * 2 bytes connection handle and at least 1 byte of length. * Panic on data frame that has size smaller than 4 bytes (it * should not happen) */ if (m->m_pkthdr.len < 4) panic("HCI frame size is too small! pktlen=%d\n", m->m_pkthdr.len); /* Process HCI frame */ switch (*mtod(m, uint8_t *)) { /* XXX call m_pullup ? */ case NG_HCI_CMD_PKT: if (m->m_pkthdr.len - 1 > (int)UBT_CTRL_BUFFER_SIZE) panic("HCI command frame size is too big! " \ "buffer size=%zd, packet len=%d\n", UBT_CTRL_BUFFER_SIZE, m->m_pkthdr.len); q = &sc->sc_cmdq; action = UBT_FLAG_T_START_CTRL; break; case NG_HCI_ACL_DATA_PKT: if (m->m_pkthdr.len - 1 > UBT_BULK_WRITE_BUFFER_SIZE) panic("ACL data frame size is too big! 
" \ "buffer size=%d, packet len=%d\n", UBT_BULK_WRITE_BUFFER_SIZE, m->m_pkthdr.len); q = &sc->sc_aclq; action = UBT_FLAG_T_START_BULK; break; case NG_HCI_SCO_DATA_PKT: q = &sc->sc_scoq; action = 0; break; default: UBT_ERR(sc, "Dropping unsupported HCI frame, type=0x%02x, " \ "pktlen=%d\n", *mtod(m, uint8_t *), m->m_pkthdr.len); NG_FREE_M(m); error = EINVAL; goto done; /* NOT REACHED */ } UBT_NG_LOCK(sc); if (NG_BT_MBUFQ_FULL(q)) { NG_BT_MBUFQ_DROP(q); UBT_NG_UNLOCK(sc); UBT_ERR(sc, "Dropping HCI frame 0x%02x, len=%d. Queue full\n", *mtod(m, uint8_t *), m->m_pkthdr.len); NG_FREE_M(m); } else { /* Loose HCI packet type, enqueue mbuf and kick off task */ m_adj(m, sizeof(uint8_t)); NG_BT_MBUFQ_ENQUEUE(q, m); ubt_task_schedule(sc, action); UBT_NG_UNLOCK(sc); } done: NG_FREE_ITEM(item); return (error); } /* ng_ubt_rcvdata */ /**************************************************************************** **************************************************************************** ** Module **************************************************************************** ****************************************************************************/ /* * Load/Unload the driver module */ static int ubt_modevent(module_t mod, int event, void *data) { int error; switch (event) { case MOD_LOAD: error = ng_newtype(&typestruct); if (error != 0) printf("%s: Could not register Netgraph node type, " \ "error=%d\n", NG_UBT_NODE_TYPE, error); break; case MOD_UNLOAD: error = ng_rmtype(&typestruct); break; default: error = EOPNOTSUPP; break; } return (error); } /* ubt_modevent */ static devclass_t ubt_devclass; static device_method_t ubt_methods[] = { DEVMETHOD(device_probe, ubt_probe), DEVMETHOD(device_attach, ubt_attach), DEVMETHOD(device_detach, ubt_detach), DEVMETHOD_END }; static driver_t ubt_driver = { .name = "ubt", .methods = ubt_methods, .size = sizeof(struct ubt_softc), }; DRIVER_MODULE(ng_ubt, uhub, ubt_driver, ubt_devclass, ubt_modevent, 0); MODULE_VERSION(ng_ubt, NG_BLUETOOTH_VERSION); MODULE_DEPEND(ng_ubt, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); MODULE_DEPEND(ng_ubt, ng_hci, NG_BLUETOOTH_VERSION, NG_BLUETOOTH_VERSION, NG_BLUETOOTH_VERSION); MODULE_DEPEND(ng_ubt, usb, 1, 1, 1); Index: head/sys/netgraph/ng_vjc.c =================================================================== --- head/sys/netgraph/ng_vjc.c (revision 276749) +++ head/sys/netgraph/ng_vjc.c (revision 276750) @@ -1,615 +1,614 @@ /* * ng_vjc.c */ /*- * Copyright (c) 1996-1999 Whistle Communications, Inc. * All rights reserved. * * Subject to the following obligations and disclaimer of warranty, use and * redistribution of this software, in source or object code forms, with or * without modifications are expressly permitted by Whistle Communications; * provided, however, that: * 1. Any and all reproductions of the source or object code must include the * copyright notice above and the following disclaimer of warranties; and * 2. No rights are granted, in any manner or form, to use Whistle * Communications, Inc. trademarks, including the mark "WHISTLE * COMMUNICATIONS" on advertising, endorsements, or otherwise except as * such appears in the above copyright notice or in the software. 
* * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE, * INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. * WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY * REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS * SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE. * IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES * RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING * WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Archie Cobbs * * $FreeBSD$ * $Whistle: ng_vjc.c,v 1.17 1999/11/01 09:24:52 julian Exp $ */ /* * This node performs Van Jacobson IP header (de)compression. * You must have included net/slcompress.c in your kernel compilation. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Check agreement with slcompress.c */ #if MAX_STATES != NG_VJC_MAX_CHANNELS #error NG_VJC_MAX_CHANNELS must be the same as MAX_STATES #endif /* Maximum length of a compressed TCP VJ header */ #define MAX_VJHEADER 19 /* Node private data */ struct ng_vjc_private { struct ngm_vjc_config conf; struct slcompress slc; hook_p ip; hook_p vjcomp; hook_p vjuncomp; hook_p vjip; }; typedef struct ng_vjc_private *priv_p; #define ERROUT(x) do { error = (x); goto done; } while (0) /* Netgraph node methods */ static ng_constructor_t ng_vjc_constructor; static ng_rcvmsg_t ng_vjc_rcvmsg; static ng_shutdown_t ng_vjc_shutdown; static ng_newhook_t ng_vjc_newhook; static ng_rcvdata_t ng_vjc_rcvdata; static ng_disconnect_t ng_vjc_disconnect; /* Helper stuff */ static struct mbuf *ng_vjc_pulluphdrs(struct mbuf *m, int knownTCP); /* Parse type for struct ngm_vjc_config */ static const struct ng_parse_struct_field ng_vjc_config_type_fields[] = NG_VJC_CONFIG_TYPE_INFO; static const struct ng_parse_type ng_vjc_config_type = { &ng_parse_struct_type, &ng_vjc_config_type_fields }; /* Parse type for the 'last_cs' and 'cs_next' fields in struct slcompress, which are pointers converted to integer indices, so parse them that way. */ #ifndef __LP64__ #define NG_VJC_TSTATE_PTR_TYPE &ng_parse_uint32_type #else #define NG_VJC_TSTATE_PTR_TYPE &ng_parse_uint64_type #endif /* Parse type for the 'cs_hdr' field in a struct cstate. Ideally we would like to use a 'struct ip' type instead of a simple array of bytes. 
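 *
 * (A fixedarray parse type just pairs an element type with a length,
 * so the initializer that follows,
 *
 *	{ &ng_parse_hint8_type, MAX_HDR },
 *
 * renders cs_hdr as MAX_HDR hex bytes in the ASCII form.)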
*/ static const struct ng_parse_fixedarray_info ng_vjc_cs_hdr_type_info = { &ng_parse_hint8_type, MAX_HDR }; static const struct ng_parse_type ng_vjc_cs_hdr_type = { &ng_parse_fixedarray_type, &ng_vjc_cs_hdr_type_info }; /* Parse type for a struct cstate */ static const struct ng_parse_struct_field ng_vjc_cstate_type_fields[] = { { "cs_next", NG_VJC_TSTATE_PTR_TYPE }, { "cs_hlen", &ng_parse_uint16_type }, { "cs_id", &ng_parse_uint8_type }, { "cs_filler", &ng_parse_uint8_type }, { "cs_hdr", &ng_vjc_cs_hdr_type }, { NULL } }; static const struct ng_parse_type ng_vjc_cstate_type = { &ng_parse_struct_type, &ng_vjc_cstate_type_fields }; /* Parse type for an array of MAX_STATES struct cstate's, i.e., tstate & rstate */ static const struct ng_parse_fixedarray_info ng_vjc_cstatearray_type_info = { &ng_vjc_cstate_type, MAX_STATES }; static const struct ng_parse_type ng_vjc_cstatearray_type = { &ng_parse_fixedarray_type, &ng_vjc_cstatearray_type_info }; /* Parse type for struct slcompress. Keep this in sync with the definition of struct slcompress defined in */ static const struct ng_parse_struct_field ng_vjc_slcompress_type_fields[] = { { "last_cs", NG_VJC_TSTATE_PTR_TYPE }, { "last_recv", &ng_parse_uint8_type }, { "last_xmit", &ng_parse_uint8_type }, { "flags", &ng_parse_hint16_type }, #ifndef SL_NO_STATS { "sls_packets", &ng_parse_uint32_type }, { "sls_compressed", &ng_parse_uint32_type }, { "sls_searches", &ng_parse_uint32_type }, { "sls_misses", &ng_parse_uint32_type }, { "sls_uncompressedin", &ng_parse_uint32_type }, { "sls_compressedin", &ng_parse_uint32_type }, { "sls_errorin", &ng_parse_uint32_type }, { "sls_tossed", &ng_parse_uint32_type }, #endif { "tstate", &ng_vjc_cstatearray_type }, { "rstate", &ng_vjc_cstatearray_type }, { NULL } }; static const struct ng_parse_type ng_vjc_slcompress_type = { &ng_parse_struct_type, &ng_vjc_slcompress_type_fields }; /* List of commands and how to convert arguments to/from ASCII */ static const struct ng_cmdlist ng_vjc_cmds[] = { { NGM_VJC_COOKIE, NGM_VJC_SET_CONFIG, "setconfig", &ng_vjc_config_type, NULL }, { NGM_VJC_COOKIE, NGM_VJC_GET_CONFIG, "getconfig", NULL, &ng_vjc_config_type, }, { NGM_VJC_COOKIE, NGM_VJC_GET_STATE, "getstate", NULL, &ng_vjc_slcompress_type, }, { NGM_VJC_COOKIE, NGM_VJC_CLR_STATS, "clrstats", NULL, NULL, }, { NGM_VJC_COOKIE, NGM_VJC_RECV_ERROR, "recverror", NULL, NULL, }, { 0 } }; /* Node type descriptor */ static struct ng_type ng_vjc_typestruct = { .version = NG_ABI_VERSION, .name = NG_VJC_NODE_TYPE, .constructor = ng_vjc_constructor, .rcvmsg = ng_vjc_rcvmsg, .shutdown = ng_vjc_shutdown, .newhook = ng_vjc_newhook, .rcvdata = ng_vjc_rcvdata, .disconnect = ng_vjc_disconnect, .cmdlist = ng_vjc_cmds, }; NETGRAPH_INIT(vjc, &ng_vjc_typestruct); /************************************************************************ NETGRAPH NODE METHODS ************************************************************************/ /* * Create a new node */ static int ng_vjc_constructor(node_p node) { priv_p priv; /* Allocate private structure */ priv = malloc(sizeof(*priv), M_NETGRAPH, M_WAITOK | M_ZERO); NG_NODE_SET_PRIVATE(node, priv); /* slcompress is not thread-safe. Protect its state here.
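 *
 * (NG_NODE_FORCE_WRITER() makes Netgraph treat every delivery to this
 * node as a writer, so rcvdata/rcvmsg are serialized by the framework
 * itself; the alternative would be a private mutex around each
 * sl_compress_tcp()/sl_uncompress_tcp_core() call.)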
*/ NG_NODE_FORCE_WRITER(node); /* Done */ return (0); } /* * Add a new hook */ static int ng_vjc_newhook(node_p node, hook_p hook, const char *name) { const priv_p priv = NG_NODE_PRIVATE(node); hook_p *hookp; /* Get hook */ if (strcmp(name, NG_VJC_HOOK_IP) == 0) hookp = &priv->ip; else if (strcmp(name, NG_VJC_HOOK_VJCOMP) == 0) hookp = &priv->vjcomp; else if (strcmp(name, NG_VJC_HOOK_VJUNCOMP) == 0) hookp = &priv->vjuncomp; else if (strcmp(name, NG_VJC_HOOK_VJIP) == 0) hookp = &priv->vjip; else return (EINVAL); /* See if already connected */ if (*hookp) return (EISCONN); /* OK */ *hookp = hook; return (0); } /* * Receive a control message */ static int ng_vjc_rcvmsg(node_p node, item_p item, hook_p lasthook) { const priv_p priv = NG_NODE_PRIVATE(node); struct ng_mesg *resp = NULL; int error = 0; struct ng_mesg *msg; NGI_GET_MSG(item, msg); /* Check type cookie */ switch (msg->header.typecookie) { case NGM_VJC_COOKIE: switch (msg->header.cmd) { case NGM_VJC_SET_CONFIG: { struct ngm_vjc_config *const c = (struct ngm_vjc_config *) msg->data; if (msg->header.arglen != sizeof(*c)) ERROUT(EINVAL); if ((priv->conf.enableComp || priv->conf.enableDecomp) && (c->enableComp || c->enableDecomp)) ERROUT(EALREADY); if (c->enableComp) { if (c->maxChannel > NG_VJC_MAX_CHANNELS - 1 || c->maxChannel < NG_VJC_MIN_CHANNELS - 1) ERROUT(EINVAL); } else c->maxChannel = NG_VJC_MAX_CHANNELS - 1; if (c->enableComp != 0 || c->enableDecomp != 0) { bzero(&priv->slc, sizeof(priv->slc)); sl_compress_init(&priv->slc, c->maxChannel); } priv->conf = *c; break; } case NGM_VJC_GET_CONFIG: { struct ngm_vjc_config *conf; NG_MKRESPONSE(resp, msg, sizeof(*conf), M_NOWAIT); if (resp == NULL) ERROUT(ENOMEM); conf = (struct ngm_vjc_config *)resp->data; *conf = priv->conf; break; } case NGM_VJC_GET_STATE: { const struct slcompress *const sl0 = &priv->slc; struct slcompress *sl; u_int16_t index; int i; /* Get response structure */ NG_MKRESPONSE(resp, msg, sizeof(*sl), M_NOWAIT); if (resp == NULL) ERROUT(ENOMEM); sl = (struct slcompress *)resp->data; *sl = *sl0; /* Replace pointers with integer indices */ if (sl->last_cs != NULL) { index = sl0->last_cs - sl0->tstate; bzero(&sl->last_cs, sizeof(sl->last_cs)); *((u_int16_t *)&sl->last_cs) = index; } for (i = 0; i < MAX_STATES; i++) { struct cstate *const cs = &sl->tstate[i]; index = sl0->tstate[i].cs_next - sl0->tstate; bzero(&cs->cs_next, sizeof(cs->cs_next)); *((u_int16_t *)&cs->cs_next) = index; } break; } case NGM_VJC_CLR_STATS: priv->slc.sls_packets = 0; priv->slc.sls_compressed = 0; priv->slc.sls_searches = 0; priv->slc.sls_misses = 0; priv->slc.sls_uncompressedin = 0; priv->slc.sls_compressedin = 0; priv->slc.sls_errorin = 0; priv->slc.sls_tossed = 0; break; case NGM_VJC_RECV_ERROR: sl_uncompress_tcp(NULL, 0, TYPE_ERROR, &priv->slc); break; default: error = EINVAL; break; } break; default: error = EINVAL; break; } done: NG_RESPOND_MSG(error, node, item, resp); NG_FREE_MSG(msg); return (error); } /* * Receive data */ static int ng_vjc_rcvdata(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); int error = 0; struct mbuf *m; NGI_GET_M(item, m); if (hook == priv->ip) { /* outgoing packet */ u_int type = TYPE_IP; /* Compress packet if enabled and proto is TCP */ if (priv->conf.enableComp) { struct ip *ip; if ((m = ng_vjc_pulluphdrs(m, 0)) == NULL) { NG_FREE_ITEM(item); return (ENOBUFS); } ip = mtod(m, struct ip *); if (ip->ip_p == IPPROTO_TCP) { const int origLen = m->m_len; type = sl_compress_tcp(m, ip, &priv->slc,
priv->conf.compressCID); m->m_pkthdr.len += m->m_len - origLen; } } /* Dispatch to the appropriate outgoing hook */ switch (type) { case TYPE_IP: hook = priv->vjip; break; case TYPE_UNCOMPRESSED_TCP: hook = priv->vjuncomp; break; case TYPE_COMPRESSED_TCP: hook = priv->vjcomp; break; default: panic("%s: type=%d", __func__, type); } } else if (hook == priv->vjcomp) { /* incoming compressed packet */ int vjlen, need2pullup; struct mbuf *hm; u_char *hdr; u_int hlen; /* Are we decompressing? */ if (!priv->conf.enableDecomp) { NG_FREE_M(m); NG_FREE_ITEM(item); return (ENXIO); } /* Pull up the necessary amount from the mbuf */ need2pullup = MAX_VJHEADER; if (need2pullup > m->m_pkthdr.len) need2pullup = m->m_pkthdr.len; if (m->m_len < need2pullup && (m = m_pullup(m, need2pullup)) == NULL) { priv->slc.sls_errorin++; NG_FREE_ITEM(item); return (ENOBUFS); } /* Uncompress packet to reconstruct TCP/IP header */ vjlen = sl_uncompress_tcp_core(mtod(m, u_char *), m->m_len, m->m_pkthdr.len, TYPE_COMPRESSED_TCP, &priv->slc, &hdr, &hlen); if (vjlen <= 0) { NG_FREE_M(m); NG_FREE_ITEM(item); return (EINVAL); } m_adj(m, vjlen); /* Copy the reconstructed TCP/IP headers into a new mbuf */ MGETHDR(hm, M_NOWAIT, MT_DATA); if (hm == NULL) { priv->slc.sls_errorin++; NG_FREE_M(m); NG_FREE_ITEM(item); return (ENOBUFS); } hm->m_len = 0; hm->m_pkthdr.rcvif = NULL; if (hlen > MHLEN) { /* unlikely, but can happen */ - MCLGET(hm, M_NOWAIT); - if ((hm->m_flags & M_EXT) == 0) { + if (!(MCLGET(hm, M_NOWAIT))) { m_freem(hm); priv->slc.sls_errorin++; NG_FREE_M(m); NG_FREE_ITEM(item); return (ENOBUFS); } } bcopy(hdr, mtod(hm, u_char *), hlen); hm->m_len = hlen; /* Glue TCP/IP headers and rest of packet together */ hm->m_next = m; hm->m_pkthdr.len = hlen + m->m_pkthdr.len; m = hm; hook = priv->ip; } else if (hook == priv->vjuncomp) { /* incoming uncompressed pkt */ u_char *hdr; u_int hlen; /* Are we decompressing? */ if (!priv->conf.enableDecomp) { NG_FREE_M(m); NG_FREE_ITEM(item); return (ENXIO); } /* Pull up IP+TCP headers */ if ((m = ng_vjc_pulluphdrs(m, 1)) == NULL) { NG_FREE_ITEM(item); return (ENOBUFS); } /* Run packet through uncompressor */ if (sl_uncompress_tcp_core(mtod(m, u_char *), m->m_len, m->m_pkthdr.len, TYPE_UNCOMPRESSED_TCP, &priv->slc, &hdr, &hlen) < 0) { NG_FREE_M(m); NG_FREE_ITEM(item); return (EINVAL); } hook = priv->ip; } else if (hook == priv->vjip) /* incoming regular packet (bypass) */ hook = priv->ip; else panic("%s: unknown hook", __func__); /* Send result back out */ NG_FWD_NEW_DATA(error, item, hook, m); return (error); } /* * Shutdown node */ static int ng_vjc_shutdown(node_p node) { const priv_p priv = NG_NODE_PRIVATE(node); bzero(priv, sizeof(*priv)); free(priv, M_NETGRAPH); NG_NODE_SET_PRIVATE(node, NULL); NG_NODE_UNREF(node); return (0); } /* * Hook disconnection */ static int ng_vjc_disconnect(hook_p hook) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); /* Zero out hook pointer */ if (hook == priv->ip) priv->ip = NULL; else if (hook == priv->vjcomp) priv->vjcomp = NULL; else if (hook == priv->vjuncomp) priv->vjuncomp = NULL; else if (hook == priv->vjip) priv->vjip = NULL; else panic("%s: unknown hook", __func__); /* Go away if no hooks left */ if ((NG_NODE_NUMHOOKS(node) == 0) && (NG_NODE_IS_VALID(node))) ng_rmnode_self(node); return (0); } /************************************************************************ HELPER STUFF ************************************************************************/ /* * Pull up the full IP and TCP headers of a packet. 
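 *
 * Aside on the hunk above: with this revision MCLGET() evaluates to an int
 * that is non-zero when a cluster was attached (see the matching mbuf(9)
 * update in this commit), so the old M_EXT re-check is folded into the
 * call itself. A minimal sketch of the new idiom (hypothetical locals):
 *
 *	MGETHDR(hm, M_NOWAIT, MT_DATA);
 *	if (hm != NULL && hlen > MHLEN && !(MCLGET(hm, M_NOWAIT))) {
 *		m_freem(hm);		/* no cluster; caller reports ENOBUFS */
 *		hm = NULL;
 *	}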
If packet is not * a TCP packet, just pull up the IP header. */ static struct mbuf * ng_vjc_pulluphdrs(struct mbuf *m, int knownTCP) { struct ip *ip; struct tcphdr *tcp; int ihlen, thlen; if (m->m_len < sizeof(*ip) && (m = m_pullup(m, sizeof(*ip))) == NULL) return (NULL); ip = mtod(m, struct ip *); if (!knownTCP && ip->ip_p != IPPROTO_TCP) return (m); ihlen = ip->ip_hl << 2; if (m->m_len < ihlen + sizeof(*tcp)) { if ((m = m_pullup(m, ihlen + sizeof(*tcp))) == NULL) return (NULL); ip = mtod(m, struct ip *); } tcp = (struct tcphdr *)((u_char *)ip + ihlen); thlen = tcp->th_off << 2; if (m->m_len < ihlen + thlen) m = m_pullup(m, ihlen + thlen); return (m); } Index: head/sys/netipsec/key.c =================================================================== --- head/sys/netipsec/key.c (revision 276749) +++ head/sys/netipsec/key.c (revision 276750) @@ -1,7828 +1,7823 @@ /* $FreeBSD$ */ /* $KAME: key.c,v 1.191 2001/06/27 10:46:49 sakane Exp $ */ /*- * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This code refers to RFC 2367 */ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #include #include #endif /* INET6 */ #if defined(INET) || defined(INET6) #include #endif #ifdef INET6 #include #endif /* INET6 */ #include #include #include #include #include #include #ifdef INET6 #include #endif #include #include /* randomness */ #include #define FULLMASK 0xff #define _BITS(bytes) ((bytes) << 3) /* * Note on SA reference counting: * - SAs that are not in DEAD state will have (total external reference + 1) * following value in reference count field. they cannot be freed and are * referenced from SA header. * - SAs that are in DEAD state will have (total external reference) * in reference count field. they are ready to be freed.
reference from * SA header will be removed in key_delsav(), when the reference count * field hits 0 (= no external reference other than from the SA header). * For example, an SA with two external references has a refcnt of 3 while * alive, and of 2 once it enters DEAD state. */ VNET_DEFINE(u_int32_t, key_debug_level) = 0; static VNET_DEFINE(u_int, key_spi_trycnt) = 1000; static VNET_DEFINE(u_int32_t, key_spi_minval) = 0x100; static VNET_DEFINE(u_int32_t, key_spi_maxval) = 0x0fffffff; /* XXX */ static VNET_DEFINE(u_int32_t, policy_id) = 0; /* interval to initialize randseed, 1(m) */ static VNET_DEFINE(u_int, key_int_random) = 60; /* interval to expire acquiring, 30(s)*/ static VNET_DEFINE(u_int, key_larval_lifetime) = 30; /* counter for blocking SADB_ACQUIRE.*/ static VNET_DEFINE(int, key_blockacq_count) = 10; /* lifetime for blocking SADB_ACQUIRE.*/ static VNET_DEFINE(int, key_blockacq_lifetime) = 20; /* preferred old sa rather than new sa.*/ static VNET_DEFINE(int, key_preferred_oldsa) = 1; #define V_key_spi_trycnt VNET(key_spi_trycnt) #define V_key_spi_minval VNET(key_spi_minval) #define V_key_spi_maxval VNET(key_spi_maxval) #define V_policy_id VNET(policy_id) #define V_key_int_random VNET(key_int_random) #define V_key_larval_lifetime VNET(key_larval_lifetime) #define V_key_blockacq_count VNET(key_blockacq_count) #define V_key_blockacq_lifetime VNET(key_blockacq_lifetime) #define V_key_preferred_oldsa VNET(key_preferred_oldsa) static VNET_DEFINE(u_int32_t, acq_seq) = 0; #define V_acq_seq VNET(acq_seq) /* SPD */ static VNET_DEFINE(TAILQ_HEAD(_sptree, secpolicy), sptree[IPSEC_DIR_MAX]); static struct rmlock sptree_lock; #define V_sptree VNET(sptree) #define SPTREE_LOCK_INIT() rm_init(&sptree_lock, "sptree") #define SPTREE_LOCK_DESTROY() rm_destroy(&sptree_lock) #define SPTREE_RLOCK_TRACKER struct rm_priotracker sptree_tracker #define SPTREE_RLOCK() rm_rlock(&sptree_lock, &sptree_tracker) #define SPTREE_RUNLOCK() rm_runlock(&sptree_lock, &sptree_tracker) #define SPTREE_RLOCK_ASSERT() rm_assert(&sptree_lock, RA_RLOCKED) #define SPTREE_WLOCK() rm_wlock(&sptree_lock) #define SPTREE_WUNLOCK() rm_wunlock(&sptree_lock) #define SPTREE_WLOCK_ASSERT() rm_assert(&sptree_lock, RA_WLOCKED) #define SPTREE_UNLOCK_ASSERT() rm_assert(&sptree_lock, RA_UNLOCKED) static VNET_DEFINE(LIST_HEAD(_sahtree, secashead), sahtree); /* SAD */ #define V_sahtree VNET(sahtree) static struct mtx sahtree_lock; #define SAHTREE_LOCK_INIT() \ mtx_init(&sahtree_lock, "sahtree", \ "fast ipsec security association database", MTX_DEF) #define SAHTREE_LOCK_DESTROY() mtx_destroy(&sahtree_lock) #define SAHTREE_LOCK() mtx_lock(&sahtree_lock) #define SAHTREE_UNLOCK() mtx_unlock(&sahtree_lock) #define SAHTREE_LOCK_ASSERT() mtx_assert(&sahtree_lock, MA_OWNED) /* registered list */ static VNET_DEFINE(LIST_HEAD(_regtree, secreg), regtree[SADB_SATYPE_MAX + 1]); #define V_regtree VNET(regtree) static struct mtx regtree_lock; #define REGTREE_LOCK_INIT() \ mtx_init(&regtree_lock, "regtree", "fast ipsec regtree", MTX_DEF) #define REGTREE_LOCK_DESTROY() mtx_destroy(&regtree_lock) #define REGTREE_LOCK() mtx_lock(&regtree_lock) #define REGTREE_UNLOCK() mtx_unlock(&regtree_lock) #define REGTREE_LOCK_ASSERT() mtx_assert(&regtree_lock, MA_OWNED) static VNET_DEFINE(LIST_HEAD(_acqtree, secacq), acqtree); /* acquiring list */ #define V_acqtree VNET(acqtree) static struct mtx acq_lock; #define ACQ_LOCK_INIT() \ mtx_init(&acq_lock, "acqtree", "fast ipsec acquire list", MTX_DEF) #define ACQ_LOCK_DESTROY() mtx_destroy(&acq_lock) #define ACQ_LOCK() mtx_lock(&acq_lock) #define ACQ_UNLOCK() mtx_unlock(&acq_lock) #define ACQ_LOCK_ASSERT() mtx_assert(&acq_lock, MA_OWNED) /* SP acquiring list */ static
VNET_DEFINE(LIST_HEAD(_spacqtree, secspacq), spacqtree); #define V_spacqtree VNET(spacqtree) static struct mtx spacq_lock; #define SPACQ_LOCK_INIT() \ mtx_init(&spacq_lock, "spacqtree", \ "fast ipsec security policy acquire list", MTX_DEF) #define SPACQ_LOCK_DESTROY() mtx_destroy(&spacq_lock) #define SPACQ_LOCK() mtx_lock(&spacq_lock) #define SPACQ_UNLOCK() mtx_unlock(&spacq_lock) #define SPACQ_LOCK_ASSERT() mtx_assert(&spacq_lock, MA_OWNED) /* search order for SAs */ static const u_int saorder_state_valid_prefer_old[] = { SADB_SASTATE_DYING, SADB_SASTATE_MATURE, }; static const u_int saorder_state_valid_prefer_new[] = { SADB_SASTATE_MATURE, SADB_SASTATE_DYING, }; static const u_int saorder_state_alive[] = { /* except DEAD */ SADB_SASTATE_MATURE, SADB_SASTATE_DYING, SADB_SASTATE_LARVAL }; static const u_int saorder_state_any[] = { SADB_SASTATE_MATURE, SADB_SASTATE_DYING, SADB_SASTATE_LARVAL, SADB_SASTATE_DEAD }; static const int minsize[] = { sizeof(struct sadb_msg), /* SADB_EXT_RESERVED */ sizeof(struct sadb_sa), /* SADB_EXT_SA */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_CURRENT */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_HARD */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_SOFT */ sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_SRC */ sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_DST */ sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_PROXY */ sizeof(struct sadb_key), /* SADB_EXT_KEY_AUTH */ sizeof(struct sadb_key), /* SADB_EXT_KEY_ENCRYPT */ sizeof(struct sadb_ident), /* SADB_EXT_IDENTITY_SRC */ sizeof(struct sadb_ident), /* SADB_EXT_IDENTITY_DST */ sizeof(struct sadb_sens), /* SADB_EXT_SENSITIVITY */ sizeof(struct sadb_prop), /* SADB_EXT_PROPOSAL */ sizeof(struct sadb_supported), /* SADB_EXT_SUPPORTED_AUTH */ sizeof(struct sadb_supported), /* SADB_EXT_SUPPORTED_ENCRYPT */ sizeof(struct sadb_spirange), /* SADB_EXT_SPIRANGE */ 0, /* SADB_X_EXT_KMPRIVATE */ sizeof(struct sadb_x_policy), /* SADB_X_EXT_POLICY */ sizeof(struct sadb_x_sa2), /* SADB_X_SA2 */ sizeof(struct sadb_x_nat_t_type),/* SADB_X_EXT_NAT_T_TYPE */ sizeof(struct sadb_x_nat_t_port),/* SADB_X_EXT_NAT_T_SPORT */ sizeof(struct sadb_x_nat_t_port),/* SADB_X_EXT_NAT_T_DPORT */ sizeof(struct sadb_address), /* SADB_X_EXT_NAT_T_OAI */ sizeof(struct sadb_address), /* SADB_X_EXT_NAT_T_OAR */ sizeof(struct sadb_x_nat_t_frag),/* SADB_X_EXT_NAT_T_FRAG */ }; static const int maxsize[] = { sizeof(struct sadb_msg), /* SADB_EXT_RESERVED */ sizeof(struct sadb_sa), /* SADB_EXT_SA */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_CURRENT */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_HARD */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_SOFT */ 0, /* SADB_EXT_ADDRESS_SRC */ 0, /* SADB_EXT_ADDRESS_DST */ 0, /* SADB_EXT_ADDRESS_PROXY */ 0, /* SADB_EXT_KEY_AUTH */ 0, /* SADB_EXT_KEY_ENCRYPT */ 0, /* SADB_EXT_IDENTITY_SRC */ 0, /* SADB_EXT_IDENTITY_DST */ 0, /* SADB_EXT_SENSITIVITY */ 0, /* SADB_EXT_PROPOSAL */ 0, /* SADB_EXT_SUPPORTED_AUTH */ 0, /* SADB_EXT_SUPPORTED_ENCRYPT */ sizeof(struct sadb_spirange), /* SADB_EXT_SPIRANGE */ 0, /* SADB_X_EXT_KMPRIVATE */ 0, /* SADB_X_EXT_POLICY */ sizeof(struct sadb_x_sa2), /* SADB_X_SA2 */ sizeof(struct sadb_x_nat_t_type),/* SADB_X_EXT_NAT_T_TYPE */ sizeof(struct sadb_x_nat_t_port),/* SADB_X_EXT_NAT_T_SPORT */ sizeof(struct sadb_x_nat_t_port),/* SADB_X_EXT_NAT_T_DPORT */ 0, /* SADB_X_EXT_NAT_T_OAI */ 0, /* SADB_X_EXT_NAT_T_OAR */ sizeof(struct sadb_x_nat_t_frag),/* SADB_X_EXT_NAT_T_FRAG */ }; static VNET_DEFINE(int, ipsec_esp_keymin) = 256; static 
VNET_DEFINE(int, ipsec_esp_auth) = 0; static VNET_DEFINE(int, ipsec_ah_keymin) = 128; #define V_ipsec_esp_keymin VNET(ipsec_esp_keymin) #define V_ipsec_esp_auth VNET(ipsec_esp_auth) #define V_ipsec_ah_keymin VNET(ipsec_ah_keymin) #ifdef SYSCTL_DECL SYSCTL_DECL(_net_key); #endif SYSCTL_INT(_net_key, KEYCTL_DEBUG_LEVEL, debug, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_debug_level), 0, ""); /* max count of trial for the decision of spi value */ SYSCTL_INT(_net_key, KEYCTL_SPI_TRY, spi_trycnt, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_spi_trycnt), 0, ""); /* minimum spi value to allocate automatically. */ SYSCTL_INT(_net_key, KEYCTL_SPI_MIN_VALUE, spi_minval, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_spi_minval), 0, ""); /* maximum spi value to allocate automatically. */ SYSCTL_INT(_net_key, KEYCTL_SPI_MAX_VALUE, spi_maxval, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_spi_maxval), 0, ""); /* interval to initialize randseed */ SYSCTL_INT(_net_key, KEYCTL_RANDOM_INT, int_random, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_int_random), 0, ""); /* lifetime for larval SA */ SYSCTL_INT(_net_key, KEYCTL_LARVAL_LIFETIME, larval_lifetime, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_larval_lifetime), 0, ""); /* counter for blocking to send SADB_ACQUIRE to IKEd */ SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_COUNT, blockacq_count, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_blockacq_count), 0, ""); /* lifetime for blocking to send SADB_ACQUIRE to IKEd */ SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_LIFETIME, blockacq_lifetime, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_blockacq_lifetime), 0, ""); /* ESP auth */ SYSCTL_INT(_net_key, KEYCTL_ESP_AUTH, esp_auth, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ipsec_esp_auth), 0, ""); /* minimum ESP key length */ SYSCTL_INT(_net_key, KEYCTL_ESP_KEYMIN, esp_keymin, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ipsec_esp_keymin), 0, ""); /* minimum AH key length */ SYSCTL_INT(_net_key, KEYCTL_AH_KEYMIN, ah_keymin, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ipsec_ah_keymin), 0, ""); /* preferred old SA rather than new SA */ SYSCTL_INT(_net_key, KEYCTL_PREFERED_OLDSA, preferred_oldsa, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_preferred_oldsa), 0, ""); #define __LIST_CHAINED(elm) \ (!((elm)->chain.le_next == NULL && (elm)->chain.le_prev == NULL)) #define LIST_INSERT_TAIL(head, elm, type, field) \ do {\ struct type *curelm = LIST_FIRST(head); \ if (curelm == NULL) {\ LIST_INSERT_HEAD(head, elm, field); \ } else { \ while (LIST_NEXT(curelm, field)) \ curelm = LIST_NEXT(curelm, field);\ LIST_INSERT_AFTER(curelm, elm, field);\ }\ } while (0) #define KEY_CHKSASTATE(head, sav, name) \ do { \ if ((head) != (sav)) { \ ipseclog((LOG_DEBUG, "%s: state mismatched (TREE=%d SA=%d)\n", \ (name), (head), (sav))); \ continue; \ } \ } while (0) #define KEY_CHKSPDIR(head, sp, name) \ do { \ if ((head) != (sp)) { \ ipseclog((LOG_DEBUG, "%s: direction mismatched (TREE=%d SP=%d), " \ "anyway continue.\n", \ (name), (head), (sp))); \ } \ } while (0) MALLOC_DEFINE(M_IPSEC_SA, "secasvar", "ipsec security association"); MALLOC_DEFINE(M_IPSEC_SAH, "sahead", "ipsec sa head"); MALLOC_DEFINE(M_IPSEC_SP, "ipsecpolicy", "ipsec security policy"); MALLOC_DEFINE(M_IPSEC_SR, "ipsecrequest", "ipsec security request"); MALLOC_DEFINE(M_IPSEC_MISC, "ipsec-misc", "ipsec miscellaneous"); MALLOC_DEFINE(M_IPSEC_SAQ, "ipsec-saq", "ipsec sa acquire"); MALLOC_DEFINE(M_IPSEC_SAR, "ipsec-reg", "ipsec sa acquire"); /* * set parameters into secpolicyindex buffer. * Must allocate secpolicyindex buffer passed to this function.
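 *
 * Usage sketch, mirroring key_spdadd() below: "src0 + 1" and "dst0 + 1"
 * are the sockaddrs that PF_KEY lays out immediately after each struct
 * sadb_address header:
 *
 *	KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir, src0 + 1, dst0 + 1,
 *	    src0->sadb_address_prefixlen, dst0->sadb_address_prefixlen,
 *	    src0->sadb_address_proto, &spidx);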
*/ #define KEY_SETSECSPIDX(_dir, s, d, ps, pd, ulp, idx) \ do { \ bzero((idx), sizeof(struct secpolicyindex)); \ (idx)->dir = (_dir); \ (idx)->prefs = (ps); \ (idx)->prefd = (pd); \ (idx)->ul_proto = (ulp); \ bcopy((s), &(idx)->src, ((const struct sockaddr *)(s))->sa_len); \ bcopy((d), &(idx)->dst, ((const struct sockaddr *)(d))->sa_len); \ } while (0) /* * set parameters into secasindex buffer. * Must allocate secasindex buffer before calling this function. */ #define KEY_SETSECASIDX(p, m, r, s, d, idx) \ do { \ bzero((idx), sizeof(struct secasindex)); \ (idx)->proto = (p); \ (idx)->mode = (m); \ (idx)->reqid = (r); \ bcopy((s), &(idx)->src, ((const struct sockaddr *)(s))->sa_len); \ bcopy((d), &(idx)->dst, ((const struct sockaddr *)(d))->sa_len); \ } while (0) /* key statistics */ struct _keystat { u_long getspi_count; /* the average count of tries to get a new SPI */ } keystat; struct sadb_msghdr { struct sadb_msg *msg; struct sadb_ext *ext[SADB_EXT_MAX + 1]; int extoff[SADB_EXT_MAX + 1]; int extlen[SADB_EXT_MAX + 1]; }; #ifndef IPSEC_DEBUG2 static struct callout key_timer; #endif static struct secasvar *key_allocsa_policy(const struct secasindex *); static void key_freesp_so(struct secpolicy **); static struct secasvar *key_do_allocsa_policy(struct secashead *, u_int); static void key_unlink(struct secpolicy *); static struct secpolicy *key_getsp(struct secpolicyindex *); static struct secpolicy *key_getspbyid(u_int32_t); static u_int32_t key_newreqid(void); static struct mbuf *key_gather_mbuf(struct mbuf *, const struct sadb_msghdr *, int, int, ...); static int key_spdadd(struct socket *, struct mbuf *, const struct sadb_msghdr *); static u_int32_t key_getnewspid(void); static int key_spddelete(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_spddelete2(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_spdget(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_spdflush(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_spddump(struct socket *, struct mbuf *, const struct sadb_msghdr *); static struct mbuf *key_setdumpsp(struct secpolicy *, u_int8_t, u_int32_t, u_int32_t); static u_int key_getspreqmsglen(struct secpolicy *); static int key_spdexpire(struct secpolicy *); static struct secashead *key_newsah(struct secasindex *); static void key_delsah(struct secashead *); static struct secasvar *key_newsav(struct mbuf *, const struct sadb_msghdr *, struct secashead *, int *, const char*, int); #define KEY_NEWSAV(m, sadb, sah, e) \ key_newsav(m, sadb, sah, e, __FILE__, __LINE__) static void key_delsav(struct secasvar *); static struct secashead *key_getsah(struct secasindex *); static struct secasvar *key_checkspidup(struct secasindex *, u_int32_t); static struct secasvar *key_getsavbyspi(struct secashead *, u_int32_t); static int key_setsaval(struct secasvar *, struct mbuf *, const struct sadb_msghdr *); static int key_mature(struct secasvar *); static struct mbuf *key_setdumpsa(struct secasvar *, u_int8_t, u_int8_t, u_int32_t, u_int32_t); static struct mbuf *key_setsadbmsg(u_int8_t, u_int16_t, u_int8_t, u_int32_t, pid_t, u_int16_t); static struct mbuf *key_setsadbsa(struct secasvar *); static struct mbuf *key_setsadbaddr(u_int16_t, const struct sockaddr *, u_int8_t, u_int16_t); #ifdef IPSEC_NAT_T static struct mbuf *key_setsadbxport(u_int16_t, u_int16_t); static struct mbuf *key_setsadbxtype(u_int16_t); #endif static void key_porttosaddr(struct sockaddr *, u_int16_t); #define
KEY_PORTTOSADDR(saddr, port) \ key_porttosaddr((struct sockaddr *)(saddr), (port)) static struct mbuf *key_setsadbxsa2(u_int8_t, u_int32_t, u_int32_t); static struct mbuf *key_setsadbxpolicy(u_int16_t, u_int8_t, u_int32_t); static struct seckey *key_dup_keymsg(const struct sadb_key *, u_int, struct malloc_type *); static struct seclifetime *key_dup_lifemsg(const struct sadb_lifetime *src, struct malloc_type *type); #ifdef INET6 static int key_ismyaddr6(struct sockaddr_in6 *); #endif /* flags for key_cmpsaidx() */ #define CMP_HEAD 1 /* protocol, addresses. */ #define CMP_MODE_REQID 2 /* additionally HEAD, reqid, mode. */ #define CMP_REQID 3 /* additionally HEAD, reqid. */ #define CMP_EXACTLY 4 /* all elements. */ static int key_cmpsaidx(const struct secasindex *, const struct secasindex *, int); static int key_cmpspidx_exactly(struct secpolicyindex *, struct secpolicyindex *); static int key_cmpspidx_withmask(struct secpolicyindex *, struct secpolicyindex *); static int key_sockaddrcmp(const struct sockaddr *, const struct sockaddr *, int); static int key_bbcmp(const void *, const void *, u_int); static u_int16_t key_satype2proto(u_int8_t); static u_int8_t key_proto2satype(u_int16_t); static int key_getspi(struct socket *, struct mbuf *, const struct sadb_msghdr *); static u_int32_t key_do_getnewspi(struct sadb_spirange *, struct secasindex *); static int key_update(struct socket *, struct mbuf *, const struct sadb_msghdr *); #ifdef IPSEC_DOSEQCHECK static struct secasvar *key_getsavbyseq(struct secashead *, u_int32_t); #endif static int key_add(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_setident(struct secashead *, struct mbuf *, const struct sadb_msghdr *); static struct mbuf *key_getmsgbuf_x1(struct mbuf *, const struct sadb_msghdr *); static int key_delete(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_delete_all(struct socket *, struct mbuf *, const struct sadb_msghdr *, u_int16_t); static int key_get(struct socket *, struct mbuf *, const struct sadb_msghdr *); static void key_getcomb_setlifetime(struct sadb_comb *); static struct mbuf *key_getcomb_esp(void); static struct mbuf *key_getcomb_ah(void); static struct mbuf *key_getcomb_ipcomp(void); static struct mbuf *key_getprop(const struct secasindex *); static int key_acquire(const struct secasindex *, struct secpolicy *); static struct secacq *key_newacq(const struct secasindex *); static struct secacq *key_getacq(const struct secasindex *); static struct secacq *key_getacqbyseq(u_int32_t); static struct secspacq *key_newspacq(struct secpolicyindex *); static struct secspacq *key_getspacq(struct secpolicyindex *); static int key_acquire2(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_register(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_expire(struct secasvar *); static int key_flush(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_dump(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_promisc(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_senderror(struct socket *, struct mbuf *, int); static int key_validate_ext(const struct sadb_ext *, int); static int key_align(struct mbuf *, struct sadb_msghdr *); static struct mbuf *key_setlifetime(struct seclifetime *src, u_int16_t exttype); static struct mbuf *key_setkey(struct seckey *src, u_int16_t exttype); #if 0 static const char *key_getfqdn(void); static const char
*key_getuserfqdn(void); #endif static void key_sa_chgstate(struct secasvar *, u_int8_t); static __inline void sa_initref(struct secasvar *sav) { refcount_init(&sav->refcnt, 1); } static __inline void sa_addref(struct secasvar *sav) { refcount_acquire(&sav->refcnt); IPSEC_ASSERT(sav->refcnt != 0, ("SA refcnt overflow")); } static __inline int sa_delref(struct secasvar *sav) { IPSEC_ASSERT(sav->refcnt > 0, ("SA refcnt underflow")); return (refcount_release(&sav->refcnt)); } #define SP_ADDREF(p) refcount_acquire(&(p)->refcnt) #define SP_DELREF(p) refcount_release(&(p)->refcnt) /* * Update the refcnt while holding the SPTREE lock. */ void key_addref(struct secpolicy *sp) { SP_ADDREF(sp); } /* * Return 0 when there are known to be no SP's for the specified * direction. Otherwise return 1. This is used by IPsec code * to optimize performance. */ int key_havesp(u_int dir) { return (dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND ? TAILQ_FIRST(&V_sptree[dir]) != NULL : 1); } /* %%% IPsec policy management */ /* * allocating a SP for OUTBOUND or INBOUND packet. * Must call key_freesp() later. * OUT: NULL: not found * others: found and return the pointer. */ struct secpolicy * key_allocsp(struct secpolicyindex *spidx, u_int dir, const char* where, int tag) { SPTREE_RLOCK_TRACKER; struct secpolicy *sp; IPSEC_ASSERT(spidx != NULL, ("null spidx")); IPSEC_ASSERT(dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND, ("invalid direction %u", dir)); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s from %s:%u\n", __func__, where, tag)); /* get a SP entry */ KEYDEBUG(KEYDEBUG_IPSEC_DATA, printf("*** objects\n"); kdebug_secpolicyindex(spidx)); SPTREE_RLOCK(); TAILQ_FOREACH(sp, &V_sptree[dir], chain) { KEYDEBUG(KEYDEBUG_IPSEC_DATA, printf("*** in SPD\n"); kdebug_secpolicyindex(&sp->spidx)); if (key_cmpspidx_withmask(&sp->spidx, spidx)) goto found; } sp = NULL; found: if (sp) { /* sanity check */ KEY_CHKSPDIR(sp->spidx.dir, dir, __func__); /* found a SPD entry */ sp->lastused = time_second; SP_ADDREF(sp); } SPTREE_RUNLOCK(); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s return SP:%p (ID=%u) refcnt %u\n", __func__, sp, sp ? sp->id : 0, sp ? sp->refcnt : 0)); return sp; } /* * allocating a SP for OUTBOUND or INBOUND packet. * Must call key_freesp() later. * OUT: NULL: not found * others: found and return the pointer. 
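 *
 * Reference discipline for both key_allocsp() variants (sketch; assumes
 * the KEY_ALLOCSP()/KEY_FREESP() wrapper macros from netipsec/key.h):
 *
 *	struct secpolicy *sp;
 *
 *	sp = KEY_ALLOCSP(&spidx, IPSEC_DIR_OUTBOUND);
 *	if (sp != NULL) {
 *		/* ... inspect sp->policy and the sp->req chain ... */
 *		KEY_FREESP(&sp);	/* drop the reference taken above */
 *	}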
*/ struct secpolicy * key_allocsp2(u_int32_t spi, union sockaddr_union *dst, u_int8_t proto, u_int dir, const char* where, int tag) { SPTREE_RLOCK_TRACKER; struct secpolicy *sp; IPSEC_ASSERT(dst != NULL, ("null dst")); IPSEC_ASSERT(dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND, ("invalid direction %u", dir)); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s from %s:%u\n", __func__, where, tag)); /* get a SP entry */ KEYDEBUG(KEYDEBUG_IPSEC_DATA, printf("*** objects\n"); printf("spi %u proto %u dir %u\n", spi, proto, dir); kdebug_sockaddr(&dst->sa)); SPTREE_RLOCK(); TAILQ_FOREACH(sp, &V_sptree[dir], chain) { KEYDEBUG(KEYDEBUG_IPSEC_DATA, printf("*** in SPD\n"); kdebug_secpolicyindex(&sp->spidx)); /* compare simple values, then dst address */ if (sp->spidx.ul_proto != proto) continue; /* NB: spi's must exist and match */ if (!sp->req || !sp->req->sav || sp->req->sav->spi != spi) continue; if (key_sockaddrcmp(&sp->spidx.dst.sa, &dst->sa, 1) == 0) goto found; } sp = NULL; found: if (sp) { /* sanity check */ KEY_CHKSPDIR(sp->spidx.dir, dir, __func__); /* found a SPD entry */ sp->lastused = time_second; SP_ADDREF(sp); } SPTREE_RUNLOCK(); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s return SP:%p (ID=%u) refcnt %u\n", __func__, sp, sp ? sp->id : 0, sp ? sp->refcnt : 0)); return sp; } #if 0 /* * return a policy that matches this particular inbound packet. * XXX slow */ struct secpolicy * key_gettunnel(const struct sockaddr *osrc, const struct sockaddr *odst, const struct sockaddr *isrc, const struct sockaddr *idst, const char* where, int tag) { struct secpolicy *sp; const int dir = IPSEC_DIR_INBOUND; struct ipsecrequest *r1, *r2, *p; struct secpolicyindex spidx; KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s from %s:%u\n", __func__, where, tag)); if (isrc->sa_family != idst->sa_family) { ipseclog((LOG_ERR, "%s: protocol family mismatched %d != %d\n.", __func__, isrc->sa_family, idst->sa_family)); sp = NULL; goto done; } SPTREE_LOCK(); LIST_FOREACH(sp, &V_sptree[dir], chain) { if (sp->state == IPSEC_SPSTATE_DEAD) continue; r1 = r2 = NULL; for (p = sp->req; p; p = p->next) { if (p->saidx.mode != IPSEC_MODE_TUNNEL) continue; r1 = r2; r2 = p; if (!r1) { /* here we look at address matches only */ spidx = sp->spidx; if (isrc->sa_len > sizeof(spidx.src) || idst->sa_len > sizeof(spidx.dst)) continue; bcopy(isrc, &spidx.src, isrc->sa_len); bcopy(idst, &spidx.dst, idst->sa_len); if (!key_cmpspidx_withmask(&sp->spidx, &spidx)) continue; } else { if (key_sockaddrcmp(&r1->saidx.src.sa, isrc, 0) || key_sockaddrcmp(&r1->saidx.dst.sa, idst, 0)) continue; } if (key_sockaddrcmp(&r2->saidx.src.sa, osrc, 0) || key_sockaddrcmp(&r2->saidx.dst.sa, odst, 0)) continue; goto found; } } sp = NULL; found: if (sp) { sp->lastused = time_second; SP_ADDREF(sp); } SPTREE_UNLOCK(); done: KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s return SP:%p (ID=%u) refcnt %u\n", __func__, sp, sp ? sp->id : 0, sp ? sp->refcnt : 0)); return sp; } #endif /* * allocating an SA entry for an *OUTBOUND* packet. * checking each request entries in SP, and acquire an SA if need. * OUT: 0: there are valid requests. * ENOENT: policy may be valid, but SA with REQUIRE is on acquiring. 
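 *
 * Outcome sketch for the level handling below:
 *
 *	usable SA found (MATURE/DYING)  -> 0, isr->sav points at it
 *	no SA, level == REQUIRE         -> ENOENT, after key_acquire()
 *	                                   asks IKE for one (SADB_ACQUIRE)
 *	no SA, any other level          -> 0, traffic may pass unprotected
 *	key_acquire() itself fails      -> its error is returned as-is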
*/ int key_checkrequest(struct ipsecrequest *isr, const struct secasindex *saidx) { u_int level; int error; struct secasvar *sav; IPSEC_ASSERT(isr != NULL, ("null isr")); IPSEC_ASSERT(saidx != NULL, ("null saidx")); IPSEC_ASSERT(saidx->mode == IPSEC_MODE_TRANSPORT || saidx->mode == IPSEC_MODE_TUNNEL, ("unexpected policy %u", saidx->mode)); /* * XXX guard against protocol callbacks from the crypto * thread as they reference ipsecrequest.sav which we * temporarily null out below. Need to rethink how we * handle bundled SA's in the callback thread. */ IPSECREQUEST_LOCK_ASSERT(isr); /* get current level */ level = ipsec_get_reqlevel(isr); /* * We check new SA in the IPsec request because a different * SA may be involved each time this request is checked, either * because new SAs are being configured, or this request is * associated with an unconnected datagram socket, or this request * is associated with a system default policy. * * key_allocsa_policy should allocate the oldest SA available. * See key_do_allocsa_policy(), and draft-jenkins-ipsec-rekeying-03.txt. */ sav = key_allocsa_policy(saidx); if (sav != isr->sav) { /* SA need to be updated. */ if (!IPSECREQUEST_UPGRADE(isr)) { /* Kick everyone off. */ IPSECREQUEST_UNLOCK(isr); IPSECREQUEST_WLOCK(isr); } if (isr->sav != NULL) KEY_FREESAV(&isr->sav); isr->sav = sav; IPSECREQUEST_DOWNGRADE(isr); } else if (sav != NULL) KEY_FREESAV(&sav); /* When there is SA. */ if (isr->sav != NULL) { if (isr->sav->state != SADB_SASTATE_MATURE && isr->sav->state != SADB_SASTATE_DYING) return EINVAL; return 0; } /* there is no SA */ error = key_acquire(saidx, isr->sp); if (error != 0) { /* XXX What should I do ? */ ipseclog((LOG_DEBUG, "%s: error %d returned from key_acquire\n", __func__, error)); return error; } if (level != IPSEC_LEVEL_REQUIRE) { /* XXX sigh, the interface to this routine is botched */ IPSEC_ASSERT(isr->sav == NULL, ("unexpected SA")); return 0; } else { return ENOENT; } } /* * allocating a SA for policy entry from SAD. * NOTE: searching SAD of alive (non-DEAD) state. * OUT: NULL: not found. * others: found and return the pointer. */ static struct secasvar * key_allocsa_policy(const struct secasindex *saidx) { #define N(a) _ARRAYLEN(a) struct secashead *sah; struct secasvar *sav; u_int stateidx, arraysize; const u_int *state_valid; state_valid = NULL; /* silence gcc */ arraysize = 0; /* silence gcc */ SAHTREE_LOCK(); LIST_FOREACH(sah, &V_sahtree, chain) { if (sah->state == SADB_SASTATE_DEAD) continue; if (key_cmpsaidx(&sah->saidx, saidx, CMP_MODE_REQID)) { if (V_key_preferred_oldsa) { state_valid = saorder_state_valid_prefer_old; arraysize = N(saorder_state_valid_prefer_old); } else { state_valid = saorder_state_valid_prefer_new; arraysize = N(saorder_state_valid_prefer_new); } break; } } SAHTREE_UNLOCK(); if (sah == NULL) return NULL; /* search valid state */ for (stateidx = 0; stateidx < arraysize; stateidx++) { sav = key_do_allocsa_policy(sah, state_valid[stateidx]); if (sav != NULL) return sav; } return NULL; #undef N } /* * searching SAD with direction, protocol, mode and state. * called by key_allocsa_policy(). * OUT: * NULL : not found * others : found, pointer to a SA.
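 *
 * Ordering example: with net.key.preferred_oldsa=1 (the default above),
 * DYING SAs are scanned before MATURE ones (saorder_state_valid_prefer_old),
 * so traffic sticks to an old SA across a rekey until it expires; with the
 * value 0, MATURE is searched first and the newest SA takes over at once.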
*/ static struct secasvar * key_do_allocsa_policy(struct secashead *sah, u_int state) { struct secasvar *sav, *nextsav, *candidate, *d; /* initialize */ candidate = NULL; SAHTREE_LOCK(); for (sav = LIST_FIRST(&sah->savtree[state]); sav != NULL; sav = nextsav) { nextsav = LIST_NEXT(sav, chain); /* sanity check */ KEY_CHKSASTATE(sav->state, state, __func__); /* initialize */ if (candidate == NULL) { candidate = sav; continue; } /* Which SA is the better ? */ IPSEC_ASSERT(candidate->lft_c != NULL, ("null candidate lifetime")); IPSEC_ASSERT(sav->lft_c != NULL, ("null sav lifetime")); /* What the best method is to compare ? */ if (V_key_preferred_oldsa) { if (candidate->lft_c->addtime > sav->lft_c->addtime) { candidate = sav; } continue; /*NOTREACHED*/ } /* preferred new sa rather than old sa */ if (candidate->lft_c->addtime < sav->lft_c->addtime) { d = candidate; candidate = sav; } else d = sav; /* * prepared to delete the SA when there is more * suitable candidate and the lifetime of the SA is not * permanent. */ if (d->lft_h->addtime != 0) { struct mbuf *m, *result; u_int8_t satype; key_sa_chgstate(d, SADB_SASTATE_DEAD); IPSEC_ASSERT(d->refcnt > 0, ("bogus ref count")); satype = key_proto2satype(d->sah->saidx.proto); if (satype == 0) goto msgfail; m = key_setsadbmsg(SADB_DELETE, 0, satype, 0, 0, d->refcnt - 1); if (!m) goto msgfail; result = m; /* set sadb_address for saidx's. */ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, &d->sah->saidx.src.sa, d->sah->saidx.src.sa.sa_len << 3, IPSEC_ULPROTO_ANY); if (!m) goto msgfail; m_cat(result, m); /* set sadb_address for saidx's. */ m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, &d->sah->saidx.dst.sa, d->sah->saidx.dst.sa.sa_len << 3, IPSEC_ULPROTO_ANY); if (!m) goto msgfail; m_cat(result, m); /* create SA extension */ m = key_setsadbsa(d); if (!m) goto msgfail; m_cat(result, m); if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); if (result == NULL) goto msgfail; } result->m_pkthdr.len = 0; for (m = result; m; m = m->m_next) result->m_pkthdr.len += m->m_len; mtod(result, struct sadb_msg *)->sadb_msg_len = PFKEY_UNIT64(result->m_pkthdr.len); if (key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED)) goto msgfail; msgfail: KEY_FREESAV(&d); } } if (candidate) { sa_addref(candidate); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s cause refcnt++:%d SA:%p\n", __func__, candidate->refcnt, candidate)); } SAHTREE_UNLOCK(); return candidate; } /* * allocating a usable SA entry for an *INBOUND* packet. * Must call key_freesav() later. * OUT: positive: pointer to a usable sav (i.e. MATURE or DYING state). * NULL: not found, or error occurred. * * In the comparison, no source address is used--for RFC2401 conformance. * To quote, from section 4.1: * A security association is uniquely identified by a triple consisting * of a Security Parameter Index (SPI), an IP Destination Address, and a * security protocol (AH or ESP) identifier. * Note that, however, we do need to keep source address in IPsec SA. * IKE specification and PF_KEY specification do assume that we * keep source address in IPsec SA. We see a tricky situation here.
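 *
 * Lookup sketch for inbound processing (assumes the KEY_ALLOCSA() and
 * KEY_FREESAV() wrapper macros from netipsec/key.h):
 *
 *	struct secasvar *sav;
 *
 *	sav = KEY_ALLOCSA(&dst, IPPROTO_ESP, spi);  /* dst + proto + SPI */
 *	if (sav != NULL) {
 *		/* ... hand sav to the transform for processing ... */
 *		KEY_FREESAV(&sav);
 *	}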
*/ struct secasvar * key_allocsa(union sockaddr_union *dst, u_int proto, u_int32_t spi, const char* where, int tag) { struct secashead *sah; struct secasvar *sav; u_int stateidx, arraysize, state; const u_int *saorder_state_valid; #ifdef IPSEC_NAT_T int natt_chkport; #endif IPSEC_ASSERT(dst != NULL, ("null dst address")); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s from %s:%u\n", __func__, where, tag)); #ifdef IPSEC_NAT_T natt_chkport = (dst->sa.sa_family == AF_INET && dst->sa.sa_len == sizeof(struct sockaddr_in) && dst->sin.sin_port != 0); #endif /* * searching SAD. * XXX: to be checked internal IP header somewhere. Also when * IPsec tunnel packet is received. But ESP tunnel mode is * encrypted so we can't check internal IP header. */ SAHTREE_LOCK(); if (V_key_preferred_oldsa) { saorder_state_valid = saorder_state_valid_prefer_old; arraysize = _ARRAYLEN(saorder_state_valid_prefer_old); } else { saorder_state_valid = saorder_state_valid_prefer_new; arraysize = _ARRAYLEN(saorder_state_valid_prefer_new); } LIST_FOREACH(sah, &V_sahtree, chain) { int checkport; /* search valid state */ for (stateidx = 0; stateidx < arraysize; stateidx++) { state = saorder_state_valid[stateidx]; LIST_FOREACH(sav, &sah->savtree[state], chain) { /* sanity check */ KEY_CHKSASTATE(sav->state, state, __func__); /* do not return entries w/ unusable state */ if (sav->state != SADB_SASTATE_MATURE && sav->state != SADB_SASTATE_DYING) continue; if (proto != sav->sah->saidx.proto) continue; if (spi != sav->spi) continue; checkport = 0; #ifdef IPSEC_NAT_T /* * Really only check ports when this is a NAT-T * SA. Otherwise other lookups providing ports * might suffer. */ if (sav->natt_type && natt_chkport) checkport = 1; #endif #if 0 /* don't check src */ /* check src address */ if (key_sockaddrcmp(&src->sa, &sav->sah->saidx.src.sa, checkport) != 0) continue; #endif /* check dst address */ if (key_sockaddrcmp(&dst->sa, &sav->sah->saidx.dst.sa, checkport) != 0) continue; sa_addref(sav); goto done; } } } sav = NULL; done: SAHTREE_UNLOCK(); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s return SA:%p; refcnt %u\n", __func__, sav, sav ? sav->refcnt : 0)); return sav; } /* * Must be called after calling key_allocsp(). * For both the packet without socket and key_freeso(). */ void _key_freesp(struct secpolicy **spp, const char* where, int tag) { struct ipsecrequest *isr, *nextisr; struct secpolicy *sp = *spp; IPSEC_ASSERT(sp != NULL, ("null sp")); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s SP:%p (ID=%u) from %s:%u; refcnt now %u\n", __func__, sp, sp->id, where, tag, sp->refcnt)); if (SP_DELREF(sp) == 0) return; *spp = NULL; for (isr = sp->req; isr != NULL; isr = nextisr) { if (isr->sav != NULL) { KEY_FREESAV(&isr->sav); isr->sav = NULL; } nextisr = isr->next; ipsec_delisr(isr); } free(sp, M_IPSEC_SP); } static void key_unlink(struct secpolicy *sp) { IPSEC_ASSERT(sp != NULL, ("null sp")); IPSEC_ASSERT(sp->spidx.dir == IPSEC_DIR_INBOUND || sp->spidx.dir == IPSEC_DIR_OUTBOUND, ("invalid direction %u", sp->spidx.dir)); SPTREE_UNLOCK_ASSERT(); SPTREE_WLOCK(); TAILQ_REMOVE(&V_sptree[sp->spidx.dir], sp, chain); SPTREE_WUNLOCK(); } /* * Must be called after calling key_allocsp(). * For the packet with socket. */ void key_freeso(struct socket *so) { IPSEC_ASSERT(so != NULL, ("null so")); switch (so->so_proto->pr_domain->dom_family) { #if defined(INET) || defined(INET6) #ifdef INET case PF_INET: #endif #ifdef INET6 case PF_INET6: #endif { struct inpcb *pcb = sotoinpcb(so); /* Does it have a PCB ? 
*/ if (pcb == NULL) return; key_freesp_so(&pcb->inp_sp->sp_in); key_freesp_so(&pcb->inp_sp->sp_out); } break; #endif /* INET || INET6 */ default: ipseclog((LOG_DEBUG, "%s: unknown address family=%d.\n", __func__, so->so_proto->pr_domain->dom_family)); return; } } static void key_freesp_so(struct secpolicy **sp) { IPSEC_ASSERT(sp != NULL && *sp != NULL, ("null sp")); if ((*sp)->policy == IPSEC_POLICY_ENTRUST || (*sp)->policy == IPSEC_POLICY_BYPASS) return; IPSEC_ASSERT((*sp)->policy == IPSEC_POLICY_IPSEC, ("invalid policy %u", (*sp)->policy)); KEY_FREESP(sp); } void key_addrefsa(struct secasvar *sav, const char* where, int tag) { IPSEC_ASSERT(sav != NULL, ("null sav")); IPSEC_ASSERT(sav->refcnt > 0, ("refcount must exist")); sa_addref(sav); } /* * Must be called after calling key_allocsa(). * This function is called by key_freesp() to free some SA allocated * for a policy. */ void key_freesav(struct secasvar **psav, const char* where, int tag) { struct secasvar *sav = *psav; IPSEC_ASSERT(sav != NULL, ("null sav")); if (sa_delref(sav)) { KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s SA:%p (SPI %u) from %s:%u; refcnt now %u\n", __func__, sav, ntohl(sav->spi), where, tag, sav->refcnt)); *psav = NULL; key_delsav(sav); } else { KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s SA:%p (SPI %u) from %s:%u; refcnt now %u\n", __func__, sav, ntohl(sav->spi), where, tag, sav->refcnt)); } } /* %%% SPD management */ /* * search SPD * OUT: NULL : not found * others : found, pointer to a SP. */ static struct secpolicy * key_getsp(struct secpolicyindex *spidx) { SPTREE_RLOCK_TRACKER; struct secpolicy *sp; IPSEC_ASSERT(spidx != NULL, ("null spidx")); SPTREE_RLOCK(); TAILQ_FOREACH(sp, &V_sptree[spidx->dir], chain) { if (key_cmpspidx_exactly(spidx, &sp->spidx)) { SP_ADDREF(sp); break; } } SPTREE_RUNLOCK(); return sp; } /* * get SP by index. * OUT: NULL : not found * others : found, pointer to a SP. */ static struct secpolicy * key_getspbyid(u_int32_t id) { SPTREE_RLOCK_TRACKER; struct secpolicy *sp; SPTREE_RLOCK(); TAILQ_FOREACH(sp, &V_sptree[IPSEC_DIR_INBOUND], chain) { if (sp->id == id) { SP_ADDREF(sp); goto done; } } TAILQ_FOREACH(sp, &V_sptree[IPSEC_DIR_OUTBOUND], chain) { if (sp->id == id) { SP_ADDREF(sp); goto done; } } done: SPTREE_RUNLOCK(); return sp; } struct secpolicy * key_newsp(const char* where, int tag) { struct secpolicy *newsp = NULL; newsp = (struct secpolicy *) malloc(sizeof(struct secpolicy), M_IPSEC_SP, M_NOWAIT|M_ZERO); if (newsp) refcount_init(&newsp->refcnt, 1); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s from %s:%u return SP:%p\n", __func__, where, tag, newsp)); return newsp; } /* * create secpolicy structure from sadb_x_policy structure. * NOTE: `state', `secpolicyindex' in secpolicy structure are not set, * so must be set properly later. 
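 *
 * Layout sketch of the extension parsed here, for an IPSEC-type policy
 * carrying a single ESP tunnel request (illustrative):
 *
 *	struct sadb_x_policy        type = IPSEC_POLICY_IPSEC, dir, id
 *	struct sadb_x_ipsecrequest  proto = IPPROTO_ESP, mode, level, reqid
 *	struct sockaddr             tunnel source address
 *	struct sockaddr             tunnel destination address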
*/ struct secpolicy * key_msg2sp(struct sadb_x_policy *xpl0, size_t len, int *error) { struct secpolicy *newsp; IPSEC_ASSERT(xpl0 != NULL, ("null xpl0")); IPSEC_ASSERT(len >= sizeof(*xpl0), ("policy too short: %zu", len)); if (len != PFKEY_EXTLEN(xpl0)) { ipseclog((LOG_DEBUG, "%s: Invalid msg length.\n", __func__)); *error = EINVAL; return NULL; } if ((newsp = KEY_NEWSP()) == NULL) { *error = ENOBUFS; return NULL; } newsp->spidx.dir = xpl0->sadb_x_policy_dir; newsp->policy = xpl0->sadb_x_policy_type; /* check policy */ switch (xpl0->sadb_x_policy_type) { case IPSEC_POLICY_DISCARD: case IPSEC_POLICY_NONE: case IPSEC_POLICY_ENTRUST: case IPSEC_POLICY_BYPASS: newsp->req = NULL; break; case IPSEC_POLICY_IPSEC: { int tlen; struct sadb_x_ipsecrequest *xisr; struct ipsecrequest **p_isr = &newsp->req; /* validity check */ if (PFKEY_EXTLEN(xpl0) < sizeof(*xpl0)) { ipseclog((LOG_DEBUG, "%s: Invalid msg length.\n", __func__)); KEY_FREESP(&newsp); *error = EINVAL; return NULL; } tlen = PFKEY_EXTLEN(xpl0) - sizeof(*xpl0); xisr = (struct sadb_x_ipsecrequest *)(xpl0 + 1); while (tlen > 0) { /* length check */ if (xisr->sadb_x_ipsecrequest_len < sizeof(*xisr)) { ipseclog((LOG_DEBUG, "%s: invalid ipsecrequest " "length.\n", __func__)); KEY_FREESP(&newsp); *error = EINVAL; return NULL; } /* allocate request buffer */ /* NB: data structure is zero'd */ *p_isr = ipsec_newisr(); if ((*p_isr) == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); KEY_FREESP(&newsp); *error = ENOBUFS; return NULL; } /* set values */ switch (xisr->sadb_x_ipsecrequest_proto) { case IPPROTO_ESP: case IPPROTO_AH: case IPPROTO_IPCOMP: break; default: ipseclog((LOG_DEBUG, "%s: invalid proto type=%u\n", __func__, xisr->sadb_x_ipsecrequest_proto)); KEY_FREESP(&newsp); *error = EPROTONOSUPPORT; return NULL; } (*p_isr)->saidx.proto = xisr->sadb_x_ipsecrequest_proto; switch (xisr->sadb_x_ipsecrequest_mode) { case IPSEC_MODE_TRANSPORT: case IPSEC_MODE_TUNNEL: break; case IPSEC_MODE_ANY: default: ipseclog((LOG_DEBUG, "%s: invalid mode=%u\n", __func__, xisr->sadb_x_ipsecrequest_mode)); KEY_FREESP(&newsp); *error = EINVAL; return NULL; } (*p_isr)->saidx.mode = xisr->sadb_x_ipsecrequest_mode; switch (xisr->sadb_x_ipsecrequest_level) { case IPSEC_LEVEL_DEFAULT: case IPSEC_LEVEL_USE: case IPSEC_LEVEL_REQUIRE: break; case IPSEC_LEVEL_UNIQUE: /* validity check */ /* * If range violation of reqid, kernel will * update it, don't refuse it. */ if (xisr->sadb_x_ipsecrequest_reqid > IPSEC_MANUAL_REQID_MAX) { ipseclog((LOG_DEBUG, "%s: reqid=%d range " "violation, updated by kernel.\n", __func__, xisr->sadb_x_ipsecrequest_reqid)); xisr->sadb_x_ipsecrequest_reqid = 0; } /* allocate new reqid id if reqid is zero. */ if (xisr->sadb_x_ipsecrequest_reqid == 0) { u_int32_t reqid; if ((reqid = key_newreqid()) == 0) { KEY_FREESP(&newsp); *error = ENOBUFS; return NULL; } (*p_isr)->saidx.reqid = reqid; xisr->sadb_x_ipsecrequest_reqid = reqid; } else { /* set it for manual keying. 
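 *
 * reqid name space, as implemented by key_newreqid() below: values up to
 * IPSEC_MANUAL_REQID_MAX are reserved for manual configuration like this
 * branch; kernel-assigned ids start at IPSEC_MANUAL_REQID_MAX + 1 and
 * wrap back there on overflow, with no uniqueness check (the XXX below).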
*/ (*p_isr)->saidx.reqid = xisr->sadb_x_ipsecrequest_reqid; } break; default: ipseclog((LOG_DEBUG, "%s: invalid level=%u\n", __func__, xisr->sadb_x_ipsecrequest_level)); KEY_FREESP(&newsp); *error = EINVAL; return NULL; } (*p_isr)->level = xisr->sadb_x_ipsecrequest_level; /* set IP addresses if there */ if (xisr->sadb_x_ipsecrequest_len > sizeof(*xisr)) { struct sockaddr *paddr; paddr = (struct sockaddr *)(xisr + 1); /* validity check */ if (paddr->sa_len > sizeof((*p_isr)->saidx.src)) { ipseclog((LOG_DEBUG, "%s: invalid " "request address length.\n", __func__)); KEY_FREESP(&newsp); *error = EINVAL; return NULL; } bcopy(paddr, &(*p_isr)->saidx.src, paddr->sa_len); paddr = (struct sockaddr *)((caddr_t)paddr + paddr->sa_len); /* validity check */ if (paddr->sa_len > sizeof((*p_isr)->saidx.dst)) { ipseclog((LOG_DEBUG, "%s: invalid " "request address length.\n", __func__)); KEY_FREESP(&newsp); *error = EINVAL; return NULL; } bcopy(paddr, &(*p_isr)->saidx.dst, paddr->sa_len); } (*p_isr)->sp = newsp; /* initialization for the next. */ p_isr = &(*p_isr)->next; tlen -= xisr->sadb_x_ipsecrequest_len; /* validity check */ if (tlen < 0) { ipseclog((LOG_DEBUG, "%s: becoming tlen < 0.\n", __func__)); KEY_FREESP(&newsp); *error = EINVAL; return NULL; } xisr = (struct sadb_x_ipsecrequest *)((caddr_t)xisr + xisr->sadb_x_ipsecrequest_len); } } break; default: ipseclog((LOG_DEBUG, "%s: invalid policy type.\n", __func__)); KEY_FREESP(&newsp); *error = EINVAL; return NULL; } *error = 0; return newsp; } static u_int32_t key_newreqid() { static u_int32_t auto_reqid = IPSEC_MANUAL_REQID_MAX + 1; auto_reqid = (auto_reqid == ~0 ? IPSEC_MANUAL_REQID_MAX + 1 : auto_reqid + 1); /* XXX should be unique check */ return auto_reqid; } /* * copy secpolicy struct to sadb_x_policy structure indicated. */ struct mbuf * key_sp2msg(struct secpolicy *sp) { struct sadb_x_policy *xpl; int tlen; caddr_t p; struct mbuf *m; IPSEC_ASSERT(sp != NULL, ("null policy")); tlen = key_getspreqmsglen(sp); m = m_get2(tlen, M_NOWAIT, MT_DATA, 0); if (m == NULL) return (NULL); m_align(m, tlen); m->m_len = tlen; xpl = mtod(m, struct sadb_x_policy *); bzero(xpl, tlen); xpl->sadb_x_policy_len = PFKEY_UNIT64(tlen); xpl->sadb_x_policy_exttype = SADB_X_EXT_POLICY; xpl->sadb_x_policy_type = sp->policy; xpl->sadb_x_policy_dir = sp->spidx.dir; xpl->sadb_x_policy_id = sp->id; p = (caddr_t)xpl + sizeof(*xpl); /* if is the policy for ipsec ? */ if (sp->policy == IPSEC_POLICY_IPSEC) { struct sadb_x_ipsecrequest *xisr; struct ipsecrequest *isr; for (isr = sp->req; isr != NULL; isr = isr->next) { xisr = (struct sadb_x_ipsecrequest *)p; xisr->sadb_x_ipsecrequest_proto = isr->saidx.proto; xisr->sadb_x_ipsecrequest_mode = isr->saidx.mode; xisr->sadb_x_ipsecrequest_level = isr->level; xisr->sadb_x_ipsecrequest_reqid = isr->saidx.reqid; p += sizeof(*xisr); bcopy(&isr->saidx.src, p, isr->saidx.src.sa.sa_len); p += isr->saidx.src.sa.sa_len; bcopy(&isr->saidx.dst, p, isr->saidx.dst.sa.sa_len); p += isr->saidx.src.sa.sa_len; xisr->sadb_x_ipsecrequest_len = PFKEY_ALIGN8(sizeof(*xisr) + isr->saidx.src.sa.sa_len + isr->saidx.dst.sa.sa_len); } } return m; } /* m will not be freed nor modified */ static struct mbuf * key_gather_mbuf(struct mbuf *m, const struct sadb_msghdr *mhp, int ndeep, int nitem, ...) 
{ va_list ap; int idx; int i; struct mbuf *result = NULL, *n; int len; IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); va_start(ap, nitem); for (i = 0; i < nitem; i++) { idx = va_arg(ap, int); if (idx < 0 || idx > SADB_EXT_MAX) goto fail; /* don't attempt to pull empty extension */ if (idx == SADB_EXT_RESERVED && mhp->msg == NULL) continue; if (idx != SADB_EXT_RESERVED && (mhp->ext[idx] == NULL || mhp->extlen[idx] == 0)) continue; if (idx == SADB_EXT_RESERVED) { len = PFKEY_ALIGN8(sizeof(struct sadb_msg)); IPSEC_ASSERT(len <= MHLEN, ("header too big %u", len)); MGETHDR(n, M_NOWAIT, MT_DATA); if (!n) goto fail; n->m_len = len; n->m_next = NULL; m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t)); } else if (i < ndeep) { len = mhp->extlen[idx]; n = m_get2(len, M_NOWAIT, MT_DATA, 0); if (n == NULL) goto fail; m_align(n, len); n->m_len = len; m_copydata(m, mhp->extoff[idx], mhp->extlen[idx], mtod(n, caddr_t)); } else { n = m_copym(m, mhp->extoff[idx], mhp->extlen[idx], M_NOWAIT); } if (n == NULL) goto fail; if (result) m_cat(result, n); else result = n; } va_end(ap); if ((result->m_flags & M_PKTHDR) != 0) { result->m_pkthdr.len = 0; for (n = result; n; n = n->m_next) result->m_pkthdr.len += n->m_len; } return result; fail: m_freem(result); va_end(ap); return NULL; } /* * SADB_X_SPDADD, SADB_X_SPDSETIDX or SADB_X_SPDUPDATE processing * add an entry to SP database, when received * <base, address(SD), (lifetime(H),) policy> * from the user(?). * Adding to SP database, * and send * <base, address(SD), (lifetime(H),) policy> * to the socket from which it was sent. * * SPDADD set a unique policy entry. * SPDSETIDX like SPDADD without a part of policy requests. * SPDUPDATE replace a unique policy entry. * * m will always be freed. */ static int key_spdadd(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct sadb_address *src0, *dst0; struct sadb_x_policy *xpl0, *xpl; struct sadb_lifetime *lft = NULL; struct secpolicyindex spidx; struct secpolicy *newsp; int error; IPSEC_ASSERT(so != NULL, ("null socket")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || mhp->ext[SADB_X_EXT_POLICY] == NULL) { ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL) { if (mhp->extlen[SADB_EXT_LIFETIME_HARD] < sizeof(struct sadb_lifetime)) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } lft = (struct sadb_lifetime *)mhp->ext[SADB_EXT_LIFETIME_HARD]; } src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC]; dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; xpl0 = (struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY]; /* * Note: do not parse SADB_X_EXT_NAT_T_* here: * we are processing traffic endpoints. */ /* make secindex */ /* XXX boundary check against sa_len */ KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir, src0 + 1, dst0 + 1, src0->sadb_address_prefixlen, dst0->sadb_address_prefixlen, src0->sadb_address_proto, &spidx); /* checking the direction.
*/ switch (xpl0->sadb_x_policy_dir) { case IPSEC_DIR_INBOUND: case IPSEC_DIR_OUTBOUND: break; default: ipseclog((LOG_DEBUG, "%s: Invalid SP direction.\n", __func__)); mhp->msg->sadb_msg_errno = EINVAL; return 0; } /* check policy */ /* key_spdadd() accepts DISCARD, NONE and IPSEC. */ if (xpl0->sadb_x_policy_type == IPSEC_POLICY_ENTRUST || xpl0->sadb_x_policy_type == IPSEC_POLICY_BYPASS) { ipseclog((LOG_DEBUG, "%s: Invalid policy type.\n", __func__)); return key_senderror(so, m, EINVAL); } /* policy requests are mandatory when action is ipsec. */ if (mhp->msg->sadb_msg_type != SADB_X_SPDSETIDX && xpl0->sadb_x_policy_type == IPSEC_POLICY_IPSEC && mhp->extlen[SADB_X_EXT_POLICY] <= sizeof(*xpl0)) { ipseclog((LOG_DEBUG, "%s: some policy requests part required\n", __func__)); return key_senderror(so, m, EINVAL); } /* * checking there is SP already or not. * SPDUPDATE doesn't depend on whether there is a SP or not. * If the type is either SPDADD or SPDSETIDX AND a SP is found, * then error. */ newsp = key_getsp(&spidx); if (mhp->msg->sadb_msg_type == SADB_X_SPDUPDATE) { if (newsp) { key_unlink(newsp); KEY_FREESP(&newsp); } } else { if (newsp != NULL) { KEY_FREESP(&newsp); ipseclog((LOG_DEBUG, "%s: a SP entry exists already.\n", __func__)); return key_senderror(so, m, EEXIST); } } /* XXX: there is race between key_getsp and key_msg2sp. */ /* allocation new SP entry */ if ((newsp = key_msg2sp(xpl0, PFKEY_EXTLEN(xpl0), &error)) == NULL) { return key_senderror(so, m, error); } if ((newsp->id = key_getnewspid()) == 0) { KEY_FREESP(&newsp); return key_senderror(so, m, ENOBUFS); } /* XXX boundary check against sa_len */ KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir, src0 + 1, dst0 + 1, src0->sadb_address_prefixlen, dst0->sadb_address_prefixlen, src0->sadb_address_proto, &newsp->spidx); /* sanity check on addr pair */ if (((struct sockaddr *)(src0 + 1))->sa_family != ((struct sockaddr *)(dst0+ 1))->sa_family) { KEY_FREESP(&newsp); return key_senderror(so, m, EINVAL); } if (((struct sockaddr *)(src0 + 1))->sa_len != ((struct sockaddr *)(dst0+ 1))->sa_len) { KEY_FREESP(&newsp); return key_senderror(so, m, EINVAL); } #if 1 if (newsp->req && newsp->req->saidx.src.sa.sa_family && newsp->req->saidx.dst.sa.sa_family) { if (newsp->req->saidx.src.sa.sa_family != newsp->req->saidx.dst.sa.sa_family) { KEY_FREESP(&newsp); return key_senderror(so, m, EINVAL); } } #endif newsp->created = time_second; newsp->lastused = newsp->created; newsp->lifetime = lft ? lft->sadb_lifetime_addtime : 0; newsp->validtime = lft ? lft->sadb_lifetime_usetime : 0; SPTREE_WLOCK(); TAILQ_INSERT_TAIL(&V_sptree[newsp->spidx.dir], newsp, chain); SPTREE_WUNLOCK(); /* delete the entry in spacqtree */ if (mhp->msg->sadb_msg_type == SADB_X_SPDUPDATE) { struct secspacq *spacq = key_getspacq(&spidx); if (spacq != NULL) { /* reset counter in order to deletion by timehandler. */ spacq->created = time_second; spacq->count = 0; SPACQ_UNLOCK(); } } { struct mbuf *n, *mpolicy; struct sadb_msg *newmsg; int off; /* * Note: do not send SADB_X_EXT_NAT_T_* here: * we are sending traffic endpoints. */ /* create new sadb_msg to reply. 
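 *
 * On the key_gather_mbuf() calls below: the extension indices follow the
 * (ndeep, nitem) pair; the first ndeep items are deep-copied into fresh
 * writable mbufs, the remainder are shared read-only via m_copym(). E.g.:
 *
 *	n = key_gather_mbuf(m, mhp, 2, 5, SADB_EXT_RESERVED,
 *	    SADB_X_EXT_POLICY, SADB_EXT_LIFETIME_HARD,
 *	    SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
 *
 * ndeep is 2 here because the reply patches sadb_x_policy_id in place.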
*/ if (lft) { n = key_gather_mbuf(m, mhp, 2, 5, SADB_EXT_RESERVED, SADB_X_EXT_POLICY, SADB_EXT_LIFETIME_HARD, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST); } else { n = key_gather_mbuf(m, mhp, 2, 4, SADB_EXT_RESERVED, SADB_X_EXT_POLICY, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST); } if (!n) return key_senderror(so, m, ENOBUFS); if (n->m_len < sizeof(*newmsg)) { n = m_pullup(n, sizeof(*newmsg)); if (!n) return key_senderror(so, m, ENOBUFS); } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); off = 0; mpolicy = m_pulldown(n, PFKEY_ALIGN8(sizeof(struct sadb_msg)), sizeof(*xpl), &off); if (mpolicy == NULL) { /* n is already freed */ return key_senderror(so, m, ENOBUFS); } xpl = (struct sadb_x_policy *)(mtod(mpolicy, caddr_t) + off); if (xpl->sadb_x_policy_exttype != SADB_X_EXT_POLICY) { m_freem(n); return key_senderror(so, m, EINVAL); } xpl->sadb_x_policy_id = newsp->id; m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); } } /* * get new policy id. * OUT: * 0: failure. * others: success. */ static u_int32_t key_getnewspid() { u_int32_t newid = 0; int count = V_key_spi_trycnt; /* XXX */ struct secpolicy *sp; /* when requesting to allocate spi ranged */ while (count--) { newid = (V_policy_id = (V_policy_id == ~0 ? 1 : V_policy_id + 1)); if ((sp = key_getspbyid(newid)) == NULL) break; KEY_FREESP(&sp); } if (count == 0 || newid == 0) { ipseclog((LOG_DEBUG, "%s: to allocate policy id is failed.\n", __func__)); return 0; } return newid; } /* * SADB_SPDDELETE processing * receive * * from the user(?), and set SADB_SASTATE_DEAD, * and send, * * to the ikmpd. * policy(*) including direction of policy. * * m will always be freed. */ static int key_spddelete(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct sadb_address *src0, *dst0; struct sadb_x_policy *xpl0; struct secpolicyindex spidx; struct secpolicy *sp; IPSEC_ASSERT(so != NULL, ("null so")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || mhp->ext[SADB_X_EXT_POLICY] == NULL) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC]; dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; xpl0 = (struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY]; /* * Note: do not parse SADB_X_EXT_NAT_T_* here: * we are processing traffic endpoints. */ /* make secindex */ /* XXX boundary check against sa_len */ KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir, src0 + 1, dst0 + 1, src0->sadb_address_prefixlen, dst0->sadb_address_prefixlen, src0->sadb_address_proto, &spidx); /* checking the direciton. */ switch (xpl0->sadb_x_policy_dir) { case IPSEC_DIR_INBOUND: case IPSEC_DIR_OUTBOUND: break; default: ipseclog((LOG_DEBUG, "%s: Invalid SP direction.\n", __func__)); return key_senderror(so, m, EINVAL); } /* Is there SP in SPD ? 
*/ if ((sp = key_getsp(&spidx)) == NULL) { ipseclog((LOG_DEBUG, "%s: no SP found.\n", __func__)); return key_senderror(so, m, EINVAL); } /* save policy id to buffer to be returned. */ xpl0->sadb_x_policy_id = sp->id; key_unlink(sp); KEY_FREESP(&sp); { struct mbuf *n; struct sadb_msg *newmsg; /* * Note: do not send SADB_X_EXT_NAT_T_* here: * we are sending traffic endpoints. */ /* create new sadb_msg to reply. */ n = key_gather_mbuf(m, mhp, 1, 4, SADB_EXT_RESERVED, SADB_X_EXT_POLICY, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST); if (!n) return key_senderror(so, m, ENOBUFS); newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); } } /* * SADB_SPDDELETE2 processing * receive * * from the user(?), and set SADB_SASTATE_DEAD, * and send, * * to the ikmpd. * policy(*) including direction of policy. * * m will always be freed. */ static int key_spddelete2(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { u_int32_t id; struct secpolicy *sp; IPSEC_ASSERT(so != NULL, ("null socket")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); if (mhp->ext[SADB_X_EXT_POLICY] == NULL || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } id = ((struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id; /* Is there SP in SPD ? */ if ((sp = key_getspbyid(id)) == NULL) { ipseclog((LOG_DEBUG, "%s: no SP found id:%u.\n", __func__, id)); return key_senderror(so, m, EINVAL); } key_unlink(sp); KEY_FREESP(&sp); { struct mbuf *n, *nn; struct sadb_msg *newmsg; int off, len; /* create new sadb_msg to reply. */ len = PFKEY_ALIGN8(sizeof(struct sadb_msg)); MGETHDR(n, M_NOWAIT, MT_DATA); if (n && len > MHLEN) { - MCLGET(n, M_NOWAIT); - if ((n->m_flags & M_EXT) == 0) { + if (!(MCLGET(n, M_NOWAIT))) { m_freem(n); n = NULL; } } if (!n) return key_senderror(so, m, ENOBUFS); n->m_len = len; n->m_next = NULL; off = 0; m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t) + off); off += PFKEY_ALIGN8(sizeof(struct sadb_msg)); IPSEC_ASSERT(off == len, ("length inconsistency (off %u len %u)", off, len)); n->m_next = m_copym(m, mhp->extoff[SADB_X_EXT_POLICY], mhp->extlen[SADB_X_EXT_POLICY], M_NOWAIT); if (!n->m_next) { m_freem(n); return key_senderror(so, m, ENOBUFS); } n->m_pkthdr.len = 0; for (nn = n; nn; nn = nn->m_next) n->m_pkthdr.len += nn->m_len; newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); } } /* * SADB_X_GET processing * receive * * from the user(?), * and send, * * to the ikmpd. * policy(*) including direction of policy. * * m will always be freed. 
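 * Illustrative exchange: the request needs only the base header plus
 * an sadb_x_policy extension with sadb_x_policy_id filled in; the
 * reply is built by key_setdumpsp() and carries the full policy, its
 * addresses and, when set, its lifetimes.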
*/
static int
key_spdget(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	u_int32_t id;
	struct secpolicy *sp;
	struct mbuf *n;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	if (mhp->ext[SADB_X_EXT_POLICY] == NULL ||
	    mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
		    __func__));
		return key_senderror(so, m, EINVAL);
	}

	id = ((struct sadb_x_policy *)
	    mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id;

	/* Is there an SP in the SPD? */
	if ((sp = key_getspbyid(id)) == NULL) {
		ipseclog((LOG_DEBUG, "%s: no SP found id:%u.\n",
		    __func__, id));
		return key_senderror(so, m, ENOENT);
	}

	n = key_setdumpsp(sp, SADB_X_SPDGET, 0, mhp->msg->sadb_msg_pid);
	KEY_FREESP(&sp);
	if (n != NULL) {
		m_freem(m);
		return key_sendup_mbuf(so, n, KEY_SENDUP_ONE);
	} else
		return key_senderror(so, m, ENOBUFS);
}

/*
 * SADB_X_SPDACQUIRE processing.
 * Acquire policy and SA(s) for an *OUTBOUND* packet.
 * send
 *	<base, policy(*)>
 * to KMD, and expect to receive
 *	<base> with SADB_X_SPDACQUIRE if an error occurred,
 * or
 *	<base, policy>
 * with SADB_X_SPDUPDATE from KMD by PF_KEY.
 * policy(*) is without policy requests.
 *
 *    0     : success
 *    others: error number
 */
int
key_spdacquire(struct secpolicy *sp)
{
	struct mbuf *result = NULL, *m;
	struct secspacq *newspacq;

	IPSEC_ASSERT(sp != NULL, ("null secpolicy"));
	IPSEC_ASSERT(sp->req == NULL, ("policy exists"));
	IPSEC_ASSERT(sp->policy == IPSEC_POLICY_IPSEC,
	    ("policy not IPSEC %u", sp->policy));

	/* Get an entry to check whether a message has been sent already. */
	newspacq = key_getspacq(&sp->spidx);
	if (newspacq != NULL) {
		if (V_key_blockacq_count < newspacq->count) {
			/* reset counter and send the message. */
			newspacq->count = 0;
		} else {
			/* increment counter and do nothing. */
			newspacq->count++;
			SPACQ_UNLOCK();
			return (0);
		}
		SPACQ_UNLOCK();
	} else {
		/* make a new entry to block sending SADB_ACQUIRE. */
		newspacq = key_newspacq(&sp->spidx);
		if (newspacq == NULL)
			return ENOBUFS;
	}

	/* create new sadb_msg to reply. */
	m = key_setsadbmsg(SADB_X_SPDACQUIRE, 0, 0, 0, 0, 0);
	if (!m)
		return ENOBUFS;

	result = m;
	result->m_pkthdr.len = 0;
	for (m = result; m; m = m->m_next)
		result->m_pkthdr.len += m->m_len;

	mtod(result, struct sadb_msg *)->sadb_msg_len =
	    PFKEY_UNIT64(result->m_pkthdr.len);

	return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED);
}

/*
 * SADB_SPDFLUSH processing
 * receive
 *	<base>
 * from the user, and free all entries in secpctree.
 * and send,
 *	<base>
 * to the user.
 * NOTE: what to do is only marking SADB_SASTATE_DEAD.
 *
 * m will always be freed.
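 * All policy chains are detached in a single pass while the SPTREE
 * write lock is held and only released afterwards, so readers never
 * observe a half-flushed tree.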
*/ static int key_spdflush(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { TAILQ_HEAD(, secpolicy) drainq; struct sadb_msg *newmsg; struct secpolicy *sp, *nextsp; u_int dir; IPSEC_ASSERT(so != NULL, ("null socket")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); if (m->m_len != PFKEY_ALIGN8(sizeof(struct sadb_msg))) return key_senderror(so, m, EINVAL); TAILQ_INIT(&drainq); SPTREE_WLOCK(); for (dir = 0; dir < IPSEC_DIR_MAX; dir++) { TAILQ_CONCAT(&drainq, &V_sptree[dir], chain); } SPTREE_WUNLOCK(); sp = TAILQ_FIRST(&drainq); while (sp != NULL) { nextsp = TAILQ_NEXT(sp, chain); KEY_FREESP(&sp); sp = nextsp; } if (sizeof(struct sadb_msg) > m->m_len + M_TRAILINGSPACE(m)) { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); return key_senderror(so, m, ENOBUFS); } if (m->m_next) m_freem(m->m_next); m->m_next = NULL; m->m_pkthdr.len = m->m_len = PFKEY_ALIGN8(sizeof(struct sadb_msg)); newmsg = mtod(m, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len); return key_sendup_mbuf(so, m, KEY_SENDUP_ALL); } /* * SADB_SPDDUMP processing * receive * * from the user, and dump all SP leaves * and send, * ..... * to the ikmpd. * * m will always be freed. */ static int key_spddump(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { SPTREE_RLOCK_TRACKER; struct secpolicy *sp; int cnt; u_int dir; struct mbuf *n; IPSEC_ASSERT(so != NULL, ("null socket")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); /* search SPD entry and get buffer size. */ cnt = 0; SPTREE_RLOCK(); for (dir = 0; dir < IPSEC_DIR_MAX; dir++) { TAILQ_FOREACH(sp, &V_sptree[dir], chain) { cnt++; } } if (cnt == 0) { SPTREE_RUNLOCK(); return key_senderror(so, m, ENOENT); } for (dir = 0; dir < IPSEC_DIR_MAX; dir++) { TAILQ_FOREACH(sp, &V_sptree[dir], chain) { --cnt; n = key_setdumpsp(sp, SADB_X_SPDDUMP, cnt, mhp->msg->sadb_msg_pid); if (n) key_sendup_mbuf(so, n, KEY_SENDUP_ONE); } } SPTREE_RUNLOCK(); m_freem(m); return 0; } static struct mbuf * key_setdumpsp(struct secpolicy *sp, u_int8_t type, u_int32_t seq, u_int32_t pid) { struct mbuf *result = NULL, *m; struct seclifetime lt; SPTREE_RLOCK_ASSERT(); m = key_setsadbmsg(type, 0, SADB_SATYPE_UNSPEC, seq, pid, sp->refcnt); if (!m) goto fail; result = m; /* * Note: do not send SADB_X_EXT_NAT_T_* here: * we are sending traffic endpoints. 
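 * Each dumped record is laid out as (a sketch of the code below):
 *	<base, address(SD), policy[, lifetime(C), lifetime(H)]>
 * with the lifetime extensions appended only when sp->lifetime is set.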
*/ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, &sp->spidx.src.sa, sp->spidx.prefs, sp->spidx.ul_proto); if (!m) goto fail; m_cat(result, m); m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, &sp->spidx.dst.sa, sp->spidx.prefd, sp->spidx.ul_proto); if (!m) goto fail; m_cat(result, m); m = key_sp2msg(sp); if (!m) goto fail; m_cat(result, m); if(sp->lifetime){ lt.addtime=sp->created; lt.usetime= sp->lastused; m = key_setlifetime(<, SADB_EXT_LIFETIME_CURRENT); if (!m) goto fail; m_cat(result, m); lt.addtime=sp->lifetime; lt.usetime= sp->validtime; m = key_setlifetime(<, SADB_EXT_LIFETIME_HARD); if (!m) goto fail; m_cat(result, m); } if ((result->m_flags & M_PKTHDR) == 0) goto fail; if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); if (result == NULL) goto fail; } result->m_pkthdr.len = 0; for (m = result; m; m = m->m_next) result->m_pkthdr.len += m->m_len; mtod(result, struct sadb_msg *)->sadb_msg_len = PFKEY_UNIT64(result->m_pkthdr.len); return result; fail: m_freem(result); return NULL; } /* * get PFKEY message length for security policy and request. */ static u_int key_getspreqmsglen(struct secpolicy *sp) { u_int tlen; tlen = sizeof(struct sadb_x_policy); /* if is the policy for ipsec ? */ if (sp->policy != IPSEC_POLICY_IPSEC) return tlen; /* get length of ipsec requests */ { struct ipsecrequest *isr; int len; for (isr = sp->req; isr != NULL; isr = isr->next) { len = sizeof(struct sadb_x_ipsecrequest) + isr->saidx.src.sa.sa_len + isr->saidx.dst.sa.sa_len; tlen += PFKEY_ALIGN8(len); } } return tlen; } /* * SADB_SPDEXPIRE processing * send * * to KMD by PF_KEY. * * OUT: 0 : succeed * others : error number */ static int key_spdexpire(struct secpolicy *sp) { struct mbuf *result = NULL, *m; int len; int error = -1; struct sadb_lifetime *lt; /* XXX: Why do we lock ? */ IPSEC_ASSERT(sp != NULL, ("null secpolicy")); /* set msg header */ m = key_setsadbmsg(SADB_X_SPDEXPIRE, 0, 0, 0, 0, 0); if (!m) { error = ENOBUFS; goto fail; } result = m; /* create lifetime extension (current and hard) */ len = PFKEY_ALIGN8(sizeof(*lt)) * 2; m = m_get2(len, M_NOWAIT, MT_DATA, 0); if (m == NULL) { error = ENOBUFS; goto fail; } m_align(m, len); m->m_len = len; bzero(mtod(m, caddr_t), len); lt = mtod(m, struct sadb_lifetime *); lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime)); lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT; lt->sadb_lifetime_allocations = 0; lt->sadb_lifetime_bytes = 0; lt->sadb_lifetime_addtime = sp->created; lt->sadb_lifetime_usetime = sp->lastused; lt = (struct sadb_lifetime *)(mtod(m, caddr_t) + len / 2); lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime)); lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD; lt->sadb_lifetime_allocations = 0; lt->sadb_lifetime_bytes = 0; lt->sadb_lifetime_addtime = sp->lifetime; lt->sadb_lifetime_usetime = sp->validtime; m_cat(result, m); /* * Note: do not send SADB_X_EXT_NAT_T_* here: * we are sending traffic endpoints. 
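 * The expire message assembled here is, schematically:
 *	<base, lifetime(CH), address(SD), policy>
 * i.e. the CURRENT and HARD lifetimes built above, followed by the
 * traffic endpoints and the policy itself.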
*/ /* set sadb_address for source */ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, &sp->spidx.src.sa, sp->spidx.prefs, sp->spidx.ul_proto); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); /* set sadb_address for destination */ m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, &sp->spidx.dst.sa, sp->spidx.prefd, sp->spidx.ul_proto); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); /* set secpolicy */ m = key_sp2msg(sp); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); if ((result->m_flags & M_PKTHDR) == 0) { error = EINVAL; goto fail; } if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); if (result == NULL) { error = ENOBUFS; goto fail; } } result->m_pkthdr.len = 0; for (m = result; m; m = m->m_next) result->m_pkthdr.len += m->m_len; mtod(result, struct sadb_msg *)->sadb_msg_len = PFKEY_UNIT64(result->m_pkthdr.len); return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED); fail: if (result) m_freem(result); return error; } /* %%% SAD management */ /* * allocating a memory for new SA head, and copy from the values of mhp. * OUT: NULL : failure due to the lack of memory. * others : pointer to new SA head. */ static struct secashead * key_newsah(struct secasindex *saidx) { struct secashead *newsah; IPSEC_ASSERT(saidx != NULL, ("null saidx")); newsah = malloc(sizeof(struct secashead), M_IPSEC_SAH, M_NOWAIT|M_ZERO); if (newsah != NULL) { int i; for (i = 0; i < sizeof(newsah->savtree)/sizeof(newsah->savtree[0]); i++) LIST_INIT(&newsah->savtree[i]); newsah->saidx = *saidx; /* add to saidxtree */ newsah->state = SADB_SASTATE_MATURE; SAHTREE_LOCK(); LIST_INSERT_HEAD(&V_sahtree, newsah, chain); SAHTREE_UNLOCK(); } return(newsah); } /* * delete SA index and all SA registerd. */ static void key_delsah(struct secashead *sah) { struct secasvar *sav, *nextsav; u_int stateidx; int zombie = 0; IPSEC_ASSERT(sah != NULL, ("NULL sah")); SAHTREE_LOCK_ASSERT(); /* searching all SA registerd in the secindex. */ for (stateidx = 0; stateidx < _ARRAYLEN(saorder_state_any); stateidx++) { u_int state = saorder_state_any[stateidx]; LIST_FOREACH_SAFE(sav, &sah->savtree[state], chain, nextsav) { if (sav->refcnt == 0) { /* sanity check */ KEY_CHKSASTATE(state, sav->state, __func__); /* * do NOT call KEY_FREESAV here: * it will only delete the sav if refcnt == 1, * where we already know that refcnt == 0 */ key_delsav(sav); } else { /* give up to delete this sa */ zombie++; } } } if (!zombie) { /* delete only if there are savs */ /* remove from tree of SA index */ if (__LIST_CHAINED(sah)) LIST_REMOVE(sah, chain); free(sah, M_IPSEC_SAH); } } /* * allocating a new SA with LARVAL state. key_add() and key_getspi() call, * and copy the values of mhp into new buffer. * When SAD message type is GETSPI: * to set sequence number from acq_seq++, * to set zero to SPI. * not to call key_setsava(). * OUT: NULL : fail * others : pointer to new secasvar. * * does not modify mbuf. does not free mbuf on error. 
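 * For SADB_GETSPI the SPI is left at zero and only a sequence number
 * is assigned; for SADB_ADD the SPI is taken verbatim from the
 * mandatory SADB_EXT_SA extension.  Any other message type is
 * rejected with EINVAL (see the switch below).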
*/ static struct secasvar * key_newsav(struct mbuf *m, const struct sadb_msghdr *mhp, struct secashead *sah, int *errp, const char *where, int tag) { struct secasvar *newsav; const struct sadb_sa *xsa; IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); IPSEC_ASSERT(sah != NULL, ("null secashead")); newsav = malloc(sizeof(struct secasvar), M_IPSEC_SA, M_NOWAIT|M_ZERO); if (newsav == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); *errp = ENOBUFS; goto done; } switch (mhp->msg->sadb_msg_type) { case SADB_GETSPI: newsav->spi = 0; #ifdef IPSEC_DOSEQCHECK /* sync sequence number */ if (mhp->msg->sadb_msg_seq == 0) newsav->seq = (V_acq_seq = (V_acq_seq == ~0 ? 1 : ++V_acq_seq)); else #endif newsav->seq = mhp->msg->sadb_msg_seq; break; case SADB_ADD: /* sanity check */ if (mhp->ext[SADB_EXT_SA] == NULL) { free(newsav, M_IPSEC_SA); newsav = NULL; ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); *errp = EINVAL; goto done; } xsa = (const struct sadb_sa *)mhp->ext[SADB_EXT_SA]; newsav->spi = xsa->sadb_sa_spi; newsav->seq = mhp->msg->sadb_msg_seq; break; default: free(newsav, M_IPSEC_SA); newsav = NULL; *errp = EINVAL; goto done; } /* copy sav values */ if (mhp->msg->sadb_msg_type != SADB_GETSPI) { *errp = key_setsaval(newsav, m, mhp); if (*errp) { free(newsav, M_IPSEC_SA); newsav = NULL; goto done; } } SECASVAR_LOCK_INIT(newsav); /* reset created */ newsav->created = time_second; newsav->pid = mhp->msg->sadb_msg_pid; /* add to satree */ newsav->sah = sah; sa_initref(newsav); newsav->state = SADB_SASTATE_LARVAL; SAHTREE_LOCK(); LIST_INSERT_TAIL(&sah->savtree[SADB_SASTATE_LARVAL], newsav, secasvar, chain); SAHTREE_UNLOCK(); done: KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s from %s:%u return SP:%p\n", __func__, where, tag, newsav)); return newsav; } /* * free() SA variable entry. */ static void key_cleansav(struct secasvar *sav) { /* * Cleanup xform state. Note that zeroize'ing causes the * keys to be cleared; otherwise we must do it ourself. */ if (sav->tdb_xform != NULL) { sav->tdb_xform->xf_zeroize(sav); sav->tdb_xform = NULL; } else { KASSERT(sav->iv == NULL, ("iv but no xform")); if (sav->key_auth != NULL) bzero(sav->key_auth->key_data, _KEYLEN(sav->key_auth)); if (sav->key_enc != NULL) bzero(sav->key_enc->key_data, _KEYLEN(sav->key_enc)); } if (sav->key_auth != NULL) { if (sav->key_auth->key_data != NULL) free(sav->key_auth->key_data, M_IPSEC_MISC); free(sav->key_auth, M_IPSEC_MISC); sav->key_auth = NULL; } if (sav->key_enc != NULL) { if (sav->key_enc->key_data != NULL) free(sav->key_enc->key_data, M_IPSEC_MISC); free(sav->key_enc, M_IPSEC_MISC); sav->key_enc = NULL; } if (sav->sched) { bzero(sav->sched, sav->schedlen); free(sav->sched, M_IPSEC_MISC); sav->sched = NULL; } if (sav->replay != NULL) { free(sav->replay, M_IPSEC_MISC); sav->replay = NULL; } if (sav->lft_c != NULL) { free(sav->lft_c, M_IPSEC_MISC); sav->lft_c = NULL; } if (sav->lft_h != NULL) { free(sav->lft_h, M_IPSEC_MISC); sav->lft_h = NULL; } if (sav->lft_s != NULL) { free(sav->lft_s, M_IPSEC_MISC); sav->lft_s = NULL; } } /* * free() SA variable entry. */ static void key_delsav(struct secasvar *sav) { IPSEC_ASSERT(sav != NULL, ("null sav")); IPSEC_ASSERT(sav->refcnt == 0, ("reference count %u > 0", sav->refcnt)); /* remove from SA header */ if (__LIST_CHAINED(sav)) LIST_REMOVE(sav, chain); key_cleansav(sav); SECASVAR_LOCK_DESTROY(sav); free(sav, M_IPSEC_SA); } /* * search SAD. 
* OUT:
 *	NULL	: not found
 *	others	: found, pointer to an SA.
 */
static struct secashead *
key_getsah(struct secasindex *saidx)
{
	struct secashead *sah;

	SAHTREE_LOCK();
	LIST_FOREACH(sah, &V_sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx(&sah->saidx, saidx, CMP_REQID))
			break;
	}
	SAHTREE_UNLOCK();

	return sah;
}

/*
 * Check that the given SPI is not used by an existing SA.
 * NOTE: this function is slow because it searches the whole SAD.
 * OUT:
 *	NULL	: not found
 *	others	: found, pointer to an SA.
 */
static struct secasvar *
key_checkspidup(struct secasindex *saidx, u_int32_t spi)
{
	struct secashead *sah;
	struct secasvar *sav;

	/* check address family */
	if (saidx->src.sa.sa_family != saidx->dst.sa.sa_family) {
		ipseclog((LOG_DEBUG, "%s: address family mismatched.\n",
		    __func__));
		return NULL;
	}

	sav = NULL;
	/* check all SAD */
	SAHTREE_LOCK();
	LIST_FOREACH(sah, &V_sahtree, chain) {
		if (!key_ismyaddr((struct sockaddr *)&sah->saidx.dst))
			continue;
		sav = key_getsavbyspi(sah, spi);
		if (sav != NULL)
			break;
	}
	SAHTREE_UNLOCK();

	return sav;
}

/*
 * Search the SAD, limited to alive SAs, for the given SPI.
 * OUT:
 *	NULL	: not found
 *	others	: found, pointer to an SA.
 */
static struct secasvar *
key_getsavbyspi(struct secashead *sah, u_int32_t spi)
{
	struct secasvar *sav;
	u_int stateidx, state;

	sav = NULL;
	SAHTREE_LOCK_ASSERT();
	/* search all states */
	for (stateidx = 0; stateidx < _ARRAYLEN(saorder_state_alive);
	    stateidx++) {
		state = saorder_state_alive[stateidx];
		LIST_FOREACH(sav, &sah->savtree[state], chain) {
			/* sanity check */
			if (sav->state != state) {
				ipseclog((LOG_DEBUG, "%s: "
				    "invalid sav->state (queue: %d SA: %d)\n",
				    __func__, state, sav->state));
				continue;
			}
			if (sav->spi == spi)
				return sav;
		}
	}

	return NULL;
}

/*
 * Copy SA values from a PF_KEY message, except *SPI, SEQ, PID, STATE
 * and TYPE*.  You must update those yourself if needed.
 * OUT:	0: success.
 *	!0: failure.
 *
 * does not modify mbuf.  does not free mbuf on error.
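 * Roughly, the extensions consumed below are: SADB_EXT_SA for
 * algorithms, flags and the replay window; SADB_EXT_KEY_AUTH and
 * SADB_EXT_KEY_ENCRYPT for key material; and the HARD/SOFT lifetime
 * extensions.  The xform is then initialized from sadb_msg_satype.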
*/ static int key_setsaval(struct secasvar *sav, struct mbuf *m, const struct sadb_msghdr *mhp) { int error = 0; IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); /* initialization */ sav->replay = NULL; sav->key_auth = NULL; sav->key_enc = NULL; sav->sched = NULL; sav->schedlen = 0; sav->iv = NULL; sav->lft_c = NULL; sav->lft_h = NULL; sav->lft_s = NULL; sav->tdb_xform = NULL; /* transform */ sav->tdb_encalgxform = NULL; /* encoding algorithm */ sav->tdb_authalgxform = NULL; /* authentication algorithm */ sav->tdb_compalgxform = NULL; /* compression algorithm */ /* Initialize even if NAT-T not compiled in: */ sav->natt_type = 0; sav->natt_esp_frag_len = 0; /* SA */ if (mhp->ext[SADB_EXT_SA] != NULL) { const struct sadb_sa *sa0; sa0 = (const struct sadb_sa *)mhp->ext[SADB_EXT_SA]; if (mhp->extlen[SADB_EXT_SA] < sizeof(*sa0)) { error = EINVAL; goto fail; } sav->alg_auth = sa0->sadb_sa_auth; sav->alg_enc = sa0->sadb_sa_encrypt; sav->flags = sa0->sadb_sa_flags; /* replay window */ if ((sa0->sadb_sa_flags & SADB_X_EXT_OLD) == 0) { sav->replay = (struct secreplay *) malloc(sizeof(struct secreplay)+sa0->sadb_sa_replay, M_IPSEC_MISC, M_NOWAIT|M_ZERO); if (sav->replay == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); error = ENOBUFS; goto fail; } if (sa0->sadb_sa_replay != 0) sav->replay->bitmap = (caddr_t)(sav->replay+1); sav->replay->wsize = sa0->sadb_sa_replay; } } /* Authentication keys */ if (mhp->ext[SADB_EXT_KEY_AUTH] != NULL) { const struct sadb_key *key0; int len; key0 = (const struct sadb_key *)mhp->ext[SADB_EXT_KEY_AUTH]; len = mhp->extlen[SADB_EXT_KEY_AUTH]; error = 0; if (len < sizeof(*key0)) { error = EINVAL; goto fail; } switch (mhp->msg->sadb_msg_satype) { case SADB_SATYPE_AH: case SADB_SATYPE_ESP: case SADB_X_SATYPE_TCPSIGNATURE: if (len == PFKEY_ALIGN8(sizeof(struct sadb_key)) && sav->alg_auth != SADB_X_AALG_NULL) error = EINVAL; break; case SADB_X_SATYPE_IPCOMP: default: error = EINVAL; break; } if (error) { ipseclog((LOG_DEBUG, "%s: invalid key_auth values.\n", __func__)); goto fail; } sav->key_auth = (struct seckey *)key_dup_keymsg(key0, len, M_IPSEC_MISC); if (sav->key_auth == NULL ) { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); error = ENOBUFS; goto fail; } } /* Encryption key */ if (mhp->ext[SADB_EXT_KEY_ENCRYPT] != NULL) { const struct sadb_key *key0; int len; key0 = (const struct sadb_key *)mhp->ext[SADB_EXT_KEY_ENCRYPT]; len = mhp->extlen[SADB_EXT_KEY_ENCRYPT]; error = 0; if (len < sizeof(*key0)) { error = EINVAL; goto fail; } switch (mhp->msg->sadb_msg_satype) { case SADB_SATYPE_ESP: if (len == PFKEY_ALIGN8(sizeof(struct sadb_key)) && sav->alg_enc != SADB_EALG_NULL) { error = EINVAL; break; } sav->key_enc = (struct seckey *)key_dup_keymsg(key0, len, M_IPSEC_MISC); if (sav->key_enc == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); error = ENOBUFS; goto fail; } break; case SADB_X_SATYPE_IPCOMP: if (len != PFKEY_ALIGN8(sizeof(struct sadb_key))) error = EINVAL; sav->key_enc = NULL; /*just in case*/ break; case SADB_SATYPE_AH: case SADB_X_SATYPE_TCPSIGNATURE: default: error = EINVAL; break; } if (error) { ipseclog((LOG_DEBUG, "%s: invalid key_enc value.\n", __func__)); goto fail; } } /* set iv */ sav->ivlen = 0; switch (mhp->msg->sadb_msg_satype) { case SADB_SATYPE_AH: error = xform_init(sav, XF_AH); break; case SADB_SATYPE_ESP: error = xform_init(sav, XF_ESP); break; case SADB_X_SATYPE_IPCOMP: error = xform_init(sav, XF_IPCOMP); break; case 
SADB_X_SATYPE_TCPSIGNATURE: error = xform_init(sav, XF_TCPSIGNATURE); break; } if (error) { ipseclog((LOG_DEBUG, "%s: unable to initialize SA type %u.\n", __func__, mhp->msg->sadb_msg_satype)); goto fail; } /* reset created */ sav->created = time_second; /* make lifetime for CURRENT */ sav->lft_c = malloc(sizeof(struct seclifetime), M_IPSEC_MISC, M_NOWAIT); if (sav->lft_c == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); error = ENOBUFS; goto fail; } sav->lft_c->allocations = 0; sav->lft_c->bytes = 0; sav->lft_c->addtime = time_second; sav->lft_c->usetime = 0; /* lifetimes for HARD and SOFT */ { const struct sadb_lifetime *lft0; lft0 = (struct sadb_lifetime *)mhp->ext[SADB_EXT_LIFETIME_HARD]; if (lft0 != NULL) { if (mhp->extlen[SADB_EXT_LIFETIME_HARD] < sizeof(*lft0)) { error = EINVAL; goto fail; } sav->lft_h = key_dup_lifemsg(lft0, M_IPSEC_MISC); if (sav->lft_h == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n",__func__)); error = ENOBUFS; goto fail; } /* to be initialize ? */ } lft0 = (struct sadb_lifetime *)mhp->ext[SADB_EXT_LIFETIME_SOFT]; if (lft0 != NULL) { if (mhp->extlen[SADB_EXT_LIFETIME_SOFT] < sizeof(*lft0)) { error = EINVAL; goto fail; } sav->lft_s = key_dup_lifemsg(lft0, M_IPSEC_MISC); if (sav->lft_s == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n",__func__)); error = ENOBUFS; goto fail; } /* to be initialize ? */ } } return 0; fail: /* initialization */ key_cleansav(sav); return error; } /* * validation with a secasvar entry, and set SADB_SATYPE_MATURE. * OUT: 0: valid * other: errno */ static int key_mature(struct secasvar *sav) { int error; /* check SPI value */ switch (sav->sah->saidx.proto) { case IPPROTO_ESP: case IPPROTO_AH: /* * RFC 4302, 2.4. Security Parameters Index (SPI), SPI values * 1-255 reserved by IANA for future use, * 0 for implementation specific, local use. */ if (ntohl(sav->spi) <= 255) { ipseclog((LOG_DEBUG, "%s: illegal range of SPI %u.\n", __func__, (u_int32_t)ntohl(sav->spi))); return EINVAL; } break; } /* check satype */ switch (sav->sah->saidx.proto) { case IPPROTO_ESP: /* check flags */ if ((sav->flags & (SADB_X_EXT_OLD|SADB_X_EXT_DERIV)) == (SADB_X_EXT_OLD|SADB_X_EXT_DERIV)) { ipseclog((LOG_DEBUG, "%s: invalid flag (derived) " "given to old-esp.\n", __func__)); return EINVAL; } error = xform_init(sav, XF_ESP); break; case IPPROTO_AH: /* check flags */ if (sav->flags & SADB_X_EXT_DERIV) { ipseclog((LOG_DEBUG, "%s: invalid flag (derived) " "given to AH SA.\n", __func__)); return EINVAL; } if (sav->alg_enc != SADB_EALG_NONE) { ipseclog((LOG_DEBUG, "%s: protocol and algorithm " "mismated.\n", __func__)); return(EINVAL); } error = xform_init(sav, XF_AH); break; case IPPROTO_IPCOMP: if (sav->alg_auth != SADB_AALG_NONE) { ipseclog((LOG_DEBUG, "%s: protocol and algorithm " "mismated.\n", __func__)); return(EINVAL); } if ((sav->flags & SADB_X_EXT_RAWCPI) == 0 && ntohl(sav->spi) >= 0x10000) { ipseclog((LOG_DEBUG, "%s: invalid cpi for IPComp.\n", __func__)); return(EINVAL); } error = xform_init(sav, XF_IPCOMP); break; case IPPROTO_TCP: if (sav->alg_enc != SADB_EALG_NONE) { ipseclog((LOG_DEBUG, "%s: protocol and algorithm " "mismated.\n", __func__)); return(EINVAL); } error = xform_init(sav, XF_TCPSIGNATURE); break; default: ipseclog((LOG_DEBUG, "%s: Invalid satype.\n", __func__)); error = EPROTONOSUPPORT; break; } if (error == 0) { SAHTREE_LOCK(); key_sa_chgstate(sav, SADB_SASTATE_MATURE); SAHTREE_UNLOCK(); } return (error); } /* * subroutine for SADB_GET and SADB_DUMP. 
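 * The dumporder[] array below is walked backwards, and each freshly
 * built extension is prepended to the chain via m_cat(m, tres), so
 * the final message comes out in dumporder[] order after the base
 * header.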
*/ static struct mbuf * key_setdumpsa(struct secasvar *sav, u_int8_t type, u_int8_t satype, u_int32_t seq, u_int32_t pid) { struct mbuf *result = NULL, *tres = NULL, *m; int i; int dumporder[] = { SADB_EXT_SA, SADB_X_EXT_SA2, SADB_EXT_LIFETIME_HARD, SADB_EXT_LIFETIME_SOFT, SADB_EXT_LIFETIME_CURRENT, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST, SADB_EXT_ADDRESS_PROXY, SADB_EXT_KEY_AUTH, SADB_EXT_KEY_ENCRYPT, SADB_EXT_IDENTITY_SRC, SADB_EXT_IDENTITY_DST, SADB_EXT_SENSITIVITY, #ifdef IPSEC_NAT_T SADB_X_EXT_NAT_T_TYPE, SADB_X_EXT_NAT_T_SPORT, SADB_X_EXT_NAT_T_DPORT, SADB_X_EXT_NAT_T_OAI, SADB_X_EXT_NAT_T_OAR, SADB_X_EXT_NAT_T_FRAG, #endif }; m = key_setsadbmsg(type, 0, satype, seq, pid, sav->refcnt); if (m == NULL) goto fail; result = m; for (i = sizeof(dumporder)/sizeof(dumporder[0]) - 1; i >= 0; i--) { m = NULL; switch (dumporder[i]) { case SADB_EXT_SA: m = key_setsadbsa(sav); if (!m) goto fail; break; case SADB_X_EXT_SA2: m = key_setsadbxsa2(sav->sah->saidx.mode, sav->replay ? sav->replay->count : 0, sav->sah->saidx.reqid); if (!m) goto fail; break; case SADB_EXT_ADDRESS_SRC: m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, &sav->sah->saidx.src.sa, FULLMASK, IPSEC_ULPROTO_ANY); if (!m) goto fail; break; case SADB_EXT_ADDRESS_DST: m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, &sav->sah->saidx.dst.sa, FULLMASK, IPSEC_ULPROTO_ANY); if (!m) goto fail; break; case SADB_EXT_KEY_AUTH: if (!sav->key_auth) continue; m = key_setkey(sav->key_auth, SADB_EXT_KEY_AUTH); if (!m) goto fail; break; case SADB_EXT_KEY_ENCRYPT: if (!sav->key_enc) continue; m = key_setkey(sav->key_enc, SADB_EXT_KEY_ENCRYPT); if (!m) goto fail; break; case SADB_EXT_LIFETIME_CURRENT: if (!sav->lft_c) continue; m = key_setlifetime(sav->lft_c, SADB_EXT_LIFETIME_CURRENT); if (!m) goto fail; break; case SADB_EXT_LIFETIME_HARD: if (!sav->lft_h) continue; m = key_setlifetime(sav->lft_h, SADB_EXT_LIFETIME_HARD); if (!m) goto fail; break; case SADB_EXT_LIFETIME_SOFT: if (!sav->lft_s) continue; m = key_setlifetime(sav->lft_s, SADB_EXT_LIFETIME_SOFT); if (!m) goto fail; break; #ifdef IPSEC_NAT_T case SADB_X_EXT_NAT_T_TYPE: m = key_setsadbxtype(sav->natt_type); if (!m) goto fail; break; case SADB_X_EXT_NAT_T_DPORT: m = key_setsadbxport( KEY_PORTFROMSADDR(&sav->sah->saidx.dst), SADB_X_EXT_NAT_T_DPORT); if (!m) goto fail; break; case SADB_X_EXT_NAT_T_SPORT: m = key_setsadbxport( KEY_PORTFROMSADDR(&sav->sah->saidx.src), SADB_X_EXT_NAT_T_SPORT); if (!m) goto fail; break; case SADB_X_EXT_NAT_T_OAI: case SADB_X_EXT_NAT_T_OAR: case SADB_X_EXT_NAT_T_FRAG: /* We do not (yet) support those. */ continue; #endif case SADB_EXT_ADDRESS_PROXY: case SADB_EXT_IDENTITY_SRC: case SADB_EXT_IDENTITY_DST: /* XXX: should we brought from SPD ? */ case SADB_EXT_SENSITIVITY: default: continue; } if (!m) goto fail; if (tres) m_cat(m, tres); tres = m; } m_cat(result, tres); if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); if (result == NULL) goto fail; } result->m_pkthdr.len = 0; for (m = result; m; m = m->m_next) result->m_pkthdr.len += m->m_len; mtod(result, struct sadb_msg *)->sadb_msg_len = PFKEY_UNIT64(result->m_pkthdr.len); return result; fail: m_freem(result); m_freem(tres); return NULL; } /* * set data into sadb_msg. 
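 * A typical call (illustrative only):
 *	m = key_setsadbmsg(SADB_X_SPDEXPIRE, 0, 0, 0, 0, 0);
 * passing 0 for tlen; callers usually recompute sadb_msg_len once the
 * complete extension chain has been assembled.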
*/ static struct mbuf * key_setsadbmsg(u_int8_t type, u_int16_t tlen, u_int8_t satype, u_int32_t seq, pid_t pid, u_int16_t reserved) { struct mbuf *m; struct sadb_msg *p; int len; len = PFKEY_ALIGN8(sizeof(struct sadb_msg)); if (len > MCLBYTES) return NULL; MGETHDR(m, M_NOWAIT, MT_DATA); if (m && len > MHLEN) { - MCLGET(m, M_NOWAIT); - if ((m->m_flags & M_EXT) == 0) { + if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); m = NULL; } } if (!m) return NULL; m->m_pkthdr.len = m->m_len = len; m->m_next = NULL; p = mtod(m, struct sadb_msg *); bzero(p, len); p->sadb_msg_version = PF_KEY_V2; p->sadb_msg_type = type; p->sadb_msg_errno = 0; p->sadb_msg_satype = satype; p->sadb_msg_len = PFKEY_UNIT64(tlen); p->sadb_msg_reserved = reserved; p->sadb_msg_seq = seq; p->sadb_msg_pid = (u_int32_t)pid; return m; } /* * copy secasvar data into sadb_address. */ static struct mbuf * key_setsadbsa(struct secasvar *sav) { struct mbuf *m; struct sadb_sa *p; int len; len = PFKEY_ALIGN8(sizeof(struct sadb_sa)); m = m_get2(len, M_NOWAIT, MT_DATA, 0); if (m == NULL) return (NULL); m_align(m, len); m->m_len = len; p = mtod(m, struct sadb_sa *); bzero(p, len); p->sadb_sa_len = PFKEY_UNIT64(len); p->sadb_sa_exttype = SADB_EXT_SA; p->sadb_sa_spi = sav->spi; p->sadb_sa_replay = (sav->replay != NULL ? sav->replay->wsize : 0); p->sadb_sa_state = sav->state; p->sadb_sa_auth = sav->alg_auth; p->sadb_sa_encrypt = sav->alg_enc; p->sadb_sa_flags = sav->flags; return m; } /* * set data into sadb_address. */ static struct mbuf * key_setsadbaddr(u_int16_t exttype, const struct sockaddr *saddr, u_int8_t prefixlen, u_int16_t ul_proto) { struct mbuf *m; struct sadb_address *p; size_t len; len = PFKEY_ALIGN8(sizeof(struct sadb_address)) + PFKEY_ALIGN8(saddr->sa_len); m = m_get2(len, M_NOWAIT, MT_DATA, 0); if (m == NULL) return (NULL); m_align(m, len); m->m_len = len; p = mtod(m, struct sadb_address *); bzero(p, len); p->sadb_address_len = PFKEY_UNIT64(len); p->sadb_address_exttype = exttype; p->sadb_address_proto = ul_proto; if (prefixlen == FULLMASK) { switch (saddr->sa_family) { case AF_INET: prefixlen = sizeof(struct in_addr) << 3; break; case AF_INET6: prefixlen = sizeof(struct in6_addr) << 3; break; default: ; /*XXX*/ } } p->sadb_address_prefixlen = prefixlen; p->sadb_address_reserved = 0; bcopy(saddr, mtod(m, caddr_t) + PFKEY_ALIGN8(sizeof(struct sadb_address)), saddr->sa_len); return m; } /* * set data into sadb_x_sa2. */ static struct mbuf * key_setsadbxsa2(u_int8_t mode, u_int32_t seq, u_int32_t reqid) { struct mbuf *m; struct sadb_x_sa2 *p; size_t len; len = PFKEY_ALIGN8(sizeof(struct sadb_x_sa2)); m = m_get2(len, M_NOWAIT, MT_DATA, 0); if (m == NULL) return (NULL); m_align(m, len); m->m_len = len; p = mtod(m, struct sadb_x_sa2 *); bzero(p, len); p->sadb_x_sa2_len = PFKEY_UNIT64(len); p->sadb_x_sa2_exttype = SADB_X_EXT_SA2; p->sadb_x_sa2_mode = mode; p->sadb_x_sa2_reserved1 = 0; p->sadb_x_sa2_reserved2 = 0; p->sadb_x_sa2_sequence = seq; p->sadb_x_sa2_reqid = reqid; return m; } #ifdef IPSEC_NAT_T /* * Set a type in sadb_x_nat_t_type. 
*/ static struct mbuf * key_setsadbxtype(u_int16_t type) { struct mbuf *m; size_t len; struct sadb_x_nat_t_type *p; len = PFKEY_ALIGN8(sizeof(struct sadb_x_nat_t_type)); m = m_get2(len, M_NOWAIT, MT_DATA, 0); if (m == NULL) return (NULL); m_align(m, len); m->m_len = len; p = mtod(m, struct sadb_x_nat_t_type *); bzero(p, len); p->sadb_x_nat_t_type_len = PFKEY_UNIT64(len); p->sadb_x_nat_t_type_exttype = SADB_X_EXT_NAT_T_TYPE; p->sadb_x_nat_t_type_type = type; return (m); } /* * Set a port in sadb_x_nat_t_port. * In contrast to default RFC 2367 behaviour, port is in network byte order. */ static struct mbuf * key_setsadbxport(u_int16_t port, u_int16_t type) { struct mbuf *m; size_t len; struct sadb_x_nat_t_port *p; len = PFKEY_ALIGN8(sizeof(struct sadb_x_nat_t_port)); m = m_get2(len, M_NOWAIT, MT_DATA, 0); if (m == NULL) return (NULL); m_align(m, len); m->m_len = len; p = mtod(m, struct sadb_x_nat_t_port *); bzero(p, len); p->sadb_x_nat_t_port_len = PFKEY_UNIT64(len); p->sadb_x_nat_t_port_exttype = type; p->sadb_x_nat_t_port_port = port; return (m); } /* * Get port from sockaddr. Port is in network byte order. */ u_int16_t key_portfromsaddr(struct sockaddr *sa) { switch (sa->sa_family) { #ifdef INET case AF_INET: return ((struct sockaddr_in *)sa)->sin_port; #endif #ifdef INET6 case AF_INET6: return ((struct sockaddr_in6 *)sa)->sin6_port; #endif } KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s unexpected address family %d\n", __func__, sa->sa_family)); return (0); } #endif /* IPSEC_NAT_T */ /* * Set port in struct sockaddr. Port is in network byte order. */ static void key_porttosaddr(struct sockaddr *sa, u_int16_t port) { switch (sa->sa_family) { #ifdef INET case AF_INET: ((struct sockaddr_in *)sa)->sin_port = port; break; #endif #ifdef INET6 case AF_INET6: ((struct sockaddr_in6 *)sa)->sin6_port = port; break; #endif default: ipseclog((LOG_DEBUG, "%s: unexpected address family %d.\n", __func__, sa->sa_family)); break; } } /* * set data into sadb_x_policy */ static struct mbuf * key_setsadbxpolicy(u_int16_t type, u_int8_t dir, u_int32_t id) { struct mbuf *m; struct sadb_x_policy *p; size_t len; len = PFKEY_ALIGN8(sizeof(struct sadb_x_policy)); m = m_get2(len, M_NOWAIT, MT_DATA, 0); if (m == NULL) return (NULL); m_align(m, len); m->m_len = len; p = mtod(m, struct sadb_x_policy *); bzero(p, len); p->sadb_x_policy_len = PFKEY_UNIT64(len); p->sadb_x_policy_exttype = SADB_X_EXT_POLICY; p->sadb_x_policy_type = type; p->sadb_x_policy_dir = dir; p->sadb_x_policy_id = id; return m; } /* %%% utilities */ /* Take a key message (sadb_key) from the socket and turn it into one * of the kernel's key structures (seckey). * * IN: pointer to the src * OUT: NULL no more memory */ struct seckey * key_dup_keymsg(const struct sadb_key *src, u_int len, struct malloc_type *type) { struct seckey *dst; dst = (struct seckey *)malloc(sizeof(struct seckey), type, M_NOWAIT); if (dst != NULL) { dst->bits = src->sadb_key_bits; dst->key_data = (char *)malloc(len, type, M_NOWAIT); if (dst->key_data != NULL) { bcopy((const char *)src + sizeof(struct sadb_key), dst->key_data, len); } else { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); free(dst, type); dst = NULL; } } else { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); } return dst; } /* Take a lifetime message (sadb_lifetime) passed in on a socket and * turn it into one of the kernel's lifetime structures (seclifetime). 
* * IN: pointer to the destination, source and malloc type * OUT: NULL, no more memory */ static struct seclifetime * key_dup_lifemsg(const struct sadb_lifetime *src, struct malloc_type *type) { struct seclifetime *dst = NULL; dst = (struct seclifetime *)malloc(sizeof(struct seclifetime), type, M_NOWAIT); if (dst == NULL) { /* XXX counter */ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); } else { dst->allocations = src->sadb_lifetime_allocations; dst->bytes = src->sadb_lifetime_bytes; dst->addtime = src->sadb_lifetime_addtime; dst->usetime = src->sadb_lifetime_usetime; } return dst; } /* compare my own address * OUT: 1: true, i.e. my address. * 0: false */ int key_ismyaddr(struct sockaddr *sa) { IPSEC_ASSERT(sa != NULL, ("null sockaddr")); switch (sa->sa_family) { #ifdef INET case AF_INET: return (in_localip(satosin(sa)->sin_addr)); #endif #ifdef INET6 case AF_INET6: return key_ismyaddr6((struct sockaddr_in6 *)sa); #endif } return 0; } #ifdef INET6 /* * compare my own address for IPv6. * 1: ours * 0: other * NOTE: derived ip6_input() in KAME. This is necessary to modify more. */ #include static int key_ismyaddr6(struct sockaddr_in6 *sin6) { struct in6_ifaddr *ia; #if 0 struct in6_multi *in6m; #endif IN6_IFADDR_RLOCK(); TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) { if (key_sockaddrcmp((struct sockaddr *)&sin6, (struct sockaddr *)&ia->ia_addr, 0) == 0) { IN6_IFADDR_RUNLOCK(); return 1; } #if 0 /* * XXX Multicast * XXX why do we care about multlicast here while we don't care * about IPv4 multicast?? * XXX scope */ in6m = NULL; IN6_LOOKUP_MULTI(sin6->sin6_addr, ia->ia_ifp, in6m); if (in6m) { IN6_IFADDR_RUNLOCK(); return 1; } #endif } IN6_IFADDR_RUNLOCK(); /* loopback, just for safety */ if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr)) return 1; return 0; } #endif /*INET6*/ /* * compare two secasindex structure. * flag can specify to compare 2 saidxes. * compare two secasindex structure without both mode and reqid. * don't compare port. * IN: * saidx0: source, it can be in SAD. * saidx1: object. * OUT: * 1 : equal * 0 : not equal */ static int key_cmpsaidx(const struct secasindex *saidx0, const struct secasindex *saidx1, int flag) { int chkport = 0; /* sanity */ if (saidx0 == NULL && saidx1 == NULL) return 1; if (saidx0 == NULL || saidx1 == NULL) return 0; if (saidx0->proto != saidx1->proto) return 0; if (flag == CMP_EXACTLY) { if (saidx0->mode != saidx1->mode) return 0; if (saidx0->reqid != saidx1->reqid) return 0; if (bcmp(&saidx0->src, &saidx1->src, saidx0->src.sa.sa_len) != 0 || bcmp(&saidx0->dst, &saidx1->dst, saidx0->dst.sa.sa_len) != 0) return 0; } else { /* CMP_MODE_REQID, CMP_REQID, CMP_HEAD */ if (flag == CMP_MODE_REQID ||flag == CMP_REQID) { /* * If reqid of SPD is non-zero, unique SA is required. * The result must be of same reqid in this case. */ if (saidx1->reqid != 0 && saidx0->reqid != saidx1->reqid) return 0; } if (flag == CMP_MODE_REQID) { if (saidx0->mode != IPSEC_MODE_ANY && saidx0->mode != saidx1->mode) return 0; } #ifdef IPSEC_NAT_T /* * If NAT-T is enabled, check ports for tunnel mode. * Do not check ports if they are set to zero in the SPD. * Also do not do it for native transport mode, as there * is no port information available in the SP. 
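 * For example (hypothetical addresses): two AF_INET tunnel-mode
 * saidx'es for 192.0.2.1 <-> 192.0.2.2 with both NAT-T ports nonzero
 * are compared with chkport set, while the same pair with zero ports
 * falls back to an address-only comparison.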
*/ if ((saidx1->mode == IPSEC_MODE_TUNNEL || (saidx1->mode == IPSEC_MODE_TRANSPORT && saidx1->proto == IPPROTO_ESP)) && saidx1->src.sa.sa_family == AF_INET && saidx1->dst.sa.sa_family == AF_INET && ((const struct sockaddr_in *)(&saidx1->src))->sin_port && ((const struct sockaddr_in *)(&saidx1->dst))->sin_port) chkport = 1; #endif /* IPSEC_NAT_T */ if (key_sockaddrcmp(&saidx0->src.sa, &saidx1->src.sa, chkport) != 0) { return 0; } if (key_sockaddrcmp(&saidx0->dst.sa, &saidx1->dst.sa, chkport) != 0) { return 0; } } return 1; } /* * compare two secindex structure exactly. * IN: * spidx0: source, it is often in SPD. * spidx1: object, it is often from PFKEY message. * OUT: * 1 : equal * 0 : not equal */ static int key_cmpspidx_exactly(struct secpolicyindex *spidx0, struct secpolicyindex *spidx1) { /* sanity */ if (spidx0 == NULL && spidx1 == NULL) return 1; if (spidx0 == NULL || spidx1 == NULL) return 0; if (spidx0->prefs != spidx1->prefs || spidx0->prefd != spidx1->prefd || spidx0->ul_proto != spidx1->ul_proto) return 0; return key_sockaddrcmp(&spidx0->src.sa, &spidx1->src.sa, 1) == 0 && key_sockaddrcmp(&spidx0->dst.sa, &spidx1->dst.sa, 1) == 0; } /* * compare two secindex structure with mask. * IN: * spidx0: source, it is often in SPD. * spidx1: object, it is often from IP header. * OUT: * 1 : equal * 0 : not equal */ static int key_cmpspidx_withmask(struct secpolicyindex *spidx0, struct secpolicyindex *spidx1) { /* sanity */ if (spidx0 == NULL && spidx1 == NULL) return 1; if (spidx0 == NULL || spidx1 == NULL) return 0; if (spidx0->src.sa.sa_family != spidx1->src.sa.sa_family || spidx0->dst.sa.sa_family != spidx1->dst.sa.sa_family || spidx0->src.sa.sa_len != spidx1->src.sa.sa_len || spidx0->dst.sa.sa_len != spidx1->dst.sa.sa_len) return 0; /* if spidx.ul_proto == IPSEC_ULPROTO_ANY, ignore. */ if (spidx0->ul_proto != (u_int16_t)IPSEC_ULPROTO_ANY && spidx0->ul_proto != spidx1->ul_proto) return 0; switch (spidx0->src.sa.sa_family) { case AF_INET: if (spidx0->src.sin.sin_port != IPSEC_PORT_ANY && spidx0->src.sin.sin_port != spidx1->src.sin.sin_port) return 0; if (!key_bbcmp(&spidx0->src.sin.sin_addr, &spidx1->src.sin.sin_addr, spidx0->prefs)) return 0; break; case AF_INET6: if (spidx0->src.sin6.sin6_port != IPSEC_PORT_ANY && spidx0->src.sin6.sin6_port != spidx1->src.sin6.sin6_port) return 0; /* * scope_id check. if sin6_scope_id is 0, we regard it * as a wildcard scope, which matches any scope zone ID. */ if (spidx0->src.sin6.sin6_scope_id && spidx1->src.sin6.sin6_scope_id && spidx0->src.sin6.sin6_scope_id != spidx1->src.sin6.sin6_scope_id) return 0; if (!key_bbcmp(&spidx0->src.sin6.sin6_addr, &spidx1->src.sin6.sin6_addr, spidx0->prefs)) return 0; break; default: /* XXX */ if (bcmp(&spidx0->src, &spidx1->src, spidx0->src.sa.sa_len) != 0) return 0; break; } switch (spidx0->dst.sa.sa_family) { case AF_INET: if (spidx0->dst.sin.sin_port != IPSEC_PORT_ANY && spidx0->dst.sin.sin_port != spidx1->dst.sin.sin_port) return 0; if (!key_bbcmp(&spidx0->dst.sin.sin_addr, &spidx1->dst.sin.sin_addr, spidx0->prefd)) return 0; break; case AF_INET6: if (spidx0->dst.sin6.sin6_port != IPSEC_PORT_ANY && spidx0->dst.sin6.sin6_port != spidx1->dst.sin6.sin6_port) return 0; /* * scope_id check. if sin6_scope_id is 0, we regard it * as a wildcard scope, which matches any scope zone ID. 
*/ if (spidx0->dst.sin6.sin6_scope_id && spidx1->dst.sin6.sin6_scope_id && spidx0->dst.sin6.sin6_scope_id != spidx1->dst.sin6.sin6_scope_id) return 0; if (!key_bbcmp(&spidx0->dst.sin6.sin6_addr, &spidx1->dst.sin6.sin6_addr, spidx0->prefd)) return 0; break; default: /* XXX */ if (bcmp(&spidx0->dst, &spidx1->dst, spidx0->dst.sa.sa_len) != 0) return 0; break; } /* XXX Do we check other field ? e.g. flowinfo */ return 1; } /* returns 0 on match */ static int key_sockaddrcmp(const struct sockaddr *sa1, const struct sockaddr *sa2, int port) { #ifdef satosin #undef satosin #endif #define satosin(s) ((const struct sockaddr_in *)s) #ifdef satosin6 #undef satosin6 #endif #define satosin6(s) ((const struct sockaddr_in6 *)s) if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) return 1; switch (sa1->sa_family) { case AF_INET: if (sa1->sa_len != sizeof(struct sockaddr_in)) return 1; if (satosin(sa1)->sin_addr.s_addr != satosin(sa2)->sin_addr.s_addr) { return 1; } if (port && satosin(sa1)->sin_port != satosin(sa2)->sin_port) return 1; break; case AF_INET6: if (sa1->sa_len != sizeof(struct sockaddr_in6)) return 1; /*EINVAL*/ if (satosin6(sa1)->sin6_scope_id != satosin6(sa2)->sin6_scope_id) { return 1; } if (!IN6_ARE_ADDR_EQUAL(&satosin6(sa1)->sin6_addr, &satosin6(sa2)->sin6_addr)) { return 1; } if (port && satosin6(sa1)->sin6_port != satosin6(sa2)->sin6_port) { return 1; } break; default: if (bcmp(sa1, sa2, sa1->sa_len) != 0) return 1; break; } return 0; #undef satosin #undef satosin6 } /* * compare two buffers with mask. * IN: * addr1: source * addr2: object * bits: Number of bits to compare * OUT: * 1 : equal * 0 : not equal */ static int key_bbcmp(const void *a1, const void *a2, u_int bits) { const unsigned char *p1 = a1; const unsigned char *p2 = a2; /* XXX: This could be considerably faster if we compare a word * at a time, but it is complicated on LSB Endian machines */ /* Handle null pointers */ if (p1 == NULL || p2 == NULL) return (p1 == p2); while (bits >= 8) { if (*p1++ != *p2++) return 0; bits -= 8; } if (bits > 0) { u_int8_t mask = ~((1<<(8-bits))-1); if ((*p1 & mask) != (*p2 & mask)) return 0; } return 1; /* Match! */ } static void key_flush_spd(time_t now) { SPTREE_RLOCK_TRACKER; struct secpolicy *sp; u_int dir; /* SPD */ for (dir = 0; dir < IPSEC_DIR_MAX; dir++) { restart: SPTREE_RLOCK(); TAILQ_FOREACH(sp, &V_sptree[dir], chain) { if (sp->lifetime == 0 && sp->validtime == 0) continue; if ((sp->lifetime && now - sp->created > sp->lifetime) || (sp->validtime && now - sp->lastused > sp->validtime)) { SPTREE_RUNLOCK(); key_unlink(sp); key_spdexpire(sp); KEY_FREESP(&sp); goto restart; } } SPTREE_RUNLOCK(); } } static void key_flush_sad(time_t now) { struct secashead *sah, *nextsah; struct secasvar *sav, *nextsav; /* SAD */ SAHTREE_LOCK(); LIST_FOREACH_SAFE(sah, &V_sahtree, chain, nextsah) { /* if sah has been dead, then delete it and process next sah. */ if (sah->state == SADB_SASTATE_DEAD) { key_delsah(sah); continue; } /* if LARVAL entry doesn't become MATURE, delete it. */ LIST_FOREACH_SAFE(sav, &sah->savtree[SADB_SASTATE_LARVAL], chain, nextsav) { /* Need to also check refcnt for a larval SA ??? */ if (now - sav->created > V_key_larval_lifetime) KEY_FREESAV(&sav); } /* * check MATURE entry to start to send expire message * whether or not. */ LIST_FOREACH_SAFE(sav, &sah->savtree[SADB_SASTATE_MATURE], chain, nextsav) { /* we don't need to check. 
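 * (an SA without a SOFT lifetime can never trigger a soft expire)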
*/ if (sav->lft_s == NULL) continue; /* sanity check */ if (sav->lft_c == NULL) { ipseclog((LOG_DEBUG,"%s: there is no CURRENT " "time, why?\n", __func__)); continue; } /* check SOFT lifetime */ if (sav->lft_s->addtime != 0 && now - sav->created > sav->lft_s->addtime) { key_sa_chgstate(sav, SADB_SASTATE_DYING); /* * Actually, only send expire message if * SA has been used, as it was done before, * but should we always send such message, * and let IKE daemon decide if it should be * renegotiated or not ? * XXX expire message will actually NOT be * sent if SA is only used after soft * lifetime has been reached, see below * (DYING state) */ if (sav->lft_c->usetime != 0) key_expire(sav); } /* check SOFT lifetime by bytes */ /* * XXX I don't know the way to delete this SA * when new SA is installed. Caution when it's * installed too big lifetime by time. */ else if (sav->lft_s->bytes != 0 && sav->lft_s->bytes < sav->lft_c->bytes) { key_sa_chgstate(sav, SADB_SASTATE_DYING); /* * XXX If we keep to send expire * message in the status of * DYING. Do remove below code. */ key_expire(sav); } } /* check DYING entry to change status to DEAD. */ LIST_FOREACH_SAFE(sav, &sah->savtree[SADB_SASTATE_DYING], chain, nextsav) { /* we don't need to check. */ if (sav->lft_h == NULL) continue; /* sanity check */ if (sav->lft_c == NULL) { ipseclog((LOG_DEBUG, "%s: there is no CURRENT " "time, why?\n", __func__)); continue; } if (sav->lft_h->addtime != 0 && now - sav->created > sav->lft_h->addtime) { key_sa_chgstate(sav, SADB_SASTATE_DEAD); KEY_FREESAV(&sav); } #if 0 /* XXX Should we keep to send expire message until HARD lifetime ? */ else if (sav->lft_s != NULL && sav->lft_s->addtime != 0 && now - sav->created > sav->lft_s->addtime) { /* * XXX: should be checked to be * installed the valid SA. */ /* * If there is no SA then sending * expire message. */ key_expire(sav); } #endif /* check HARD lifetime by bytes */ else if (sav->lft_h->bytes != 0 && sav->lft_h->bytes < sav->lft_c->bytes) { key_sa_chgstate(sav, SADB_SASTATE_DEAD); KEY_FREESAV(&sav); } } /* delete entry in DEAD */ LIST_FOREACH_SAFE(sav, &sah->savtree[SADB_SASTATE_DEAD], chain, nextsav) { /* sanity check */ if (sav->state != SADB_SASTATE_DEAD) { ipseclog((LOG_DEBUG, "%s: invalid sav->state " "(queue: %d SA: %d): kill it anyway\n", __func__, SADB_SASTATE_DEAD, sav->state)); } /* * do not call key_freesav() here. * sav should already be freed, and sav->refcnt * shows other references to sav * (such as from SPD). */ } } SAHTREE_UNLOCK(); } static void key_flush_acq(time_t now) { struct secacq *acq, *nextacq; /* ACQ tree */ ACQ_LOCK(); for (acq = LIST_FIRST(&V_acqtree); acq != NULL; acq = nextacq) { nextacq = LIST_NEXT(acq, chain); if (now - acq->created > V_key_blockacq_lifetime && __LIST_CHAINED(acq)) { LIST_REMOVE(acq, chain); free(acq, M_IPSEC_SAQ); } } ACQ_UNLOCK(); } static void key_flush_spacq(time_t now) { struct secspacq *acq, *nextacq; /* SP ACQ tree */ SPACQ_LOCK(); for (acq = LIST_FIRST(&V_spacqtree); acq != NULL; acq = nextacq) { nextacq = LIST_NEXT(acq, chain); if (now - acq->created > V_key_blockacq_lifetime && __LIST_CHAINED(acq)) { LIST_REMOVE(acq, chain); free(acq, M_IPSEC_SAQ); } } SPACQ_UNLOCK(); } /* * time handler. * scanning SPD and SAD to check status for each entries, * and do to remove or to expire. * XXX: year 2038 problem may remain. 
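 * Unless IPSEC_DEBUG2 is defined, the handler reschedules itself via
 * callout_schedule(&key_timer, hz), so each vnet's SPD, SAD and ACQ
 * queues are scanned roughly once per second.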
*/ static void key_timehandler(void *arg) { VNET_ITERATOR_DECL(vnet_iter); time_t now = time_second; VNET_LIST_RLOCK_NOSLEEP(); VNET_FOREACH(vnet_iter) { CURVNET_SET(vnet_iter); key_flush_spd(now); key_flush_sad(now); key_flush_acq(now); key_flush_spacq(now); CURVNET_RESTORE(); } VNET_LIST_RUNLOCK_NOSLEEP(); #ifndef IPSEC_DEBUG2 /* do exchange to tick time !! */ callout_schedule(&key_timer, hz); #endif /* IPSEC_DEBUG2 */ } u_long key_random() { u_long value; key_randomfill(&value, sizeof(value)); return value; } void key_randomfill(void *p, size_t l) { size_t n; u_long v; static int warn = 1; n = 0; n = (size_t)read_random(p, (u_int)l); /* last resort */ while (n < l) { v = random(); bcopy(&v, (u_int8_t *)p + n, l - n < sizeof(v) ? l - n : sizeof(v)); n += sizeof(v); if (warn) { printf("WARNING: pseudo-random number generator " "used for IPsec processing\n"); warn = 0; } } } /* * map SADB_SATYPE_* to IPPROTO_*. * if satype == SADB_SATYPE then satype is mapped to ~0. * OUT: * 0: invalid satype. */ static u_int16_t key_satype2proto(u_int8_t satype) { switch (satype) { case SADB_SATYPE_UNSPEC: return IPSEC_PROTO_ANY; case SADB_SATYPE_AH: return IPPROTO_AH; case SADB_SATYPE_ESP: return IPPROTO_ESP; case SADB_X_SATYPE_IPCOMP: return IPPROTO_IPCOMP; case SADB_X_SATYPE_TCPSIGNATURE: return IPPROTO_TCP; default: return 0; } /* NOTREACHED */ } /* * map IPPROTO_* to SADB_SATYPE_* * OUT: * 0: invalid protocol type. */ static u_int8_t key_proto2satype(u_int16_t proto) { switch (proto) { case IPPROTO_AH: return SADB_SATYPE_AH; case IPPROTO_ESP: return SADB_SATYPE_ESP; case IPPROTO_IPCOMP: return SADB_X_SATYPE_IPCOMP; case IPPROTO_TCP: return SADB_X_SATYPE_TCPSIGNATURE; default: return 0; } /* NOTREACHED */ } /* %%% PF_KEY */ /* * SADB_GETSPI processing is to receive * * from the IKMPd, to assign a unique spi value, to hang on the INBOUND * tree with the status of LARVAL, and send * * to the IKMPd. * * IN: mhp: pointer to the pointer to each header. * OUT: NULL if fail. * other if success, return pointer to the message to send. */ static int key_getspi(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct sadb_address *src0, *dst0; struct secasindex saidx; struct secashead *newsah; struct secasvar *newsav; u_int8_t proto; u_int32_t spi; u_int8_t mode; u_int32_t reqid; int error; IPSEC_ASSERT(so != NULL, ("null socket")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_X_EXT_SA2] != NULL) { mode = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode; reqid = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid; } else { mode = IPSEC_MODE_ANY; reqid = 0; } src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]); dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]); /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } /* * Make sure the port numbers are zero. 
* In case of NAT-T we will update them later if needed. */ switch (((struct sockaddr *)(src0 + 1))->sa_family) { case AF_INET: if (((struct sockaddr *)(src0 + 1))->sa_len != sizeof(struct sockaddr_in)) return key_senderror(so, m, EINVAL); ((struct sockaddr_in *)(src0 + 1))->sin_port = 0; break; case AF_INET6: if (((struct sockaddr *)(src0 + 1))->sa_len != sizeof(struct sockaddr_in6)) return key_senderror(so, m, EINVAL); ((struct sockaddr_in6 *)(src0 + 1))->sin6_port = 0; break; default: ; /*???*/ } switch (((struct sockaddr *)(dst0 + 1))->sa_family) { case AF_INET: if (((struct sockaddr *)(dst0 + 1))->sa_len != sizeof(struct sockaddr_in)) return key_senderror(so, m, EINVAL); ((struct sockaddr_in *)(dst0 + 1))->sin_port = 0; break; case AF_INET6: if (((struct sockaddr *)(dst0 + 1))->sa_len != sizeof(struct sockaddr_in6)) return key_senderror(so, m, EINVAL); ((struct sockaddr_in6 *)(dst0 + 1))->sin6_port = 0; break; default: ; /*???*/ } /* XXX boundary check against sa_len */ KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, &saidx); #ifdef IPSEC_NAT_T /* * Handle NAT-T info if present. * We made sure the port numbers are zero above, so we do * not have to worry in case we do not update them. */ if (mhp->ext[SADB_X_EXT_NAT_T_OAI] != NULL) ipseclog((LOG_DEBUG, "%s: NAT-T OAi present\n", __func__)); if (mhp->ext[SADB_X_EXT_NAT_T_OAR] != NULL) ipseclog((LOG_DEBUG, "%s: NAT-T OAr present\n", __func__)); if (mhp->ext[SADB_X_EXT_NAT_T_TYPE] != NULL && mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL && mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) { struct sadb_x_nat_t_type *type; struct sadb_x_nat_t_port *sport, *dport; if (mhp->extlen[SADB_X_EXT_NAT_T_TYPE] < sizeof(*type) || mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) || mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) { ipseclog((LOG_DEBUG, "%s: invalid nat-t message " "passed.\n", __func__)); return key_senderror(so, m, EINVAL); } sport = (struct sadb_x_nat_t_port *) mhp->ext[SADB_X_EXT_NAT_T_SPORT]; dport = (struct sadb_x_nat_t_port *) mhp->ext[SADB_X_EXT_NAT_T_DPORT]; if (sport) KEY_PORTTOSADDR(&saidx.src, sport->sadb_x_nat_t_port_port); if (dport) KEY_PORTTOSADDR(&saidx.dst, dport->sadb_x_nat_t_port_port); } #endif /* SPI allocation */ spi = key_do_getnewspi((struct sadb_spirange *)mhp->ext[SADB_EXT_SPIRANGE], &saidx); if (spi == 0) return key_senderror(so, m, EINVAL); /* get a SA index */ if ((newsah = key_getsah(&saidx)) == NULL) { /* create a new SA index */ if ((newsah = key_newsah(&saidx)) == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n",__func__)); return key_senderror(so, m, ENOBUFS); } } /* get a new SA */ /* XXX rewrite */ newsav = KEY_NEWSAV(m, mhp, newsah, &error); if (newsav == NULL) { /* XXX don't free new SA index allocated in above. */ return key_senderror(so, m, error); } /* set spi */ newsav->spi = htonl(spi); /* delete the entry in acqtree */ if (mhp->msg->sadb_msg_seq != 0) { struct secacq *acq; if ((acq = key_getacqbyseq(mhp->msg->sadb_msg_seq)) != NULL) { /* reset counter in order to deletion by timehandler. */ acq->created = time_second; acq->count = 0; } } { struct mbuf *n, *nn; struct sadb_sa *m_sa; struct sadb_msg *newmsg; int off, len; /* create new sadb_msg to reply. 
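 * The reply built below is, schematically:
 *	<base, SA(*), address(SD)>
 * where SA(*) carries the freshly allocated SPI in network byte order.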
*/ len = PFKEY_ALIGN8(sizeof(struct sadb_msg)) + PFKEY_ALIGN8(sizeof(struct sadb_sa)); MGETHDR(n, M_NOWAIT, MT_DATA); if (len > MHLEN) { - MCLGET(n, M_NOWAIT); - if ((n->m_flags & M_EXT) == 0) { + if (!(MCLGET(n, M_NOWAIT))) { m_freem(n); n = NULL; } } if (!n) return key_senderror(so, m, ENOBUFS); n->m_len = len; n->m_next = NULL; off = 0; m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t) + off); off += PFKEY_ALIGN8(sizeof(struct sadb_msg)); m_sa = (struct sadb_sa *)(mtod(n, caddr_t) + off); m_sa->sadb_sa_len = PFKEY_UNIT64(sizeof(struct sadb_sa)); m_sa->sadb_sa_exttype = SADB_EXT_SA; m_sa->sadb_sa_spi = htonl(spi); off += PFKEY_ALIGN8(sizeof(struct sadb_sa)); IPSEC_ASSERT(off == len, ("length inconsistency (off %u len %u)", off, len)); n->m_next = key_gather_mbuf(m, mhp, 0, 2, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST); if (!n->m_next) { m_freem(n); return key_senderror(so, m, ENOBUFS); } if (n->m_len < sizeof(struct sadb_msg)) { n = m_pullup(n, sizeof(struct sadb_msg)); if (n == NULL) return key_sendup_mbuf(so, m, KEY_SENDUP_ONE); } n->m_pkthdr.len = 0; for (nn = n; nn; nn = nn->m_next) n->m_pkthdr.len += nn->m_len; newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_seq = newsav->seq; newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ONE); } } /* * allocating new SPI * called by key_getspi(). * OUT: * 0: failure. * others: success. */ static u_int32_t key_do_getnewspi(struct sadb_spirange *spirange, struct secasindex *saidx) { u_int32_t newspi; u_int32_t min, max; int count = V_key_spi_trycnt; /* set spi range to allocate */ if (spirange != NULL) { min = spirange->sadb_spirange_min; max = spirange->sadb_spirange_max; } else { min = V_key_spi_minval; max = V_key_spi_maxval; } /* IPCOMP needs 2-byte SPI */ if (saidx->proto == IPPROTO_IPCOMP) { u_int32_t t; if (min >= 0x10000) min = 0xffff; if (max >= 0x10000) max = 0xffff; if (min > max) { t = min; min = max; max = t; } } if (min == max) { if (key_checkspidup(saidx, min) != NULL) { ipseclog((LOG_DEBUG, "%s: SPI %u exists already.\n", __func__, min)); return 0; } count--; /* taking one cost. */ newspi = min; } else { /* init SPI */ newspi = 0; /* when requesting to allocate spi ranged */ while (count--) { /* generate pseudo-random SPI value ranged. */ newspi = min + (key_random() % (max - min + 1)); if (key_checkspidup(saidx, newspi) == NULL) break; } if (count == 0 || newspi == 0) { ipseclog((LOG_DEBUG, "%s: to allocate spi is failed.\n", __func__)); return 0; } } /* statistics */ keystat.getspi_count = (keystat.getspi_count + V_key_spi_trycnt - count) / 2; return newspi; } /* * SADB_UPDATE processing * receive * * from the ikmpd, and update a secasvar entry whose status is SADB_SASTATE_LARVAL. * and send * * to the ikmpd. * * m will always be freed. 
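 */

/*
 * [Editor's sketch — illustration only, not part of this change]
 * key_do_getnewspi() above draws candidates uniformly in [min, max] via
 * "min + key_random() % (max - min + 1)" and retries up to
 * V_key_spi_trycnt times on collision; IPCOMP is special-cased because
 * a CPI is only 16 bits wide.  Just that arithmetic, as a hypothetical
 * helper:
 */
static u_int32_t
key_sketch_spidraw(u_int32_t min, u_int32_t max, int ipcomp)
{
	if (ipcomp) {
		/* clamp the requested range into the 16-bit CPI space */
		if (min >= 0x10000)
			min = 0xffff;
		if (max >= 0x10000)
			max = 0xffff;
	}
	if (min == max)
		return (min);
	return (min + (key_random() % (max - min + 1)));
}

/*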
*/ static int key_update(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct sadb_sa *sa0; struct sadb_address *src0, *dst0; #ifdef IPSEC_NAT_T struct sadb_x_nat_t_type *type; struct sadb_x_nat_t_port *sport, *dport; struct sadb_address *iaddr, *raddr; struct sadb_x_nat_t_frag *frag; #endif struct secasindex saidx; struct secashead *sah; struct secasvar *sav; u_int16_t proto; u_int8_t mode; u_int32_t reqid; int error; IPSEC_ASSERT(so != NULL, ("null socket")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_EXT_SA] == NULL || mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || (mhp->msg->sadb_msg_satype == SADB_SATYPE_ESP && mhp->ext[SADB_EXT_KEY_ENCRYPT] == NULL) || (mhp->msg->sadb_msg_satype == SADB_SATYPE_AH && mhp->ext[SADB_EXT_KEY_AUTH] == NULL) || (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL && mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) || (mhp->ext[SADB_EXT_LIFETIME_HARD] == NULL && mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) || mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_X_EXT_SA2] != NULL) { mode = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode; reqid = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid; } else { mode = IPSEC_MODE_ANY; reqid = 0; } /* XXX boundary checking for other extensions */ sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA]; src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]); dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]); /* XXX boundary check against sa_len */ KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, &saidx); /* * Make sure the port numbers are zero. * In case of NAT-T we will update them later if needed. */ KEY_PORTTOSADDR(&saidx.src, 0); KEY_PORTTOSADDR(&saidx.dst, 0); #ifdef IPSEC_NAT_T /* * Handle NAT-T info if present. 
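 */

	/*
	 * [Editor's note] Every branch below follows one pattern: verify
	 * mhp->extlen[] against the target structure size before casting
	 * mhp->ext[].  In sketch form:
	 *
	 *	if (mhp->ext[idx] != NULL &&
	 *	    mhp->extlen[idx] >= sizeof(struct sadb_x_nat_t_type))
	 *		type = (struct sadb_x_nat_t_type *)mhp->ext[idx];
	 *	else if (mhp->ext[idx] != NULL)
	 *		return key_senderror(so, m, EINVAL);
	 *
	 * A cast without the length check could read past the end of the
	 * extension the peer actually supplied.
	 */

	/*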
*/ if (mhp->ext[SADB_X_EXT_NAT_T_TYPE] != NULL && mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL && mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) { if (mhp->extlen[SADB_X_EXT_NAT_T_TYPE] < sizeof(*type) || mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) || mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) { ipseclog((LOG_DEBUG, "%s: invalid message.\n", __func__)); return key_senderror(so, m, EINVAL); } type = (struct sadb_x_nat_t_type *) mhp->ext[SADB_X_EXT_NAT_T_TYPE]; sport = (struct sadb_x_nat_t_port *) mhp->ext[SADB_X_EXT_NAT_T_SPORT]; dport = (struct sadb_x_nat_t_port *) mhp->ext[SADB_X_EXT_NAT_T_DPORT]; } else { type = 0; sport = dport = 0; } if (mhp->ext[SADB_X_EXT_NAT_T_OAI] != NULL && mhp->ext[SADB_X_EXT_NAT_T_OAR] != NULL) { if (mhp->extlen[SADB_X_EXT_NAT_T_OAI] < sizeof(*iaddr) || mhp->extlen[SADB_X_EXT_NAT_T_OAR] < sizeof(*raddr)) { ipseclog((LOG_DEBUG, "%s: invalid message\n", __func__)); return key_senderror(so, m, EINVAL); } iaddr = (struct sadb_address *)mhp->ext[SADB_X_EXT_NAT_T_OAI]; raddr = (struct sadb_address *)mhp->ext[SADB_X_EXT_NAT_T_OAR]; ipseclog((LOG_DEBUG, "%s: NAT-T OAi/r present\n", __func__)); } else { iaddr = raddr = NULL; } if (mhp->ext[SADB_X_EXT_NAT_T_FRAG] != NULL) { if (mhp->extlen[SADB_X_EXT_NAT_T_FRAG] < sizeof(*frag)) { ipseclog((LOG_DEBUG, "%s: invalid message\n", __func__)); return key_senderror(so, m, EINVAL); } frag = (struct sadb_x_nat_t_frag *) mhp->ext[SADB_X_EXT_NAT_T_FRAG]; } else { frag = 0; } #endif /* get a SA header */ if ((sah = key_getsah(&saidx)) == NULL) { ipseclog((LOG_DEBUG, "%s: no SA index found.\n", __func__)); return key_senderror(so, m, ENOENT); } /* set spidx if there */ /* XXX rewrite */ error = key_setident(sah, m, mhp); if (error) return key_senderror(so, m, error); /* find a SA with sequence number. */ #ifdef IPSEC_DOSEQCHECK if (mhp->msg->sadb_msg_seq != 0 && (sav = key_getsavbyseq(sah, mhp->msg->sadb_msg_seq)) == NULL) { ipseclog((LOG_DEBUG, "%s: no larval SA with sequence %u " "exists.\n", __func__, mhp->msg->sadb_msg_seq)); return key_senderror(so, m, ENOENT); } #else SAHTREE_LOCK(); sav = key_getsavbyspi(sah, sa0->sadb_sa_spi); SAHTREE_UNLOCK(); if (sav == NULL) { ipseclog((LOG_DEBUG, "%s: no such a SA found (spi:%u)\n", __func__, (u_int32_t)ntohl(sa0->sadb_sa_spi))); return key_senderror(so, m, EINVAL); } #endif /* validity check */ if (sav->sah->saidx.proto != proto) { ipseclog((LOG_DEBUG, "%s: protocol mismatched " "(DB=%u param=%u)\n", __func__, sav->sah->saidx.proto, proto)); return key_senderror(so, m, EINVAL); } #ifdef IPSEC_DOSEQCHECK if (sav->spi != sa0->sadb_sa_spi) { ipseclog((LOG_DEBUG, "%s: SPI mismatched (DB:%u param:%u)\n", __func__, (u_int32_t)ntohl(sav->spi), (u_int32_t)ntohl(sa0->sadb_sa_spi))); return key_senderror(so, m, EINVAL); } #endif if (sav->pid != mhp->msg->sadb_msg_pid) { ipseclog((LOG_DEBUG, "%s: pid mismatched (DB:%u param:%u)\n", __func__, sav->pid, mhp->msg->sadb_msg_pid)); return key_senderror(so, m, EINVAL); } /* copy sav values */ error = key_setsaval(sav, m, mhp); if (error) { KEY_FREESAV(&sav); return key_senderror(so, m, error); } #ifdef IPSEC_NAT_T /* * Handle more NAT-T info if present, * now that we have a sav to fill. */ if (type) sav->natt_type = type->sadb_x_nat_t_type_type; if (sport) KEY_PORTTOSADDR(&sav->sah->saidx.src, sport->sadb_x_nat_t_port_port); if (dport) KEY_PORTTOSADDR(&sav->sah->saidx.dst, dport->sadb_x_nat_t_port_port); #if 0 /* * In case SADB_X_EXT_NAT_T_FRAG was not given, leave it at 0. 
* We should actually check for a minimum MTU here, if we * want to support it in ip_output. */ if (frag) sav->natt_esp_frag_len = frag->sadb_x_nat_t_frag_fraglen; #endif #endif /* check SA values to be mature. */ if ((mhp->msg->sadb_msg_errno = key_mature(sav)) != 0) { KEY_FREESAV(&sav); return key_senderror(so, m, 0); } { struct mbuf *n; /* set msg buf from mhp */ n = key_getmsgbuf_x1(m, mhp); if (n == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); return key_senderror(so, m, ENOBUFS); } m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); } } /* * search SAD with sequence for a SA which state is SADB_SASTATE_LARVAL. * only called by key_update(). * OUT: * NULL : not found * others : found, pointer to a SA. */ #ifdef IPSEC_DOSEQCHECK static struct secasvar * key_getsavbyseq(struct secashead *sah, u_int32_t seq) { struct secasvar *sav; u_int state; state = SADB_SASTATE_LARVAL; /* search SAD with sequence number ? */ LIST_FOREACH(sav, &sah->savtree[state], chain) { KEY_CHKSASTATE(state, sav->state, __func__); if (sav->seq == seq) { sa_addref(sav); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP %s cause refcnt++:%d SA:%p\n", __func__, sav->refcnt, sav)); return sav; } } return NULL; } #endif /* * SADB_ADD processing * add an entry to SA database, when received * * from the ikmpd, * and send * * to the ikmpd. * * IGNORE identity and sensitivity messages. * * m will always be freed. */ static int key_add(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct sadb_sa *sa0; struct sadb_address *src0, *dst0; #ifdef IPSEC_NAT_T struct sadb_x_nat_t_type *type; struct sadb_address *iaddr, *raddr; struct sadb_x_nat_t_frag *frag; #endif struct secasindex saidx; struct secashead *newsah; struct secasvar *newsav; u_int16_t proto; u_int8_t mode; u_int32_t reqid; int error; IPSEC_ASSERT(so != NULL, ("null socket")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_EXT_SA] == NULL || mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || (mhp->msg->sadb_msg_satype == SADB_SATYPE_ESP && mhp->ext[SADB_EXT_KEY_ENCRYPT] == NULL) || (mhp->msg->sadb_msg_satype == SADB_SATYPE_AH && mhp->ext[SADB_EXT_KEY_AUTH] == NULL) || (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL && mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) || (mhp->ext[SADB_EXT_LIFETIME_HARD] == NULL && mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) || mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { /* XXX need more */ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_X_EXT_SA2] != NULL) { mode = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode; reqid = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid; } else { mode = IPSEC_MODE_ANY; reqid = 0; } sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA]; src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC]; dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; /* XXX boundary check against sa_len */ 
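
	/*
	 * [Editor's note] On the XXX above: the sockaddrs that follow
	 * the sadb_address headers are trusted without validating their
	 * sa_len against the extension length.  A guard along these
	 * lines would close that (sketch only, not compiled):
	 *
	 *	struct sockaddr *sa = (struct sockaddr *)(src0 + 1);
	 *	if (mhp->extlen[SADB_EXT_ADDRESS_SRC] <
	 *	    sizeof(*src0) + sa->sa_len)
	 *		return key_senderror(so, m, EINVAL);
	 */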
KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, &saidx); /* * Make sure the port numbers are zero. * In case of NAT-T we will update them later if needed. */ KEY_PORTTOSADDR(&saidx.src, 0); KEY_PORTTOSADDR(&saidx.dst, 0); #ifdef IPSEC_NAT_T /* * Handle NAT-T info if present. */ if (mhp->ext[SADB_X_EXT_NAT_T_TYPE] != NULL && mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL && mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) { struct sadb_x_nat_t_port *sport, *dport; if (mhp->extlen[SADB_X_EXT_NAT_T_TYPE] < sizeof(*type) || mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) || mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) { ipseclog((LOG_DEBUG, "%s: invalid message.\n", __func__)); return key_senderror(so, m, EINVAL); } type = (struct sadb_x_nat_t_type *) mhp->ext[SADB_X_EXT_NAT_T_TYPE]; sport = (struct sadb_x_nat_t_port *) mhp->ext[SADB_X_EXT_NAT_T_SPORT]; dport = (struct sadb_x_nat_t_port *) mhp->ext[SADB_X_EXT_NAT_T_DPORT]; if (sport) KEY_PORTTOSADDR(&saidx.src, sport->sadb_x_nat_t_port_port); if (dport) KEY_PORTTOSADDR(&saidx.dst, dport->sadb_x_nat_t_port_port); } else { type = 0; } if (mhp->ext[SADB_X_EXT_NAT_T_OAI] != NULL && mhp->ext[SADB_X_EXT_NAT_T_OAR] != NULL) { if (mhp->extlen[SADB_X_EXT_NAT_T_OAI] < sizeof(*iaddr) || mhp->extlen[SADB_X_EXT_NAT_T_OAR] < sizeof(*raddr)) { ipseclog((LOG_DEBUG, "%s: invalid message\n", __func__)); return key_senderror(so, m, EINVAL); } iaddr = (struct sadb_address *)mhp->ext[SADB_X_EXT_NAT_T_OAI]; raddr = (struct sadb_address *)mhp->ext[SADB_X_EXT_NAT_T_OAR]; ipseclog((LOG_DEBUG, "%s: NAT-T OAi/r present\n", __func__)); } else { iaddr = raddr = NULL; } if (mhp->ext[SADB_X_EXT_NAT_T_FRAG] != NULL) { if (mhp->extlen[SADB_X_EXT_NAT_T_FRAG] < sizeof(*frag)) { ipseclog((LOG_DEBUG, "%s: invalid message\n", __func__)); return key_senderror(so, m, EINVAL); } frag = (struct sadb_x_nat_t_frag *) mhp->ext[SADB_X_EXT_NAT_T_FRAG]; } else { frag = 0; } #endif /* get a SA header */ if ((newsah = key_getsah(&saidx)) == NULL) { /* create a new SA header */ if ((newsah = key_newsah(&saidx)) == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n",__func__)); return key_senderror(so, m, ENOBUFS); } } /* set spidx if there */ /* XXX rewrite */ error = key_setident(newsah, m, mhp); if (error) { return key_senderror(so, m, error); } /* create new SA entry. */ /* We can create new SA only if SPI is differenct. */ SAHTREE_LOCK(); newsav = key_getsavbyspi(newsah, sa0->sadb_sa_spi); SAHTREE_UNLOCK(); if (newsav != NULL) { ipseclog((LOG_DEBUG, "%s: SA already exists.\n", __func__)); return key_senderror(so, m, EEXIST); } newsav = KEY_NEWSAV(m, mhp, newsah, &error); if (newsav == NULL) { return key_senderror(so, m, error); } #ifdef IPSEC_NAT_T /* * Handle more NAT-T info if present, * now that we have a sav to fill. */ if (type) newsav->natt_type = type->sadb_x_nat_t_type_type; #if 0 /* * In case SADB_X_EXT_NAT_T_FRAG was not given, leave it at 0. * We should actually check for a minimum MTU here, if we * want to support it in ip_output. */ if (frag) newsav->natt_esp_frag_len = frag->sadb_x_nat_t_frag_fraglen; #endif #endif /* check SA values to be mature. */ if ((error = key_mature(newsav)) != 0) { KEY_FREESAV(&newsav); return key_senderror(so, m, error); } /* * don't call key_freesav() here, as we would like to keep the SA * in the database on success. 
*/ { struct mbuf *n; /* set msg buf from mhp */ n = key_getmsgbuf_x1(m, mhp); if (n == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); return key_senderror(so, m, ENOBUFS); } m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); } } /* m is retained */ static int key_setident(struct secashead *sah, struct mbuf *m, const struct sadb_msghdr *mhp) { const struct sadb_ident *idsrc, *iddst; int idsrclen, iddstlen; IPSEC_ASSERT(sah != NULL, ("null secashead")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); /* don't make buffer if not there */ if (mhp->ext[SADB_EXT_IDENTITY_SRC] == NULL && mhp->ext[SADB_EXT_IDENTITY_DST] == NULL) { sah->idents = NULL; sah->identd = NULL; return 0; } if (mhp->ext[SADB_EXT_IDENTITY_SRC] == NULL || mhp->ext[SADB_EXT_IDENTITY_DST] == NULL) { ipseclog((LOG_DEBUG, "%s: invalid identity.\n", __func__)); return EINVAL; } idsrc = (const struct sadb_ident *)mhp->ext[SADB_EXT_IDENTITY_SRC]; iddst = (const struct sadb_ident *)mhp->ext[SADB_EXT_IDENTITY_DST]; idsrclen = mhp->extlen[SADB_EXT_IDENTITY_SRC]; iddstlen = mhp->extlen[SADB_EXT_IDENTITY_DST]; /* validity check */ if (idsrc->sadb_ident_type != iddst->sadb_ident_type) { ipseclog((LOG_DEBUG, "%s: ident type mismatch.\n", __func__)); return EINVAL; } switch (idsrc->sadb_ident_type) { case SADB_IDENTTYPE_PREFIX: case SADB_IDENTTYPE_FQDN: case SADB_IDENTTYPE_USERFQDN: default: /* XXX do nothing */ sah->idents = NULL; sah->identd = NULL; return 0; } /* make structure */ sah->idents = malloc(sizeof(struct secident), M_IPSEC_MISC, M_NOWAIT); if (sah->idents == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); return ENOBUFS; } sah->identd = malloc(sizeof(struct secident), M_IPSEC_MISC, M_NOWAIT); if (sah->identd == NULL) { free(sah->idents, M_IPSEC_MISC); sah->idents = NULL; ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); return ENOBUFS; } sah->idents->type = idsrc->sadb_ident_type; sah->idents->id = idsrc->sadb_ident_id; sah->identd->type = iddst->sadb_ident_type; sah->identd->id = iddst->sadb_ident_id; return 0; } /* * m will not be freed on return. * it is caller's responsibility to free the result. */ static struct mbuf * key_getmsgbuf_x1(struct mbuf *m, const struct sadb_msghdr *mhp) { struct mbuf *n; IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); /* create new sadb_msg to reply. */ n = key_gather_mbuf(m, mhp, 1, 9, SADB_EXT_RESERVED, SADB_EXT_SA, SADB_X_EXT_SA2, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST, SADB_EXT_LIFETIME_HARD, SADB_EXT_LIFETIME_SOFT, SADB_EXT_IDENTITY_SRC, SADB_EXT_IDENTITY_DST); if (!n) return NULL; if (n->m_len < sizeof(struct sadb_msg)) { n = m_pullup(n, sizeof(struct sadb_msg)); if (n == NULL) return NULL; } mtod(n, struct sadb_msg *)->sadb_msg_errno = 0; mtod(n, struct sadb_msg *)->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); return n; } /* * SADB_DELETE processing * receive * * from the ikmpd, and set SADB_SASTATE_DEAD, * and send, * * to the ikmpd. * * m will always be freed. 
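 */

/*
 * [Editor's sketch — hypothetical, not part of this change]
 * "m will always be freed" is the ownership contract shared by every
 * handler in key_typesw[]: on error, key_senderror() recycles m as the
 * error reply; on success the handler builds a fresh reply n, frees m
 * itself, and hands n to key_sendup_mbuf().  The skeleton:
 */
static int
key_sketch_handler(struct socket *so, struct mbuf *m,
    const struct sadb_msghdr *mhp)
{
	struct mbuf *n;

	if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL)
		return key_senderror(so, m, EINVAL);	/* consumes m */
	n = key_gather_mbuf(m, mhp, 1, 3, SADB_EXT_RESERVED,
	    SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
	if (n == NULL)
		return key_senderror(so, m, ENOBUFS);	/* consumes m */
	m_freem(m);					/* success path */
	return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
}

/*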
*/ static int key_delete(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct sadb_sa *sa0; struct sadb_address *src0, *dst0; struct secasindex saidx; struct secashead *sah; struct secasvar *sav = NULL; u_int16_t proto; IPSEC_ASSERT(so != NULL, ("null socket")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_EXT_SA] == NULL) { /* * Caller wants us to delete all non-LARVAL SAs * that match the src/dst. This is used during * IKE INITIAL-CONTACT. */ ipseclog((LOG_DEBUG, "%s: doing delete all.\n", __func__)); return key_delete_all(so, m, mhp, proto); } else if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa)) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA]; src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]); dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]); /* XXX boundary check against sa_len */ KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx); /* * Make sure the port numbers are zero. * In case of NAT-T we will update them later if needed. */ KEY_PORTTOSADDR(&saidx.src, 0); KEY_PORTTOSADDR(&saidx.dst, 0); #ifdef IPSEC_NAT_T /* * Handle NAT-T info if present. */ if (mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL && mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) { struct sadb_x_nat_t_port *sport, *dport; if (mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) || mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) { ipseclog((LOG_DEBUG, "%s: invalid message.\n", __func__)); return key_senderror(so, m, EINVAL); } sport = (struct sadb_x_nat_t_port *) mhp->ext[SADB_X_EXT_NAT_T_SPORT]; dport = (struct sadb_x_nat_t_port *) mhp->ext[SADB_X_EXT_NAT_T_DPORT]; if (sport) KEY_PORTTOSADDR(&saidx.src, sport->sadb_x_nat_t_port_port); if (dport) KEY_PORTTOSADDR(&saidx.dst, dport->sadb_x_nat_t_port_port); } #endif /* get a SA header */ SAHTREE_LOCK(); LIST_FOREACH(sah, &V_sahtree, chain) { if (sah->state == SADB_SASTATE_DEAD) continue; if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0) continue; /* get a SA with SPI. */ sav = key_getsavbyspi(sah, sa0->sadb_sa_spi); if (sav) break; } if (sah == NULL) { SAHTREE_UNLOCK(); ipseclog((LOG_DEBUG, "%s: no SA found.\n", __func__)); return key_senderror(so, m, ENOENT); } key_sa_chgstate(sav, SADB_SASTATE_DEAD); KEY_FREESAV(&sav); SAHTREE_UNLOCK(); { struct mbuf *n; struct sadb_msg *newmsg; /* create new sadb_msg to reply. */ /* XXX-BZ NAT-T extensions? 
*/ n = key_gather_mbuf(m, mhp, 1, 4, SADB_EXT_RESERVED, SADB_EXT_SA, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST); if (!n) return key_senderror(so, m, ENOBUFS); if (n->m_len < sizeof(struct sadb_msg)) { n = m_pullup(n, sizeof(struct sadb_msg)); if (n == NULL) return key_senderror(so, m, ENOBUFS); } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); } } /* * delete all SAs for src/dst. Called from key_delete(). */ static int key_delete_all(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp, u_int16_t proto) { struct sadb_address *src0, *dst0; struct secasindex saidx; struct secashead *sah; struct secasvar *sav, *nextsav; u_int stateidx, state; src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]); dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]); /* XXX boundary check against sa_len */ KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx); /* * Make sure the port numbers are zero. * In case of NAT-T we will update them later if needed. */ KEY_PORTTOSADDR(&saidx.src, 0); KEY_PORTTOSADDR(&saidx.dst, 0); #ifdef IPSEC_NAT_T /* * Handle NAT-T info if present. */ if (mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL && mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) { struct sadb_x_nat_t_port *sport, *dport; if (mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) || mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) { ipseclog((LOG_DEBUG, "%s: invalid message.\n", __func__)); return key_senderror(so, m, EINVAL); } sport = (struct sadb_x_nat_t_port *) mhp->ext[SADB_X_EXT_NAT_T_SPORT]; dport = (struct sadb_x_nat_t_port *) mhp->ext[SADB_X_EXT_NAT_T_DPORT]; if (sport) KEY_PORTTOSADDR(&saidx.src, sport->sadb_x_nat_t_port_port); if (dport) KEY_PORTTOSADDR(&saidx.dst, dport->sadb_x_nat_t_port_port); } #endif SAHTREE_LOCK(); LIST_FOREACH(sah, &V_sahtree, chain) { if (sah->state == SADB_SASTATE_DEAD) continue; if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0) continue; /* Delete all non-LARVAL SAs. */ for (stateidx = 0; stateidx < _ARRAYLEN(saorder_state_alive); stateidx++) { state = saorder_state_alive[stateidx]; if (state == SADB_SASTATE_LARVAL) continue; for (sav = LIST_FIRST(&sah->savtree[state]); sav != NULL; sav = nextsav) { nextsav = LIST_NEXT(sav, chain); /* sanity check */ if (sav->state != state) { ipseclog((LOG_DEBUG, "%s: invalid " "sav->state (queue %d SA %d)\n", __func__, state, sav->state)); continue; } key_sa_chgstate(sav, SADB_SASTATE_DEAD); KEY_FREESAV(&sav); } } } SAHTREE_UNLOCK(); { struct mbuf *n; struct sadb_msg *newmsg; /* create new sadb_msg to reply. */ /* XXX-BZ NAT-T extensions? */ n = key_gather_mbuf(m, mhp, 1, 3, SADB_EXT_RESERVED, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST); if (!n) return key_senderror(so, m, ENOBUFS); if (n->m_len < sizeof(struct sadb_msg)) { n = m_pullup(n, sizeof(struct sadb_msg)); if (n == NULL) return key_senderror(so, m, ENOBUFS); } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); } } /* * SADB_GET processing * receive * * from the ikmpd, and get a SP and a SA to respond, * and send, * * to the ikmpd. * * m will always be freed. 
*/ static int key_get(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct sadb_sa *sa0; struct sadb_address *src0, *dst0; struct secasindex saidx; struct secashead *sah; struct secasvar *sav = NULL; u_int16_t proto; IPSEC_ASSERT(so != NULL, ("null socket")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_EXT_SA] == NULL || mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) || mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA]; src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC]; dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; /* XXX boundary check against sa_len */ KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx); /* * Make sure the port numbers are zero. * In case of NAT-T we will update them later if needed. */ KEY_PORTTOSADDR(&saidx.src, 0); KEY_PORTTOSADDR(&saidx.dst, 0); #ifdef IPSEC_NAT_T /* * Handle NAT-T info if present. */ if (mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL && mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) { struct sadb_x_nat_t_port *sport, *dport; if (mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) || mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) { ipseclog((LOG_DEBUG, "%s: invalid message.\n", __func__)); return key_senderror(so, m, EINVAL); } sport = (struct sadb_x_nat_t_port *) mhp->ext[SADB_X_EXT_NAT_T_SPORT]; dport = (struct sadb_x_nat_t_port *) mhp->ext[SADB_X_EXT_NAT_T_DPORT]; if (sport) KEY_PORTTOSADDR(&saidx.src, sport->sadb_x_nat_t_port_port); if (dport) KEY_PORTTOSADDR(&saidx.dst, dport->sadb_x_nat_t_port_port); } #endif /* get a SA header */ SAHTREE_LOCK(); LIST_FOREACH(sah, &V_sahtree, chain) { if (sah->state == SADB_SASTATE_DEAD) continue; if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0) continue; /* get a SA with SPI. */ sav = key_getsavbyspi(sah, sa0->sadb_sa_spi); if (sav) break; } SAHTREE_UNLOCK(); if (sah == NULL) { ipseclog((LOG_DEBUG, "%s: no SA found.\n", __func__)); return key_senderror(so, m, ENOENT); } { struct mbuf *n; u_int8_t satype; /* map proto to satype */ if ((satype = key_proto2satype(sah->saidx.proto)) == 0) { ipseclog((LOG_DEBUG, "%s: there was invalid proto in SAD.\n", __func__)); return key_senderror(so, m, EINVAL); } /* create new sadb_msg to reply. */ n = key_setdumpsa(sav, SADB_GET, satype, mhp->msg->sadb_msg_seq, mhp->msg->sadb_msg_pid); if (!n) return key_senderror(so, m, ENOBUFS); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ONE); } } /* XXX make it sysctl-configurable? 
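 */

/*
 * [Editor's note] In key_getcomb_setlifetime() below, the intent is
 * plainly "soft limit = 80% of the hard limit", yet two assignments
 * derive a field from itself: sadb_comb_soft_addtime and
 * sadb_comb_hard_usetime are computed from their own just-bzero'ed
 * values and therefore end up 0.  The presumably intended arithmetic,
 * in sketch form (behavior deliberately left unchanged here):
 *
 *	comb->sadb_comb_hard_addtime = 86400;
 *	comb->sadb_comb_soft_addtime =
 *	    comb->sadb_comb_hard_addtime * 80 / 100;
 *
 * and analogously for the usetime pair.
 */

/*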
*/ static void key_getcomb_setlifetime(struct sadb_comb *comb) { comb->sadb_comb_soft_allocations = 1; comb->sadb_comb_hard_allocations = 1; comb->sadb_comb_soft_bytes = 0; comb->sadb_comb_hard_bytes = 0; comb->sadb_comb_hard_addtime = 86400; /* 1 day */ comb->sadb_comb_soft_addtime = comb->sadb_comb_soft_addtime * 80 / 100; comb->sadb_comb_soft_usetime = 28800; /* 8 hours */ comb->sadb_comb_hard_usetime = comb->sadb_comb_hard_usetime * 80 / 100; } /* * XXX reorder combinations by preference * XXX no idea if the user wants ESP authentication or not */ static struct mbuf * key_getcomb_esp() { struct sadb_comb *comb; struct enc_xform *algo; struct mbuf *result = NULL, *m, *n; int encmin; int i, off, o; int totlen; const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb)); m = NULL; for (i = 1; i <= SADB_EALG_MAX; i++) { algo = esp_algorithm_lookup(i); if (algo == NULL) continue; /* discard algorithms with key size smaller than system min */ if (_BITS(algo->maxkey) < V_ipsec_esp_keymin) continue; if (_BITS(algo->minkey) < V_ipsec_esp_keymin) encmin = V_ipsec_esp_keymin; else encmin = _BITS(algo->minkey); if (V_ipsec_esp_auth) m = key_getcomb_ah(); else { IPSEC_ASSERT(l <= MLEN, ("l=%u > MLEN=%lu", l, (u_long) MLEN)); MGET(m, M_NOWAIT, MT_DATA); if (m) { M_ALIGN(m, l); m->m_len = l; m->m_next = NULL; bzero(mtod(m, caddr_t), m->m_len); } } if (!m) goto fail; totlen = 0; for (n = m; n; n = n->m_next) totlen += n->m_len; IPSEC_ASSERT((totlen % l) == 0, ("totlen=%u, l=%u", totlen, l)); for (off = 0; off < totlen; off += l) { n = m_pulldown(m, off, l, &o); if (!n) { /* m is already freed */ goto fail; } comb = (struct sadb_comb *)(mtod(n, caddr_t) + o); bzero(comb, sizeof(*comb)); key_getcomb_setlifetime(comb); comb->sadb_comb_encrypt = i; comb->sadb_comb_encrypt_minbits = encmin; comb->sadb_comb_encrypt_maxbits = _BITS(algo->maxkey); } if (!result) result = m; else m_cat(result, m); } return result; fail: if (result) m_freem(result); return NULL; } static void key_getsizes_ah(const struct auth_hash *ah, int alg, u_int16_t* min, u_int16_t* max) { *min = *max = ah->keysize; if (ah->keysize == 0) { /* * Transform takes arbitrary key size but algorithm * key size is restricted. Enforce this here. 
*/ switch (alg) { case SADB_X_AALG_MD5: *min = *max = 16; break; case SADB_X_AALG_SHA: *min = *max = 20; break; case SADB_X_AALG_NULL: *min = 1; *max = 256; break; case SADB_X_AALG_SHA2_256: *min = *max = 32; break; case SADB_X_AALG_SHA2_384: *min = *max = 48; break; case SADB_X_AALG_SHA2_512: *min = *max = 64; break; default: DPRINTF(("%s: unknown AH algorithm %u\n", __func__, alg)); break; } } } /* * XXX reorder combinations by preference */ static struct mbuf * key_getcomb_ah() { struct sadb_comb *comb; struct auth_hash *algo; struct mbuf *m; u_int16_t minkeysize, maxkeysize; int i; const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb)); m = NULL; for (i = 1; i <= SADB_AALG_MAX; i++) { #if 1 /* we prefer HMAC algorithms, not old algorithms */ if (i != SADB_AALG_SHA1HMAC && i != SADB_AALG_MD5HMAC && i != SADB_X_AALG_SHA2_256 && i != SADB_X_AALG_SHA2_384 && i != SADB_X_AALG_SHA2_512) continue; #endif algo = ah_algorithm_lookup(i); if (!algo) continue; key_getsizes_ah(algo, i, &minkeysize, &maxkeysize); /* discard algorithms with key size smaller than system min */ if (_BITS(minkeysize) < V_ipsec_ah_keymin) continue; if (!m) { IPSEC_ASSERT(l <= MLEN, ("l=%u > MLEN=%lu", l, (u_long) MLEN)); MGET(m, M_NOWAIT, MT_DATA); if (m) { M_ALIGN(m, l); m->m_len = l; m->m_next = NULL; } } else M_PREPEND(m, l, M_NOWAIT); if (!m) return NULL; comb = mtod(m, struct sadb_comb *); bzero(comb, sizeof(*comb)); key_getcomb_setlifetime(comb); comb->sadb_comb_auth = i; comb->sadb_comb_auth_minbits = _BITS(minkeysize); comb->sadb_comb_auth_maxbits = _BITS(maxkeysize); } return m; } /* * not really an official behavior. discussed in pf_key@inner.net in Sep2000. * XXX reorder combinations by preference */ static struct mbuf * key_getcomb_ipcomp() { struct sadb_comb *comb; struct comp_algo *algo; struct mbuf *m; int i; const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb)); m = NULL; for (i = 1; i <= SADB_X_CALG_MAX; i++) { algo = ipcomp_algorithm_lookup(i); if (!algo) continue; if (!m) { IPSEC_ASSERT(l <= MLEN, ("l=%u > MLEN=%lu", l, (u_long) MLEN)); MGET(m, M_NOWAIT, MT_DATA); if (m) { M_ALIGN(m, l); m->m_len = l; m->m_next = NULL; } } else M_PREPEND(m, l, M_NOWAIT); if (!m) return NULL; comb = mtod(m, struct sadb_comb *); bzero(comb, sizeof(*comb)); key_getcomb_setlifetime(comb); comb->sadb_comb_encrypt = i; /* what should we set into sadb_comb_*_{min,max}bits? */ } return m; } /* * XXX no way to pass mode (transport/tunnel) to userland * XXX replay checking? * XXX sysctl interface to ipsec_{ah,esp}_keymin */ static struct mbuf * key_getprop(const struct secasindex *saidx) { struct sadb_prop *prop; struct mbuf *m, *n; const int l = PFKEY_ALIGN8(sizeof(struct sadb_prop)); int totlen; switch (saidx->proto) { case IPPROTO_ESP: m = key_getcomb_esp(); break; case IPPROTO_AH: m = key_getcomb_ah(); break; case IPPROTO_IPCOMP: m = key_getcomb_ipcomp(); break; default: return NULL; } if (!m) return NULL; M_PREPEND(m, l, M_NOWAIT); if (!m) return NULL; totlen = 0; for (n = m; n; n = n->m_next) totlen += n->m_len; prop = mtod(m, struct sadb_prop *); bzero(prop, sizeof(*prop)); prop->sadb_prop_len = PFKEY_UNIT64(totlen); prop->sadb_prop_exttype = SADB_EXT_PROPOSAL; prop->sadb_prop_replay = 32; /* XXX */ return m; } /* * SADB_ACQUIRE processing called by key_checkrequest() and key_acquire2(). * send * * to KMD, and expect to receive * with SADB_ACQUIRE if error occured, * or * with SADB_GETSPI * from KMD by PF_KEY. * * XXX x_policy is outside of RFC2367 (KAME extension). * XXX sensitivity is not supported. 
* XXX for ipcomp, RFC2367 does not define how to fill in proposal. * see comment for key_getcomb_ipcomp(). * * OUT: * 0 : succeed * others: error number */ static int key_acquire(const struct secasindex *saidx, struct secpolicy *sp) { struct mbuf *result = NULL, *m; struct secacq *newacq; u_int8_t satype; int error = -1; u_int32_t seq; IPSEC_ASSERT(saidx != NULL, ("null saidx")); satype = key_proto2satype(saidx->proto); IPSEC_ASSERT(satype != 0, ("null satype, protocol %u", saidx->proto)); /* * We never do anything about acquirng SA. There is anather * solution that kernel blocks to send SADB_ACQUIRE message until * getting something message from IKEd. In later case, to be * managed with ACQUIRING list. */ /* Get an entry to check whether sending message or not. */ if ((newacq = key_getacq(saidx)) != NULL) { if (V_key_blockacq_count < newacq->count) { /* reset counter and do send message. */ newacq->count = 0; } else { /* increment counter and do nothing. */ newacq->count++; return 0; } } else { /* make new entry for blocking to send SADB_ACQUIRE. */ if ((newacq = key_newacq(saidx)) == NULL) return ENOBUFS; } seq = newacq->seq; m = key_setsadbmsg(SADB_ACQUIRE, 0, satype, seq, 0, 0); if (!m) { error = ENOBUFS; goto fail; } result = m; /* * No SADB_X_EXT_NAT_T_* here: we do not know * anything related to NAT-T at this time. */ /* set sadb_address for saidx's. */ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, &saidx->src.sa, FULLMASK, IPSEC_ULPROTO_ANY); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, &saidx->dst.sa, FULLMASK, IPSEC_ULPROTO_ANY); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); /* XXX proxy address (optional) */ /* set sadb_x_policy */ if (sp) { m = key_setsadbxpolicy(sp->policy, sp->spidx.dir, sp->id); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); } /* XXX identity (optional) */ #if 0 if (idexttype && fqdn) { /* create identity extension (FQDN) */ struct sadb_ident *id; int fqdnlen; fqdnlen = strlen(fqdn) + 1; /* +1 for terminating-NUL */ id = (struct sadb_ident *)p; bzero(id, sizeof(*id) + PFKEY_ALIGN8(fqdnlen)); id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(fqdnlen)); id->sadb_ident_exttype = idexttype; id->sadb_ident_type = SADB_IDENTTYPE_FQDN; bcopy(fqdn, id + 1, fqdnlen); p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(fqdnlen); } if (idexttype) { /* create identity extension (USERFQDN) */ struct sadb_ident *id; int userfqdnlen; if (userfqdn) { /* +1 for terminating-NUL */ userfqdnlen = strlen(userfqdn) + 1; } else userfqdnlen = 0; id = (struct sadb_ident *)p; bzero(id, sizeof(*id) + PFKEY_ALIGN8(userfqdnlen)); id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(userfqdnlen)); id->sadb_ident_exttype = idexttype; id->sadb_ident_type = SADB_IDENTTYPE_USERFQDN; /* XXX is it correct? */ if (curproc && curproc->p_cred) id->sadb_ident_id = curproc->p_cred->p_ruid; if (userfqdn && userfqdnlen) bcopy(userfqdn, id + 1, userfqdnlen); p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(userfqdnlen); } #endif /* XXX sensitivity (optional) */ /* create proposal/combination extension */ m = key_getprop(saidx); #if 0 /* * spec conformant: always attach proposal/combination extension, * the problem is that we have no way to attach it for ipcomp, * due to the way sadb_comb is declared in RFC2367. */ if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); #else /* * outside of spec; make proposal/combination extension optional. 
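 */

	/*
	 * [Editor's note] Note the rate limit applied earlier in this
	 * function: an existing secacq entry absorbs repeated requests
	 * until its count passes V_key_blockacq_count, roughly
	 *
	 *	if (newacq->count <= V_key_blockacq_count) {
	 *		newacq->count++;
	 *		return 0;	(ACQUIRE suppressed)
	 *	}
	 *	newacq->count = 0;	(send one, reopen the window)
	 *
	 * so a burst of traffic with no SA yields one ACQUIRE per window
	 * rather than one per packet.
	 */

	/*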
*/ if (m) m_cat(result, m); #endif if ((result->m_flags & M_PKTHDR) == 0) { error = EINVAL; goto fail; } if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); if (result == NULL) { error = ENOBUFS; goto fail; } } result->m_pkthdr.len = 0; for (m = result; m; m = m->m_next) result->m_pkthdr.len += m->m_len; mtod(result, struct sadb_msg *)->sadb_msg_len = PFKEY_UNIT64(result->m_pkthdr.len); return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED); fail: if (result) m_freem(result); return error; } static struct secacq * key_newacq(const struct secasindex *saidx) { struct secacq *newacq; /* get new entry */ newacq = malloc(sizeof(struct secacq), M_IPSEC_SAQ, M_NOWAIT|M_ZERO); if (newacq == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); return NULL; } /* copy secindex */ bcopy(saidx, &newacq->saidx, sizeof(newacq->saidx)); newacq->seq = (V_acq_seq == ~0 ? 1 : ++V_acq_seq); newacq->created = time_second; newacq->count = 0; /* add to acqtree */ ACQ_LOCK(); LIST_INSERT_HEAD(&V_acqtree, newacq, chain); ACQ_UNLOCK(); return newacq; } static struct secacq * key_getacq(const struct secasindex *saidx) { struct secacq *acq; ACQ_LOCK(); LIST_FOREACH(acq, &V_acqtree, chain) { if (key_cmpsaidx(saidx, &acq->saidx, CMP_EXACTLY)) break; } ACQ_UNLOCK(); return acq; } static struct secacq * key_getacqbyseq(u_int32_t seq) { struct secacq *acq; ACQ_LOCK(); LIST_FOREACH(acq, &V_acqtree, chain) { if (acq->seq == seq) break; } ACQ_UNLOCK(); return acq; } static struct secspacq * key_newspacq(struct secpolicyindex *spidx) { struct secspacq *acq; /* get new entry */ acq = malloc(sizeof(struct secspacq), M_IPSEC_SAQ, M_NOWAIT|M_ZERO); if (acq == NULL) { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); return NULL; } /* copy secindex */ bcopy(spidx, &acq->spidx, sizeof(acq->spidx)); acq->created = time_second; acq->count = 0; /* add to spacqtree */ SPACQ_LOCK(); LIST_INSERT_HEAD(&V_spacqtree, acq, chain); SPACQ_UNLOCK(); return acq; } static struct secspacq * key_getspacq(struct secpolicyindex *spidx) { struct secspacq *acq; SPACQ_LOCK(); LIST_FOREACH(acq, &V_spacqtree, chain) { if (key_cmpspidx_exactly(spidx, &acq->spidx)) { /* NB: return holding spacq_lock */ return acq; } } SPACQ_UNLOCK(); return NULL; } /* * SADB_ACQUIRE processing, * in first situation, is receiving * * from the ikmpd, and clear sequence of its secasvar entry. * * In second situation, is receiving * * from a user land process, and return * * to the socket. * * m will always be freed. */ static int key_acquire2(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { const struct sadb_address *src0, *dst0; struct secasindex saidx; struct secashead *sah; u_int16_t proto; int error; IPSEC_ASSERT(so != NULL, ("null socket")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); /* * Error message from KMd. * We assume that if error was occured in IKEd, the length of PFKEY * message is equal to the size of sadb_msg structure. * We do not raise error even if error occured in this function. */ if (mhp->msg->sadb_msg_len == PFKEY_UNIT64(sizeof(struct sadb_msg))) { struct secacq *acq; /* check sequence number */ if (mhp->msg->sadb_msg_seq == 0) { ipseclog((LOG_DEBUG, "%s: must specify sequence " "number.\n", __func__)); m_freem(m); return 0; } if ((acq = key_getacqbyseq(mhp->msg->sadb_msg_seq)) == NULL) { /* * the specified larval SA is already gone, or we got * a bogus sequence number. 
			 * We can silently ignore it.
			 */
			m_freem(m);
			return 0;
		}

		/* Reset the acq counter so the timehandler deletes it. */
		acq->created = time_second;
		acq->count = 0;

		m_freem(m);
		return 0;
	}

	/*
	 * This message is from user land.
	 */

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
	    mhp->ext[SADB_EXT_PROPOSAL] == NULL) {
		/* error */
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}
	if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_PROPOSAL] < sizeof(struct sadb_prop)) {
		/* error */
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
	dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];

	/* XXX boundary check against sa_len */
	KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx);

	/*
	 * Make sure the port numbers are zero.
	 * In case of NAT-T we will update them later if needed.
	 */
	KEY_PORTTOSADDR(&saidx.src, 0);
	KEY_PORTTOSADDR(&saidx.dst, 0);

#ifdef IPSEC_NAT_T
	/*
	 * Handle NAT-T info if present.
	 */
	if (mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
	    mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
		struct sadb_x_nat_t_port *sport, *dport;

		if (mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
		    mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
			ipseclog((LOG_DEBUG, "%s: invalid message.\n",
			    __func__));
			return key_senderror(so, m, EINVAL);
		}

		sport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_SPORT];
		dport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_DPORT];

		if (sport)
			KEY_PORTTOSADDR(&saidx.src,
			    sport->sadb_x_nat_t_port_port);
		if (dport)
			KEY_PORTTOSADDR(&saidx.dst,
			    dport->sadb_x_nat_t_port_port);
	}
#endif

	/* get a SA index */
	SAHTREE_LOCK();
	LIST_FOREACH(sah, &V_sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx(&sah->saidx, &saidx, CMP_MODE_REQID))
			break;
	}
	SAHTREE_UNLOCK();
	if (sah != NULL) {
		ipseclog((LOG_DEBUG, "%s: a SA exists already.\n", __func__));
		return key_senderror(so, m, EEXIST);
	}

	error = key_acquire(&saidx, NULL);
	if (error != 0) {
		ipseclog((LOG_DEBUG,
		    "%s: error %d returned from key_acquire\n",
		    __func__, error));
		return key_senderror(so, m, error);
	}

	return key_sendup_mbuf(so, m, KEY_SENDUP_REGISTERED);
}

/*
 * SADB_REGISTER processing.
 * If SATYPE_UNSPEC has been passed as satype, only return sadb_supported.
 * receive
 *	<base>
 * from the ikmpd, and register a socket to send PF_KEY messages,
 * and send
 *	<base, supported>
 * to KMD by PF_KEY.
 * If the socket is detached, it must be freed from the regnode.
 *
 * m will always be freed.
 */
static int
key_register(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct secreg *reg, *newreg = NULL;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	/* check for invalid register message */
	if (mhp->msg->sadb_msg_satype >= sizeof(V_regtree)/sizeof(V_regtree[0]))
		return key_senderror(so, m, EINVAL);

	/* When SATYPE_UNSPEC is specified, only return sadb_supported.
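 */

	/*
	 * [Editor's note] The reply assembled under setmsg: below is laid
	 * out as
	 *
	 *	sadb_msg
	 *	sadb_supported (AUTH)    + one sadb_alg per AH algorithm
	 *	sadb_supported (ENCRYPT) + one sadb_alg per ESP algorithm
	 *
	 * so alen/elen are each sizeof(struct sadb_supported) plus
	 * sizeof(struct sadb_alg) times the number of algorithms found.
	 */

	/*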
*/ if (mhp->msg->sadb_msg_satype == SADB_SATYPE_UNSPEC) goto setmsg; /* check whether existing or not */ REGTREE_LOCK(); LIST_FOREACH(reg, &V_regtree[mhp->msg->sadb_msg_satype], chain) { if (reg->so == so) { REGTREE_UNLOCK(); ipseclog((LOG_DEBUG, "%s: socket exists already.\n", __func__)); return key_senderror(so, m, EEXIST); } } /* create regnode */ newreg = malloc(sizeof(struct secreg), M_IPSEC_SAR, M_NOWAIT|M_ZERO); if (newreg == NULL) { REGTREE_UNLOCK(); ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); return key_senderror(so, m, ENOBUFS); } newreg->so = so; ((struct keycb *)sotorawcb(so))->kp_registered++; /* add regnode to regtree. */ LIST_INSERT_HEAD(&V_regtree[mhp->msg->sadb_msg_satype], newreg, chain); REGTREE_UNLOCK(); setmsg: { struct mbuf *n; struct sadb_msg *newmsg; struct sadb_supported *sup; u_int len, alen, elen; int off; int i; struct sadb_alg *alg; /* create new sadb_msg to reply. */ alen = 0; for (i = 1; i <= SADB_AALG_MAX; i++) { if (ah_algorithm_lookup(i)) alen += sizeof(struct sadb_alg); } if (alen) alen += sizeof(struct sadb_supported); elen = 0; for (i = 1; i <= SADB_EALG_MAX; i++) { if (esp_algorithm_lookup(i)) elen += sizeof(struct sadb_alg); } if (elen) elen += sizeof(struct sadb_supported); len = sizeof(struct sadb_msg) + alen + elen; if (len > MCLBYTES) return key_senderror(so, m, ENOBUFS); MGETHDR(n, M_NOWAIT, MT_DATA); if (len > MHLEN) { - MCLGET(n, M_NOWAIT); - if ((n->m_flags & M_EXT) == 0) { + if (!(MCLGET(n, M_NOWAIT))) { m_freem(n); n = NULL; } } if (!n) return key_senderror(so, m, ENOBUFS); n->m_pkthdr.len = n->m_len = len; n->m_next = NULL; off = 0; m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t) + off); newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(len); off += PFKEY_ALIGN8(sizeof(struct sadb_msg)); /* for authentication algorithm */ if (alen) { sup = (struct sadb_supported *)(mtod(n, caddr_t) + off); sup->sadb_supported_len = PFKEY_UNIT64(alen); sup->sadb_supported_exttype = SADB_EXT_SUPPORTED_AUTH; off += PFKEY_ALIGN8(sizeof(*sup)); for (i = 1; i <= SADB_AALG_MAX; i++) { struct auth_hash *aalgo; u_int16_t minkeysize, maxkeysize; aalgo = ah_algorithm_lookup(i); if (!aalgo) continue; alg = (struct sadb_alg *)(mtod(n, caddr_t) + off); alg->sadb_alg_id = i; alg->sadb_alg_ivlen = 0; key_getsizes_ah(aalgo, i, &minkeysize, &maxkeysize); alg->sadb_alg_minbits = _BITS(minkeysize); alg->sadb_alg_maxbits = _BITS(maxkeysize); off += PFKEY_ALIGN8(sizeof(*alg)); } } /* for encryption algorithm */ if (elen) { sup = (struct sadb_supported *)(mtod(n, caddr_t) + off); sup->sadb_supported_len = PFKEY_UNIT64(elen); sup->sadb_supported_exttype = SADB_EXT_SUPPORTED_ENCRYPT; off += PFKEY_ALIGN8(sizeof(*sup)); for (i = 1; i <= SADB_EALG_MAX; i++) { struct enc_xform *ealgo; ealgo = esp_algorithm_lookup(i); if (!ealgo) continue; alg = (struct sadb_alg *)(mtod(n, caddr_t) + off); alg->sadb_alg_id = i; alg->sadb_alg_ivlen = ealgo->blocksize; alg->sadb_alg_minbits = _BITS(ealgo->minkey); alg->sadb_alg_maxbits = _BITS(ealgo->maxkey); off += PFKEY_ALIGN8(sizeof(struct sadb_alg)); } } IPSEC_ASSERT(off == len, ("length assumption failed (off %u len %u)", off, len)); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_REGISTERED); } } /* * free secreg entry registered. * XXX: I want to do free a socket marked done SADB_RESIGER to socket. */ void key_freereg(struct socket *so) { struct secreg *reg; int i; IPSEC_ASSERT(so != NULL, ("NULL so")); /* * check whether existing or not. 
* check all type of SA, because there is a potential that * one socket is registered to multiple type of SA. */ REGTREE_LOCK(); for (i = 0; i <= SADB_SATYPE_MAX; i++) { LIST_FOREACH(reg, &V_regtree[i], chain) { if (reg->so == so && __LIST_CHAINED(reg)) { LIST_REMOVE(reg, chain); free(reg, M_IPSEC_SAR); break; } } } REGTREE_UNLOCK(); } /* * SADB_EXPIRE processing * send * * to KMD by PF_KEY. * NOTE: We send only soft lifetime extension. * * OUT: 0 : succeed * others : error number */ static int key_expire(struct secasvar *sav) { int satype; struct mbuf *result = NULL, *m; int len; int error = -1; struct sadb_lifetime *lt; IPSEC_ASSERT (sav != NULL, ("null sav")); IPSEC_ASSERT (sav->sah != NULL, ("null sa header")); /* set msg header */ satype = key_proto2satype(sav->sah->saidx.proto); IPSEC_ASSERT(satype != 0, ("invalid proto, satype %u", satype)); m = key_setsadbmsg(SADB_EXPIRE, 0, satype, sav->seq, 0, sav->refcnt); if (!m) { error = ENOBUFS; goto fail; } result = m; /* create SA extension */ m = key_setsadbsa(sav); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); /* create SA extension */ m = key_setsadbxsa2(sav->sah->saidx.mode, sav->replay ? sav->replay->count : 0, sav->sah->saidx.reqid); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); /* create lifetime extension (current and soft) */ len = PFKEY_ALIGN8(sizeof(*lt)) * 2; m = m_get2(len, M_NOWAIT, MT_DATA, 0); if (m == NULL) { error = ENOBUFS; goto fail; } m_align(m, len); m->m_len = len; bzero(mtod(m, caddr_t), len); lt = mtod(m, struct sadb_lifetime *); lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime)); lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT; lt->sadb_lifetime_allocations = sav->lft_c->allocations; lt->sadb_lifetime_bytes = sav->lft_c->bytes; lt->sadb_lifetime_addtime = sav->lft_c->addtime; lt->sadb_lifetime_usetime = sav->lft_c->usetime; lt = (struct sadb_lifetime *)(mtod(m, caddr_t) + len / 2); lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime)); lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_SOFT; lt->sadb_lifetime_allocations = sav->lft_s->allocations; lt->sadb_lifetime_bytes = sav->lft_s->bytes; lt->sadb_lifetime_addtime = sav->lft_s->addtime; lt->sadb_lifetime_usetime = sav->lft_s->usetime; m_cat(result, m); /* set sadb_address for source */ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, &sav->sah->saidx.src.sa, FULLMASK, IPSEC_ULPROTO_ANY); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); /* set sadb_address for destination */ m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, &sav->sah->saidx.dst.sa, FULLMASK, IPSEC_ULPROTO_ANY); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); /* * XXX-BZ Handle NAT-T extensions here. */ if ((result->m_flags & M_PKTHDR) == 0) { error = EINVAL; goto fail; } if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); if (result == NULL) { error = ENOBUFS; goto fail; } } result->m_pkthdr.len = 0; for (m = result; m; m = m->m_next) result->m_pkthdr.len += m->m_len; mtod(result, struct sadb_msg *)->sadb_msg_len = PFKEY_UNIT64(result->m_pkthdr.len); return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED); fail: if (result) m_freem(result); return error; } /* * SADB_FLUSH processing * receive * * from the ikmpd, and free all entries in secastree. * and send, * * to the ikmpd. * NOTE: to do is only marking SADB_SASTATE_DEAD. * * m will always be freed. 
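 */

/*
 * [Editor's sketch — hypothetical helper, not part of this change]
 * key_expire() above packs two sadb_lifetime extensions (CURRENT and
 * SOFT) into one buffer and then points into its second half; the
 * offset arithmetic generalizes to:
 */
static struct sadb_lifetime *
key_sketch_nth_lifetime(struct mbuf *m, int nth)
{
	/* each extension occupies one 8-byte-aligned sadb_lifetime slot */
	return ((struct sadb_lifetime *)(mtod(m, caddr_t) +
	    nth * PFKEY_ALIGN8(sizeof(struct sadb_lifetime))));
}

/*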
*/ static int key_flush(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct sadb_msg *newmsg; struct secashead *sah, *nextsah; struct secasvar *sav, *nextsav; u_int16_t proto; u_int8_t state; u_int stateidx; IPSEC_ASSERT(so != NULL, ("null socket")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } /* no SATYPE specified, i.e. flushing all SA. */ SAHTREE_LOCK(); for (sah = LIST_FIRST(&V_sahtree); sah != NULL; sah = nextsah) { nextsah = LIST_NEXT(sah, chain); if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC && proto != sah->saidx.proto) continue; for (stateidx = 0; stateidx < _ARRAYLEN(saorder_state_alive); stateidx++) { state = saorder_state_any[stateidx]; for (sav = LIST_FIRST(&sah->savtree[state]); sav != NULL; sav = nextsav) { nextsav = LIST_NEXT(sav, chain); key_sa_chgstate(sav, SADB_SASTATE_DEAD); KEY_FREESAV(&sav); } } sah->state = SADB_SASTATE_DEAD; } SAHTREE_UNLOCK(); if (m->m_len < sizeof(struct sadb_msg) || sizeof(struct sadb_msg) > m->m_len + M_TRAILINGSPACE(m)) { ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__)); return key_senderror(so, m, ENOBUFS); } if (m->m_next) m_freem(m->m_next); m->m_next = NULL; m->m_pkthdr.len = m->m_len = sizeof(struct sadb_msg); newmsg = mtod(m, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len); return key_sendup_mbuf(so, m, KEY_SENDUP_ALL); } /* * SADB_DUMP processing * dump all entries including status of DEAD in SAD. * receive * * from the ikmpd, and dump all secasvar leaves * and send, * ..... * to the ikmpd. * * m will always be freed. */ static int key_dump(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct secashead *sah; struct secasvar *sav; u_int16_t proto; u_int stateidx; u_int8_t satype; u_int8_t state; int cnt; struct sadb_msg *newmsg; struct mbuf *n; IPSEC_ASSERT(so != NULL, ("null socket")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n", __func__)); return key_senderror(so, m, EINVAL); } /* count sav entries to be sent to the userland. */ cnt = 0; SAHTREE_LOCK(); LIST_FOREACH(sah, &V_sahtree, chain) { if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC && proto != sah->saidx.proto) continue; for (stateidx = 0; stateidx < _ARRAYLEN(saorder_state_any); stateidx++) { state = saorder_state_any[stateidx]; LIST_FOREACH(sav, &sah->savtree[state], chain) { cnt++; } } } if (cnt == 0) { SAHTREE_UNLOCK(); return key_senderror(so, m, ENOENT); } /* send this to the userland, one at a time. 
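 */

	/*
	 * [Editor's note] Each record reuses sadb_msg_seq as a countdown:
	 * key_setdumpsa(..., --cnt, ...) numbers the replies cnt-1 ... 0,
	 * so a userland consumer can read until it sees seq == 0 and know
	 * the dump is complete.  Userland sketch:
	 *
	 *	do {
	 *		len = recv(pfkey_so, buf, sizeof(buf), 0);
	 *		msg = (struct sadb_msg *)buf;
	 *	} while (len > 0 && msg->sadb_msg_seq != 0);
	 */

	/*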
*/ newmsg = NULL; LIST_FOREACH(sah, &V_sahtree, chain) { if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC && proto != sah->saidx.proto) continue; /* map proto to satype */ if ((satype = key_proto2satype(sah->saidx.proto)) == 0) { SAHTREE_UNLOCK(); ipseclog((LOG_DEBUG, "%s: there was invalid proto in " "SAD.\n", __func__)); return key_senderror(so, m, EINVAL); } for (stateidx = 0; stateidx < _ARRAYLEN(saorder_state_any); stateidx++) { state = saorder_state_any[stateidx]; LIST_FOREACH(sav, &sah->savtree[state], chain) { n = key_setdumpsa(sav, SADB_DUMP, satype, --cnt, mhp->msg->sadb_msg_pid); if (!n) { SAHTREE_UNLOCK(); return key_senderror(so, m, ENOBUFS); } key_sendup_mbuf(so, n, KEY_SENDUP_ONE); } } } SAHTREE_UNLOCK(); m_freem(m); return 0; } /* * SADB_X_PROMISC processing * * m will always be freed. */ static int key_promisc(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { int olen; IPSEC_ASSERT(so != NULL, ("null socket")); IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(mhp->msg != NULL, ("null msg")); olen = PFKEY_UNUNIT64(mhp->msg->sadb_msg_len); if (olen < sizeof(struct sadb_msg)) { #if 1 return key_senderror(so, m, EINVAL); #else m_freem(m); return 0; #endif } else if (olen == sizeof(struct sadb_msg)) { /* enable/disable promisc mode */ struct keycb *kp; if ((kp = (struct keycb *)sotorawcb(so)) == NULL) return key_senderror(so, m, EINVAL); mhp->msg->sadb_msg_errno = 0; switch (mhp->msg->sadb_msg_satype) { case 0: case 1: kp->kp_promisc = mhp->msg->sadb_msg_satype; break; default: return key_senderror(so, m, EINVAL); } /* send the original message back to everyone */ mhp->msg->sadb_msg_errno = 0; return key_sendup_mbuf(so, m, KEY_SENDUP_ALL); } else { /* send packet as is */ m_adj(m, PFKEY_ALIGN8(sizeof(struct sadb_msg))); /* TODO: if sadb_msg_seq is specified, send to specific pid */ return key_sendup_mbuf(so, m, KEY_SENDUP_ALL); } } static int (*key_typesw[])(struct socket *, struct mbuf *, const struct sadb_msghdr *) = { NULL, /* SADB_RESERVED */ key_getspi, /* SADB_GETSPI */ key_update, /* SADB_UPDATE */ key_add, /* SADB_ADD */ key_delete, /* SADB_DELETE */ key_get, /* SADB_GET */ key_acquire2, /* SADB_ACQUIRE */ key_register, /* SADB_REGISTER */ NULL, /* SADB_EXPIRE */ key_flush, /* SADB_FLUSH */ key_dump, /* SADB_DUMP */ key_promisc, /* SADB_X_PROMISC */ NULL, /* SADB_X_PCHANGE */ key_spdadd, /* SADB_X_SPDUPDATE */ key_spdadd, /* SADB_X_SPDADD */ key_spddelete, /* SADB_X_SPDDELETE */ key_spdget, /* SADB_X_SPDGET */ NULL, /* SADB_X_SPDACQUIRE */ key_spddump, /* SADB_X_SPDDUMP */ key_spdflush, /* SADB_X_SPDFLUSH */ key_spdadd, /* SADB_X_SPDSETIDX */ NULL, /* SADB_X_SPDEXPIRE */ key_spddelete2, /* SADB_X_SPDDELETE2 */ }; /* * parse sadb_msg buffer to process PFKEYv2, * and create a data to response if needed. * I think to be dealed with mbuf directly. * IN: * msgp : pointer to pointer to a received buffer pulluped. * This is rewrited to response. * so : pointer to socket. * OUT: * length for buffer to send to user process. 
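 */

/*
 * [Editor's sketch — hypothetical helper, not part of this change]
 * The first sanity check in key_parse() below is that the length
 * claimed in the header matches the mbuf chain; sadb_msg_len travels
 * in 8-byte units, so the check is effectively:
 */
static int
key_sketch_lencheck(const struct mbuf *m, const struct sadb_msg *msg)
{
	if ((m->m_flags & M_PKTHDR) == 0)
		return (EINVAL);
	if (m->m_pkthdr.len != PFKEY_UNUNIT64(msg->sadb_msg_len))
		return (EINVAL);	/* header lies about its size */
	return (0);
}

/*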
 */
int
key_parse(struct mbuf *m, struct socket *so)
{
	struct sadb_msg *msg;
	struct sadb_msghdr mh;
	u_int orglen;
	int error;
	int target;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));

#if 0	/*kdebug_sadb assumes msg in linear buffer*/
	KEYDEBUG(KEYDEBUG_KEY_DUMP,
		ipseclog((LOG_DEBUG, "%s: passed sadb_msg\n", __func__));
		kdebug_sadb(msg));
#endif

	if (m->m_len < sizeof(struct sadb_msg)) {
		m = m_pullup(m, sizeof(struct sadb_msg));
		if (!m)
			return ENOBUFS;
	}
	msg = mtod(m, struct sadb_msg *);
	orglen = PFKEY_UNUNIT64(msg->sadb_msg_len);
	target = KEY_SENDUP_ONE;

	/* the total packet length must match what the header claims */
	if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len != orglen) {
		ipseclog((LOG_DEBUG, "%s: invalid message length.\n",
		    __func__));
		PFKEYSTAT_INC(out_invlen);
		error = EINVAL;
		goto senderror;
	}

	if (msg->sadb_msg_version != PF_KEY_V2) {
		ipseclog((LOG_DEBUG, "%s: PF_KEY version %u mismatch.\n",
		    __func__, msg->sadb_msg_version));
		PFKEYSTAT_INC(out_invver);
		error = EINVAL;
		goto senderror;
	}

	if (msg->sadb_msg_type > SADB_MAX) {
		ipseclog((LOG_DEBUG, "%s: invalid type %u was passed.\n",
		    __func__, msg->sadb_msg_type));
		PFKEYSTAT_INC(out_invmsgtype);
		error = EINVAL;
		goto senderror;
	}

	/* for old-fashioned code - should be nuked */
	if (m->m_pkthdr.len > MCLBYTES) {
		m_freem(m);
		return ENOBUFS;
	}
	if (m->m_next) {
		struct mbuf *n;

		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n && m->m_pkthdr.len > MHLEN) {
-			MCLGET(n, M_NOWAIT);
-			if ((n->m_flags & M_EXT) == 0) {
+			if (!(MCLGET(n, M_NOWAIT))) {
				m_free(n);
				n = NULL;
			}
		}
		if (!n) {
			m_freem(m);
			return ENOBUFS;
		}

		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
		n->m_pkthdr.len = n->m_len = m->m_pkthdr.len;
		n->m_next = NULL;
		m_freem(m);
		m = n;
	}

	/* align the mbuf chain so that extensions are in contiguous region. */
	error = key_align(m, &mh);
	if (error)
		return error;

	msg = mh.msg;

	/* check SA type */
	switch (msg->sadb_msg_satype) {
	case SADB_SATYPE_UNSPEC:
		switch (msg->sadb_msg_type) {
		case SADB_GETSPI:
		case SADB_UPDATE:
		case SADB_ADD:
		case SADB_DELETE:
		case SADB_GET:
		case SADB_ACQUIRE:
		case SADB_EXPIRE:
			ipseclog((LOG_DEBUG, "%s: must specify satype "
			    "when msg type=%u.\n", __func__,
			    msg->sadb_msg_type));
			PFKEYSTAT_INC(out_invsatype);
			error = EINVAL;
			goto senderror;
		}
		break;
	case SADB_SATYPE_AH:
	case SADB_SATYPE_ESP:
	case SADB_X_SATYPE_IPCOMP:
	case SADB_X_SATYPE_TCPSIGNATURE:
		switch (msg->sadb_msg_type) {
		case SADB_X_SPDADD:
		case SADB_X_SPDDELETE:
		case SADB_X_SPDGET:
		case SADB_X_SPDDUMP:
		case SADB_X_SPDFLUSH:
		case SADB_X_SPDSETIDX:
		case SADB_X_SPDUPDATE:
		case SADB_X_SPDDELETE2:
			ipseclog((LOG_DEBUG, "%s: illegal satype=%u\n",
			    __func__, msg->sadb_msg_type));
			PFKEYSTAT_INC(out_invsatype);
			error = EINVAL;
			goto senderror;
		}
		break;
	case SADB_SATYPE_RSVP:
	case SADB_SATYPE_OSPFV2:
	case SADB_SATYPE_RIPV2:
	case SADB_SATYPE_MIP:
		ipseclog((LOG_DEBUG, "%s: type %u isn't supported.\n",
		    __func__, msg->sadb_msg_satype));
		PFKEYSTAT_INC(out_invsatype);
		error = EOPNOTSUPP;
		goto senderror;
	case 1:	/* XXX: What does it do?
*/ if (msg->sadb_msg_type == SADB_X_PROMISC) break; /*FALLTHROUGH*/ default: ipseclog((LOG_DEBUG, "%s: invalid type %u is passed.\n", __func__, msg->sadb_msg_satype)); PFKEYSTAT_INC(out_invsatype); error = EINVAL; goto senderror; } /* check field of upper layer protocol and address family */ if (mh.ext[SADB_EXT_ADDRESS_SRC] != NULL && mh.ext[SADB_EXT_ADDRESS_DST] != NULL) { struct sadb_address *src0, *dst0; u_int plen; src0 = (struct sadb_address *)(mh.ext[SADB_EXT_ADDRESS_SRC]); dst0 = (struct sadb_address *)(mh.ext[SADB_EXT_ADDRESS_DST]); /* check upper layer protocol */ if (src0->sadb_address_proto != dst0->sadb_address_proto) { ipseclog((LOG_DEBUG, "%s: upper layer protocol " "mismatched.\n", __func__)); PFKEYSTAT_INC(out_invaddr); error = EINVAL; goto senderror; } /* check family */ if (PFKEY_ADDR_SADDR(src0)->sa_family != PFKEY_ADDR_SADDR(dst0)->sa_family) { ipseclog((LOG_DEBUG, "%s: address family mismatched.\n", __func__)); PFKEYSTAT_INC(out_invaddr); error = EINVAL; goto senderror; } if (PFKEY_ADDR_SADDR(src0)->sa_len != PFKEY_ADDR_SADDR(dst0)->sa_len) { ipseclog((LOG_DEBUG, "%s: address struct size " "mismatched.\n", __func__)); PFKEYSTAT_INC(out_invaddr); error = EINVAL; goto senderror; } switch (PFKEY_ADDR_SADDR(src0)->sa_family) { case AF_INET: if (PFKEY_ADDR_SADDR(src0)->sa_len != sizeof(struct sockaddr_in)) { PFKEYSTAT_INC(out_invaddr); error = EINVAL; goto senderror; } break; case AF_INET6: if (PFKEY_ADDR_SADDR(src0)->sa_len != sizeof(struct sockaddr_in6)) { PFKEYSTAT_INC(out_invaddr); error = EINVAL; goto senderror; } break; default: ipseclog((LOG_DEBUG, "%s: unsupported address family\n", __func__)); PFKEYSTAT_INC(out_invaddr); error = EAFNOSUPPORT; goto senderror; } switch (PFKEY_ADDR_SADDR(src0)->sa_family) { case AF_INET: plen = sizeof(struct in_addr) << 3; break; case AF_INET6: plen = sizeof(struct in6_addr) << 3; break; default: plen = 0; /*fool gcc*/ break; } /* check max prefix length */ if (src0->sadb_address_prefixlen > plen || dst0->sadb_address_prefixlen > plen) { ipseclog((LOG_DEBUG, "%s: illegal prefixlen.\n", __func__)); PFKEYSTAT_INC(out_invaddr); error = EINVAL; goto senderror; } /* * prefixlen == 0 is valid because there can be a case when * all addresses are matched. */ } if (msg->sadb_msg_type >= sizeof(key_typesw)/sizeof(key_typesw[0]) || key_typesw[msg->sadb_msg_type] == NULL) { PFKEYSTAT_INC(out_invmsgtype); error = EINVAL; goto senderror; } return (*key_typesw[msg->sadb_msg_type])(so, m, &mh); senderror: msg->sadb_msg_errno = error; return key_sendup_mbuf(so, m, target); } static int key_senderror(struct socket *so, struct mbuf *m, int code) { struct sadb_msg *msg; IPSEC_ASSERT(m->m_len >= sizeof(struct sadb_msg), ("mbuf too small, len %u", m->m_len)); msg = mtod(m, struct sadb_msg *); msg->sadb_msg_errno = code; return key_sendup_mbuf(so, m, KEY_SENDUP_ONE); } /* * set the pointer to each header into message buffer. * m will be freed on error. * XXX larger-than-MCLBYTES extension? 
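 * In outline, the walk below does (a summary sketch of the code
 * that follows):
 *
 *	off = sizeof(struct sadb_msg);
 *	while (off < end) {
 *		ext = header at off, made contiguous via m_pulldown();
 *		extlen = PFKEY_UNUNIT64(ext->sadb_ext_len);
 *		reject duplicate or unknown extension types;
 *		mhp->ext[ext->sadb_ext_type] = ext;
 *		off += extlen;
 *	}
 *
 * and off must land exactly on end, or the message is malformed.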
*/ static int key_align(struct mbuf *m, struct sadb_msghdr *mhp) { struct mbuf *n; struct sadb_ext *ext; size_t off, end; int extlen; int toff; IPSEC_ASSERT(m != NULL, ("null mbuf")); IPSEC_ASSERT(mhp != NULL, ("null msghdr")); IPSEC_ASSERT(m->m_len >= sizeof(struct sadb_msg), ("mbuf too small, len %u", m->m_len)); /* initialize */ bzero(mhp, sizeof(*mhp)); mhp->msg = mtod(m, struct sadb_msg *); mhp->ext[0] = (struct sadb_ext *)mhp->msg; /*XXX backward compat */ end = PFKEY_UNUNIT64(mhp->msg->sadb_msg_len); extlen = end; /*just in case extlen is not updated*/ for (off = sizeof(struct sadb_msg); off < end; off += extlen) { n = m_pulldown(m, off, sizeof(struct sadb_ext), &toff); if (!n) { /* m is already freed */ return ENOBUFS; } ext = (struct sadb_ext *)(mtod(n, caddr_t) + toff); /* set pointer */ switch (ext->sadb_ext_type) { case SADB_EXT_SA: case SADB_EXT_ADDRESS_SRC: case SADB_EXT_ADDRESS_DST: case SADB_EXT_ADDRESS_PROXY: case SADB_EXT_LIFETIME_CURRENT: case SADB_EXT_LIFETIME_HARD: case SADB_EXT_LIFETIME_SOFT: case SADB_EXT_KEY_AUTH: case SADB_EXT_KEY_ENCRYPT: case SADB_EXT_IDENTITY_SRC: case SADB_EXT_IDENTITY_DST: case SADB_EXT_SENSITIVITY: case SADB_EXT_PROPOSAL: case SADB_EXT_SUPPORTED_AUTH: case SADB_EXT_SUPPORTED_ENCRYPT: case SADB_EXT_SPIRANGE: case SADB_X_EXT_POLICY: case SADB_X_EXT_SA2: #ifdef IPSEC_NAT_T case SADB_X_EXT_NAT_T_TYPE: case SADB_X_EXT_NAT_T_SPORT: case SADB_X_EXT_NAT_T_DPORT: case SADB_X_EXT_NAT_T_OAI: case SADB_X_EXT_NAT_T_OAR: case SADB_X_EXT_NAT_T_FRAG: #endif /* duplicate check */ /* * XXX Are there duplication payloads of either * KEY_AUTH or KEY_ENCRYPT ? */ if (mhp->ext[ext->sadb_ext_type] != NULL) { ipseclog((LOG_DEBUG, "%s: duplicate ext_type " "%u\n", __func__, ext->sadb_ext_type)); m_freem(m); PFKEYSTAT_INC(out_dupext); return EINVAL; } break; default: ipseclog((LOG_DEBUG, "%s: invalid ext_type %u\n", __func__, ext->sadb_ext_type)); m_freem(m); PFKEYSTAT_INC(out_invexttype); return EINVAL; } extlen = PFKEY_UNUNIT64(ext->sadb_ext_len); if (key_validate_ext(ext, extlen)) { m_freem(m); PFKEYSTAT_INC(out_invlen); return EINVAL; } n = m_pulldown(m, off, extlen, &toff); if (!n) { /* m is already freed */ return ENOBUFS; } ext = (struct sadb_ext *)(mtod(n, caddr_t) + toff); mhp->ext[ext->sadb_ext_type] = ext; mhp->extoff[ext->sadb_ext_type] = off; mhp->extlen[ext->sadb_ext_type] = extlen; } if (off != end) { m_freem(m); PFKEYSTAT_INC(out_invlen); return EINVAL; } return 0; } static int key_validate_ext(const struct sadb_ext *ext, int len) { const struct sockaddr *sa; enum { NONE, ADDR } checktype = NONE; int baselen = 0; const int sal = offsetof(struct sockaddr, sa_len) + sizeof(sa->sa_len); if (len != PFKEY_UNUNIT64(ext->sadb_ext_len)) return EINVAL; /* if it does not match minimum/maximum length, bail */ if (ext->sadb_ext_type >= sizeof(minsize) / sizeof(minsize[0]) || ext->sadb_ext_type >= sizeof(maxsize) / sizeof(maxsize[0])) return EINVAL; if (!minsize[ext->sadb_ext_type] || len < minsize[ext->sadb_ext_type]) return EINVAL; if (maxsize[ext->sadb_ext_type] && len > maxsize[ext->sadb_ext_type]) return EINVAL; /* more checks based on sadb_ext_type XXX need more */ switch (ext->sadb_ext_type) { case SADB_EXT_ADDRESS_SRC: case SADB_EXT_ADDRESS_DST: case SADB_EXT_ADDRESS_PROXY: baselen = PFKEY_ALIGN8(sizeof(struct sadb_address)); checktype = ADDR; break; case SADB_EXT_IDENTITY_SRC: case SADB_EXT_IDENTITY_DST: if (((const struct sadb_ident *)ext)->sadb_ident_type == SADB_X_IDENTTYPE_ADDR) { baselen = PFKEY_ALIGN8(sizeof(struct sadb_ident)); checktype = ADDR; } 
else checktype = NONE; break; default: checktype = NONE; break; } switch (checktype) { case NONE: break; case ADDR: sa = (const struct sockaddr *)(((const u_int8_t*)ext)+baselen); if (len < baselen + sal) return EINVAL; if (baselen + PFKEY_ALIGN8(sa->sa_len) != len) return EINVAL; break; } return 0; } void key_init(void) { int i; for (i = 0; i < IPSEC_DIR_MAX; i++) TAILQ_INIT(&V_sptree[i]); LIST_INIT(&V_sahtree); for (i = 0; i <= SADB_SATYPE_MAX; i++) LIST_INIT(&V_regtree[i]); LIST_INIT(&V_acqtree); LIST_INIT(&V_spacqtree); if (!IS_DEFAULT_VNET(curvnet)) return; SPTREE_LOCK_INIT(); REGTREE_LOCK_INIT(); SAHTREE_LOCK_INIT(); ACQ_LOCK_INIT(); SPACQ_LOCK_INIT(); #ifndef IPSEC_DEBUG2 callout_init(&key_timer, CALLOUT_MPSAFE); callout_reset(&key_timer, hz, key_timehandler, NULL); #endif /*IPSEC_DEBUG2*/ /* initialize key statistics */ keystat.getspi_count = 1; printf("IPsec: Initialized Security Association Processing.\n"); } #ifdef VIMAGE void key_destroy(void) { TAILQ_HEAD(, secpolicy) drainq; struct secpolicy *sp, *nextsp; struct secacq *acq, *nextacq; struct secspacq *spacq, *nextspacq; struct secashead *sah, *nextsah; struct secreg *reg; int i; TAILQ_INIT(&drainq); SPTREE_WLOCK(); for (i = 0; i < IPSEC_DIR_MAX; i++) { TAILQ_CONCAT(&drainq, &V_sptree[i], chain); } SPTREE_WUNLOCK(); sp = TAILQ_FIRST(&drainq); while (sp != NULL) { nextsp = TAILQ_NEXT(sp, chain); KEY_FREESP(&sp); sp = nextsp; } SAHTREE_LOCK(); for (sah = LIST_FIRST(&V_sahtree); sah != NULL; sah = nextsah) { nextsah = LIST_NEXT(sah, chain); if (__LIST_CHAINED(sah)) { LIST_REMOVE(sah, chain); free(sah, M_IPSEC_SAH); } } SAHTREE_UNLOCK(); REGTREE_LOCK(); for (i = 0; i <= SADB_SATYPE_MAX; i++) { LIST_FOREACH(reg, &V_regtree[i], chain) { if (__LIST_CHAINED(reg)) { LIST_REMOVE(reg, chain); free(reg, M_IPSEC_SAR); break; } } } REGTREE_UNLOCK(); ACQ_LOCK(); for (acq = LIST_FIRST(&V_acqtree); acq != NULL; acq = nextacq) { nextacq = LIST_NEXT(acq, chain); if (__LIST_CHAINED(acq)) { LIST_REMOVE(acq, chain); free(acq, M_IPSEC_SAQ); } } ACQ_UNLOCK(); SPACQ_LOCK(); for (spacq = LIST_FIRST(&V_spacqtree); spacq != NULL; spacq = nextspacq) { nextspacq = LIST_NEXT(spacq, chain); if (__LIST_CHAINED(spacq)) { LIST_REMOVE(spacq, chain); free(spacq, M_IPSEC_SAQ); } } SPACQ_UNLOCK(); } #endif /* * XXX: maybe This function is called after INBOUND IPsec processing. * * Special check for tunnel-mode packets. * We must make some checks for consistency between inner and outer IP header. * * xxx more checks to be provided */ int key_checktunnelsanity(struct secasvar *sav, u_int family, caddr_t src, caddr_t dst) { IPSEC_ASSERT(sav->sah != NULL, ("null SA header")); /* XXX: check inner IP header */ return 1; } /* record data transfer on SA, and update timestamps */ void key_sa_recordxfer(struct secasvar *sav, struct mbuf *m) { IPSEC_ASSERT(sav != NULL, ("Null secasvar")); IPSEC_ASSERT(m != NULL, ("Null mbuf")); if (!sav->lft_c) return; /* * XXX Currently, there is a difference of bytes size * between inbound and outbound processing. */ sav->lft_c->bytes += m->m_pkthdr.len; /* to check bytes lifetime is done in key_timehandler(). */ /* * We use the number of packets as the unit of * allocations. We increment the variable * whenever {esp,ah}_{in,out}put is called. */ sav->lft_c->allocations++; /* XXX check for expires? */ /* * NOTE: We record CURRENT usetime by using wall clock, * in seconds. HARD and SOFT lifetime are measured by the time * difference (again in seconds) from usetime. 
* * usetime * v expire expire * -----+-----+--------+---> t * <--------------> HARD * <-----> SOFT */ sav->lft_c->usetime = time_second; /* XXX check for expires? */ return; } static void key_sa_chgstate(struct secasvar *sav, u_int8_t state) { IPSEC_ASSERT(sav != NULL, ("NULL sav")); SAHTREE_LOCK_ASSERT(); if (sav->state != state) { if (__LIST_CHAINED(sav)) LIST_REMOVE(sav, chain); sav->state = state; LIST_INSERT_HEAD(&sav->sah->savtree[state], sav, chain); } } void key_sa_stir_iv(struct secasvar *sav) { IPSEC_ASSERT(sav->iv != NULL, ("null IV")); key_randomfill(sav->iv, sav->ivlen); } /* * Take one of the kernel's security keys and convert it into a PF_KEY * structure within an mbuf, suitable for sending up to a waiting * application in user land. * * IN: * src: A pointer to a kernel security key. * exttype: Which type of key this is. Refer to the PF_KEY data structures. * OUT: * a valid mbuf or NULL indicating an error * */ static struct mbuf * key_setkey(struct seckey *src, u_int16_t exttype) { struct mbuf *m; struct sadb_key *p; int len; if (src == NULL) return NULL; len = PFKEY_ALIGN8(sizeof(struct sadb_key) + _KEYLEN(src)); m = m_get2(len, M_NOWAIT, MT_DATA, 0); if (m == NULL) return NULL; m_align(m, len); m->m_len = len; p = mtod(m, struct sadb_key *); bzero(p, len); p->sadb_key_len = PFKEY_UNIT64(len); p->sadb_key_exttype = exttype; p->sadb_key_bits = src->bits; bcopy(src->key_data, _KEYBUF(p), _KEYLEN(src)); return m; } /* * Take one of the kernel's lifetime data structures and convert it * into a PF_KEY structure within an mbuf, suitable for sending up to * a waiting application in user land. * * IN: * src: A pointer to a kernel lifetime structure. * exttype: Which type of lifetime this is. Refer to the PF_KEY * data structures for more information. * OUT: * a valid mbuf or NULL indicating an error * */ static struct mbuf * key_setlifetime(struct seclifetime *src, u_int16_t exttype) { struct mbuf *m = NULL; struct sadb_lifetime *p; int len = PFKEY_ALIGN8(sizeof(struct sadb_lifetime)); if (src == NULL) return NULL; m = m_get2(len, M_NOWAIT, MT_DATA, 0); if (m == NULL) return m; m_align(m, len); m->m_len = len; p = mtod(m, struct sadb_lifetime *); bzero(p, len); p->sadb_lifetime_len = PFKEY_UNIT64(len); p->sadb_lifetime_exttype = exttype; p->sadb_lifetime_allocations = src->allocations; p->sadb_lifetime_bytes = src->bytes; p->sadb_lifetime_addtime = src->addtime; p->sadb_lifetime_usetime = src->usetime; return m; } Index: head/sys/netipsec/keysock.c =================================================================== --- head/sys/netipsec/keysock.c (revision 276749) +++ head/sys/netipsec/keysock.c (revision 276750) @@ -1,573 +1,572 @@ /* $FreeBSD$ */ /* $KAME: keysock.c,v 1.25 2001/08/13 20:07:41 itojun Exp $ */ /*- * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_ipsec.h" /* This code has derived from sys/net/rtsock.c on FreeBSD2.2.5 */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct key_cb { int key_count; int any_count; }; static VNET_DEFINE(struct key_cb, key_cb); #define V_key_cb VNET(key_cb) static struct sockaddr key_src = { 2, PF_KEY, }; static int key_sendup0(struct rawcb *, struct mbuf *, int); VNET_PCPUSTAT_DEFINE(struct pfkeystat, pfkeystat); VNET_PCPUSTAT_SYSINIT(pfkeystat); #ifdef VIMAGE VNET_PCPUSTAT_SYSUNINIT(pfkeystat); #endif /* VIMAGE */ /* * key_output() */ int key_output(struct mbuf *m, struct socket *so, ...) { struct sadb_msg *msg; int len, error = 0; if (m == 0) panic("%s: NULL pointer was passed.\n", __func__); PFKEYSTAT_INC(out_total); PFKEYSTAT_ADD(out_bytes, m->m_pkthdr.len); len = m->m_pkthdr.len; if (len < sizeof(struct sadb_msg)) { PFKEYSTAT_INC(out_tooshort); error = EINVAL; goto end; } if (m->m_len < sizeof(struct sadb_msg)) { if ((m = m_pullup(m, sizeof(struct sadb_msg))) == 0) { PFKEYSTAT_INC(out_nomem); error = ENOBUFS; goto end; } } M_ASSERTPKTHDR(m); KEYDEBUG(KEYDEBUG_KEY_DUMP, kdebug_mbuf(m)); msg = mtod(m, struct sadb_msg *); PFKEYSTAT_INC(out_msgtype[msg->sadb_msg_type]); if (len != PFKEY_UNUNIT64(msg->sadb_msg_len)) { PFKEYSTAT_INC(out_invlen); error = EINVAL; goto end; } error = key_parse(m, so); m = NULL; end: if (m) m_freem(m); return error; } /* * send message to the socket. */ static int key_sendup0(struct rawcb *rp, struct mbuf *m, int promisc) { int error; if (promisc) { struct sadb_msg *pmsg; M_PREPEND(m, sizeof(struct sadb_msg), M_NOWAIT); if (m == NULL) { PFKEYSTAT_INC(in_nomem); return (ENOBUFS); } m->m_pkthdr.len += sizeof(*pmsg); /* XXX: is this correct? */ pmsg = mtod(m, struct sadb_msg *); bzero(pmsg, sizeof(*pmsg)); pmsg->sadb_msg_version = PF_KEY_V2; pmsg->sadb_msg_type = SADB_X_PROMISC; pmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len); /* pid and seq? */ PFKEYSTAT_INC(in_msgtype[pmsg->sadb_msg_type]); } if (!sbappendaddr(&rp->rcb_socket->so_rcv, (struct sockaddr *)&key_src, m, NULL)) { PFKEYSTAT_INC(in_nomem); m_freem(m); error = ENOBUFS; } else error = 0; sorwakeup(rp->rcb_socket); return error; } /* XXX this interface should be obsoleted. */ int key_sendup(struct socket *so, struct sadb_msg *msg, u_int len, int target) { struct mbuf *m, *n, *mprev; int tlen; /* sanity check */ if (so == 0 || msg == 0) panic("%s: NULL pointer was passed.\n", __func__); KEYDEBUG(KEYDEBUG_KEY_DUMP, printf("%s: \n", __func__); kdebug_sadb(msg)); /* * we increment statistics here, just in case we have ENOBUFS * in this function. 
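 * (These counters are backed out again near the bottom of this
 * function, see the PFKEYSTAT_ADD(..., -1) calls, because
 * key_sendup_mbuf() increments them once more; the early bump only
 * exists so that an early ENOBUFS return is still accounted for.)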
*/ PFKEYSTAT_INC(in_total); PFKEYSTAT_ADD(in_bytes, len); PFKEYSTAT_INC(in_msgtype[msg->sadb_msg_type]); /* * Get mbuf chain whenever possible (not clusters), * to save socket buffer. We'll be generating many SADB_ACQUIRE * messages to listening key sockets. If we simply allocate clusters, * sbappendaddr() will raise ENOBUFS due to too little sbspace(). * sbspace() computes # of actual data bytes AND mbuf region. * * TODO: SADB_ACQUIRE filters should be implemented. */ tlen = len; m = mprev = NULL; while (tlen > 0) { if (tlen == len) { MGETHDR(n, M_NOWAIT, MT_DATA); if (n == NULL) { PFKEYSTAT_INC(in_nomem); return ENOBUFS; } n->m_len = MHLEN; } else { MGET(n, M_NOWAIT, MT_DATA); if (n == NULL) { PFKEYSTAT_INC(in_nomem); return ENOBUFS; } n->m_len = MLEN; } if (tlen >= MCLBYTES) { /*XXX better threshold? */ - MCLGET(n, M_NOWAIT); - if ((n->m_flags & M_EXT) == 0) { + if (!(MCLGET(n, M_NOWAIT))) { m_free(n); m_freem(m); PFKEYSTAT_INC(in_nomem); return ENOBUFS; } n->m_len = MCLBYTES; } if (tlen < n->m_len) n->m_len = tlen; n->m_next = NULL; if (m == NULL) m = mprev = n; else { mprev->m_next = n; mprev = n; } tlen -= n->m_len; n = NULL; } m->m_pkthdr.len = len; m->m_pkthdr.rcvif = NULL; m_copyback(m, 0, len, (caddr_t)msg); /* avoid duplicated statistics */ PFKEYSTAT_ADD(in_total, -1); PFKEYSTAT_ADD(in_bytes, -len); PFKEYSTAT_ADD(in_msgtype[msg->sadb_msg_type], -1); return key_sendup_mbuf(so, m, target); } /* so can be NULL if target != KEY_SENDUP_ONE */ int key_sendup_mbuf(struct socket *so, struct mbuf *m, int target) { struct mbuf *n; struct keycb *kp; int sendup; struct rawcb *rp; int error = 0; if (m == NULL) panic("key_sendup_mbuf: NULL pointer was passed.\n"); if (so == NULL && target == KEY_SENDUP_ONE) panic("%s: NULL pointer was passed.\n", __func__); PFKEYSTAT_INC(in_total); PFKEYSTAT_ADD(in_bytes, m->m_pkthdr.len); if (m->m_len < sizeof(struct sadb_msg)) { m = m_pullup(m, sizeof(struct sadb_msg)); if (m == NULL) { PFKEYSTAT_INC(in_nomem); return ENOBUFS; } } if (m->m_len >= sizeof(struct sadb_msg)) { struct sadb_msg *msg; msg = mtod(m, struct sadb_msg *); PFKEYSTAT_INC(in_msgtype[msg->sadb_msg_type]); } mtx_lock(&rawcb_mtx); LIST_FOREACH(rp, &V_rawcb_list, list) { if (rp->rcb_proto.sp_family != PF_KEY) continue; if (rp->rcb_proto.sp_protocol && rp->rcb_proto.sp_protocol != PF_KEY_V2) { continue; } kp = (struct keycb *)rp; /* * If you are in promiscuous mode, and when you get broadcasted * reply, you'll get two PF_KEY messages. 
* (based on pf_key@inner.net message on 14 Oct 1998) */ if (((struct keycb *)rp)->kp_promisc) { if ((n = m_copy(m, 0, (int)M_COPYALL)) != NULL) { (void)key_sendup0(rp, n, 1); n = NULL; } } /* the exact target will be processed later */ if (so && sotorawcb(so) == rp) continue; sendup = 0; switch (target) { case KEY_SENDUP_ONE: /* the statement has no effect */ if (so && sotorawcb(so) == rp) sendup++; break; case KEY_SENDUP_ALL: sendup++; break; case KEY_SENDUP_REGISTERED: if (kp->kp_registered) sendup++; break; } PFKEYSTAT_INC(in_msgtarget[target]); if (!sendup) continue; if ((n = m_copy(m, 0, (int)M_COPYALL)) == NULL) { m_freem(m); PFKEYSTAT_INC(in_nomem); mtx_unlock(&rawcb_mtx); return ENOBUFS; } if ((error = key_sendup0(rp, n, 0)) != 0) { m_freem(m); mtx_unlock(&rawcb_mtx); return error; } n = NULL; } if (so) { error = key_sendup0(sotorawcb(so), m, 0); m = NULL; } else { error = 0; m_freem(m); } mtx_unlock(&rawcb_mtx); return error; } /* * key_abort() * derived from net/rtsock.c:rts_abort() */ static void key_abort(struct socket *so) { raw_usrreqs.pru_abort(so); } /* * key_attach() * derived from net/rtsock.c:rts_attach() */ static int key_attach(struct socket *so, int proto, struct thread *td) { struct keycb *kp; int error; KASSERT(so->so_pcb == NULL, ("key_attach: so_pcb != NULL")); if (td != NULL) { error = priv_check(td, PRIV_NET_RAW); if (error) return error; } /* XXX */ kp = malloc(sizeof *kp, M_PCB, M_WAITOK | M_ZERO); if (kp == 0) return ENOBUFS; so->so_pcb = (caddr_t)kp; error = raw_attach(so, proto); kp = (struct keycb *)sotorawcb(so); if (error) { free(kp, M_PCB); so->so_pcb = (caddr_t) 0; return error; } kp->kp_promisc = kp->kp_registered = 0; if (kp->kp_raw.rcb_proto.sp_protocol == PF_KEY) /* XXX: AF_KEY */ V_key_cb.key_count++; V_key_cb.any_count++; soisconnected(so); so->so_options |= SO_USELOOPBACK; return 0; } /* * key_bind() * derived from net/rtsock.c:rts_bind() */ static int key_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { return EINVAL; } /* * key_close() * derived from net/rtsock.c:rts_close(). 
*/ static void key_close(struct socket *so) { raw_usrreqs.pru_close(so); } /* * key_connect() * derived from net/rtsock.c:rts_connect() */ static int key_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { return EINVAL; } /* * key_detach() * derived from net/rtsock.c:rts_detach() */ static void key_detach(struct socket *so) { struct keycb *kp = (struct keycb *)sotorawcb(so); KASSERT(kp != NULL, ("key_detach: kp == NULL")); if (kp->kp_raw.rcb_proto.sp_protocol == PF_KEY) /* XXX: AF_KEY */ V_key_cb.key_count--; V_key_cb.any_count--; key_freereg(so); raw_usrreqs.pru_detach(so); } /* * key_disconnect() * derived from net/rtsock.c:key_disconnect() */ static int key_disconnect(struct socket *so) { return(raw_usrreqs.pru_disconnect(so)); } /* * key_peeraddr() * derived from net/rtsock.c:rts_peeraddr() */ static int key_peeraddr(struct socket *so, struct sockaddr **nam) { return(raw_usrreqs.pru_peeraddr(so, nam)); } /* * key_send() * derived from net/rtsock.c:rts_send() */ static int key_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct thread *td) { return(raw_usrreqs.pru_send(so, flags, m, nam, control, td)); } /* * key_shutdown() * derived from net/rtsock.c:rts_shutdown() */ static int key_shutdown(struct socket *so) { return(raw_usrreqs.pru_shutdown(so)); } /* * key_sockaddr() * derived from net/rtsock.c:rts_sockaddr() */ static int key_sockaddr(struct socket *so, struct sockaddr **nam) { return(raw_usrreqs.pru_sockaddr(so, nam)); } struct pr_usrreqs key_usrreqs = { .pru_abort = key_abort, .pru_attach = key_attach, .pru_bind = key_bind, .pru_connect = key_connect, .pru_detach = key_detach, .pru_disconnect = key_disconnect, .pru_peeraddr = key_peeraddr, .pru_send = key_send, .pru_shutdown = key_shutdown, .pru_sockaddr = key_sockaddr, .pru_close = key_close, }; /* sysctl */ SYSCTL_NODE(_net, PF_KEY, key, CTLFLAG_RW, 0, "Key Family"); /* * Definitions of protocols supported in the KEY domain. */ extern struct domain keydomain; struct protosw keysw[] = { { .pr_type = SOCK_RAW, .pr_domain = &keydomain, .pr_protocol = PF_KEY_V2, .pr_flags = PR_ATOMIC|PR_ADDR, .pr_output = key_output, .pr_ctlinput = raw_ctlinput, .pr_init = raw_init, .pr_usrreqs = &key_usrreqs } }; static void key_init0(void) { bzero((caddr_t)&V_key_cb, sizeof(V_key_cb)); key_init(); } struct domain keydomain = { .dom_family = PF_KEY, .dom_name = "key", .dom_init = key_init0, #ifdef VIMAGE .dom_destroy = key_destroy, #endif .dom_protosw = keysw, .dom_protoswNPROTOSW = &keysw[sizeof(keysw)/sizeof(keysw[0])] }; VNET_DOMAIN_SET(key); Index: head/sys/sys/mbuf.h =================================================================== --- head/sys/sys/mbuf.h (revision 276749) +++ head/sys/sys/mbuf.h (revision 276750) @@ -1,1197 +1,1198 @@ /*- * Copyright (c) 1982, 1986, 1988, 1993 * The Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)mbuf.h 8.5 (Berkeley) 2/19/95 * $FreeBSD$ */ #ifndef _SYS_MBUF_H_ #define _SYS_MBUF_H_ /* XXX: These includes suck. Sorry! */ #include #ifdef _KERNEL #include #include #ifdef WITNESS #include #endif #endif /* * Mbufs are of a single size, MSIZE (sys/param.h), which includes overhead. * An mbuf may add a single "mbuf cluster" of size MCLBYTES (also in * sys/param.h), which has no additional overhead and is used instead of the * internal data area; this is done when at least MINCLSIZE of data must be * stored. Additionally, it is possible to allocate a separate buffer * externally and attach it to the mbuf in a way similar to that of mbuf * clusters. * * NB: These calculation do not take actual compiler-induced alignment and * padding inside the complete struct mbuf into account. Appropriate * attention is required when changing members of struct mbuf. * * MLEN is data length in a normal mbuf. * MHLEN is data length in an mbuf with pktheader. * MINCLSIZE is a smallest amount of data that should be put into cluster. */ #define MLEN ((int)(MSIZE - sizeof(struct m_hdr))) #define MHLEN ((int)(MLEN - sizeof(struct pkthdr))) #define MINCLSIZE (MHLEN + 1) #ifdef _KERNEL /*- * Macro for type conversion: convert mbuf pointer to data pointer of correct * type: * * mtod(m, t) -- Convert mbuf pointer to data pointer of correct type. * mtodo(m, o) -- Same as above but with offset 'o' into data. */ #define mtod(m, t) ((t)((m)->m_data)) #define mtodo(m, o) ((void *)(((m)->m_data) + (o))) /* * Argument structure passed to UMA routines during mbuf and packet * allocations. */ struct mb_args { int flags; /* Flags for mbuf being allocated */ short type; /* Type of mbuf being allocated */ }; #endif /* _KERNEL */ /* * Header present at the beginning of every mbuf. * Size ILP32: 24 * LP64: 32 */ struct m_hdr { struct mbuf *mh_next; /* next buffer in chain */ struct mbuf *mh_nextpkt; /* next chain in queue/record */ caddr_t mh_data; /* location of data */ int32_t mh_len; /* amount of data in this mbuf */ uint32_t mh_type:8, /* type of data in this mbuf */ mh_flags:24; /* flags; see below */ #if !defined(__LP64__) uint32_t mh_pad; /* pad for 64bit alignment */ #endif }; /* * Packet tag structure (see below for details). */ struct m_tag { SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */ u_int16_t m_tag_id; /* Tag ID */ u_int16_t m_tag_len; /* Length of data */ u_int32_t m_tag_cookie; /* ABI/Module ID */ void (*m_tag_free)(struct m_tag *); }; /* * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set. 
* Size ILP32: 48 * LP64: 56 */ struct pkthdr { struct ifnet *rcvif; /* rcv interface */ SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */ int32_t len; /* total packet length */ /* Layer crossing persistent information. */ uint32_t flowid; /* packet's 4-tuple system */ uint64_t csum_flags; /* checksum and offload features */ uint16_t fibnum; /* this packet should use this fib */ uint8_t cosqos; /* class/quality of service */ uint8_t rsstype; /* hash type */ uint8_t l2hlen; /* layer 2 header length */ uint8_t l3hlen; /* layer 3 header length */ uint8_t l4hlen; /* layer 4 header length */ uint8_t l5hlen; /* layer 5 header length */ union { uint8_t eight[8]; uint16_t sixteen[4]; uint32_t thirtytwo[2]; uint64_t sixtyfour[1]; uintptr_t unintptr[1]; void *ptr; } PH_per; /* Layer specific non-persistent local storage for reassembly, etc. */ union { uint8_t eight[8]; uint16_t sixteen[4]; uint32_t thirtytwo[2]; uint64_t sixtyfour[1]; uintptr_t unintptr[1]; void *ptr; } PH_loc; }; #define ether_vtag PH_per.sixteen[0] #define PH_vt PH_per #define vt_nrecs sixteen[0] #define tso_segsz PH_per.sixteen[1] #define csum_phsum PH_per.sixteen[2] #define csum_data PH_per.thirtytwo[1] #define pkt_tcphdr PH_loc.ptr /* * Description of external storage mapped into mbuf; valid only if M_EXT is * set. * Size ILP32: 28 * LP64: 48 */ struct m_ext { volatile u_int *ext_cnt; /* pointer to ref count info */ caddr_t ext_buf; /* start of buffer */ uint32_t ext_size; /* size of buffer, for ext_free */ uint32_t ext_type:8, /* type of external storage */ ext_flags:24; /* external storage mbuf flags */ void (*ext_free) /* free routine if not the usual */ (struct mbuf *, void *, void *); void *ext_arg1; /* optional argument pointer */ void *ext_arg2; /* optional argument pointer */ }; /* * The core of the mbuf object along with some shortcut defines for practical * purposes. */ struct mbuf { struct m_hdr m_hdr; union { struct { struct pkthdr MH_pkthdr; /* M_PKTHDR set */ union { struct m_ext MH_ext; /* M_EXT set */ char MH_databuf[MHLEN]; } MH_dat; } MH; char M_databuf[MLEN]; /* !M_PKTHDR, !M_EXT */ } M_dat; }; #define m_next m_hdr.mh_next #define m_len m_hdr.mh_len #define m_data m_hdr.mh_data #define m_type m_hdr.mh_type #define m_flags m_hdr.mh_flags #define m_nextpkt m_hdr.mh_nextpkt #define m_pkthdr M_dat.MH.MH_pkthdr #define m_ext M_dat.MH.MH_dat.MH_ext #define m_pktdat M_dat.MH.MH_dat.MH_databuf #define m_dat M_dat.M_databuf /* * mbuf flags of global significance and layer crossing. * Those of only protocol/layer specific significance are to be mapped * to M_PROTO[1-12] and cleared at layer handoff boundaries. * NB: Limited to the lower 24 bits. 
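 * As an illustration (a sketch, not code from this file; "tag" is a
 * hypothetical value), a driver marking a received VLAN-tagged frame
 * would do:
 *
 *	m->m_pkthdr.ether_vtag = tag;
 *	m->m_flags |= M_VLANTAG;
 *
 * and the flag word can be pretty-printed with printf(9)'s %b:
 *
 *	printf("%b\n", m->m_flags, M_FLAG_PRINTF);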
*/ #define M_EXT 0x00000001 /* has associated external storage */ #define M_PKTHDR 0x00000002 /* start of record */ #define M_EOR 0x00000004 /* end of record */ #define M_RDONLY 0x00000008 /* associated data is marked read-only */ #define M_BCAST 0x00000010 /* send/received as link-level broadcast */ #define M_MCAST 0x00000020 /* send/received as link-level multicast */ #define M_PROMISC 0x00000040 /* packet was not for us */ #define M_VLANTAG 0x00000080 /* ether_vtag is valid */ #define M_FLOWID 0x00000100 /* deprecated: flowid is valid */ #define M_NOFREE 0x00000200 /* do not free mbuf, embedded in cluster */ #define M_PROTO1 0x00001000 /* protocol-specific */ #define M_PROTO2 0x00002000 /* protocol-specific */ #define M_PROTO3 0x00004000 /* protocol-specific */ #define M_PROTO4 0x00008000 /* protocol-specific */ #define M_PROTO5 0x00010000 /* protocol-specific */ #define M_PROTO6 0x00020000 /* protocol-specific */ #define M_PROTO7 0x00040000 /* protocol-specific */ #define M_PROTO8 0x00080000 /* protocol-specific */ #define M_PROTO9 0x00100000 /* protocol-specific */ #define M_PROTO10 0x00200000 /* protocol-specific */ #define M_PROTO11 0x00400000 /* protocol-specific */ #define M_PROTO12 0x00800000 /* protocol-specific */ /* * Flags to purge when crossing layers. */ #define M_PROTOFLAGS \ (M_PROTO1|M_PROTO2|M_PROTO3|M_PROTO4|M_PROTO5|M_PROTO6|M_PROTO7|M_PROTO8|\ M_PROTO9|M_PROTO10|M_PROTO11|M_PROTO12) /* * Flags preserved when copying m_pkthdr. */ #define M_COPYFLAGS \ (M_PKTHDR|M_EOR|M_RDONLY|M_BCAST|M_MCAST|M_PROMISC|M_VLANTAG|M_FLOWID| \ M_PROTOFLAGS) /* * Mbuf flag description for use with printf(9) %b identifier. */ #define M_FLAG_BITS \ "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY\5M_BCAST\6M_MCAST" \ "\7M_PROMISC\10M_VLANTAG\11M_FLOWID" #define M_FLAG_PROTOBITS \ "\15M_PROTO1\16M_PROTO2\17M_PROTO3\20M_PROTO4\21M_PROTO5" \ "\22M_PROTO6\23M_PROTO7\24M_PROTO8\25M_PROTO9\26M_PROTO10" \ "\27M_PROTO11\30M_PROTO12" #define M_FLAG_PRINTF (M_FLAG_BITS M_FLAG_PROTOBITS) /* * Network interface cards are able to hash protocol fields (such as IPv4 * addresses and TCP port numbers) classify packets into flows. These flows * can then be used to maintain ordering while delivering packets to the OS * via parallel input queues, as well as to provide a stateless affinity * model. NIC drivers can pass up the hash via m->m_pkthdr.flowid, and set * m_flag fields to indicate how the hash should be interpreted by the * network stack. * * Most NICs support RSS, which provides ordering and explicit affinity, and * use the hash m_flag bits to indicate what header fields were covered by * the hash. M_HASHTYPE_OPAQUE can be set by non-RSS cards or configurations * that provide an opaque flow identifier, allowing for ordering and * distribution without explicit affinity. 
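 * A receive path might therefore do, as a hedged sketch (rxd->rss_hash
 * is a hypothetical descriptor field):
 *
 *	m->m_pkthdr.flowid = rxd->rss_hash;
 *	M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4);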
*/ /* Microsoft RSS standard hash types */ #define M_HASHTYPE_NONE 0 #define M_HASHTYPE_RSS_IPV4 1 /* IPv4 2-tuple */ #define M_HASHTYPE_RSS_TCP_IPV4 2 /* TCPv4 4-tuple */ #define M_HASHTYPE_RSS_IPV6 3 /* IPv6 2-tuple */ #define M_HASHTYPE_RSS_TCP_IPV6 4 /* TCPv6 4-tuple */ #define M_HASHTYPE_RSS_IPV6_EX 5 /* IPv6 2-tuple + ext hdrs */ #define M_HASHTYPE_RSS_TCP_IPV6_EX 6 /* TCPv6 4-tiple + ext hdrs */ /* Non-standard RSS hash types */ #define M_HASHTYPE_RSS_UDP_IPV4 7 /* IPv4 UDP 4-tuple */ #define M_HASHTYPE_RSS_UDP_IPV4_EX 8 /* IPv4 UDP 4-tuple + ext hdrs */ #define M_HASHTYPE_RSS_UDP_IPV6 9 /* IPv6 UDP 4-tuple */ #define M_HASHTYPE_RSS_UDP_IPV6_EX 10 /* IPv6 UDP 4-tuple + ext hdrs */ #define M_HASHTYPE_OPAQUE 255 /* ordering, not affinity */ #define M_HASHTYPE_CLEAR(m) ((m)->m_pkthdr.rsstype = 0) #define M_HASHTYPE_GET(m) ((m)->m_pkthdr.rsstype) #define M_HASHTYPE_SET(m, v) ((m)->m_pkthdr.rsstype = (v)) #define M_HASHTYPE_TEST(m, v) (M_HASHTYPE_GET(m) == (v)) /* * COS/QOS class and quality of service tags. * It uses DSCP code points as base. */ #define QOS_DSCP_CS0 0x00 #define QOS_DSCP_DEF QOS_DSCP_CS0 #define QOS_DSCP_CS1 0x20 #define QOS_DSCP_AF11 0x28 #define QOS_DSCP_AF12 0x30 #define QOS_DSCP_AF13 0x38 #define QOS_DSCP_CS2 0x40 #define QOS_DSCP_AF21 0x48 #define QOS_DSCP_AF22 0x50 #define QOS_DSCP_AF23 0x58 #define QOS_DSCP_CS3 0x60 #define QOS_DSCP_AF31 0x68 #define QOS_DSCP_AF32 0x70 #define QOS_DSCP_AF33 0x78 #define QOS_DSCP_CS4 0x80 #define QOS_DSCP_AF41 0x88 #define QOS_DSCP_AF42 0x90 #define QOS_DSCP_AF43 0x98 #define QOS_DSCP_CS5 0xa0 #define QOS_DSCP_EF 0xb8 #define QOS_DSCP_CS6 0xc0 #define QOS_DSCP_CS7 0xe0 /* * External mbuf storage buffer types. */ #define EXT_CLUSTER 1 /* mbuf cluster */ #define EXT_SFBUF 2 /* sendfile(2)'s sf_bufs */ #define EXT_JUMBOP 3 /* jumbo cluster 4096 bytes */ #define EXT_JUMBO9 4 /* jumbo cluster 9216 bytes */ #define EXT_JUMBO16 5 /* jumbo cluster 16184 bytes */ #define EXT_PACKET 6 /* mbuf+cluster from packet zone */ #define EXT_MBUF 7 /* external mbuf reference (M_IOVEC) */ #define EXT_VENDOR1 224 /* for vendor-internal use */ #define EXT_VENDOR2 225 /* for vendor-internal use */ #define EXT_VENDOR3 226 /* for vendor-internal use */ #define EXT_VENDOR4 227 /* for vendor-internal use */ #define EXT_EXP1 244 /* for experimental use */ #define EXT_EXP2 245 /* for experimental use */ #define EXT_EXP3 246 /* for experimental use */ #define EXT_EXP4 247 /* for experimental use */ #define EXT_NET_DRV 252 /* custom ext_buf provided by net driver(s) */ #define EXT_MOD_TYPE 253 /* custom module's ext_buf type */ #define EXT_DISPOSABLE 254 /* can throw this buffer away w/page flipping */ #define EXT_EXTREF 255 /* has externally maintained ext_cnt ptr */ /* * Flags for external mbuf buffer types. * NB: limited to the lower 24 bits. 
*/ #define EXT_FLAG_EMBREF 0x000001 /* embedded ext_cnt, notyet */ #define EXT_FLAG_EXTREF 0x000002 /* external ext_cnt, notyet */ #define EXT_FLAG_NOFREE 0x000010 /* don't free mbuf to pool, notyet */ #define EXT_FLAG_VENDOR1 0x010000 /* for vendor-internal use */ #define EXT_FLAG_VENDOR2 0x020000 /* for vendor-internal use */ #define EXT_FLAG_VENDOR3 0x040000 /* for vendor-internal use */ #define EXT_FLAG_VENDOR4 0x080000 /* for vendor-internal use */ #define EXT_FLAG_EXP1 0x100000 /* for experimental use */ #define EXT_FLAG_EXP2 0x200000 /* for experimental use */ #define EXT_FLAG_EXP3 0x400000 /* for experimental use */ #define EXT_FLAG_EXP4 0x800000 /* for experimental use */ /* * EXT flag description for use with printf(9) %b identifier. */ #define EXT_FLAG_BITS \ "\20\1EXT_FLAG_EMBREF\2EXT_FLAG_EXTREF\5EXT_FLAG_NOFREE" \ "\21EXT_FLAG_VENDOR1\22EXT_FLAG_VENDOR2\23EXT_FLAG_VENDOR3" \ "\24EXT_FLAG_VENDOR4\25EXT_FLAG_EXP1\26EXT_FLAG_EXP2\27EXT_FLAG_EXP3" \ "\30EXT_FLAG_EXP4" /* * External reference/free functions. */ void sf_ext_ref(void *, void *); void sf_ext_free(void *, void *); /* * Flags indicating checksum, segmentation and other offload work to be * done, or already done, by hardware or lower layers. It is split into * separate inbound and outbound flags. * * Outbound flags that are set by upper protocol layers requesting lower * layers, or ideally the hardware, to perform these offloading tasks. * For outbound packets this field and its flags can be directly tested * against ifnet if_hwassist. */ #define CSUM_IP 0x00000001 /* IP header checksum offload */ #define CSUM_IP_UDP 0x00000002 /* UDP checksum offload */ #define CSUM_IP_TCP 0x00000004 /* TCP checksum offload */ #define CSUM_IP_SCTP 0x00000008 /* SCTP checksum offload */ #define CSUM_IP_TSO 0x00000010 /* TCP segmentation offload */ #define CSUM_IP_ISCSI 0x00000020 /* iSCSI checksum offload */ #define CSUM_IP6_UDP 0x00000200 /* UDP checksum offload */ #define CSUM_IP6_TCP 0x00000400 /* TCP checksum offload */ #define CSUM_IP6_SCTP 0x00000800 /* SCTP checksum offload */ #define CSUM_IP6_TSO 0x00001000 /* TCP segmentation offload */ #define CSUM_IP6_ISCSI 0x00002000 /* iSCSI checksum offload */ /* Inbound checksum support where the checksum was verified by hardware. */ #define CSUM_L3_CALC 0x01000000 /* calculated layer 3 csum */ #define CSUM_L3_VALID 0x02000000 /* checksum is correct */ #define CSUM_L4_CALC 0x04000000 /* calculated layer 4 csum */ #define CSUM_L4_VALID 0x08000000 /* checksum is correct */ #define CSUM_L5_CALC 0x10000000 /* calculated layer 5 csum */ #define CSUM_L5_VALID 0x20000000 /* checksum is correct */ #define CSUM_COALESED 0x40000000 /* contains merged segments */ /* * CSUM flag description for use with printf(9) %b identifier. */ #define CSUM_BITS \ "\20\1CSUM_IP\2CSUM_IP_UDP\3CSUM_IP_TCP\4CSUM_IP_SCTP\5CSUM_IP_TSO" \ "\6CSUM_IP_ISCSI" \ "\12CSUM_IP6_UDP\13CSUM_IP6_TCP\14CSUM_IP6_SCTP\15CSUM_IP6_TSO" \ "\16CSUM_IP6_ISCSI" \ "\31CSUM_L3_CALC\32CSUM_L3_VALID\33CSUM_L4_CALC\34CSUM_L4_VALID" \ "\35CSUM_L5_CALC\36CSUM_L5_VALID\37CSUM_COALESED" /* CSUM flags compatibility mappings. 
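 * Consumers generally test the compatibility names defined below;
 * e.g. TCP input handles a hardware-verified receive checksum roughly
 * like this (a sketch only; th is a hypothetical TCP header pointer):
 *
 *	if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
 *		if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
 *			th->th_sum = m->m_pkthdr.csum_data;
 *		else
 *			...fold csum_data into a pseudo-header sum...
 *	}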
*/ #define CSUM_IP_CHECKED CSUM_L3_CALC #define CSUM_IP_VALID CSUM_L3_VALID #define CSUM_DATA_VALID CSUM_L4_VALID #define CSUM_PSEUDO_HDR CSUM_L4_CALC #define CSUM_SCTP_VALID CSUM_L4_VALID #define CSUM_DELAY_DATA (CSUM_TCP|CSUM_UDP) #define CSUM_DELAY_IP CSUM_IP /* Only v4, no v6 IP hdr csum */ #define CSUM_DELAY_DATA_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6) #define CSUM_DATA_VALID_IPV6 CSUM_DATA_VALID #define CSUM_TCP CSUM_IP_TCP #define CSUM_UDP CSUM_IP_UDP #define CSUM_SCTP CSUM_IP_SCTP #define CSUM_TSO (CSUM_IP_TSO|CSUM_IP6_TSO) #define CSUM_UDP_IPV6 CSUM_IP6_UDP #define CSUM_TCP_IPV6 CSUM_IP6_TCP #define CSUM_SCTP_IPV6 CSUM_IP6_SCTP /* * mbuf types describing the content of the mbuf (including external storage). */ #define MT_NOTMBUF 0 /* USED INTERNALLY ONLY! Object is not mbuf */ #define MT_DATA 1 /* dynamic (data) allocation */ #define MT_HEADER MT_DATA /* packet header, use M_PKTHDR instead */ #define MT_VENDOR1 4 /* for vendor-internal use */ #define MT_VENDOR2 5 /* for vendor-internal use */ #define MT_VENDOR3 6 /* for vendor-internal use */ #define MT_VENDOR4 7 /* for vendor-internal use */ #define MT_SONAME 8 /* socket name */ #define MT_EXP1 9 /* for experimental use */ #define MT_EXP2 10 /* for experimental use */ #define MT_EXP3 11 /* for experimental use */ #define MT_EXP4 12 /* for experimental use */ #define MT_CONTROL 14 /* extra-data protocol message */ #define MT_OOBDATA 15 /* expedited data */ #define MT_NTYPES 16 /* number of mbuf types for mbtypes[] */ #define MT_NOINIT 255 /* Not a type but a flag to allocate a non-initialized mbuf */ /* * String names of mbuf-related UMA(9) and malloc(9) types. Exposed to * !_KERNEL so that monitoring tools can look up the zones with * libmemstat(3). */ #define MBUF_MEM_NAME "mbuf" #define MBUF_CLUSTER_MEM_NAME "mbuf_cluster" #define MBUF_PACKET_MEM_NAME "mbuf_packet" #define MBUF_JUMBOP_MEM_NAME "mbuf_jumbo_page" #define MBUF_JUMBO9_MEM_NAME "mbuf_jumbo_9k" #define MBUF_JUMBO16_MEM_NAME "mbuf_jumbo_16k" #define MBUF_TAG_MEM_NAME "mbuf_tag" #define MBUF_EXTREFCNT_MEM_NAME "mbuf_ext_refcnt" #ifdef _KERNEL #ifdef WITNESS #define MBUF_CHECKSLEEP(how) do { \ if (how == M_WAITOK) \ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, \ "Sleeping in \"%s\"", __func__); \ } while (0) #else #define MBUF_CHECKSLEEP(how) #endif /* * Network buffer allocation API * * The rest of it is defined in kern/kern_mbuf.c */ extern uma_zone_t zone_mbuf; extern uma_zone_t zone_clust; extern uma_zone_t zone_pack; extern uma_zone_t zone_jumbop; extern uma_zone_t zone_jumbo9; extern uma_zone_t zone_jumbo16; extern uma_zone_t zone_ext_refcnt; void mb_free_ext(struct mbuf *); int m_pkthdr_init(struct mbuf *, int); static __inline int m_gettype(int size) { int type; switch (size) { case MSIZE: type = EXT_MBUF; break; case MCLBYTES: type = EXT_CLUSTER; break; #if MJUMPAGESIZE != MCLBYTES case MJUMPAGESIZE: type = EXT_JUMBOP; break; #endif case MJUM9BYTES: type = EXT_JUMBO9; break; case MJUM16BYTES: type = EXT_JUMBO16; break; default: panic("%s: invalid cluster size %d", __func__, size); } return (type); } /* * Associated an external reference counted buffer with an mbuf. 
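 * A hedged usage sketch (drv_buf, DRV_BUFSIZE, drv_refcnt, drv_free
 * and sc are hypothetical driver-owned objects):
 *
 *	m_extaddref(m, drv_buf, DRV_BUFSIZE, &drv_refcnt,
 *	    drv_free, sc, NULL);
 *
 * where drv_free(struct mbuf *, void *, void *) runs when the last
 * reference is dropped.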
*/ static __inline void m_extaddref(struct mbuf *m, caddr_t buf, u_int size, u_int *ref_cnt, void (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2) { KASSERT(ref_cnt != NULL, ("%s: ref_cnt not provided", __func__)); atomic_add_int(ref_cnt, 1); m->m_flags |= M_EXT; m->m_ext.ext_buf = buf; m->m_ext.ext_cnt = ref_cnt; m->m_data = m->m_ext.ext_buf; m->m_ext.ext_size = size; m->m_ext.ext_free = freef; m->m_ext.ext_arg1 = arg1; m->m_ext.ext_arg2 = arg2; m->m_ext.ext_type = EXT_EXTREF; } static __inline uma_zone_t m_getzone(int size) { uma_zone_t zone; switch (size) { case MCLBYTES: zone = zone_clust; break; #if MJUMPAGESIZE != MCLBYTES case MJUMPAGESIZE: zone = zone_jumbop; break; #endif case MJUM9BYTES: zone = zone_jumbo9; break; case MJUM16BYTES: zone = zone_jumbo16; break; default: panic("%s: invalid cluster size %d", __func__, size); } return (zone); } /* * Initialize an mbuf with linear storage. * * Inline because the consumer text overhead will be roughly the same to * initialize or call a function with this many parameters and M_PKTHDR * should go away with constant propagation for !MGETHDR. */ static __inline int m_init(struct mbuf *m, uma_zone_t zone, int size, int how, short type, int flags) { int error; m->m_next = NULL; m->m_nextpkt = NULL; m->m_data = m->m_dat; m->m_len = 0; m->m_flags = flags; m->m_type = type; if (flags & M_PKTHDR) { if ((error = m_pkthdr_init(m, how)) != 0) return (error); } return (0); } static __inline struct mbuf * m_get(int how, short type) { struct mb_args args; args.flags = 0; args.type = type; return (uma_zalloc_arg(zone_mbuf, &args, how)); } /* * XXX This should be deprecated, very little use. */ static __inline struct mbuf * m_getclr(int how, short type) { struct mbuf *m; struct mb_args args; args.flags = 0; args.type = type; m = uma_zalloc_arg(zone_mbuf, &args, how); if (m != NULL) bzero(m->m_data, MLEN); return (m); } static __inline struct mbuf * m_gethdr(int how, short type) { struct mb_args args; args.flags = M_PKTHDR; args.type = type; return (uma_zalloc_arg(zone_mbuf, &args, how)); } static __inline struct mbuf * m_getcl(int how, short type, int flags) { struct mb_args args; args.flags = flags; args.type = type; return (uma_zalloc_arg(zone_pack, &args, how)); } -static __inline void +static __inline int m_clget(struct mbuf *m, int how) { if (m->m_flags & M_EXT) printf("%s: %p mbuf already has external storage\n", __func__, m); m->m_ext.ext_buf = (char *)NULL; uma_zalloc_arg(zone_clust, m, how); /* * On a cluster allocation failure, drain the packet zone and retry, * we might be able to loosen a few clusters up on the drain. */ if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) { zone_drain(zone_pack); uma_zalloc_arg(zone_clust, m, how); } + return (m->m_flags & M_EXT); } /* * m_cljget() is different from m_clget() as it can allocate clusters without * attaching them to an mbuf. In that case the return value is the pointer * to the cluster of the requested size. If an mbuf was specified, it gets * the cluster attached to it and the return value can be safely ignored. * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES. 
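 * Note that m_clget() (and its MCLGET() wrapper) now returns non-zero
 * exactly when a cluster was attached (m_flags & M_EXT), which is what
 * enables the caller idiom used in the key.c/keysock.c hunks above:
 *
 *	MGETHDR(m, M_NOWAIT, MT_DATA);
 *	if (m != NULL && !(MCLGET(m, M_NOWAIT))) {
 *		m_free(m);	// no cluster; give the mbuf back
 *		m = NULL;
 *	}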
*/ static __inline void * m_cljget(struct mbuf *m, int how, int size) { uma_zone_t zone; if (m && m->m_flags & M_EXT) printf("%s: %p mbuf already has external storage\n", __func__, m); if (m != NULL) m->m_ext.ext_buf = NULL; zone = m_getzone(size); return (uma_zalloc_arg(zone, m, how)); } static __inline void m_cljset(struct mbuf *m, void *cl, int type) { uma_zone_t zone; int size; switch (type) { case EXT_CLUSTER: size = MCLBYTES; zone = zone_clust; break; #if MJUMPAGESIZE != MCLBYTES case EXT_JUMBOP: size = MJUMPAGESIZE; zone = zone_jumbop; break; #endif case EXT_JUMBO9: size = MJUM9BYTES; zone = zone_jumbo9; break; case EXT_JUMBO16: size = MJUM16BYTES; zone = zone_jumbo16; break; default: panic("%s: unknown cluster type %d", __func__, type); break; } m->m_data = m->m_ext.ext_buf = cl; m->m_ext.ext_free = m->m_ext.ext_arg1 = m->m_ext.ext_arg2 = NULL; m->m_ext.ext_size = size; m->m_ext.ext_type = type; m->m_ext.ext_flags = 0; m->m_ext.ext_cnt = uma_find_refcnt(zone, cl); m->m_flags |= M_EXT; } static __inline void m_chtype(struct mbuf *m, short new_type) { m->m_type = new_type; } static __inline void m_clrprotoflags(struct mbuf *m) { while (m) { m->m_flags &= ~M_PROTOFLAGS; m = m->m_next; } } static __inline struct mbuf * m_last(struct mbuf *m) { while (m->m_next) m = m->m_next; return (m); } /* * mbuf, cluster, and external object allocation macros (for compatibility * purposes). */ #define M_MOVE_PKTHDR(to, from) m_move_pkthdr((to), (from)) #define MGET(m, how, type) ((m) = m_get((how), (type))) #define MGETHDR(m, how, type) ((m) = m_gethdr((how), (type))) #define MCLGET(m, how) m_clget((m), (how)) #define MEXTADD(m, buf, size, free, arg1, arg2, flags, type) \ (void )m_extadd((m), (caddr_t)(buf), (size), (free), (arg1), (arg2),\ (flags), (type), M_NOWAIT) #define m_getm(m, len, how, type) \ m_getm2((m), (len), (how), (type), M_PKTHDR) /* * Evaluate TRUE if it's safe to write to the mbuf m's data region (this can * be both the local data payload, or an external buffer area, depending on * whether M_EXT is set). */ #define M_WRITABLE(m) (!((m)->m_flags & M_RDONLY) && \ (!(((m)->m_flags & M_EXT)) || \ (*((m)->m_ext.ext_cnt) == 1)) ) \ /* Check if the supplied mbuf has a packet header, or else panic. */ #define M_ASSERTPKTHDR(m) \ KASSERT((m) != NULL && (m)->m_flags & M_PKTHDR, \ ("%s: no mbuf packet header!", __func__)) /* * Ensure that the supplied mbuf is a valid, non-free mbuf. * * XXX: Broken at the moment. Need some UMA magic to make it work again. */ #define M_ASSERTVALID(m) \ KASSERT((((struct mbuf *)m)->m_flags & 0) == 0, \ ("%s: attempted use of a free mbuf!", __func__)) /* * Return the address of the start of the buffer associated with an mbuf, * handling external storage, packet-header mbufs, and regular data mbufs. */ #define M_START(m) \ (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_buf : \ ((m)->m_flags & M_PKTHDR) ? &(m)->m_pktdat[0] : \ &(m)->m_dat[0]) /* * Return the size of the buffer associated with an mbuf, handling external * storage, packet-header mbufs, and regular data mbufs. */ #define M_SIZE(m) \ (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_size : \ ((m)->m_flags & M_PKTHDR) ? MHLEN : \ MLEN) /* * Set the m_data pointer of a newly allocated mbuf to place an object of the * specified size at the end of the mbuf, longword aligned. * * NB: Historically, we had M_ALIGN(), MH_ALIGN(), and MEXT_ALIGN() as * separate macros, each asserting that it was called at the proper moment. * This required callers to themselves test the storage type and call the * right one. 
Rather than require callers to be aware of those layout * decisions, we centralize here. */ static __inline void m_align(struct mbuf *m, int len) { #ifdef INVARIANTS const char *msg = "%s: not a virgin mbuf"; #endif int adjust; KASSERT(m->m_data == M_START(m), (msg, __func__)); if (m->m_flags & M_EXT) { adjust = m->m_ext.ext_size - len; } else if (m->m_flags & M_PKTHDR) { adjust = MHLEN - len; } else { adjust = MLEN - len; } m->m_data += adjust &~ (sizeof(long)-1); } #define M_ALIGN(m, len) m_align(m, len) #define MH_ALIGN(m, len) m_align(m, len) #define MEXT_ALIGN(m, len) m_align(m, len) /* * Compute the amount of space available before the current start of data in * an mbuf. * * The M_WRITABLE() is a temporary, conservative safety measure: the burden * of checking writability of the mbuf data area rests solely with the caller. * * NB: In previous versions, M_LEADINGSPACE() would only check M_WRITABLE() * for mbufs with external storage. We now allow mbuf-embedded data to be * read-only as well. */ #define M_LEADINGSPACE(m) \ (M_WRITABLE(m) ? ((m)->m_data - M_START(m)) : 0) /* * Compute the amount of space available after the end of data in an mbuf. * * The M_WRITABLE() is a temporary, conservative safety measure: the burden * of checking writability of the mbuf data area rests solely with the caller. * * NB: In previous versions, M_TRAILINGSPACE() would only check M_WRITABLE() * for mbufs with external storage. We now allow mbuf-embedded data to be * read-only as well. */ #define M_TRAILINGSPACE(m) \ (M_WRITABLE(m) ? \ ((M_START(m) + M_SIZE(m)) - ((m)->m_data + (m)->m_len)) : 0) /* * Arrange to prepend space of size plen to mbuf m. If a new mbuf must be * allocated, how specifies whether to wait. If the allocation fails, the * original mbuf chain is freed and m is set to NULL. */ #define M_PREPEND(m, plen, how) do { \ struct mbuf **_mmp = &(m); \ struct mbuf *_mm = *_mmp; \ int _mplen = (plen); \ int __mhow = (how); \ \ MBUF_CHECKSLEEP(how); \ if (M_LEADINGSPACE(_mm) >= _mplen) { \ _mm->m_data -= _mplen; \ _mm->m_len += _mplen; \ } else \ _mm = m_prepend(_mm, _mplen, __mhow); \ if (_mm != NULL && _mm->m_flags & M_PKTHDR) \ _mm->m_pkthdr.len += _mplen; \ *_mmp = _mm; \ } while (0) /* * Change mbuf to new type. This is a relatively expensive operation and * should be avoided. */ #define MCHTYPE(m, t) m_chtype((m), (t)) /* Length to m_copy to copy all. */ #define M_COPYALL 1000000000 /* Compatibility with 4.3. 
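 * m_copy() duplicates (part of) a chain without sleeping, and
 * M_COPYALL selects everything from the given offset on; for example,
 * keysock.c above fans one message out to each listener with:
 *
 *	if ((n = m_copy(m, 0, (int)M_COPYALL)) == NULL)
 *		return ENOBUFS;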
*/ #define m_copy(m, o, l) m_copym((m), (o), (l), M_NOWAIT) extern int max_datalen; /* MHLEN - max_hdr */ extern int max_hdr; /* Largest link + protocol header */ extern int max_linkhdr; /* Largest link-level header */ extern int max_protohdr; /* Largest protocol header */ extern int nmbclusters; /* Maximum number of clusters */ struct uio; void m_adj(struct mbuf *, int); int m_apply(struct mbuf *, int, int, int (*)(void *, void *, u_int), void *); int m_append(struct mbuf *, int, c_caddr_t); void m_cat(struct mbuf *, struct mbuf *); void m_catpkt(struct mbuf *, struct mbuf *); int m_extadd(struct mbuf *, caddr_t, u_int, void (*)(struct mbuf *, void *, void *), void *, void *, int, int, int); struct mbuf *m_collapse(struct mbuf *, int, int); void m_copyback(struct mbuf *, int, int, c_caddr_t); void m_copydata(const struct mbuf *, int, int, caddr_t); struct mbuf *m_copym(struct mbuf *, int, int, int); struct mbuf *m_copymdata(struct mbuf *, struct mbuf *, int, int, int, int); struct mbuf *m_copypacket(struct mbuf *, int); void m_copy_pkthdr(struct mbuf *, struct mbuf *); struct mbuf *m_copyup(struct mbuf *, int, int); struct mbuf *m_defrag(struct mbuf *, int); void m_demote(struct mbuf *, int, int); struct mbuf *m_devget(char *, int, int, struct ifnet *, void (*)(char *, caddr_t, u_int)); struct mbuf *m_dup(struct mbuf *, int); int m_dup_pkthdr(struct mbuf *, struct mbuf *, int); u_int m_fixhdr(struct mbuf *); struct mbuf *m_fragment(struct mbuf *, int, int); void m_freem(struct mbuf *); struct mbuf *m_get2(int, int, short, int); struct mbuf *m_getjcl(int, short, int, int); struct mbuf *m_getm2(struct mbuf *, int, int, short, int); struct mbuf *m_getptr(struct mbuf *, int, int *); u_int m_length(struct mbuf *, struct mbuf **); int m_mbuftouio(struct uio *, struct mbuf *, int); void m_move_pkthdr(struct mbuf *, struct mbuf *); struct mbuf *m_prepend(struct mbuf *, int, int); void m_print(const struct mbuf *, int); struct mbuf *m_pulldown(struct mbuf *, int, int, int *); struct mbuf *m_pullup(struct mbuf *, int); int m_sanity(struct mbuf *, int); struct mbuf *m_split(struct mbuf *, int, int); struct mbuf *m_uiotombuf(struct uio *, int, int, int, int); struct mbuf *m_unshare(struct mbuf *, int); /*- * Network packets may have annotations attached by affixing a list of * "packet tags" to the pkthdr structure. Packet tags are dynamically * allocated semi-opaque data structures that have a fixed header * (struct m_tag) that specifies the size of the memory block and a * pair that identifies it. The cookie is a 32-bit unique * unsigned value used to identify a module or ABI. By convention this value * is chosen as the date+time that the module is created, expressed as the * number of seconds since the epoch (e.g., using date -u +'%s'). The type * value is an ABI/module-specific value that identifies a particular * annotation and is private to the module. For compatibility with systems * like OpenBSD that define packet tags w/o an ABI/module cookie, the value * PACKET_ABI_COMPAT is used to implement m_tag_get and m_tag_find * compatibility shim functions and several tag types are defined below. * Users that do not require compatibility should use a private cookie value * so that packet tag-related definitions can be maintained privately. * * Note that the packet tag returned by m_tag_alloc has the default memory * alignment implemented by malloc. 

/*-
 * Network packets may have annotations attached by affixing a list of
 * "packet tags" to the pkthdr structure.  Packet tags are dynamically
 * allocated semi-opaque data structures that have a fixed header
 * (struct m_tag) that specifies the size of the memory block and a
 * <cookie,type> pair that identifies it.  The cookie is a 32-bit unique
 * unsigned value used to identify a module or ABI.  By convention this value
 * is chosen as the date+time that the module is created, expressed as the
 * number of seconds since the epoch (e.g., using date -u +'%s').  The type
 * value is an ABI/module-specific value that identifies a particular
 * annotation and is private to the module.  For compatibility with systems
 * like OpenBSD that define packet tags w/o an ABI/module cookie, the value
 * PACKET_ABI_COMPAT is used to implement m_tag_get and m_tag_find
 * compatibility shim functions and several tag types are defined below.
 * Users that do not require compatibility should use a private cookie value
 * so that packet tag-related definitions can be maintained privately.
 *
 * Note that the packet tag returned by m_tag_alloc has the default memory
 * alignment implemented by malloc.  To reference private data one can use a
 * construct like:
 *
 *	struct m_tag *mtag = m_tag_alloc(...);
 *	struct foo *p = (struct foo *)(mtag+1);
 *
 * if the alignment of struct m_tag is sufficient for referencing members of
 * struct foo.  Otherwise it is necessary to embed struct m_tag within the
 * private data structure to ensure proper alignment; e.g.,
 *
 *	struct foo {
 *		struct m_tag	tag;
 *		...
 *	};
 *	struct foo *p = (struct foo *)m_tag_alloc(...);
 *	struct m_tag *mtag = &p->tag;
 */

/*
 * Persistent tags stay with an mbuf until the mbuf is reclaimed.  Otherwise
 * tags are expected to ``vanish'' when they pass through a network
 * interface.  For most interfaces this happens normally as the tags are
 * reclaimed when the mbuf is free'd.  However in some special cases
 * reclaiming must be done manually.  An example is packets that pass through
 * the loopback interface.  Also, one must be careful to do this when
 * ``turning around'' packets (e.g., icmp_reflect).
 *
 * To mark a tag persistent bit-or this flag in when defining the tag id.
 * The tag will then be treated as described above.
 */
#define	MTAG_PERSISTENT				0x800

#define	PACKET_TAG_NONE				0  /* Nadda */

/* Packet tags for use with PACKET_ABI_COMPAT. */
#define	PACKET_TAG_IPSEC_IN_DONE		1  /* IPsec applied, in */
#define	PACKET_TAG_IPSEC_OUT_DONE		2  /* IPsec applied, out */
#define	PACKET_TAG_IPSEC_IN_CRYPTO_DONE		3  /* NIC IPsec crypto done */
#define	PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED	4  /* NIC IPsec crypto req'ed */
#define	PACKET_TAG_IPSEC_IN_COULD_DO_CRYPTO	5  /* NIC notifies IPsec */
#define	PACKET_TAG_IPSEC_PENDING_TDB		6  /* Reminder to do IPsec */
#define	PACKET_TAG_BRIDGE			7  /* Bridge processing done */
#define	PACKET_TAG_GIF				8  /* GIF processing done */
#define	PACKET_TAG_GRE				9  /* GRE processing done */
#define	PACKET_TAG_IN_PACKET_CHECKSUM		10 /* NIC checksumming done */
#define	PACKET_TAG_ENCAP			11 /* Encap. processing */
#define	PACKET_TAG_IPSEC_SOCKET			12 /* IPSEC socket ref */
#define	PACKET_TAG_IPSEC_HISTORY		13 /* IPSEC history */
#define	PACKET_TAG_IPV6_INPUT			14 /* IPV6 input processing */
#define	PACKET_TAG_DUMMYNET			15 /* dummynet info */
#define	PACKET_TAG_DIVERT			17 /* divert info */
#define	PACKET_TAG_IPFORWARD			18 /* ipforward info */
#define	PACKET_TAG_MACLABEL	(19 | MTAG_PERSISTENT) /* MAC label */
#define	PACKET_TAG_PF		(21 | MTAG_PERSISTENT) /* PF/ALTQ information */
#define	PACKET_TAG_RTSOCKFAM			25 /* rtsock sa family */
#define	PACKET_TAG_IPOPTIONS			27 /* Saved IP options */
#define	PACKET_TAG_CARP				28 /* CARP info */
#define	PACKET_TAG_IPSEC_NAT_T_PORTS		29 /* two uint16_t */
#define	PACKET_TAG_ND_OUTGOING			30 /* ND outgoing */

/* Specific cookies and tags. */

/* Packet tag routines. */
struct m_tag	*m_tag_alloc(u_int32_t, int, int, int);
void		 m_tag_delete(struct mbuf *, struct m_tag *);
void		 m_tag_delete_chain(struct mbuf *, struct m_tag *);
void		 m_tag_free_default(struct m_tag *);
struct m_tag	*m_tag_locate(struct mbuf *, u_int32_t, int, struct m_tag *);
struct m_tag	*m_tag_copy(struct m_tag *, int);
int		 m_tag_copy_chain(struct mbuf *, struct mbuf *, int);
void		 m_tag_delete_nonpersistent(struct mbuf *);

/*
 * Initialize the list of tags associated with an mbuf.
 */
static __inline void
m_tag_init(struct mbuf *m)
{

	SLIST_INIT(&m->m_pkthdr.tags);
}
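
/*
 * Example: allocating a tag, storing private data in the space following
 * the fixed header, attaching the tag, and finding it again later.  The
 * MTAG_EXAMPLE cookie, the type value 1, and the value variable are all
 * hypothetical:
 *
 *	struct m_tag *mtag;
 *
 *	mtag = m_tag_alloc(MTAG_EXAMPLE, 1, sizeof(uint32_t), M_NOWAIT);
 *	if (mtag == NULL)
 *		return (ENOMEM);
 *	*(uint32_t *)(mtag + 1) = value;
 *	m_tag_prepend(m, mtag);
 *	...
 *	mtag = m_tag_locate(m, MTAG_EXAMPLE, 1, NULL);
 *	if (mtag != NULL)
 *		value = *(uint32_t *)(mtag + 1);
 */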

/*
 * Set up the contents of a tag.  Note that this does not fill in the free
 * method; the caller is expected to do that.
 *
 * XXX probably should be called m_tag_init, but that was already taken.
 */
static __inline void
m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
{

	t->m_tag_id = type;
	t->m_tag_len = len;
	t->m_tag_cookie = cookie;
}

/*
 * Reclaim resources associated with a tag.
 */
static __inline void
m_tag_free(struct m_tag *t)
{

	(*t->m_tag_free)(t);
}

/*
 * Return the first tag associated with an mbuf.
 */
static __inline struct m_tag *
m_tag_first(struct mbuf *m)
{

	return (SLIST_FIRST(&m->m_pkthdr.tags));
}

/*
 * Return the next tag in the list of tags associated with an mbuf.
 */
static __inline struct m_tag *
m_tag_next(struct mbuf *m, struct m_tag *t)
{

	return (SLIST_NEXT(t, m_tag_link));
}

/*
 * Prepend a tag to the list of tags associated with an mbuf.
 */
static __inline void
m_tag_prepend(struct mbuf *m, struct m_tag *t)
{

	SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
}

/*
 * Unlink a tag from the list of tags associated with an mbuf.
 */
static __inline void
m_tag_unlink(struct mbuf *m, struct m_tag *t)
{

	SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
}

/* These are for OpenBSD compatibility. */
#define	MTAG_ABI_COMPAT		0		/* compatibility ABI */

static __inline struct m_tag *
m_tag_get(int type, int length, int wait)
{

	return (m_tag_alloc(MTAG_ABI_COMPAT, type, length, wait));
}

static __inline struct m_tag *
m_tag_find(struct mbuf *m, int type, struct m_tag *start)
{

	return (SLIST_EMPTY(&m->m_pkthdr.tags) ? (struct m_tag *)NULL :
	    m_tag_locate(m, MTAG_ABI_COMPAT, type, start));
}

static __inline struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n = m->m_next;

	if ((m->m_flags & (M_PKTHDR|M_NOFREE)) == (M_PKTHDR|M_NOFREE))
		m_tag_delete_chain(m, NULL);
	if (m->m_flags & M_EXT)
		mb_free_ext(m);
	else if ((m->m_flags & M_NOFREE) == 0)
		uma_zfree(zone_mbuf, m);
	return (n);
}

static int inline
rt_m_getfib(struct mbuf *m)
{

	KASSERT(m->m_flags & M_PKTHDR, ("Attempt to get FIB from non header mbuf."));
	return (m->m_pkthdr.fibnum);
}

#define	M_GETFIB(_m)	rt_m_getfib(_m)

#define	M_SETFIB(_m, _fib) do {						\
	KASSERT((_m)->m_flags & M_PKTHDR, ("Attempt to set FIB on non header mbuf.")); \
	((_m)->m_pkthdr.fibnum) = (_fib);				\
} while (0)

#endif /* _KERNEL */

#ifdef MBUF_PROFILING
void m_profile(struct mbuf *m);
#define	M_PROFILE(m)	m_profile(m)
#else
#define	M_PROFILE(m)
#endif

#endif /* !_SYS_MBUF_H_ */