diff --git a/sys/conf/NOTES b/sys/conf/NOTES index b0d8ba719928..a5251f9b9cf1 100644 --- a/sys/conf/NOTES +++ b/sys/conf/NOTES @@ -1,2289 +1,2295 @@ # $FreeBSD$ # # NOTES -- Lines that can be cut/pasted into kernel and hints configs. # # Lines that begin with 'device', 'options', 'machine', 'ident', 'maxusers', # 'makeoptions', 'hints', etc. go into the kernel configuration that you # run config(8) with. # # Lines that begin with 'hint.' are NOT for config(8), they go into your # hints file. See /boot/device.hints and/or the 'hints' config(8) directive. # # Please use ``make LINT'' to create an old-style LINT file if you want to # do kernel test-builds. # # This file contains machine independent kernel configuration notes. For # machine dependent notes, look in /sys//conf/NOTES. # # # NOTES conventions and style guide: # # Large block comments should begin and end with a line containing only a # comment character. # # To describe a particular object, a block comment (if it exists) should # come first. Next should come device, options, and hints lines in that # order. All device and option lines must be described by a comment that # doesn't just expand the device or option name. Use only a concise # comment on the same line if possible. Very detailed descriptions of # devices and subsystems belong in manpages. # # A space followed by a tab separates 'option' from an option name. Two # spaces followed by a tab separate 'device' from a device name. Comments # after an option or device should use one space after the comment character. # To comment out a negative option that disables code and thus should not be # enabled for LINT builds, precede 'option' with "#!". # # # This is the ``identification'' of the kernel. Usually this should # be the same as the name of your kernel. # ident LINT # # The `maxusers' parameter controls the static sizing of a number of # internal system tables by a formula defined in subr_param.c. Setting # maxusers to 0 will cause the system to auto-size based on physical # memory. # maxusers 10 # # The `makeoptions' parameter allows variables to be passed to the # generated Makefile in the build area. # # CONF_CFLAGS gives some extra compiler flags that are added to ${CFLAGS} # after most other flags. Here we use it to inhibit use of non-optimal # gcc builtin functions (e.g., memcmp). # # DEBUG happens to be magic. # The following is equivalent to 'config -g KERNELNAME' and creates # 'kernel.debug' compiled with -g debugging as well as a normal # 'kernel'. Use 'make install.debug' to install the debug kernel # but that isn't normally necessary as the debug symbols are not loaded # by the kernel and are not useful there anyway. # # KERNEL can be overridden so that you can change the default name of your # kernel. # # MODULES_OVERRIDE can be used to limit modules built to a specific list. # makeoptions CONF_CFLAGS=-fno-builtin #Don't allow use of memcmp, etc. #makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols #makeoptions KERNEL=foo #Build kernel "foo" and install "/foo" # Only build Linux API modules and plus those parts of the sound system I need. #makeoptions MODULES_OVERRIDE="linux sound/snd sound/pcm sound/driver/maestro3" makeoptions DESTDIR=/tmp # # Certain applications can grow to be larger than the 512M limit # that FreeBSD initially imposes. Below are some options to # allow that limit to grow to 1GB, and can be increased further # with changing the parameters. 
# MAXDSIZ is the maximum that the limit can be set to, and the DFLDSIZ
# is the default value for the limit. MAXSSIZ is the maximum that the
# stack limit can be set to. You might want to set the default lower
# than the max, and explicitly set the maximum with a shell command for
# processes that regularly exceed the limit like INND.
#
options 	MAXDSIZ=(1024UL*1024*1024)
options 	MAXSSIZ=(128UL*1024*1024)
options 	DFLDSIZ=(1024UL*1024*1024)

#
# BLKDEV_IOSIZE sets the default block size used in user block
# device I/O. Note that this value will be overridden by the label
# when specifying a block device from a label with a non-0
# partition blocksize. The default is PAGE_SIZE.
#
options 	BLKDEV_IOSIZE=8192

# Options for the VM subsystem
options 	PQ_CACHESIZE=512	# color for 512k/16k cache
# Deprecated options supported for backwards compatibility
#options 	PQ_NOOPT		# No coloring
#options 	PQ_LARGECACHE		# color for 512k/16k cache
#options 	PQ_HUGECACHE		# color for 1024k/16k cache
#options 	PQ_MEDIUMCACHE		# color for 256k/16k cache
#options 	PQ_NORMALCACHE		# color for 64k/16k cache

# This allows you to actually store this configuration file into
# the kernel binary itself, where it may be later read by saying:
#    strings -n 3 /boot/kernel/kernel | sed -n 's/^___//p' > MYKERNEL
#
options 	INCLUDE_CONFIG_FILE	# Include this file in kernel

options 	GEOM_AES
options 	GEOM_APPLE
options 	GEOM_BDE
options 	GEOM_BSD
options 	GEOM_GPT
options 	GEOM_MBR
options 	GEOM_PC98
options 	GEOM_SUNLABEL
options 	GEOM_VOL

#
# The root device and filesystem type can be compiled in;
# this provides a fallback option if the root device cannot
# be correctly guessed by the bootstrap code, or an override if
# the RB_DFLTROOT flag (-r) is specified when booting the kernel.
#
options 	ROOTDEVNAME=\"ufs:da0s2e\"

#####################################################################
# Scheduler options:
#
# Specifying one of SCHED_4BSD or SCHED_ULE is mandatory. These options
# select which scheduler is compiled in.
#
# SCHED_4BSD is the historical, proven, BSD scheduler. It has a global run
# queue and no cpu affinity which makes it suboptimal for SMP. It has very
# good interactivity and priority selection.
#
# SCHED_ULE is a new experimental scheduler that has been designed for SMP,
# but will work just fine on UP too. Users of this scheduler should expect
# some hiccups and be prepared to provide feedback.
#
options 	SCHED_4BSD
#options 	SCHED_ULE

#####################################################################
# SMP OPTIONS:
#
# SMP enables building of a Symmetric MultiProcessor Kernel.

# Mandatory:
options 	SMP			# Symmetric MultiProcessor Kernel

# ADAPTIVE_MUTEXES changes the behavior of blocking mutexes to spin
# if the thread that currently owns the mutex is executing on another
# CPU.
options 	ADAPTIVE_MUTEXES

# SMP Debugging Options:
#
# MUTEX_DEBUG enables various extra assertions in the mutex code.
# WITNESS enables the witness code which detects deadlocks and cycles
# during locking operations.
# WITNESS_DDB causes the witness code to drop into the kernel debugger if
# a lock hierarchy violation occurs or if locks are held when going to
# sleep.
# WITNESS_SKIPSPIN disables the witness checks on spin mutexes.
options 	MUTEX_DEBUG
options 	WITNESS
options 	WITNESS_DDB
options 	WITNESS_SKIPSPIN

#
# MUTEX_PROFILING - Profiling mutual exclusion locks (mutexes).
# This records four numbers for each acquisition point (identified by
# source file name and line number): longest time held, total time held,
# number of non-recursive acquisitions, and average time held. Measurements
# are made and stored in nanoseconds (using nanotime(9)), but are presented
# in microseconds, which should be sufficient for the locks which actually
# want this (those that are held long and / or often). The MUTEX_PROFILING
# option has the following sysctl namespace for controlling and viewing its
# operation:
#
# debug.mutex.prof.enable - enable / disable profiling
# debug.mutex.prof.acquisitions - number of mutex acquisitions held
# debug.mutex.prof.records - number of acquisition points recorded
# debug.mutex.prof.maxrecords - max number of acquisition points
# debug.mutex.prof.rejected - number of rejections (due to full table)
# debug.mutex.prof.hashsize - hash size
# debug.mutex.prof.collisions - number of hash collisions
# debug.mutex.prof.stats - profiling statistics
#
options 	MUTEX_PROFILING

#####################################################################
# COMPATIBILITY OPTIONS

#
# Implement system calls compatible with 4.3BSD and older versions of
# FreeBSD. You probably do NOT want to remove this as much current code
# still relies on the 4.3 emulation.
#
options 	COMPAT_43

# Enable FreeBSD4 compatibility syscalls
options 	COMPAT_FREEBSD4

#
# These three options provide support for System V Interface
# Definition-style interprocess communication, in the form of shared
# memory, semaphores, and message queues, respectively.
#
options 	SYSVSHM
options 	SYSVSEM
options 	SYSVMSG

#####################################################################
# DEBUGGING OPTIONS

#
# Enable the kernel debugger.
#
options 	DDB

#
# Use direct symbol lookup routines for ddb instead of the kernel linker
# ones, so that symbols (mostly) work before the kernel linker has been
# initialized. This is not the default because it breaks ddb's lookup of
# symbols in loaded modules.
#
#!options 	DDB_NOKLDSYM

#
# Print a stack trace of the current thread out on the console for a panic.
#
options 	DDB_TRACE

#
# Don't drop into DDB for a panic. Intended for unattended operation
# where you may want to drop to DDB from the console, but still want
# the machine to recover from a panic
#
options 	DDB_UNATTENDED

#
# If using GDB remote mode to debug the kernel, there's a non-standard
# extension to the remote protocol that can be used to use the serial
# port as both the debugging port and the system console. It's
# non-standard and you're on your own if you enable it. See also the
# "remotechat" variables in the FreeBSD specific version of gdb.
#
options 	GDB_REMOTE_CHAT

#
# KTRACE enables the system-call tracing facility ktrace(2). To be more
# SMP-friendly, KTRACE uses a worker thread to process most trace events
# asynchronously to the thread generating the event. This requires a
# pre-allocated store of objects representing trace events. The
# KTRACE_REQUEST_POOL option specifies the initial size of this store.
# The size of the pool can be adjusted both at boottime and runtime via
# the kern.ktrace_request_pool tunable and sysctl.
#
options 	KTRACE			#kernel tracing
options 	KTRACE_REQUEST_POOL=101
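#
# As a usage sketch only: assuming MUTEX_PROFILING and KTRACE are compiled
# in, both facilities can be inspected and tuned at runtime with sysctl(8)
# through the knobs listed above. The values shown here are illustrative,
# not recommendations:
#
#	sysctl debug.mutex.prof.enable=1	# start collecting samples
#	sysctl debug.mutex.prof.stats		# dump the profiling statistics
#	sysctl kern.ktrace_request_pool=256	# grow the ktrace request pool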
#
# KTR is a kernel tracing mechanism imported from BSD/OS. Currently it
# has no userland interface aside from a few sysctl's. It is enabled with
# the KTR option. KTR_ENTRIES defines the number of entries in the circular
# trace buffer. KTR_COMPILE defines the mask of events to compile into the
# kernel as defined by the KTR_* constants in <sys/ktr.h>. KTR_MASK defines
# the initial value of the ktr_mask variable which determines at runtime
# what events to trace. KTR_CPUMASK determines which CPUs log events, with
# bit X corresponding to cpu X. KTR_VERBOSE enables dumping of KTR events
# to the console by default. This functionality can be toggled via the
# debug.ktr_verbose sysctl and defaults to off if KTR_VERBOSE is not defined.
#
options 	KTR
options 	KTR_ENTRIES=1024
options 	KTR_COMPILE=(KTR_INTR|KTR_PROC)
options 	KTR_MASK=KTR_INTR
options 	KTR_CPUMASK=0x3
options 	KTR_VERBOSE

#
# The INVARIANTS option is used in a number of source files to enable
# extra sanity checking of internal structures. This support is not
# enabled by default because of the extra time it would take to check
# for these conditions, which can only occur as a result of
# programming errors.
#
options 	INVARIANTS

#
# The INVARIANT_SUPPORT option makes us compile in support for
# verifying some of the internal structures. It is a prerequisite for
# 'INVARIANTS', as enabling 'INVARIANTS' will make these functions be
# called. The intent is that you can set 'INVARIANTS' for single
# source files (by changing the source file or specifying it on the
# command line) if you have 'INVARIANT_SUPPORT' enabled. Also, if you
# wish to build a kernel module with 'INVARIANTS', then adding
# 'INVARIANT_SUPPORT' to your kernel will provide all the necessary
# infrastructure without the added overhead.
#
options 	INVARIANT_SUPPORT

#
# The DIAGNOSTIC option is used to enable extra debugging information
# from some parts of the kernel. As this makes everything more noisy,
# it is disabled by default.
#
options 	DIAGNOSTIC

#
# REGRESSION causes optional kernel interfaces necessary only for regression
# testing to be enabled. These interfaces may constitute security risks
# when enabled, as they permit processes to easily modify aspects of the
# run-time environment to reproduce unlikely or unusual (possibly normally
# impossible) scenarios.
#
options 	REGRESSION

#
# RESTARTABLE_PANICS allows one to continue from a panic as if it were
# a call to the debugger via the Debugger() function instead. It is only
# useful if a kernel debugger is present. To restart from a panic, reset
# the panicstr variable to NULL and continue execution. This option is
# for development use only and should NOT be used in production systems
# to "workaround" a panic.
#
#options 	RESTARTABLE_PANICS

#
# This option lets some drivers co-exist that can't co-exist in a running
# system. This is used to be able to compile all kernel code in one go for
# quality assurance purposes (like this file, which the option takes its
# name from.)
#
options 	COMPILING_LINT

#####################################################################
# NETWORKING OPTIONS

#
# Protocol families:
# Only the INET (Internet) family is officially supported in FreeBSD.
#
options 	INET			#Internet communications protocols
options 	INET6			#IPv6 communications protocols
options 	IPSEC			#IP security
options 	IPSEC_ESP		#IP security (crypto; define w/ IPSEC)
options 	IPSEC_DEBUG		#debug for IP security

#
# Set IPSEC_FILTERGIF to force packets coming through a gif tunnel
# to be processed by any configured packet filtering (ipfw, ipf).
# The default is that packets coming from a tunnel are _not_ processed;
# they are assumed trusted.
# # Note that enabling this can be problematic as there are no mechanisms # in place for distinguishing packets coming out of a tunnel (e.g. no # encX devices as found on openbsd). # #options IPSEC_FILTERGIF #filter ipsec packets from a tunnel #options FAST_IPSEC #new IPsec (cannot define w/ IPSEC) options IPX #IPX/SPX communications protocols options IPXIP #IPX in IP encapsulation (not available) #options NCP #NetWare Core protocol options NETATALK #Appletalk communications protocols options NETATALKDEBUG #Appletalk debugging # # SMB/CIFS requester # NETSMB enables support for SMB protocol, it requires LIBMCHAIN and LIBICONV # options. # NETSMBCRYPTO enables support for encrypted passwords. options NETSMB #SMB/CIFS requester options NETSMBCRYPTO #encrypted password support for SMB # mchain library. It can be either loaded as KLD or compiled into kernel options LIBMCHAIN # netgraph(4). Enable the base netgraph code with the NETGRAPH option. # Individual node types can be enabled with the corresponding option # listed below; however, this is not strictly necessary as netgraph # will automatically load the corresponding KLD module if the node type # is not already compiled into the kernel. Each type below has a # corresponding man page, e.g., ng_async(8). options NETGRAPH #netgraph(4) system options NETGRAPH_ASYNC options NETGRAPH_BPF options NETGRAPH_BRIDGE options NETGRAPH_CISCO options NETGRAPH_ECHO options NETGRAPH_ETHER options NETGRAPH_FRAME_RELAY options NETGRAPH_GIF options NETGRAPH_GIF_DEMUX options NETGRAPH_HOLE options NETGRAPH_IFACE options NETGRAPH_IP_INPUT options NETGRAPH_KSOCKET options NETGRAPH_L2TP options NETGRAPH_LMI # MPPC compression requires proprietary files (not included) #options NETGRAPH_MPPC_COMPRESSION options NETGRAPH_MPPC_ENCRYPTION options NETGRAPH_ONE2MANY options NETGRAPH_PPP options NETGRAPH_PPPOE options NETGRAPH_PPTPGRE options NETGRAPH_RFC1490 options NETGRAPH_SOCKET options NETGRAPH_SPLIT options NETGRAPH_TEE options NETGRAPH_TTY options NETGRAPH_UI options NETGRAPH_VJC device mn # Munich32x/Falc54 Nx64kbit/sec cards. device lmc # tulip based LanMedia WAN cards device musycc # LMC/SBE LMC1504 quad T1/E1 # # Network interfaces: # The `loop' device is MANDATORY when networking is enabled. # The `ether' device provides generic code to handle # Ethernets; it is MANDATORY when an Ethernet device driver is # configured or token-ring is enabled. # The 'wlan' device provides generic code to support 802.11 # drivers, including host AP mode; it is MANDATORY for the wi # driver and will eventually be required by all 802.11 drivers. # The `fddi' device provides generic code to support FDDI. # The `arcnet' device provides generic code to support Arcnet. # The `sppp' device serves a similar role for certain types # of synchronous PPP links (like `cx', `ar'). # The `sl' device implements the Serial Line IP (SLIP) service. # The `ppp' device implements the Point-to-Point Protocol. # The `bpf' device enables the Berkeley Packet Filter. Be # aware of the legal and administrative consequences of enabling this # option. The number of devices determines the maximum number of # simultaneous BPF clients programs runnable. # The `disc' device implements a minimal network interface, # which throws away all packets sent and never receives any. It is # included for testing purposes. This shows up as the `ds' interface. 
# The `tap' device is a pty-like virtual Ethernet interface # The `tun' device implements (user-)ppp and nos-tun # The `gif' device implements IPv6 over IP4 tunneling, # IPv4 over IPv6 tunneling, IPv4 over IPv4 tunneling and # IPv6 over IPv6 tunneling. # The `gre' device implements two types of IP4 over IP4 tunneling: # GRE and MOBILE, as specified in the RFC1701 and RFC2004. # The XBONEHACK option allows the same pair of addresses to be configured on # multiple gif interfaces. # The `faith' device captures packets sent to it and diverts them # to the IPv4/IPv6 translation daemon. # The `stf' device implements 6to4 encapsulation. # The `ef' device provides support for multiple ethernet frame types # specified via ETHER_* options. See ef(4) for details. # # The PPP_BSDCOMP option enables support for compress(1) style entire # packet compression, the PPP_DEFLATE is for zlib/gzip style compression. # PPP_FILTER enables code for filtering the ppp data stream and selecting # events for resetting the demand dial activity timer - requires bpf. # See pppd(8) for more details. # device ether #Generic Ethernet device vlan #VLAN support device wlan #802.11 support device token #Generic TokenRing device fddi #Generic FDDI device arcnet #Generic Arcnet device sppp #Generic Synchronous PPP device loop #Network loopback device device bpf #Berkeley packet filter device disc #Discard device (ds0, ds1, etc) device tap #Virtual Ethernet driver device tun #Tunnel driver (ppp(8), nos-tun(8)) device sl #Serial Line IP device gre #IP over IP tunneling device ppp #Point-to-point protocol options PPP_BSDCOMP #PPP BSD-compress support options PPP_DEFLATE #PPP zlib/deflate/gzip support options PPP_FILTER #enable bpf filtering (needs bpf) device ef # Multiple ethernet frames support options ETHER_II # enable Ethernet_II frame options ETHER_8023 # enable Ethernet_802.3 (Novell) frame options ETHER_8022 # enable Ethernet_802.2 frame options ETHER_SNAP # enable Ethernet_802.2/SNAP frame # for IPv6 device gif #IPv6 and IPv4 tunneling options XBONEHACK device faith #for IPv6 and IPv4 translation device stf #6to4 IPv6 over IPv4 encapsulation # # Internet family options: # # MROUTING enables the kernel multicast packet forwarder, which works # with mrouted(8). # # IPFIREWALL enables support for IP firewall construction, in # conjunction with the `ipfw' program. IPFIREWALL_VERBOSE sends # logged packets to the system logger. IPFIREWALL_VERBOSE_LIMIT # limits the number of times a matching entry can be logged. # # WARNING: IPFIREWALL defaults to a policy of "deny ip from any to any" # and if you do not add other rules during startup to allow access, # YOU WILL LOCK YOURSELF OUT. It is suggested that you set firewall_type=open # in /etc/rc.conf when first enabling this feature, then refining the # firewall rules in /etc/rc.firewall after you've tested that the new kernel # feature works properly. # # IPFIREWALL_DEFAULT_TO_ACCEPT causes the default rule (at boot) to # allow everything. Use with care, if a cracker can crash your # firewall machine, they can get to your protected machines. However, # if you are using it as an as-needed filter for specific problems as # they arise, then this may be for you. Changing the default to 'allow' # means that you won't get stuck if the kernel and /sbin/ipfw binary get # out of sync. # # IPDIVERT enables the divert IP sockets, used by ``ipfw divert'' # # IPSTEALTH enables code to support stealth forwarding (i.e., forwarding # packets without touching the ttl). 
# This can be useful to hide firewalls
# from traceroute and similar tools.
#
# PFIL_HOOKS enables an abstraction layer which is meant to be used in
# network code where filtering is required. See the pfil(9) man page.
# This option is a subset of the IPFILTER option.
#
# TCPDEBUG enables code which keeps traces of the TCP state machine
# for sockets with the SO_DEBUG option set, which can then be examined
# using the trpt(8) utility.
#
options 	MROUTING		# Multicast routing
options 	IPFIREWALL		#firewall
options 	IPFIREWALL_VERBOSE	#enable logging to syslogd(8)
options 	IPFIREWALL_FORWARD	#enable transparent proxy support
options 	IPFIREWALL_VERBOSE_LIMIT=100	#limit verbosity
options 	IPFIREWALL_DEFAULT_TO_ACCEPT	#allow everything by default
options 	IPV6FIREWALL		#firewall for IPv6
options 	IPV6FIREWALL_VERBOSE
options 	IPV6FIREWALL_VERBOSE_LIMIT=100
options 	IPV6FIREWALL_DEFAULT_TO_ACCEPT
options 	IPDIVERT		#divert sockets
options 	IPFILTER		#ipfilter support
options 	IPFILTER_LOG		#ipfilter logging
options 	IPFILTER_DEFAULT_BLOCK	#block all packets by default
options 	IPSTEALTH		#support for stealth forwarding
options 	PFIL_HOOKS
options 	TCPDEBUG

# RANDOM_IP_ID causes the ID field in IP packets to be randomized
# instead of incremented by 1 with each packet generated. This
# option closes a minor information leak which allows remote
# observers to determine the rate of packet generation on the
# machine by watching the counter.
options 	RANDOM_IP_ID

# Statically Link in accept filters
options 	ACCEPT_FILTER_DATA
options 	ACCEPT_FILTER_HTTP

# TCP_DROP_SYNFIN adds support for ignoring TCP packets with SYN+FIN. This
# prevents nmap et al. from identifying the TCP/IP stack, but breaks support
# for RFC1644 extensions and is not recommended for web servers.
#
options 	TCP_DROP_SYNFIN		#drop TCP packets with SYN+FIN

# DUMMYNET enables the "dummynet" bandwidth limiter. You need
# IPFIREWALL as well. See the dummynet(4) and ipfw(8) manpages for more info.
# When you run DUMMYNET it is advisable to also have "options HZ=1000"
# to achieve a smoother scheduling of the traffic.
#
# BRIDGE enables bridging between ethernet cards -- see bridge(4).
# You can use IPFIREWALL and DUMMYNET together with bridging.
#
options 	DUMMYNET
options 	BRIDGE

# Zero copy sockets support. This enables "zero copy" for sending and
# receiving data via a socket. The send side works for any type of NIC,
# the receive side only works for NICs that support MTUs greater than the
# page size of your architecture and that support header splitting. See
# zero_copy(9) for more details.
options 	ZERO_COPY_SOCKETS

#
# ATM (HARP version) options
#
# ATM_CORE includes the base ATM functionality code. This must be included
# for ATM support.
#
# ATM_IP includes support for running IP over ATM.
#
# At least one (and usually only one) of the following signalling managers
# must be included (note that all signalling managers include PVC support):
# ATM_SIGPVC includes support for the PVC-only signalling manager `sigpvc'.
# ATM_SPANS includes support for the `spans' signalling manager, which runs
# the FORE Systems' proprietary SPANS signalling protocol.
# ATM_UNI includes support for the `uni30' and `uni31' signalling managers,
# which run the ATM Forum UNI 3.x signalling protocols.
#
# The `hea' driver provides support for the Efficient Networks, Inc.
# ENI-155p ATM PCI Adapter.
#
# The `hfa' driver provides support for the FORE Systems, Inc.
# PCA-200E ATM PCI Adapter.
# options ATM_CORE #core ATM protocol family options ATM_IP #IP over ATM support options ATM_SIGPVC #SIGPVC signalling manager options ATM_SPANS #SPANS signalling manager options ATM_UNI #UNI signalling manager device hea #Efficient ENI-155p ATM PCI device hfa #FORE PCA-200E ATM PCI ##################################################################### # FILESYSTEM OPTIONS # # Only the root, /usr, and /tmp filesystems need be statically # compiled; everything else will be automatically loaded at mount # time. (Exception: the UFS family--- FFS --- cannot # currently be demand-loaded.) Some people still prefer to statically # compile other filesystems as well. # # NB: The NULL, PORTAL, UMAP and UNION filesystems are known to be # buggy, and WILL panic your system if you attempt to do anything with # them. They are included here as an incentive for some enterprising # soul to sit down and fix them. # # One of these is mandatory: options FFS #Fast filesystem options NFSCLIENT #Network File System options NFSSERVER #Network File System # The rest are optional: options CD9660 #ISO 9660 filesystem options FDESCFS #File descriptor filesystem options HPFS #OS/2 File system options MSDOSFS #MS DOS File System (FAT, FAT32) options NTFS #NT File System options NULLFS #NULL filesystem #options NWFS #NetWare filesystem options PORTALFS #Portal filesystem options PROCFS #Process filesystem (requires PSEUDOFS) options PSEUDOFS #Pseudo-filesystem framework options SMBFS #SMB/CIFS filesystem options UDF #Universal Disk Format options UMAPFS #UID map filesystem options UNIONFS #Union filesystem # The xFS_ROOT options REQUIRE the associated ``options xFS'' options NFS_ROOT #NFS usable as root device # Soft updates is a technique for improving filesystem speed and # making abrupt shutdown less risky. # options SOFTUPDATES # Extended attributes allow additional data to be associated with files, # and is used for ACLs, Capabilities, and MAC labels. # See src/sys/ufs/ufs/README.extattr for more information. options UFS_EXTATTR options UFS_EXTATTR_AUTOSTART # Access Control List support for UFS filesystems. The current ACL # implementation requires extended attribute support, UFS_EXTATTR, # for the underlying filesystem. # See src/sys/ufs/ufs/README.acls for more information. options UFS_ACL # Directory hashing improves the speed of operations on very large # directories at the expense of some memory. options UFS_DIRHASH # Make space in the kernel for a root filesystem on a md device. # Define to the number of kilobytes to reserve for the filesystem. options MD_ROOT_SIZE=10 # Make the md device a potential root device, either with preloaded # images of type mfs_root or md_root. options MD_ROOT # Allow this many swap-devices. # # In order to manage swap, the system must reserve bitmap space that # scales with the largest mounted swap device multiplied by NSWAPDEV, # irregardless of whether other swap devices exist or not. So it # is not a good idea to make this value too large. options NSWAPDEV=5 # Disk quotas are supported when this option is enabled. options QUOTA #enable disk quotas # If you are running a machine just as a fileserver for PC and MAC # users, using SAMBA or Netatalk, you may consider setting this option # and keeping all those users' directories on a filesystem that is # mounted with the suiddir option. This gives new files the same # ownership as the directory (similar to group). 
It's a security hole # if you let these users run programs, so confine it to file-servers # (but it'll save you lots of headaches in those cases). Root owned # directories are exempt and X bits are cleared. The suid bit must be # set on the directory as well; see chmod(1) PC owners can't see/set # ownerships so they keep getting their toes trodden on. This saves # you all the support calls as the filesystem it's used on will act as # they expect: "It's my dir so it must be my file". # options SUIDDIR # NFS options: options NFS_MINATTRTIMO=3 # VREG attrib cache timeout in sec options NFS_MAXATTRTIMO=60 options NFS_MINDIRATTRTIMO=30 # VDIR attrib cache timeout in sec options NFS_MAXDIRATTRTIMO=60 options NFS_GATHERDELAY=10 # Default write gather delay (msec) options NFS_WDELAYHASHSIZ=16 # and with this options NFS_DEBUG # Enable NFS Debugging # Coda stuff: options CODA #CODA filesystem. device vcoda 4 #coda minicache <-> venus comm. # # Add support for the EXT2FS filesystem of Linux fame. Be a bit # careful with this - the ext2fs code has a tendency to lag behind # changes and not be exercised very much, so mounting read/write could # be dangerous (and even mounting read only could result in panics.) # options EXT2FS # Use real implementations of the aio_* system calls. There are numerous # stability and security issues in the current aio code that make it # unsuitable for inclusion on machines with untrusted local users. options VFS_AIO # Cryptographically secure random number generator; /dev/[u]random device random ##################################################################### # POSIX P1003.1B # Real time extensions added in the 1993 Posix # _KPOSIX_PRIORITY_SCHEDULING: Build in _POSIX_PRIORITY_SCHEDULING options _KPOSIX_PRIORITY_SCHEDULING # p1003_1b_semaphores are very experimental, # user should be ready to assist in debugging if problems arise. options P1003_1B_SEMAPHORES ##################################################################### # SECURITY POLICY PARAMETERS # Support for Mandatory Access Control (MAC): options MAC options MAC_BIBA options MAC_BSDEXTENDED options MAC_DEBUG options MAC_IFOFF options MAC_LOMAC options MAC_MLS options MAC_NONE options MAC_PARTITION options MAC_PORTACL options MAC_SEEOTHERUIDS options MAC_TEST ##################################################################### # CLOCK OPTIONS # The granularity of operation is controlled by the kernel option HZ whose # default value (100) means a granularity of 10ms (1s/HZ). # Some subsystems, such as DUMMYNET, might benefit from a smaller # granularity such as 1ms or less, for a smoother scheduling of packets. # Consider, however, that reducing the granularity too much might # cause excessive overhead in clock interrupt processing, # potentially causing ticks to be missed and thus actually reducing # the accuracy of operation. options HZ=100 # If you see the "calcru: negative time of %ld usec for pid %d (%s)\n" # message you probably have some broken sw/hw which disables interrupts # for too long. You can make the system more resistant to this by # choosing a high value for NTIMECOUNTER. The default is 5, there # is no upper limit but more than a couple of hundred are not productive. 
options NTIMECOUNTER=20 # Enable support for the kernel PLL to use an external PPS signal, # under supervision of [x]ntpd(8) # More info in ntpd documentation: http://www.eecis.udel.edu/~ntp options PPS_SYNC ##################################################################### # SCSI DEVICES # SCSI DEVICE CONFIGURATION # The SCSI subsystem consists of the `base' SCSI code, a number of # high-level SCSI device `type' drivers, and the low-level host-adapter # device drivers. The host adapters are listed in the ISA and PCI # device configuration sections below. # # Beginning with FreeBSD 2.0.5 you can wire down your SCSI devices so # that a given bus, target, and LUN always come on line as the same # device unit. In earlier versions the unit numbers were assigned # in the order that the devices were probed on the SCSI bus. This # means that if you removed a disk drive, you may have had to rewrite # your /etc/fstab file, and also that you had to be careful when adding # a new disk as it may have been probed earlier and moved your device # configuration around. # This old behavior is maintained as the default behavior. The unit # assignment begins with the first non-wired down unit for a device # type. For example, if you wire a disk as "da3" then the first # non-wired disk will be assigned da4. # The syntax for wiring down devices is: hint.scbus.0.at="ahc0" hint.scbus.1.at="ahc1" hint.scbus.1.bus="0" hint.scbus.3.at="ahc2" hint.scbus.3.bus="0" hint.scbus.2.at="ahc2" hint.scbus.2.bus="1" hint.da.0.at="scbus0" hint.da.0.target="0" hint.da.0.unit="0" hint.da.1.at="scbus3" hint.da.1.target="1" hint.da.2.at="scbus2" hint.da.2.target="3" hint.sa.1.at="scbus1" hint.sa.1.target="6" # "units" (SCSI logical unit number) that are not specified are # treated as if specified as LUN 0. # All SCSI devices allocate as many units as are required. # The ch driver drives SCSI Media Changer ("jukebox") devices. # # The da driver drives SCSI Direct Access ("disk") and Optical Media # ("WORM") devices. # # The sa driver drives SCSI Sequential Access ("tape") devices. # # The cd driver drives SCSI Read Only Direct Access ("cd") devices. # # The ses driver drives SCSI Envinronment Services ("ses") and # SAF-TE ("SCSI Accessable Fault-Tolerant Enclosure") devices. # # The pt driver drives SCSI Processor devices. # # # Target Mode support is provided here but also requires that a SIM # (SCSI Host Adapter Driver) provide support as well. # # The targ driver provides target mode support as a Processor type device. # It exists to give the minimal context necessary to respond to Inquiry # commands. There is a sample user application that shows how the rest # of the command support might be done in /usr/share/examples/scsi_target. # # The targbh driver provides target mode support and exists to respond # to incoming commands that do not otherwise have a logical unit assigned # to them. # # The "unknown" device (uk? in pre-2.0.5) is now part of the base SCSI # configuration as the "pass" driver. device scbus #base SCSI code device ch #SCSI media changers device da #SCSI direct access devices (aka disks) device sa #SCSI tapes device cd #SCSI CD-ROMs device ses #SCSI Environmental Services (and SAF-TE) device pt #SCSI processor device targ #SCSI Target Mode Code device targbh #SCSI Target Mode Blackhole Device device pass #CAM passthrough driver # CAM OPTIONS: # debugging options: # -- NOTE -- If you specify one of the bus/target/lun options, you must # specify them all! 
# CAMDEBUG: When defined enables debugging macros # CAM_DEBUG_BUS: Debug the given bus. Use -1 to debug all busses. # CAM_DEBUG_TARGET: Debug the given target. Use -1 to debug all targets. # CAM_DEBUG_LUN: Debug the given lun. Use -1 to debug all luns. # CAM_DEBUG_FLAGS: OR together CAM_DEBUG_INFO, CAM_DEBUG_TRACE, # CAM_DEBUG_SUBTRACE, and CAM_DEBUG_CDB # # CAM_MAX_HIGHPOWER: Maximum number of concurrent high power (start unit) cmds # CAM_NEW_TRAN_CODE: this is the new transport layer code that will be switched # to soon # SCSI_NO_SENSE_STRINGS: When defined disables sense descriptions # SCSI_NO_OP_STRINGS: When defined disables opcode descriptions # SCSI_DELAY: The number of MILLISECONDS to freeze the SIM (scsi adapter) # queue after a bus reset, and the number of milliseconds to # freeze the device queue after a bus device reset. This # can be changed at boot and runtime with the # kern.cam.scsi_delay tunable/sysctl. options CAMDEBUG options CAM_DEBUG_BUS=-1 options CAM_DEBUG_TARGET=-1 options CAM_DEBUG_LUN=-1 options CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) options CAM_MAX_HIGHPOWER=4 options SCSI_NO_SENSE_STRINGS options SCSI_NO_OP_STRINGS options SCSI_DELAY=8000 # Be pessimistic about Joe SCSI device # Options for the CAM CDROM driver: # CHANGER_MIN_BUSY_SECONDS: Guaranteed minimum time quantum for a changer LUN # CHANGER_MAX_BUSY_SECONDS: Maximum time quantum per changer LUN, only # enforced if there is I/O waiting for another LUN # The compiled in defaults for these variables are 2 and 10 seconds, # respectively. # # These can also be changed on the fly with the following sysctl variables: # kern.cam.cd.changer.min_busy_seconds # kern.cam.cd.changer.max_busy_seconds # options CHANGER_MIN_BUSY_SECONDS=2 options CHANGER_MAX_BUSY_SECONDS=10 # Options for the CAM sequential access driver: # SA_IO_TIMEOUT: Timeout for read/write/wfm operations, in minutes # SA_SPACE_TIMEOUT: Timeout for space operations, in minutes # SA_REWIND_TIMEOUT: Timeout for rewind operations, in minutes # SA_ERASE_TIMEOUT: Timeout for erase operations, in minutes # SA_1FM_AT_EOD: Default to model which only has a default one filemark at EOT. options SA_IO_TIMEOUT=4 options SA_SPACE_TIMEOUT=60 options SA_REWIND_TIMEOUT=(2*60) options SA_ERASE_TIMEOUT=(4*60) options SA_1FM_AT_EOD # Optional timeout for the CAM processor target (pt) device # This is specified in seconds. The default is 60 seconds. options SCSI_PT_DEFAULT_TIMEOUT=60 # Optional enable of doing SES passthrough on other devices (e.g., disks) # # Normally disabled because a lot of newer SCSI disks report themselves # as having SES capabilities, but this can then clot up attempts to build # build a topology with the SES device that's on the box these drives # are in.... options SES_ENABLE_PASSTHROUGH ##################################################################### # MISCELLANEOUS DEVICES AND OPTIONS # The `pty' device usually turns out to be ``effectively mandatory'', # as it is required for `telnetd', `rlogind', `screen', `emacs', and # `xterm', among others. device pty #Pseudo ttys device nmdm #back-to-back tty devices device md #Memory/malloc disk device snp #Snoop device - to look at pty/vty/etc.. device ccd #Concatenated disk driver # Configuring Vinum into the kernel is not necessary, since the kld # module gets started automatically when vinum(8) starts. This # device is also untested. Use at your own risk. # # The option VINUMDEBUG must match the value set in CFLAGS # in src/sbin/vinum/Makefile. 
Failure to do so will result in # the following message from vinum(8): # # Can't get vinum config: Invalid argument # # see vinum(4) for more reasons not to use these options. device vinum #Vinum concat/mirror/raid driver options VINUMDEBUG #enable Vinum debugging hooks # RAIDframe device. RAID_AUTOCONFIG allows RAIDframe to search all of the # disk devices in the system looking for components that it recognizes (already # configured once before) and auto-configured them into arrays. device raidframe options RAID_AUTOCONFIG # Kernel side iconv library options LIBICONV # Size of the kernel message buffer. Should be N * pagesize. options MSGBUF_SIZE=40960 # Maximum size of a tty or pty input buffer. options TTYHOG=8193 ##################################################################### # HARDWARE DEVICE CONFIGURATION # For ISA the required hints are listed. # EISA, MCA, PCI and pccard are self identifying buses, so no hints # are needed. # # Mandatory devices: # # The keyboard controller; it controls the keyboard and the PS/2 mouse. device atkbdc hint.atkbdc.0.at="isa" hint.atkbdc.0.port="0x060" # The AT keyboard device atkbd hint.atkbd.0.at="atkbdc" hint.atkbd.0.irq="1" # Options for atkbd: options ATKBD_DFLT_KEYMAP # specify the built-in keymap makeoptions ATKBD_DFLT_KEYMAP=jp.106 # These options are valid for other keyboard drivers as well. options KBD_DISABLE_KEYMAP_LOAD # refuse to load a keymap options KBD_INSTALL_CDEV # install a CDEV entry in /dev # `flags' for atkbd: # 0x01 Force detection of keyboard, else we always assume a keyboard # 0x02 Don't reset keyboard, useful for some newer ThinkPads # 0x03 Force detection and avoid reset, might help with certain # dockingstations # 0x04 Old-style (XT) keyboard support, useful for older ThinkPads # PS/2 mouse device psm hint.psm.0.at="atkbdc" hint.psm.0.irq="12" # Options for psm: options PSM_HOOKRESUME #hook the system resume event, useful #for some laptops options PSM_RESETAFTERSUSPEND #reset the device at the resume event # Video card driver for VGA adapters. device vga hint.vga.0.at="isa" # Options for vga: # Try the following option if the mouse pointer is not drawn correctly # or font does not seem to be loaded properly. May cause flicker on # some systems. options VGA_ALT_SEQACCESS # If you can dispense with some vga driver features, you may want to # use the following options to save some memory. #options VGA_NO_FONT_LOADING # don't save/load font #options VGA_NO_MODE_CHANGE # don't change video modes # Older video cards may require this option for proper operation. options VGA_SLOW_IOACCESS # do byte-wide i/o's to TS and GDC regs # The following option probably won't work with the LCD displays. options VGA_WIDTH90 # support 90 column modes options FB_DEBUG # Frame buffer debugging device splash # Splash screen and screen saver support # Various screen savers. device blank_saver device daemon_saver device fade_saver device fire_saver device green_saver device logo_saver device rain_saver device star_saver device warp_saver # The syscons console driver (sco color console compatible). 
device sc hint.sc.0.at="isa" options MAXCONS=16 # number of virtual consoles options SC_ALT_MOUSE_IMAGE # simplified mouse cursor in text mode options SC_DFLT_FONT # compile font in makeoptions SC_DFLT_FONT=cp850 options SC_DISABLE_DDBKEY # disable `debug' key options SC_DISABLE_REBOOT # disable reboot key sequence options SC_HISTORY_SIZE=200 # number of history buffer lines options SC_MOUSE_CHAR=0x3 # char code for text mode mouse cursor options SC_PIXEL_MODE # add support for the raster text mode # The following options will let you change the default colors of syscons. options SC_NORM_ATTR=(FG_GREEN|BG_BLACK) options SC_NORM_REV_ATTR=(FG_YELLOW|BG_GREEN) options SC_KERNEL_CONS_ATTR=(FG_RED|BG_BLACK) options SC_KERNEL_CONS_REV_ATTR=(FG_BLACK|BG_RED) # The following options will let you change the default behaviour of # cut-n-paste feature options SC_CUT_SPACES2TABS # convert leading spaces into tabs options SC_CUT_SEPCHARS=\"x09\" # set of characters that delimit words # (default is single space - \"x20\") # If you have a two button mouse, you may want to add the following option # to use the right button of the mouse to paste text. options SC_TWOBUTTON_MOUSE # You can selectively disable features in syscons. options SC_NO_CUTPASTE options SC_NO_FONT_LOADING options SC_NO_HISTORY options SC_NO_SYSMOUSE options SC_NO_SUSPEND_VTYSWITCH # `flags' for sc # 0x80 Put the video card in the VESA 800x600 dots, 16 color mode # 0x100 Probe for a keyboard device periodically if one is not present # # Optional devices: # # # SCSI host adapters: # # adv: All Narrow SCSI bus AdvanSys controllers. # adw: Second Generation AdvanSys controllers including the ADV940UW. # aha: Adaptec 154x/1535/1640 # ahb: Adaptec 174x EISA controllers # ahc: Adaptec 274x/284x/2910/293x/294x/394x/3950x/3960x/398X/4944/ # 19160x/29160x, aic7770/aic78xx # ahd: Adaptec 29320/39320 Controllers. # aic: Adaptec 6260/6360, APA-1460 (PC Card), NEC PC9801-100 (C-BUS) # amd: Support for the AMD 53C974 SCSI host adapter chip as found on devices # such as the Tekram DC-390(T). # bt: Most Buslogic controllers: including BT-445, BT-54x, BT-64x, BT-74x, # BT-75x, BT-946, BT-948, BT-956, BT-958, SDC3211B, SDC3211F, SDC3222F # isp: Qlogic ISP 1020, 1040 and 1040B PCI SCSI host adapters, # ISP 1240 Dual Ultra SCSI, ISP 1080 and 1280 (Dual) Ultra2, # ISP 12160 Ultra3 SCSI, # Qlogic ISP 2100 and ISP 2200 1Gb Fibre Channel host adapters. # Qlogic ISP 2300 and ISP 2312 2Gb Fibre Channel host adapters. # ispfw: Firmware module for Qlogic host adapters # mpt: LSI-Logic MPT/Fusion 53c1020 or 53c1030 Ultra4 # or FC9x9 Fibre Channel host adapters. # ncr: NCR 53C810, 53C825 self-contained SCSI host adapters. # sym: Symbios/Logic 53C8XX family of PCI-SCSI I/O processors: # 53C810, 53C810A, 53C815, 53C825, 53C825A, 53C860, 53C875, # 53C876, 53C885, 53C895, 53C895A, 53C896, 53C897, 53C1510D, # 53C1010-33, 53C1010-66. # trm: Tekram DC395U/UW/F DC315U adapters. # wds: WD7000 # # Note that the order is important in order for Buslogic ISA/EISA cards to be # probed correctly. 
# device bt hint.bt.0.at="isa" hint.bt.0.port="0x330" device adv hint.adv.0.at="isa" device adw device aha hint.aha.0.at="isa" device aic hint.aic.0.at="isa" device ahb device ahc device ahd device amd device isp hint.isp.0.disable="1" hint.isp.0.role="3" hint.isp.0.prefer_iomap="1" hint.isp.0.prefer_memmap="1" hint.isp.0.fwload_disable="1" hint.isp.0.ignore_nvram="1" hint.isp.0.fullduplex="1" hint.isp.0.topology="lport" hint.isp.0.topology="nport" hint.isp.0.topology="lport-only" hint.isp.0.topology="nport-only" # we can't get u_int64_t types, nor can we get strings if it's got # a leading 0x, hence this silly dodge. hint.isp.0.portwnn="w50000000aaaa0000" hint.isp.0.nodewnn="w50000000aaaa0001" device ispfw device mpt device ncr device sym device trm device wds hint.wds.0.at="isa" hint.wds.0.port="0x350" hint.wds.0.irq="11" hint.wds.0.drq="6" # The aic7xxx driver will attempt to use memory mapped I/O for all PCI # controllers that have it configured only if this option is set. Unfortunately, # this doesn't work on some motherboards, which prevents it from being the # default. options AHC_ALLOW_MEMIO # Dump the contents of the ahc controller configuration PROM. options AHC_DUMP_EEPROM # Bitmap of units to enable targetmode operations. options AHC_TMODE_ENABLE # Compile in aic79xx debugging code. options AHD_DEBUG # Aic79xx driver debugging options. # See the ahd(4) manpage options AHD_DEBUG_OPTS=0xFFFFFFFF # Print human-readable register definitions when debugging options AHD_REG_PRETTY_PRINT # The adw driver will attempt to use memory mapped I/O for all PCI # controllers that have it configured only if this option is set. options ADW_ALLOW_MEMIO # Options used in dev/isp/ (Qlogic SCSI/FC driver). # # ISP_TARGET_MODE - enable target mode operation # options ISP_TARGET_MODE=1 # Options used in dev/sym/ (Symbios SCSI driver). #options SYM_SETUP_LP_PROBE_MAP #-Low Priority Probe Map (bits) # Allows the ncr to take precedence # 1 (1<<0) -> 810a, 860 # 2 (1<<1) -> 825a, 875, 885, 895 # 4 (1<<2) -> 895a, 896, 1510d #options SYM_SETUP_SCSI_DIFF #-HVD support for 825a, 875, 885 # disabled:0 (default), enabled:1 #options SYM_SETUP_PCI_PARITY #-PCI parity checking # disabled:0, enabled:1 (default) #options SYM_SETUP_MAX_LUN #-Number of LUNs supported # default:8, range:[1..64] # The 'asr' driver provides support for current DPT/Adaptec SCSI RAID # controllers (SmartRAID V and VI and later). # These controllers require the CAM infrastructure. # device asr # The 'dpt' driver provides support for old DPT controllers (http://www.dpt.com/). # These have hardware RAID-{0,1,5} support, and do multi-initiator I/O. # The DPT controllers are commonly re-licensed under other brand-names - # some controllers by Olivetti, Dec, HP, AT&T, SNI, AST, Alphatronic, NEC and # Compaq are actually DPT controllers. # # See src/sys/dev/dpt for debugging and other subtle options. # DPT_MEASURE_PERFORMANCE Enables a set of (semi)invasive metrics. Various # instruments are enabled. The tools in # /usr/sbin/dpt_* assume these to be enabled. # DPT_HANDLE_TIMEOUTS Normally device timeouts are handled by the DPT. # If you ant the driver to handle timeouts, enable # this option. If your system is very busy, this # option will create more trouble than solve. # DPT_TIMEOUT_FACTOR Used to compute the excessive amount of time to # wait when timing out with the above option. # DPT_DEBUG_xxxx These are controllable from sys/dev/dpt/dpt.h # DPT_LOST_IRQ When enabled, will try, once per second, to catch # any interrupt that got lost. 
Seems to help in some # DPT-firmware/Motherboard combinations. Minimal # cost, great benefit. # DPT_RESET_HBA Make "reset" actually reset the controller # instead of fudging it. Only enable this if you # are 100% certain you need it. device dpt # DPT options #!CAM# options DPT_MEASURE_PERFORMANCE #!CAM# options DPT_HANDLE_TIMEOUTS options DPT_TIMEOUT_FACTOR=4 options DPT_LOST_IRQ options DPT_RESET_HBA options DPT_ALLOW_MEMIO # # Compaq "CISS" RAID controllers (SmartRAID 5* series) # These controllers have a SCSI-like interface, and require the # CAM infrastructure. # device ciss # # Intel Integrated RAID controllers. # This driver was developed and is maintained by Intel. Contacts # at Intel for this driver are # "Kannanthanam, Boji T" and # "Leubner, Achim" . # device iir # # Mylex AcceleRAID and eXtremeRAID controllers with v6 and later # firmware. These controllers have a SCSI-like interface, and require # the CAM infrastructure. # device mly # # Compaq Smart RAID, Mylex DAC960 and AMI MegaRAID controllers. Only # one entry is needed; the code will find and configure all supported # controllers. # device ida # Compaq Smart RAID device mlx # Mylex DAC960 device amr # AMI MegaRAID # # 3ware ATA RAID # device twe # 3ware ATA RAID # # The 'ATA' driver supports all ATA and ATAPI devices, including PC Card # devices. You only need one "device ata" for it to find all # PCI and PC Card ATA/ATAPI devices on modern machines. device ata device atadisk # ATA disk drives device atapicd # ATAPI CDROM drives device atapifd # ATAPI floppy drives device atapist # ATAPI tape drives device atapicam # emulate ATAPI devices as SCSI ditto via CAM # needs CAM to be present (scbus & pass) # # For older non-PCI, non-PnPBIOS systems, these are the hints lines to add: hint.ata.0.at="isa" hint.ata.0.port="0x1f0" hint.ata.0.irq="14" hint.ata.1.at="isa" hint.ata.1.port="0x170" hint.ata.1.irq="15" # # The following options are valid on the ATA driver: # # ATA_STATIC_ID: controller numbering is static ie depends on location # else the device numbers are dynamically allocated. options ATA_STATIC_ID # # Standard floppy disk controllers and floppy tapes, supports # the Y-E DATA External FDD (PC Card) # device fdc hint.fdc.0.at="isa" hint.fdc.0.port="0x3F0" hint.fdc.0.irq="6" hint.fdc.0.drq="2" # # FDC_DEBUG enables floppy debugging. Since the debug output is huge, you # gotta turn it actually on by setting the variable fd_debug with DDB, # however. options FDC_DEBUG # # Activate this line if you happen to have an Insight floppy tape. # Probing them proved to be dangerous for people with floppy disks only, # so it's "hidden" behind a flag: #hint.fdc.0.flags="1" # Specify floppy devices hint.fd.0.at="fdc0" hint.fd.0.drive="0" hint.fd.1.at="fdc0" hint.fd.1.drive="1" # # sio: serial ports (see sio(4)), including support for various # PC Card devices, such as Modem and NICs (see etc/defaults/pccard.conf) device sio hint.sio.0.at="isa" hint.sio.0.port="0x3F8" hint.sio.0.flags="0x10" hint.sio.0.irq="4" # # `flags' for serial drivers that support consoles (only for sio now): # 0x10 enable console support for this unit. The other console flags # are ignored unless this is set. Enabling console support does # not make the unit the preferred console - boot with -h or set # the 0x20 flag for that. Currently, at most one unit can have # console support; the first one (in config file order) with # this flag set is preferred. Setting this flag for sio0 gives # the old behaviour. 
# 0x20 force this unit to be the console (unless there is another # higher priority console). This replaces the COMCONSOLE option. # 0x40 reserve this unit for low level console operations. Do not # access the device in any normal way. # 0x80 use this port for serial line gdb support in ddb. # # PnP `flags' # 0x1 disable probing of this device. Used to prevent your modem # from being attached as a PnP modem. # # Options for serial drivers that support consoles (only for sio now): options BREAK_TO_DEBUGGER #a BREAK on a comconsole goes to #DDB, if available. options CONSPEED=115200 # speed for serial console # (default 9600) # Solaris implements a new BREAK which is initiated by a character # sequence CR ~ ^b which is similar to a familiar pattern used on # Sun servers by the Remote Console. options ALT_BREAK_TO_DEBUGGER # Options for sio: options COM_ESP #code for Hayes ESP options COM_MULTIPORT #code for some cards with shared IRQs # Other flags for sio that aren't documented in the man page. # 0x20000 enable hardware RTS/CTS and larger FIFOs. Only works for # ST16650A-compatible UARTs. # PCI Universal Communications driver # Supports various single and multi port PCI serial cards. Maybe later # also the parallel ports on combination serial/parallel cards. New cards # can be added in src/sys/dev/puc/pucdata.c. # # If the PUC_FASTINTR option is used the driver will try to use fast # interrupts. The card must then be the only user of that interrupt. # Interrupts cannot be shared when using PUC_FASTINTR. device puc options PUC_FASTINTR # # Network interfaces: # # MII bus support is required for some PCI 10/100 ethernet NICs, # namely those which use MII-compliant transceivers or implement # tranceiver control interfaces that operate like an MII. Adding # "device miibus0" to the kernel config pulls in support for # the generic miibus API and all of the PHY drivers, including a # generic one for PHYs that aren't specifically handled by an # individual driver. device miibus # an: Aironet 4500/4800 802.11 wireless adapters. Supports the PCMCIA, # PCI and ISA varieties. # awi: Support for IEEE 802.11 PC Card devices using the AMD Am79C930 and # Harris (Intersil) Chipset with PCnetMobile firmware by AMD. # bge: Support for gigabit ethernet adapters based on the Broadcom # BCM570x family of controllers, including the 3Com 3c996-T, # the Netgear GA302T, the SysKonnect SK-9D21 and SK-9D41, and # the embedded gigE NICs on Dell PowerEdge 2550 servers. # cm: Arcnet SMC COM90c26 / SMC COM90c56 # (and SMC COM90c66 in '56 compatibility mode) adapters. # cnw: Xircom CNW/Netware Airsurfer PC Card adapter # cs: IBM Etherjet and other Crystal Semi CS89x0-based adapters # dc: Support for PCI fast ethernet adapters based on the DEC/Intel 21143 # and various workalikes including: # the ADMtek AL981 Comet and AN985 Centaur, the ASIX Electronics # AX88140A and AX88141, the Davicom DM9100 and DM9102, the Lite-On # 82c168 and 82c169 PNIC, the Lite-On/Macronix LC82C115 PNIC II # and the Macronix 98713/98713A/98715/98715A/98725 PMAC. This driver # replaces the old al, ax, dm, pn and mx drivers. List of brands: # Digital DE500-BA, Kingston KNE100TX, D-Link DFE-570TX, SOHOware SFA110, # SVEC PN102-TX, CNet Pro110B, 120A, and 120B, Compex RL100-TX, # LinkSys LNE100TX, LNE100TX V2.0, Jaton XpressNet, Alfa Inc GFC2204, # KNE110TX. # de: Digital Equipment DC21040 # em: Intel Pro/1000 Gigabit Ethernet 82542, 82543, 82544 based adapters. 
# ep: 3Com 3C509, 3C529, 3C556, 3C562D, 3C563D, 3C572, 3C574X, 3C579, 3C589 # and PC Card devices using these chipsets. # ex: Intel EtherExpress Pro/10 and other i82595-based adapters, # Olicom Ethernet PC Card devices. # fe: Fujitsu MB86960A/MB86965A Ethernet # fea: DEC DEFEA EISA FDDI adapter # fpa: Support for the Digital DEFPA PCI FDDI. `device fddi' is also needed. # fxp: Intel EtherExpress Pro/100B # (hint of prefer_iomap can be done to prefer I/O instead of Mem mapping) # gx: Intel Pro/1000 Gigabit Ethernet (82542, 82543-F, 82543-T) # lge: Support for PCI gigabit ethernet adapters based on the Level 1 # LXT1001 NetCellerator chipset. This includes the D-Link DGE-500SX, # SMC TigerCard 1000 (SMC9462SX), and some Addtron cards. # my: Myson Fast Ethernet (MTD80X, MTD89X) # nge: Support for PCI gigabit ethernet adapters based on the National # Semiconductor DP83820 and DP83821 chipset. This includes the # SMC EZ Card 1000 (SMC9462TX), D-Link DGE-500T, Asante FriendlyNet # GigaNIX 1000TA and 1000TPC, the Addtron AEG320T, the LinkSys # EG1032 and EG1064, the Surecom EP-320G-TX and the Netgear GA622T. # pcn: Support for PCI fast ethernet adapters based on the AMD Am79c97x # chipsets, including the PCnet/FAST, PCnet/FAST+, PCnet/PRO and # PCnet/Home. These were previously handled by the lnc driver (and # still will be if you leave this driver out of the kernel). # rl: Support for PCI fast ethernet adapters based on the RealTek 8129/8139 # chipset. Note that the RealTek driver defaults to using programmed # I/O to do register accesses because memory mapped mode seems to cause # severe lockups on SMP hardware. This driver also supports the # Accton EN1207D `Cheetah' adapter, which uses a chip called # the MPX 5030/5038, which is either a RealTek in disguise or a # RealTek workalike. Note that the D-Link DFE-530TX+ uses the RealTek # chipset and is supported by this driver, not the 'vr' driver. # sf: Support for Adaptec Duralink PCI fast ethernet adapters based on the # Adaptec AIC-6915 "starfire" controller. # This includes dual and quad port cards, as well as one 100baseFX card. # Most of these are 64-bit PCI devices, except for one single port # card which is 32-bit. # sis: Support for NICs based on the Silicon Integrated Systems SiS 900, # SiS 7016 and NS DP83815 PCI fast ethernet controller chips. # sk: Support for the SysKonnect SK-984x series PCI gigabit ethernet NICs. # This includes the SK-9841 and SK-9842 single port cards (single mode # and multimode fiber) and the SK-9843 and SK-9844 dual port cards # (also single mode and multimode). # The driver will autodetect the number of ports on the card and # attach each one as a separate network interface. # sn: Support for ISA and PC Card Ethernet devices using the # SMC91C90/92/94/95 chips. # ste: Sundance Technologies ST201 PCI fast ethernet controller, includes # the D-Link DFE-550TX. # ti: Support for PCI gigabit ethernet NICs based on the Alteon Networks # Tigon 1 and Tigon 2 chipsets. This includes the Alteon AceNIC, the # 3Com 3c985, the Netgear GA620 and various others. Note that you will # probably want to bump up NMBCLUSTERS a lot to use this driver. # tl: Support for the Texas Instruments TNETE100 series 'ThunderLAN' # cards and integrated ethernet controllers. This includes several # Compaq Netelligent 10/100 cards and the built-in ethernet controllers # in several Compaq Prosignia, Proliant and Deskpro systems. It also # supports several Olicom 10Mbps and 10/100 boards. # tx: SMC 9432 TX, BTX and FTX cards. 
(SMC EtherPower II serie) # txp: Support for 3Com 3cR990 cards with the "Typhoon" chipset # vr: Support for various fast ethernet adapters based on the VIA # Technologies VT3043 `Rhine I' and VT86C100A `Rhine II' chips, # including the D-Link DFE530TX (see 'rl' for DFE530TX+), the Hawking # Technologies PN102TX, and the AOpen/Acer ALN-320. # vx: 3Com 3C590 and 3C595 # wb: Support for fast ethernet adapters based on the Winbond W89C840F chip. # Note: this is not the same as the Winbond W89C940F, which is a # NE2000 clone. # wi: Lucent WaveLAN/IEEE 802.11 PCMCIA adapters. Note: this supports both # the PCMCIA and ISA cards: the ISA card is really a PCMCIA to ISA # bridge with a PCMCIA adapter plugged into it. # xe: Xircom/Intel EtherExpress Pro100/16 PC Card ethernet controller, # Accton Fast EtherCard-16, Compaq Netelligent 10/100 PC Card, # Toshiba 10/100 Ethernet PC Card, Xircom 16-bit Ethernet + Modem 56 # xl: Support for the 3Com 3c900, 3c905, 3c905B and 3c905C (Fast) # Etherlink XL cards and integrated controllers. This includes the # integrated 3c905B-TX chips in certain Dell Optiplex and Dell # Precision desktop machines and the integrated 3c905-TX chips # in Dell Latitude laptop docking stations. # Also supported: 3Com 3c980(C)-TX, 3Com 3cSOHO100-TX, 3Com 3c450-TX # Order for ISA/EISA devices is important here device cm hint.cm.0.at="isa" hint.cm.0.port="0x2e0" hint.cm.0.irq="9" hint.cm.0.maddr="0xdc000" device cs hint.cs.0.at="isa" hint.cs.0.port="0x300" device ep device ex device fe hint.fe.0.at="isa" hint.fe.0.port="0x300" device fea device sn hint.sn.0.at="isa" hint.sn.0.port="0x300" hint.sn.0.irq="10" device an device awi device cnw device wi device xe # PCI Ethernet NICs that use the common MII bus controller code. device dc # DEC/Intel 21143 and various workalikes device fxp # Intel EtherExpress PRO/100B (82557, 82558) hint.fxp.0.prefer_iomap="0" device my # Myson Fast Ethernet (MTD80X, MTD89X) device rl # RealTek 8129/8139 device pcn # AMD Am79C97x PCI 10/100 NICs device sf # Adaptec AIC-6915 (``Starfire'') device sis # Silicon Integrated Systems SiS 900/SiS 7016 device ste # Sundance ST201 (D-Link DFE-550TX) device tl # Texas Instruments ThunderLAN device tx # SMC EtherPower II (83c170 ``EPIC'') device vr # VIA Rhine, Rhine II device wb # Winbond W89C840F device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'') # PCI Ethernet NICs. device de # DEC/Intel DC21x4x (``Tulip'') device txp # 3Com 3cR990 (``Typhoon'') device vx # 3Com 3c590, 3c595 (``Vortex'') # PCI Gigabit & FDDI NICs. device bge device gx device lge device nge device sk device ti device fpa # Use "private" jumbo buffers allocated exclusively for the ti(4) driver. # This option is incompatible with the TI_JUMBO_HDRSPLIT option below. #options TI_PRIVATE_JUMBOS # Turn on the header splitting option for the ti(4) driver firmware. This # only works for Tigon II chips, and has no effect for Tigon I chips. options TI_JUMBO_HDRSPLIT # These two options allow manipulating the mbuf cluster size and mbuf size, # respectively. Be very careful with NIC driver modules when changing # these from their default values, because that can potentially cause a # mismatch between the mbuf size assumed by the kernel and the mbuf size # assumed by a module. The only driver that currently has the ability to # detect a mismatch is ti(4). 
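# For reference, the stock values are MCLSHIFT=11 (2KB clusters) and
# MSIZE=256; the example lines below select 4KB clusters (1 << 12) and
# 512-byte mbufs instead.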
options MCLSHIFT=12 # mbuf cluster shift in bits, 12 == 4KB options MSIZE=512 # mbuf size in bytes # # ATM related options (Cranor version) # (note: this driver cannot be used with the HARP ATM stack) # # The `en' device provides support for Efficient Networks (ENI) # ENI-155 PCI midway cards, and the Adaptec 155Mbps PCI ATM cards (ANA-59x0). # # atm device provides generic atm functions and is required for # atm devices. # NATM enables the netnatm protocol family that can be used to # bypass TCP/IP. # # the current driver supports only PVC operations (no atm-arp, no multicast). # for more details, please read the original documents at # http://www.ccrc.wustl.edu/pub/chuck/tech/bsdatm/bsdatm.html # device atm device en options NATM #native ATM # # Audio drivers: `pcm', `sbc', `gusc' # # pcm: PCM audio through various sound cards. # # This has support for a large number of new audio cards, based on # CS423x, OPTi931, Yamaha OPL-SAx, and also for SB16, GusPnP. # For more information about this driver and supported cards, # see the pcm.4 man page. # # The flags of the device tells the device a bit more info about the # device that normally is obtained through the PnP interface. # bit 2..0 secondary DMA channel; # bit 4 set if the board uses two dma channels; # bit 15..8 board type, overrides autodetection; leave it # zero if don't know what to put in (and you don't, # since this is unsupported at the moment...). # # Supported cards include: # Creative SoundBlaster ISA PnP/non-PnP # Supports ESS and Avance ISA chips as well. # Gravis UltraSound ISA PnP/non-PnP # Crystal Semiconductor CS461x/428x PCI # Neomagic 256AV (ac97) # Most of the more common ISA/PnP sb/mss/ess compatable cards. device pcm # For non-pnp sound cards with no bridge drivers only: hint.pcm.0.at="isa" hint.pcm.0.irq="10" hint.pcm.0.drq="1" hint.pcm.0.flags="0x0" # # midi: MIDI interfaces and synthesizers # device midi # For non-pnp sound cards with no bridge drivers: hint.midi.0.at="isa" hint.midi.0.irq="5" hint.midi.0.flags="0x0" # For serial ports (this example configures port 2): # TODO: implement generic tty-midi interface so that we can use # other uarts. hint.midi.0.at="isa" hint.midi.0.port="0x2F8" hint.midi.0.irq="3" # # seq: MIDI sequencer # device seq # The bridge drivers for sound cards. These can be separately configured # for providing services to the likes of new-midi. # When used with 'device pcm' they also provide pcm sound services. # # sbc: Creative SoundBlaster ISA PnP/non-PnP # Supports ESS and Avance ISA chips as well. 
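#
# (For instance, a PCI CS461x card would typically need only the pcm
# device plus its bridge driver, csa, described below -- an illustrative
# minimum rather than a complete config.)
#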
# gusc: Gravis UltraSound ISA PnP/non-PnP # csa: Crystal Semiconductor CS461x/428x PCI # For non-PnP cards: device sbc hint.sbc.0.at="isa" hint.sbc.0.port="0x220" hint.sbc.0.irq="5" hint.sbc.0.drq="1" hint.sbc.0.flags="0x15" device gusc hint.gusc.0.at="isa" hint.gusc.0.port="0x220" hint.gusc.0.irq="5" hint.gusc.0.drq="1" hint.gusc.0.flags="0x13" # # Miscellaneous hardware: # # scd: Sony CD-ROM using proprietary (non-ATAPI) interface # mcd: Mitsumi CD-ROM using proprietary (non-ATAPI) interface # meteor: Matrox Meteor video capture board # bktr: Brooktree bt848/848a/849a/878/879 video capture and TV Tuner board # cy: Cyclades serial driver # joy: joystick (including IO DATA PCJOY PC Card joystick) # rc: RISCom/8 multiport card # rp: Comtrol Rocketport(ISA/PCI) - single card # si: Specialix SI/XIO 4-32 port terminal multiplexor # nmdm: nullmodem terminal driver (see nmdm(4)) # Notes on the Comtrol Rocketport driver: # # The exact values used for rp0 depend on how many boards you have # in the system. The manufacturer's sample configs are listed as: # # device rp # core driver support # # Comtrol Rocketport ISA single card # hint.rp.0.at="isa" # hint.rp.0.port="0x280" # # If instead you have two ISA cards, one installed at 0x100 and the # second installed at 0x180, then you should add the following to # your kernel probe hints: # hint.rp.0.at="isa" # hint.rp.0.port="0x100" # hint.rp.1.at="isa" # hint.rp.1.port="0x180" # # For 4 ISA cards, it might be something like this: # hint.rp.0.at="isa" # hint.rp.0.port="0x180" # hint.rp.1.at="isa" # hint.rp.1.port="0x100" # hint.rp.2.at="isa" # hint.rp.2.port="0x340" # hint.rp.3.at="isa" # hint.rp.3.port="0x240" # # For PCI cards, you need no hints. # Mitsumi CD-ROM device mcd hint.mcd.0.at="isa" hint.mcd.0.port="0x300" # for the Sony CDU31/33A CDROM device scd hint.scd.0.at="isa" hint.scd.0.port="0x230" device joy # PnP aware, hints for nonpnp only hint.joy.0.at="isa" hint.joy.0.port="0x201" device rc hint.rc.0.at="isa" hint.rc.0.port="0x220" hint.rc.0.irq="12" device rp hint.rp.0.at="isa" hint.rp.0.port="0x280" device si options SI_DEBUG hint.si.0.at="isa" hint.si.0.maddr="0xd0000" hint.si.0.irq="12" device nmdm # # The `meteor' device is a PCI video capture board. It can also have the # following options: # options METEOR_ALLOC_PAGES=xxx preallocate kernel pages for data entry # figure (ROWS*COLUMN*BYTES_PER_PIXEL*FRAME+PAGE_SIZE-1)/PAGE_SIZE # options METEOR_DEALLOC_PAGES remove all allocated pages on close(2) # options METEOR_DEALLOC_ABOVE=xxx remove all allocated pages above the # specified amount. If this value is below the allocated amount no action # taken # options METEOR_SYSTEM_DEFAULT={METEOR_PAL|METEOR_NTSC|METEOR_SECAM}, used # for initialization of fps routine when a signal is not present. # # The 'bktr' device is a PCI video capture device using the Brooktree # bt848/bt848a/bt849a/bt878/bt879 chipset. When used with a TV Tuner it forms a # TV card, eg Miro PC/TV, Hauppauge WinCast/TV WinTV, VideoLogic Captivator, # Intel Smart Video III, AverMedia, IMS Turbo, FlyVideo. # # options OVERRIDE_CARD=xxx # options OVERRIDE_TUNER=xxx # options OVERRIDE_MSP=1 # options OVERRIDE_DBX=1 # These options can be used to override the auto detection # The current values for xxx are found in src/sys/dev/bktr/bktr_card.h # Using sysctl(8) run-time overrides on a per-card basis can be made # # options BROOKTREE_SYSTEM_DEFAULT=BROOKTREE_PAL # or # options BROOKTREE_SYSTEM_DEFAULT=BROOKTREE_NTSC # Specifes the default video capture mode. 
# This is required for Dual Crystal (28&35Mhz) boards where PAL is used # to prevent hangs during initialisation. eg VideoLogic Captivator PCI. # # options BKTR_USE_PLL # PAL or SECAM users who have a 28Mhz crystal (and no 35Mhz crystal) # must enable PLL mode with this option. eg some new Bt878 cards. # # options BKTR_GPIO_ACCESS # This enable IOCTLs which give user level access to the GPIO port. # # options BKTR_NO_MSP_RESET # Prevents the MSP34xx reset. Good if you initialise the MSP in another OS first # # options BKTR_430_FX_MODE # Switch Bt878/879 cards into Intel 430FX chipset compatibility mode. # # options BKTR_SIS_VIA_MODE # Switch Bt878/879 cards into SIS/VIA chipset compatibility mode which is # needed for some old SiS and VIA chipset motherboards. # This also allows Bt878/879 chips to work on old OPTi (<1997) chipset # motherboards and motherboards with bad or incomplete PCI 2.1 support. # As a rough guess, old = before 1998 # device meteor 1 # # options BKTR_USE_FREEBSD_SMBUS # Compile with FreeBSD SMBus implementation # # Brooktree driver has been ported to the new I2C framework. Thus, # you'll need to have the following 3 lines in the kernel config. # device smbus # device iicbus # device iicbb # device iicsmb # The iic and smb devices are only needed if you want to control other # I2C slaves connected to the external connector of some cards. # device bktr # # PC Card/PCMCIA # (OLDCARD) # # card: pccard slots # pcic: isa/pccard bridge #device pcic #hint.pcic.0.at="isa" #hint.pcic.1.at="isa" #device card 1 # # PC Card/PCMCIA and Cardbus # (NEWCARD) # # Note that NEWCARD and OLDCARD are incompatible. Do not use both at the same # time. # # pccbb: pci/cardbus bridge implementing YENTA interface # pccard: pccard slots # cardbus: cardbus slots device cbb device pccard device cardbus #device pcic ISA attachment currently busted #hint.pcic.0.at="isa" #hint.pcic.1.at="isa" # # SMB bus # # System Management Bus support is provided by the 'smbus' device. # Access to the SMBus device is via the 'smb' device (/dev/smb*), # which is a child of the 'smbus' device. # # Supported devices: # smb standard io through /dev/smb* # # Supported SMB interfaces: # iicsmb I2C to SMB bridge with any iicbus interface # bktr brooktree848 I2C hardware interface # intpm Intel PIIX4 (82371AB, 82443MX) Power Management Unit # alpm Acer Aladdin-IV/V/Pro2 Power Management Unit # ichsmb Intel ICH SMBus controller chips (82801AA, 82801AB, 82801BA) # viapm VIA VT82C586B/596B/686A and VT8233 Power Management Unit # amdpm AMD 756 Power Management Unit # nfpm NVIDIA nForce Power Management Unit # device smbus # Bus support, required for smb below. device intpm device alpm device ichsmb device viapm device amdpm device nfpm device smb # # I2C Bus # # Philips i2c bus support is provided by the `iicbus' device. # # Supported devices: # ic i2c network interface # iic i2c standard io # iicsmb i2c to smb bridge. Allow i2c i/o with smb commands. # # Supported interfaces: # bktr brooktree848 I2C software interface # # Other: # iicbb generic I2C bit-banging code (needed by lpbb, bktr) # device iicbus # Bus support, required for ic/iic/iicsmb below. device iicbb device ic device iic device iicsmb # smb over i2c bridge # Parallel-Port Bus # # Parallel port bus support is provided by the `ppbus' device. # Multiple devices may be attached to the parallel port, devices # are automatically probed and attached when found. 
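#
# As an illustrative minimum (the full device list follows), a printer on
# a standard ISA parallel port needs the ppc interface, the ppbus bus and
# the lpt device, plus the ppc hint lines shown further down:
#	device	ppc
#	device	ppbus
#	device	lpt
#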
# # Supported devices: # vpo Iomega Zip Drive # Requires SCSI disk support ('scbus' and 'da'), best # performance is achieved with ports in EPP 1.9 mode. # lpt Parallel Printer # plip Parallel network interface # ppi General-purpose I/O ("Geek Port") + IEEE1284 I/O # pps Pulse per second Timing Interface # lpbb Philips official parallel port I2C bit-banging interface # # Supported interfaces: # ppc ISA-bus parallel port interfaces. # options PPC_PROBE_CHIPSET # Enable chipset specific detection # (see flags in ppc(4)) options DEBUG_1284 # IEEE1284 signaling protocol debug options PERIPH_1284 # Makes your computer act as an IEEE1284 # compliant peripheral options DONTPROBE_1284 # Avoid boot detection of PnP parallel devices options VP0_DEBUG # ZIP/ZIP+ debug options LPT_DEBUG # Printer driver debug options PPC_DEBUG # Parallel chipset level debug options PLIP_DEBUG # Parallel network IP interface debug options PCFCLOCK_VERBOSE # Verbose pcfclock driver options PCFCLOCK_MAX_RETRIES=5 # Maximum read tries (default 10) device ppc hint.ppc.0.at="isa" hint.ppc.0.irq="7" device ppbus device vpo device lpt device plip device ppi device pps device lpbb device pcfclock # Kernel BOOTP support options BOOTP # Use BOOTP to obtain IP address/hostname # Requires NFSCLIENT and NFS_ROOT options BOOTP_NFSROOT # NFS mount root filesystem using BOOTP info options BOOTP_NFSV3 # Use NFS v3 to NFS mount root options BOOTP_COMPAT # Workaround for broken bootp daemons. options BOOTP_WIRED_TO=fxp0 # Use interface fxp0 for BOOTP # # Add tie-ins for a hardware watchdog. This only enable the hooks; # the user must still supply the actual driver. # options HW_WDOG # # Disable swapping. This option removes all code which actually performs # swapping, so it's not possible to turn it back on at run-time. # # This is sometimes usable for systems which don't have any swap space # (see also sysctls "vm.defer_swapspace_pageouts" and # "vm.disable_swapspace_pageouts") # #options NO_SWAPPING # Set the number of sf_bufs to allocate. sf_bufs are virtual buffers # for sendfile(2) that are used to map file VM pages, and normally # default to a quantity that is roughly 16*MAXUSERS+512. You would # typically want about 4 of these for each simultaneous file send. # options NSFBUFS=1024 # # Enable extra debugging code for locks. This stores the filename and # line of whatever acquired the lock in the lock itself, and change a # number of function calls to pass around the relevant data. This is # not at all useful unless you are debugging lock code. Also note # that it is likely to break e.g. fstat(1) unless you recompile your # userland with -DDEBUG_LOCKS as well. 
# options DEBUG_LOCKS ##################################################################### # USB support # UHCI controller device uhci # OHCI controller device ohci # General USB code (mandatory for USB) device usb # # USB Double Bulk Pipe devices device udbp # Generic USB device driver device ugen # Human Interface Device (anything with buttons and dials) device uhid # USB keyboard device ukbd # USB printer device ulpt # USB Iomega Zip 100 Drive (Requires scbus and da) device umass # USB modem support device umodem # USB mouse device ums # Diamond Rio 500 Mp3 player device urio # USB scanners device uscanner # USB serial support device ucom # USB support for serial adapters based on the FT8U100AX and FT8U232AM device uftdi # USB support for Prolific PL-2303 serial adapters device uplcom # USB support for Belkin F5U103 and compatible serial adapters device ubsa # USB serial support for DDI pocket's PHS device uvscom # USB Visor and Palm devices device uvisor # USB Fm Radio device ufm # # ADMtek USB ethernet. Supports the LinkSys USB100TX, # the Billionton USB100, the Melco LU-ATX, the D-Link DSB-650TX # and the SMC 2202USB. Also works with the ADMtek AN986 Pegasus # eval board. device aue # # CATC USB-EL1201A USB ethernet. Supports the CATC Netmate # and Netmate II, and the Belkin F5U111. device cue # # Kawasaki LSI ethernet. Supports the LinkSys USB10T, # Entrega USB-NET-E45, Peracom Ethernet Adapter, the # 3Com 3c19250, the ADS Technologies USB-10BT, the ATen UC10T, # the Netgear EA101, the D-Link DSB-650, the SMC 2102USB # and 2104USB, and the Corega USB-T. device kue # debugging options for the USB subsystem # options USB_DEBUG # options for ukbd: options UKBD_DFLT_KEYMAP # specify the built-in keymap makeoptions UKBD_DFLT_KEYMAP=it.iso # options for uvscom: options UVSCOM_DEFAULT_OPKTSIZE=8 # default output packet size ##################################################################### # Firewire support device firewire # Firewire bus code device sbp # SCSI over Firewire (Requires scbus and da) device fwe # Ethernet over Firewire (non-standard!) ##################################################################### # crypto subsystem # # This is a port of the openbsd crypto framework. Include this when # configuring FAST_IPSEC and when you have a h/w crypto device to accelerate # user applications that link to openssl. # # Drivers are ports from openbsd with some simple enhancements that have # been fed back to openbsd. device crypto # core crypto support device cryptodev # /dev/crypto for access to h/w +device rndtest # FIPS 140-2 entropy tester + device hifn # Hifn 7951, 7781, etc. +options HIFN_DEBUG # enable debugging support: hw.hifn.debug +options HIFN_RNDTEST # enable rndtest support + device ubsec # Broadcom 5501, 5601, 58xx -device rndtest # FIPS 140-2 entropy tester +options UBSEC_DEBUG # enable debugging support: hw.ubsec.debug +options UBSEC_RNDTEST # enable rndtest support ##################################################################### # # Embedded system options: # # An embedded system might want to run something other than init. options INIT_PATH=/sbin/init:/stand/sysinstall # Debug options options BUS_DEBUG # enable newbus debugging options DEBUG_VFS_LOCKS # enable vfs lock debugging ##################################################################### # SYSV IPC KERNEL PARAMETERS # # Maximum number of entries in a semaphore map. options SEMMAP=31 # Maximum number of System V semaphores that can be used on the system at # one time. 
options SEMMNI=11 # Total number of semaphores system wide options SEMMNS=61 # Total number of undo structures in system options SEMMNU=31 # Maximum number of System V semaphores that can be used by a single process # at one time. options SEMMSL=61 # Maximum number of operations that can be outstanding on a single System V # semaphore at one time. options SEMOPM=101 # Maximum number of undo operations that can be outstanding on a single # System V semaphore at one time. options SEMUME=11 # Maximum number of shared memory pages system wide. options SHMALL=1025 # Maximum size, in bytes, of a single System V shared memory region. options SHMMAX=(SHMMAXPGS*PAGE_SIZE+1) options SHMMAXPGS=1025 # Minimum size, in bytes, of a single System V shared memory region. options SHMMIN=2 # Maximum number of shared memory regions that can be used on the system # at one time. options SHMMNI=33 # Maximum number of System V shared memory regions that can be attached to # a single process at one time. options SHMSEG=9 # Set the amount of time (in seconds) the system will wait before # rebooting automatically when a kernel panic occurs. If set to (-1), # the system will wait indefinitely until a key is pressed on the # console. options PANIC_REBOOT_WAIT_TIME=16 ##################################################################### # More undocumented options for linting. # Note that documenting these are not considered an affront. options CAM_DEBUG_DELAY # VFS cluster debugging. options CLUSTERDEBUG options DEBUG # Kernel filelock debugging. options LOCKF_DEBUG # System V compatible message queues # Please note that the values provided here are used to test kernel # building. The defaults in the sources provide almost the same numbers. # MSGSSZ must be a power of 2 between 8 and 1024. options MSGMNB=2049 # Max number of chars in queue options MSGMNI=41 # Max number of message queue identifiers options MSGSEG=2049 # Max number of message segments options MSGSSZ=16 # Size of a message segment options MSGTQL=41 # Max number of messages in system options NBUF=512 # Number of buffer headers options NMBCLUSTERS=1024 # Number of mbuf clusters options SCSI_NCR_DEBUG options SCSI_NCR_MAX_SYNC=10000 options SCSI_NCR_MAX_WIDE=1 options SCSI_NCR_MYADDR=7 options SC_DEBUG_LEVEL=5 # Syscons debug level options SC_RENDER_DEBUG # syscons rendering debugging options SHOW_BUSYBUFS # List buffers that prevent root unmount options SLIP_IFF_OPTS options VFS_BIO_DEBUG # VFS buffer I/O debugging options KSTACK_MAX_PAGES=32 # Maximum pages to give the kernel stack # Yet more undocumented options for linting. options AAC_DEBUG options ACD_DEBUG options ACPI_MAX_THREADS=1 #!options ACPI_NO_SEMAPHORES # Broken: ##options ASR_MEASURE_PERFORMANCE options AST_DEBUG options ATAPI_DEBUG options ATA_DEBUG # BKTR_ALLOC_PAGES has no effect except to cause warnings, and # BROOKTREE_ALLOC_PAGES hasn't actually been superseded by it, since the # driver still mostly spells this option BROOKTREE_ALLOC_PAGES. ##options BKTR_ALLOC_PAGES=(217*4+1) options BROOKTREE_ALLOC_PAGES=(217*4+1) options MAXFILES=999 # METEOR_TEST_VIDEO has no effect since meteor is broken. options METEOR_TEST_VIDEO options NDEVFSINO=1025 options NDEVFSOVERFLOW=32769 # Yet more undocumented options for linting. 
options VGA_DEBUG diff --git a/sys/conf/options b/sys/conf/options index 49906f339e8a..1024a5de2989 100644 --- a/sys/conf/options +++ b/sys/conf/options @@ -1,604 +1,611 @@ # $FreeBSD$ # # On the handling of kernel options # # All kernel options should be listed in NOTES, with suitable # descriptions. Negative options (options that make some code not # compile) should be commented out; LINT (generated from NOTES) should # compile as much code as possible. Try to structure option-using # code so that a single option only switch code on, or only switch # code off, to make it possible to have a full compile-test. If # necessary, you can check for COMPILING_LINT to get maximum code # coverage. # # All new options shall also be listed in either "conf/options" or # "conf/options.". Options that affect a single source-file # .[c|s] should be directed into "opt_.h", while options # that affect multiple files should either go in "opt_global.h" if # this is a kernel-wide option (used just about everywhere), or in # "opt_.h" if it affect only some files. # Note that the effect of listing only an option without a # header-file-name in conf/options (and cousins) is that the last # convention is followed. # # This handling scheme is not yet fully implemented. # # # Format of this file: # Option name filename # # If filename is missing, the default is # opt_.h # Adaptec Array Controller driver options AAC_DEBUG opt_aac.h # Debugging levels: # 0 - quiet, only emit warnings # 1 - noisy, emit major function # points and things done # 2 - extremely noisy, emit trace # items in loops, etc. # Adaptec aic7xxx SCSI controller options AHC_ALLOW_MEMIO opt_aic7xxx.h # Allow PCI devices to use memory # mapped I/O AHC_TMODE_ENABLE opt_aic7xxx.h # Bitmap of units to enable # targetmode operations. AHC_DUMP_EEPROM opt_aic7xxx.h # Dump the contents of our # configuration prom. AHC_DEBUG opt_aic7xxx.h # Compile in Aic7xxx Debugging code. AHC_DEBUG_OPTS opt_aic7xxx.h # Aic7xxx driver debugging options. # See sys/dev/aic7xxx/aic7xxx.h AHC_REG_PRETTY_PRINT opt_aic7xxx.h # Print register bitfields in debug # output. Adds ~128k to driver. # Adaptec aic79xx SCSI controller options AHD_DEBUG opt_aic79xx.h # Compile in Aic79xx Debugging code. AHD_DEBUG_OPTS opt_aic79xx.h # Aic79xx driver debugging options. # See sys/dev/aic7xxx/aic79xx.h AHD_TMODE_ENABLE opt_aic79xx.h # Bitmap of units to enable # targetmode operations. AHD_REG_PRETTY_PRINT opt_aic79xx.h # Print register bitfields in debug # output. Adds ~215k to driver. ADW_ALLOW_MEMIO opt_adw.h # Allow PCI devices to use memory # mapped I/O # Miscellaneous options. 
ALQ opt_alq.h ADAPTIVE_MUTEXES COMPAT_43 opt_compat.h COMPAT_FREEBSD4 opt_compat.h COMPAT_SUNOS opt_compat.h NO_COMPAT_FREEBSD4 opt_compat.h COMPILING_LINT opt_global.h CY_PCI_FASTINTR CONSPEED opt_comconsole.h DDB DDB_NOKLDSYM opt_ddb.h DDB_TRACE DDB_UNATTENDED GDB_REMOTE_CHAT opt_ddb.h GDBSPEED opt_ddb.h GEOM_AES opt_geom.h GEOM_APPLE opt_geom.h GEOM_BDE opt_geom.h GEOM_BSD opt_geom.h GEOM_GPT opt_geom.h GEOM_MBR opt_geom.h GEOM_MIRROR opt_geom.h GEOM_PC98 opt_geom.h GEOM_SUNLABEL opt_geom.h GEOM_VOL opt_geom.h HW_WDOG KSTACK_PAGES KSTACK_MAX_PAGES KTRACE KTRACE_REQUEST_POOL opt_ktrace.h LIBICONV MD_ROOT opt_md.h MD_ROOT_SIZE opt_md.h NDGBPORTS opt_dgb.h NTIMECOUNTER opt_ntp.h NSWAPDEV opt_swap.h PANIC_REBOOT_WAIT_TIME opt_panic.h PPS_SYNC opt_ntp.h PUC_FASTINTR opt_puc.h QUOTA SCHED_4BSD opt_sched.h SCHED_ULE opt_sched.h SHOW_BUSYBUFS SPX_HACK SUIDDIR opt_suiddir.h MSGMNB opt_sysvipc.h MSGMNI opt_sysvipc.h MSGSEG opt_sysvipc.h MSGSSZ opt_sysvipc.h MSGTQL opt_sysvipc.h SEMMAP opt_sysvipc.h SEMMNI opt_sysvipc.h SEMMNS opt_sysvipc.h SEMMNU opt_sysvipc.h SEMMSL opt_sysvipc.h SEMOPM opt_sysvipc.h SEMUME opt_sysvipc.h SHMALL opt_sysvipc.h SHMMAX opt_sysvipc.h SHMMAXPGS opt_sysvipc.h SHMMIN opt_sysvipc.h SHMMNI opt_sysvipc.h SHMSEG opt_sysvipc.h SYSVMSG opt_sysvipc.h SYSVSEM opt_sysvipc.h SYSVSHM opt_sysvipc.h VFS_AIO WLCACHE opt_wavelan.h WLDEBUG opt_wavelan.h TTYHOG opt_tty.h # POSIX kernel options _KPOSIX_PRIORITY_SCHEDULING opt_posix.h P1003_1B_SEMAPHORES opt_posix.h ##################################################################### # SECURITY POLICY PARAMETERS # Support for Mandatory Access Control (MAC) MAC opt_mac.h MAC_BIBA opt_dontuse.h MAC_BSDEXTENDED opt_dontuse.h MAC_DEBUG opt_mac.h MAC_IFOFF opt_dontuse.h MAC_LOMAC opt_dontuse.h MAC_MLS opt_dontuse.h MAC_NONE opt_dontuse.h MAC_PARTITION opt_dontuse.h MAC_PORTACL opt_dontuse.h MAC_SEEOTHERUIDS opt_dontuse.h MAC_TEST opt_dontuse.h # Do we want the config file compiled into the kernel? INCLUDE_CONFIG_FILE opt_config.h # Options for static filesystems. These should only be used at config # time, since the corresponding lkms cannot work if there are any static # dependencies. Unusability is enforced by hiding the defines for the # options in a never-included header. CD9660 opt_dontuse.h CODA opt_dontuse.h EXT2FS opt_dontuse.h FDESCFS opt_dontuse.h LINPROCFS opt_dontuse.h MSDOSFS opt_dontuse.h NULLFS opt_dontuse.h NWFS opt_dontuse.h PORTALFS opt_dontuse.h PROCFS opt_dontuse.h PSEUDOFS opt_dontuse.h UMAPFS opt_dontuse.h NTFS opt_dontuse.h HPFS opt_dontuse.h SMBFS opt_dontuse.h UNIONFS opt_dontuse.h UDF opt_dontuse.h # Broken - ffs_snapshot() dependency from ufs_lookup() :-( FFS opt_ffs_broken_fixme.h # These static filesystems has one slightly bogus static dependency in # sys/i386/i386/autoconf.c. If any of these filesystems are # statically compiled into the kernel, code for mounting them as root # filesystems will be enabled - but look below. NFSCLIENT opt_nfs.h NFSSERVER opt_nfs.h # If you are following the conditions in the copyright, # you can enable soft-updates which will speed up a lot of thigs # and make the system safer from crashes at the same time. # otherwise a STUB module will be compiled in. SOFTUPDATES opt_ffs.h # Enabling this option turns on support for Access Control Lists in UFS, # which can be used to support high security configurations. Depends on # UFS_EXTATTR. 
UFS_ACL opt_ufs.h # Enabling this option turns on support for extended attributes in UFS-based # filesystems, which can be used to support high security configurations # as well as new filesystem features. UFS_EXTATTR opt_ufs.h UFS_EXTATTR_AUTOSTART opt_ufs.h # Enable fast hash lookups for large directories on UFS-based filesystems. UFS_DIRHASH opt_ufs.h # The above static dependencies are planned removed, with a # _ROOT option to control if it usable as root. This list # allows these options to be present in config files already (though # they won't make any difference yet). NFS_ROOT opt_nfsroot.h # SMB/CIFS requester NETSMB opt_netsmb.h NETSMBCRYPTO opt_netsmb.h # Options used only in subr_param.c. HZ opt_param.h MAXFILES opt_param.h NBUF opt_param.h NMBCLUSTERS opt_param.h NSFBUFS opt_param.h VM_BCACHE_SIZE_MAX opt_param.h VM_SWZONE_SIZE_MAX opt_param.h MAXUSERS DFLDSIZ opt_param.h MAXDSIZ opt_param.h MAXSSIZ opt_param.h # Generic SCSI options. CAM_MAX_HIGHPOWER opt_cam.h CAMDEBUG opt_cam.h CAM_DEBUG_DELAY opt_cam.h CAM_DEBUG_BUS opt_cam.h CAM_DEBUG_TARGET opt_cam.h CAM_DEBUG_LUN opt_cam.h CAM_DEBUG_FLAGS opt_cam.h CAM_NEW_TRAN_CODE opt_cam.h SCSI_DELAY opt_scsi.h SCSI_NO_SENSE_STRINGS opt_scsi.h SCSI_NO_OP_STRINGS opt_scsi.h # Options used only in cam/scsi/scsi_cd.c CHANGER_MIN_BUSY_SECONDS opt_cd.h CHANGER_MAX_BUSY_SECONDS opt_cd.h # Options used only in cam/scsi/scsi_sa.c. SA_IO_TIMEOUT opt_sa.h SA_SPACE_TIMEOUT opt_sa.h SA_REWIND_TIMEOUT opt_sa.h SA_ERASE_TIMEOUT opt_sa.h SA_1FM_AT_EOD opt_sa.h # Options used only in cam/scsi/scsi_pt.c SCSI_PT_DEFAULT_TIMEOUT opt_pt.h # Options used only in cam/scsi/scsi_ses.c SES_ENABLE_PASSTHROUGH opt_ses.h # Options used in dev/sym/ (Symbios SCSI driver). SYM_SETUP_LP_PROBE_MAP opt_sym.h #-Low Priority Probe Map (bits) # Allows the ncr to take precedence # 1 (1<<0) -> 810a, 860 # 2 (1<<1) -> 825a, 875, 885, 895 # 4 (1<<2) -> 895a, 896, 1510d SYM_SETUP_SCSI_DIFF opt_sym.h #-HVD support for 825a, 875, 885 # disabled:0 (default), enabled:1 SYM_SETUP_PCI_PARITY opt_sym.h #-PCI parity checking # disabled:0, enabled:1 (default) SYM_SETUP_MAX_LUN opt_sym.h #-Number of LUNs supported # default:8, range:[1..64] # Options used only in pci/ncr.c SCSI_NCR_DEBUG opt_ncr.h SCSI_NCR_MAX_SYNC opt_ncr.h SCSI_NCR_MAX_WIDE opt_ncr.h SCSI_NCR_MYADDR opt_ncr.h # Options used only in dev/isp/* ISP_TARGET_MODE opt_isp.h ISP_FW_CRASH_DUMP opt_isp.h # Options used in the 'ata' ATA/ATAPI driver ATA_STATIC_ID opt_ata.h ATA_NOPCI opt_ata.h DEV_ATADISK opt_ata.h DEV_ATAPICD opt_ata.h DEV_ATAPIST opt_ata.h DEV_ATAPIFD opt_ata.h DEV_ATAPICAM opt_ata.h ATA_DEBUG opt_ata.h ATAPI_DEBUG opt_ata.h ACD_DEBUG opt_ata.h AST_DEBUG opt_ata.h # Net stuff. 
ACCEPT_FILTER_DATA ACCEPT_FILTER_HTTP BOOTP opt_bootp.h BOOTP_COMPAT opt_bootp.h BOOTP_NFSROOT opt_bootp.h BOOTP_NFSV3 opt_bootp.h BOOTP_WIRED_TO opt_bootp.h BRIDGE opt_bdg.h ETHER_II opt_ef.h ETHER_8023 opt_ef.h ETHER_8022 opt_ef.h ETHER_SNAP opt_ef.h MROUTING opt_mrouting.h INET opt_inet.h INET6 opt_inet6.h IPSEC opt_ipsec.h IPSEC_ESP opt_ipsec.h IPSEC_DEBUG opt_ipsec.h IPSEC_FILTERGIF opt_ipsec.h FAST_IPSEC opt_ipsec.h IPDIVERT DUMMYNET opt_ipdn.h IPFILTER opt_ipfilter.h IPFILTER_LOG opt_ipfilter.h IPFILTER_DEFAULT_BLOCK opt_ipfilter.h PFIL_HOOKS opt_pfil_hooks.h IPFIREWALL opt_ipfw.h IPFIREWALL_VERBOSE opt_ipfw.h IPFIREWALL_VERBOSE_LIMIT opt_ipfw.h IPFIREWALL_DEFAULT_TO_ACCEPT opt_ipfw.h IPFIREWALL_FORWARD opt_ipfw.h IPV6FIREWALL opt_ip6fw.h IPV6FIREWALL_VERBOSE opt_ip6fw.h IPV6FIREWALL_VERBOSE_LIMIT opt_ip6fw.h IPV6FIREWALL_DEFAULT_TO_ACCEPT opt_ip6fw.h IPSTEALTH IPX opt_ipx.h IPXIP opt_ipx.h LIBMCHAIN NCP opt_ncp.h NETATALK opt_atalk.h PPP_BSDCOMP opt_ppp.h PPP_DEFLATE opt_ppp.h PPP_FILTER opt_ppp.h RANDOM_IP_ID SLIP_IFF_OPTS opt_slip.h TCPDEBUG TCP_DROP_SYNFIN opt_tcp_input.h XBONEHACK # Netgraph(4). Use option NETGRAPH to enable the base netgraph code. # Each netgraph node type can be either be compiled into the kernel # or loaded dynamically. To get the former, include the corresponding # option below. Each type has its own man page, e.g. ng_async(4). NETGRAPH NETGRAPH_ASYNC opt_netgraph.h NETGRAPH_BPF opt_netgraph.h NETGRAPH_BRIDGE opt_netgraph.h NETGRAPH_CISCO opt_netgraph.h NETGRAPH_ECHO opt_netgraph.h NETGRAPH_ETHER opt_netgraph.h NETGRAPH_FRAME_RELAY opt_netgraph.h NETGRAPH_GIF opt_netgraph.h NETGRAPH_GIF_DEMUX opt_netgraph.h NETGRAPH_HOLE opt_netgraph.h NETGRAPH_IFACE opt_netgraph.h NETGRAPH_IP_INPUT opt_netgraph.h NETGRAPH_KSOCKET opt_netgraph.h NETGRAPH_LMI opt_netgraph.h NETGRAPH_L2TP opt_netgraph.h # MPPC compression requires proprietary files (not included) NETGRAPH_MPPC_COMPRESSION opt_netgraph.h NETGRAPH_MPPC_ENCRYPTION opt_netgraph.h NETGRAPH_ONE2MANY opt_netgraph.h NETGRAPH_PPP opt_netgraph.h NETGRAPH_PPPOE opt_netgraph.h NETGRAPH_PPTPGRE opt_netgraph.h NETGRAPH_RFC1490 opt_netgraph.h NETGRAPH_SOCKET opt_netgraph.h NETGRAPH_SPLIT opt_netgraph.h NETGRAPH_TEE opt_netgraph.h NETGRAPH_TTY opt_netgraph.h NETGRAPH_UI opt_netgraph.h NETGRAPH_VJC opt_netgraph.h # DRM options DRM_LINUX opt_drm.h DRM_DEBUG opt_drm.h ZERO_COPY_SOCKETS opt_zero.h TI_PRIVATE_JUMBOS opt_ti.h TI_JUMBO_HDRSPLIT opt_ti.h # ATM (HARP version) ATM_CORE opt_atm.h ATM_IP opt_atm.h ATM_SIGPVC opt_atm.h ATM_SPANS opt_atm.h ATM_UNI opt_atm.h # XXX Conflict: # of devices vs network protocol (Native ATM). # This makes "atm.h" unusable. NATM opt_natm.h DPT_ALLOW_MEMIO opt_dpt.h # Allow PCI devices to use memory # mapped I/O # DPT driver debug flags DPT_MEASURE_PERFORMANCE opt_dpt.h DPT_HANDLE_TIMEOUTS opt_dpt.h DPT_TIMEOUT_FACTOR opt_dpt.h DPT_LOST_IRQ opt_dpt.h DPT_RESET_HBA opt_dpt.h # Adaptec ASR and DPT V/VI controller options ASR_MEASURE_PERFORMANCE opt_asr.h # Misc debug flags. Most of these should probably be replaced with # 'DEBUG', and then let people recompile just the interesting modules # with 'make CC="cc -DDEBUG"'. 
CLUSTERDEBUG opt_debug_cluster.h DEBUG_1284 opt_ppb_1284.h VP0_DEBUG opt_vpo.h LPT_DEBUG opt_lpt.h PLIP_DEBUG opt_plip.h LOCKF_DEBUG opt_debug_lockf.h NPX_DEBUG opt_debug_npx.h NETATALKDEBUG opt_atalk.h SI_DEBUG opt_debug_si.h # Fb options FB_DEBUG opt_fb.h FB_INSTALL_CDEV opt_fb.h # ppbus related options PERIPH_1284 opt_ppb_1284.h DONTPROBE_1284 opt_ppb_1284.h # smbus related options ENABLE_ALART opt_intpm.h # These cause changes all over the kernel BLKDEV_IOSIZE opt_global.h DEBUG opt_global.h DEBUG_LOCKS opt_global.h DEBUG_VFS_LOCKS opt_global.h LOOKUP_SHARED opt_global.h DIAGNOSTIC opt_global.h INVARIANT_SUPPORT opt_global.h INVARIANTS opt_global.h MCLSHIFT opt_global.h MSIZE opt_global.h REGRESSION opt_global.h RESTARTABLE_PANICS opt_global.h VFS_BIO_DEBUG opt_global.h # These are VM related options VM_KMEM_SIZE opt_vm.h VM_KMEM_SIZE_SCALE opt_vm.h VM_KMEM_SIZE_MAX opt_vm.h NO_SWAPPING opt_vm.h MALLOC_PROFILE opt_vm.h PQ_NOOPT opt_vmpage.h PQ_NORMALCACHE opt_vmpage.h PQ_MEDIUMCACHE opt_vmpage.h PQ_LARGECACHE opt_vmpage.h PQ_HUGECACHE opt_vmpage.h PQ_CACHESIZE opt_vmpage.h # Standard SMP options SMP opt_global.h # Size of the kernel message buffer MSGBUF_SIZE opt_msgbuf.h # NFS options NFS_MINATTRTIMO opt_nfs.h NFS_MAXATTRTIMO opt_nfs.h NFS_MINDIRATTRTIMO opt_nfs.h NFS_MAXDIRATTRTIMO opt_nfs.h NFS_GATHERDELAY opt_nfs.h NFS_WDELAYHASHSIZ opt_nfs.h NFS_DEBUG opt_nfs.h # For the Bt848/Bt848A/Bt849/Bt878/Bt879 driver OVERRIDE_CARD opt_bktr.h OVERRIDE_TUNER opt_bktr.h OVERRIDE_DBX opt_bktr.h OVERRIDE_MSP opt_bktr.h BROOKTREE_SYSTEM_DEFAULT opt_bktr.h BROOKTREE_ALLOC_PAGES opt_bktr.h BKTR_OVERRIDE_CARD opt_bktr.h BKTR_OVERRIDE_TUNER opt_bktr.h BKTR_OVERRIDE_DBX opt_bktr.h BKTR_OVERRIDE_MSP opt_bktr.h BKTR_SYSTEM_DEFAULT opt_bktr.h BKTR_ALLOC_PAGES opt_bktr.h BKTR_USE_PLL opt_bktr.h BKTR_GPIO_ACCESS opt_bktr.h BKTR_NO_MSP_RESET opt_bktr.h BKTR_430_FX_MODE opt_bktr.h BKTR_SIS_VIA_MODE opt_bktr.h BKTR_USE_FREEBSD_SMBUS opt_bktr.h # meteor opt_meteor.h METEOR_ALLOC_PAGES opt_meteor.h METEOR_TEST_VIDEO opt_meteor.h METEOR_SYSTEM_DEFAULT opt_meteor.h METEOR_DEALLOC_PAGES opt_meteor.h METEOR_DEALLOC_ABOVE opt_meteor.h # options for serial support COM_ESP opt_sio.h COM_MULTIPORT opt_sio.h BREAK_TO_DEBUGGER opt_comconsole.h ALT_BREAK_TO_DEBUGGER opt_comconsole.h # options for bus/device framework BUS_DEBUG opt_bus.h # options for USB support UHCI_DEBUG opt_usb.h OHCI_DEBUG opt_usb.h USB_DEBUG opt_usb.h UGEN_DEBUG opt_usb.h UHID_DEBUG opt_usb.h UHUB_DEBUG opt_usb.h UKBD_DEBUG opt_usb.h ULPT_DEBUG opt_usb.h UMASS_DEBUG opt_usb.h UMS_DEBUG opt_usb.h URIO_DEBUG opt_usb.h UKBD_DFLT_KEYMAP opt_ukbd.h UVSCOM_DEFAULT_OPKTSIZE opt_uvscom.h # Vinum options VINUMDEBUG opt_vinum.h VINUM_AUTOSTART opt_vinum.h # Embedded system options INIT_PATH opt_init_path.h ROOTDEVNAME opt_rootdevname.h FDC_DEBUG opt_fdc.h PCFCLOCK_VERBOSE opt_pcfclock.h PCFCLOCK_MAX_RETRIES opt_pcfclock.h # RAIDframe options RAID_AUTOCONFIG opt_raid.h RAID_DEBUG opt_raid.h TDFX_LINUX opt_tdfx.h KTR opt_global.h KTR_ALQ opt_ktr.h KTR_MASK opt_ktr.h KTR_CPUMASK opt_ktr.h KTR_COMPILE opt_global.h KTR_ENTRIES opt_global.h KTR_VERBOSE opt_ktr.h MUTEX_DEBUG opt_global.h WITNESS opt_global.h WITNESS_DDB opt_witness.h WITNESS_SKIPSPIN opt_witness.h # options for ACPI support ACPI_DEBUG opt_acpi.h ACPI_NO_SEMAPHORES opt_acpi.h ACPI_MAX_THREADS opt_acpi.h # options for DEVFS, see sys/fs/devfs/devfs.h NDEVFSINO opt_devfs.h NDEVFSOVERFLOW opt_devfs.h # various 'device presence' options. 
DEV_ISA opt_isa.h DEV_MCA opt_mca.h DEV_BPF opt_bpf.h # ed driver ED_NO_MIIBUS opt_ed.h # wi driver WI_SYMBOL_FIRMWARE opt_wi.h # Polling device handling DEVICE_POLLING opt_global.h # Mutex profiling MUTEX_PROFILING opt_global.h +# options for ubsec driver +UBSEC_DEBUG opt_ubsec.h +UBSEC_RNDTEST opt_ubsec.h + +# options for hifn driver +HIFN_DEBUG opt_hifn.h +HIFN_RNDTEST opt_hifn.h diff --git a/sys/dev/hifn/hifn7751.c b/sys/dev/hifn/hifn7751.c index cb58fe0b0f98..f7d60a45e3a5 100644 --- a/sys/dev/hifn/hifn7751.c +++ b/sys/dev/hifn/hifn7751.c @@ -1,2638 +1,2661 @@ /* $FreeBSD$ */ /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */ /* * Invertex AEON / Hifn 7751 driver * Copyright (c) 1999 Invertex Inc. All rights reserved. * Copyright (c) 1999 Theo de Raadt * Copyright (c) 2000-2001 Network Security Technologies, Inc. * http://www.netsec.net * * This driver is based on a previous driver by Invertex, for which they * requested: Please send any comments, feedback, bug-fixes, or feature * requests to software@invertex.com. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. * */ -#define HIFN_DEBUG - /* * Driver for the Hifn 7751 encryption processor. 
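 *
 * Illustrative note: the compile-time knobs now come from the kernel
 * config by way of opt_hifn.h (HIFN_DEBUG, HIFN_RNDTEST) instead of a
 * hardcoded define.  With HIFN_RNDTEST configured and a rndtest(4)
 * instance attached, RNG output is routed through rndtest_harvest() for
 * FIPS 140-2 style testing; otherwise a small default_harvest() wrapper
 * feeds it straight to random_harvest().  A sketch of a matching kernel
 * config fragment (mirroring the NOTES entries in this change):
 *
 *	device	crypto
 *	device	rndtest			# optional FIPS 140-2 entropy tester
 *	device	hifn
 *	options	HIFN_DEBUG		# hw.hifn.debug sysctl
 *	options	HIFN_RNDTEST		# route RNG output through rndtest(4)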
*/ +#include "opt_hifn.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include + +#ifdef HIFN_RNDTEST +#include +#endif #include #include /* * Prototypes and count for the pci_device structure */ static int hifn_probe(device_t); static int hifn_attach(device_t); static int hifn_detach(device_t); static int hifn_suspend(device_t); static int hifn_resume(device_t); static void hifn_shutdown(device_t); static device_method_t hifn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hifn_probe), DEVMETHOD(device_attach, hifn_attach), DEVMETHOD(device_detach, hifn_detach), DEVMETHOD(device_suspend, hifn_suspend), DEVMETHOD(device_resume, hifn_resume), DEVMETHOD(device_shutdown, hifn_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), { 0, 0 } }; static driver_t hifn_driver = { "hifn", hifn_methods, sizeof (struct hifn_softc) }; static devclass_t hifn_devclass; DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0); MODULE_DEPEND(hifn, crypto, 1, 1, 1); +#ifdef HIFN_RNDTEST +MODULE_DEPEND(hifn, rndtest, 1, 1, 1); +#endif static void hifn_reset_board(struct hifn_softc *, int); static void hifn_reset_puc(struct hifn_softc *); static void hifn_puc_wait(struct hifn_softc *); static int hifn_enable_crypto(struct hifn_softc *); static void hifn_set_retry(struct hifn_softc *sc); static void hifn_init_dma(struct hifn_softc *); static void hifn_init_pci_registers(struct hifn_softc *); static int hifn_sramsize(struct hifn_softc *); static int hifn_dramsize(struct hifn_softc *); static int hifn_ramtype(struct hifn_softc *); static void hifn_sessions(struct hifn_softc *); static void hifn_intr(void *); static u_int hifn_write_command(struct hifn_command *, u_int8_t *); static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt); static int hifn_newsession(void *, u_int32_t *, struct cryptoini *); static int hifn_freesession(void *, u_int64_t); static int hifn_process(void *, struct cryptop *, int); static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *); static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int); static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *); static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *); static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *); static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *); static int hifn_init_pubrng(struct hifn_softc *); static void hifn_rng(void *); static void hifn_tick(void *); static void hifn_abort(struct hifn_softc *); static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *); static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t); static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t); static __inline__ u_int32_t READ_REG_0(struct hifn_softc *sc, bus_size_t reg) { u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg); sc->sc_bar0_lastreg = (bus_size_t) -1; return (v); } #define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val) static __inline__ u_int32_t READ_REG_1(struct hifn_softc *sc, bus_size_t reg) { u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg); sc->sc_bar1_lastreg = (bus_size_t) -1; return (v); } #define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val) SYSCTL_NODE(_hw, OID_AUTO, hifn, 
CTLFLAG_RD, 0, "Hifn driver parameters"); #ifdef HIFN_DEBUG static int hifn_debug = 0; SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug, 0, "control debugging msgs"); #endif static struct hifn_stats hifnstats; SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats, hifn_stats, "driver statistics"); static int hifn_maxbatch = 1; SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch, 0, "max ops to batch w/o interrupt"); /* * Probe for a supported device. The PCI vendor and device * IDs are used to detect devices we know how to handle. */ static int hifn_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX && pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON) return (0); if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)) return (0); if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751) return (0); return (ENXIO); } static void hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static const char* hifn_partname(struct hifn_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_HIFN: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_HIFN_6500: return "Hifn 6500"; case PCI_PRODUCT_HIFN_7751: return "Hifn 7751"; case PCI_PRODUCT_HIFN_7811: return "Hifn 7811"; case PCI_PRODUCT_HIFN_7951: return "Hifn 7951"; } return "Hifn unknown-part"; case PCI_VENDOR_INVERTEX: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON"; } return "Invertex unknown-part"; case PCI_VENDOR_NETSEC: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751"; } return "NetSec unknown-part"; } return "Unknown-vendor unknown-part"; } +static void +default_harvest(struct rndtest_state *rsp, void *buf, u_int count) +{ + random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE); +} + /* * Attach an interface that successfully probed. */ static int hifn_attach(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); u_int32_t cmd; caddr_t kva; int rseg, rid; char rbase; u_int16_t ena, rev; KASSERT(sc != NULL, ("hifn_attach: null software carrier!")); bzero(sc, sizeof (*sc)); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "crypto driver", MTX_DEF); /* XXX handle power management */ /* * The 7951 has a random number generator and * public key support; note this. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && pci_get_device(dev) == PCI_PRODUCT_HIFN_7951) sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC; /* * The 7811 has a random number generator and * we also note it's identity 'cuz of some quirks. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && pci_get_device(dev) == PCI_PRODUCT_HIFN_7811) sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG; /* * Configure support for memory-mapped access to * registers and for DMA operations. */ #define PCIM_ENA (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN) cmd = pci_read_config(dev, PCIR_COMMAND, 4); cmd |= PCIM_ENA; pci_write_config(dev, PCIR_COMMAND, cmd, 4); cmd = pci_read_config(dev, PCIR_COMMAND, 4); if ((cmd & PCIM_ENA) != PCIM_ENA) { device_printf(dev, "failed to enable %s\n", (cmd & PCIM_ENA) == 0 ? "memory mapping & bus mastering" : (cmd & PCIM_CMD_MEMEN) == 0 ? 
"memory mapping" : "bus mastering"); goto fail_pci; } #undef PCIM_ENA /* * Setup PCI resources. Note that we record the bus * tag and handle for each register mapping, this is * used by the READ_REG_0, WRITE_REG_0, READ_REG_1, * and WRITE_REG_1 macros throughout the driver. */ rid = HIFN_BAR0; sc->sc_bar0res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->sc_bar0res == NULL) { device_printf(dev, "cannot map bar%d register space\n", 0); goto fail_pci; } sc->sc_st0 = rman_get_bustag(sc->sc_bar0res); sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res); sc->sc_bar0_lastreg = (bus_size_t) -1; rid = HIFN_BAR1; sc->sc_bar1res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->sc_bar1res == NULL) { device_printf(dev, "cannot map bar%d register space\n", 1); goto fail_io0; } sc->sc_st1 = rman_get_bustag(sc->sc_bar1res); sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res); sc->sc_bar1_lastreg = (bus_size_t) -1; hifn_set_retry(sc); /* * Setup the area where the Hifn DMA's descriptors * and associated data structures. */ if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HIFN_MAX_DMALEN, /* maxsize */ MAX_SCATTER, /* nsegments */ HIFN_MAX_SEGLEN, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ &sc->sc_dmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto fail_io1; } if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { device_printf(dev, "cannot create dma map\n"); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { device_printf(dev, "cannot alloc dma buffer\n"); bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva, sizeof (*sc->sc_dma), hifn_dmamap_cb, &sc->sc_dma_physaddr, BUS_DMA_NOWAIT)) { device_printf(dev, "cannot load dma map\n"); bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap); bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } sc->sc_dma = (struct hifn_dma *)kva; bzero(sc->sc_dma, sizeof(*sc->sc_dma)); KASSERT(sc->sc_st0 != NULL, ("hifn_attach: null bar0 tag!")); KASSERT(sc->sc_sh0 != NULL, ("hifn_attach: null bar0 handle!")); KASSERT(sc->sc_st1 != NULL, ("hifn_attach: null bar1 tag!")); KASSERT(sc->sc_sh1 != NULL, ("hifn_attach: null bar1 handle!")); /* * Reset the board and do the ``secret handshake'' * to enable the crypto support. Then complete the * initialization procedure by setting up the interrupt * and hooking in to the system crypto support so we'll * get used for system services like the crypto device, * IPsec, RNG device, etc. */ hifn_reset_board(sc, 0); if (hifn_enable_crypto(sc) != 0) { device_printf(dev, "crypto enabling failed\n"); goto fail_mem; } hifn_reset_puc(sc); hifn_init_dma(sc); hifn_init_pci_registers(sc); if (hifn_ramtype(sc)) goto fail_mem; if (sc->sc_drammodel == 0) hifn_sramsize(sc); else hifn_dramsize(sc); /* * Workaround for NetSec 7751 rev A: half ram size because two * of the address lines were left floating */ if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 && pci_get_revid(dev) == 0x61) /*XXX???*/ sc->sc_ramsize >>= 1; /* * Arrange the interrupt line. 
*/ rid = 0; sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto fail_mem; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is marked appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET, hifn_intr, sc, &sc->sc_intrhand)) { device_printf(dev, "could not setup interrupt\n"); goto fail_intr2; } hifn_sessions(sc); /* * NB: Keep only the low 16 bits; this masks the chip id * from the 7951. */ rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff; rseg = sc->sc_ramsize / 1024; rbase = 'K'; if (sc->sc_ramsize >= (1024 * 1024)) { rbase = 'M'; rseg /= 1024; } device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram, %u sessions\n", hifn_partname(sc), rev, rseg, rbase, sc->sc_drammodel ? 'd' : 's', sc->sc_maxses); sc->sc_cid = crypto_get_driverid(0); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto fail_intr; } WRITE_REG_0(sc, HIFN_0_PUCNFG, READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID); ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; switch (ena) { case HIFN_PUSTAT_ENA_2: crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); /*FALLTHROUGH*/ case HIFN_PUSTAT_ENA_1: crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); break; } bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) hifn_init_pubrng(sc); /* NB: 1 means the callout runs w/o Giant locked */ callout_init(&sc->sc_tickto, 1); callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); return (0); fail_intr: bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); fail_intr2: /* XXX don't store rid */ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); fail_mem: bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); /* Turn off DMA polling */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); fail_io1: bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); fail_io0: bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); fail_pci: mtx_destroy(&sc->sc_mtx); return (ENXIO); } /* * Detach an interface that successfully probed. */ static int hifn_detach(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); KASSERT(sc != NULL, ("hifn_detach: null software carrier!")); HIFN_LOCK(sc); /*XXX other resources */ callout_stop(&sc->sc_tickto); callout_stop(&sc->sc_rngto); /* Turn off DMA polling */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); crypto_unregister_all(sc->sc_cid); bus_generic_detach(dev); /*XXX should be no children, right? 
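 * (teardown order below: interrupt handler and IRQ first, then the DMA
 *  map, memory and tag, and finally the two BAR mappings)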
*/ bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); /* XXX don't store rid */ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); HIFN_UNLOCK(sc); mtx_destroy(&sc->sc_mtx); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void hifn_shutdown(device_t dev) { #ifdef notyet hifn_stop(device_get_softc(dev)); #endif } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int hifn_suspend(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); #ifdef notyet int i; hifn_stop(sc); for (i = 0; i < 5; i++) sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4); sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); #endif sc->sc_suspended = 1; return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int hifn_resume(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); #ifdef notyet int i; /* better way to do this? */ for (i = 0; i < 5; i++) pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); /* reenable busmastering */ pci_enable_busmaster(dev); pci_enable_io(dev, HIFN_RES); /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) rl_init(sc); #endif sc->sc_suspended = 0; return (0); } static int hifn_init_pubrng(struct hifn_softc *sc) { u_int32_t r; int i; +#ifdef HIFN_RNDTEST + sc->sc_rndtest = rndtest_attach(sc->sc_dev); + if (sc->sc_rndtest) + sc->sc_harvest = rndtest_harvest; + else + sc->sc_harvest = default_harvest; +#else + sc->sc_harvest = default_harvest; +#endif if ((sc->sc_flags & HIFN_IS_7811) == 0) { /* Reset 7951 public key/rng engine */ WRITE_REG_1(sc, HIFN_1_PUB_RESET, READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET); for (i = 0; i < 100; i++) { DELAY(1000); if ((READ_REG_1(sc, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0) break; } if (i == 100) { device_printf(sc->sc_dev, "public key init failed\n"); return (1); } } /* Enable the rng, if available */ if (sc->sc_flags & HIFN_HAS_RNG) { if (sc->sc_flags & HIFN_IS_7811) { r = READ_REG_1(sc, HIFN_1_7811_RNGENA); if (r & HIFN_7811_RNGENA_ENA) { r &= ~HIFN_7811_RNGENA_ENA; WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); } WRITE_REG_1(sc, HIFN_1_7811_RNGCFG, HIFN_7811_RNGCFG_DEFL); r |= HIFN_7811_RNGENA_ENA; WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); } else WRITE_REG_1(sc, HIFN_1_RNG_CONFIG, READ_REG_1(sc, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA); sc->sc_rngfirst = 1; if (hz >= 100) sc->sc_rnghz = hz / 100; else sc->sc_rnghz = 1; /* NB: 1 means the callout runs w/o Giant locked */ callout_init(&sc->sc_rngto, 1); callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, 
sc); } /* Enable public key engine, if available */ if (sc->sc_flags & HIFN_HAS_PUBLIC) { WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); sc->sc_dmaier |= HIFN_DMAIER_PUBDONE; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); } return (0); } static void hifn_rng(void *vsc) { #define RANDOM_BITS(n) (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0 struct hifn_softc *sc = vsc; u_int32_t sts, num[2]; int i; if (sc->sc_flags & HIFN_IS_7811) { for (i = 0; i < 5; i++) { sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS); if (sts & HIFN_7811_RNGSTS_UFL) { device_printf(sc->sc_dev, "RNG underflow: disabling\n"); return; } if ((sts & HIFN_7811_RNGSTS_RDY) == 0) break; /* * There are at least two words in the RNG FIFO * at this point. */ num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); /* NB: discard first data read */ if (sc->sc_rngfirst) sc->sc_rngfirst = 0; else - random_harvest(num, RANDOM_BITS(2), RANDOM_PURE); + (*sc->sc_harvest)(sc->sc_rndtest, + num, sizeof (num)); } } else { num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA); /* NB: discard first data read */ if (sc->sc_rngfirst) sc->sc_rngfirst = 0; else - random_harvest(num, RANDOM_BITS(1), RANDOM_PURE); + (*sc->sc_harvest)(sc->sc_rndtest, + num, sizeof (num[0])); } callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); #undef RANDOM_BITS } static void hifn_puc_wait(struct hifn_softc *sc) { int i; for (i = 5000; i > 0; i--) { DELAY(1); if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET)) break; } if (!i) device_printf(sc->sc_dev, "proc unit did not reset\n"); } /* * Reset the processing unit. */ static void hifn_reset_puc(struct hifn_softc *sc) { /* Reset processing unit */ WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); hifn_puc_wait(sc); } /* * Set the Retry and TRDY registers; note that we set them to * zero because the 7811 locks up when forced to retry (section * 3.6 of "Specification Update SU-0014-04". Not clear if we * should do this for all Hifn parts, but it doesn't seem to hurt. */ static void hifn_set_retry(struct hifn_softc *sc) { /* NB: RETRY only responds to 8-bit reads/writes */ pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1); pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4); } /* * Resets the board. Values in the regesters are left as is * from the reset (i.e. initial values are assigned elsewhere). */ static void hifn_reset_board(struct hifn_softc *sc, int full) { u_int32_t reg; /* * Set polling in the DMA configuration register to zero. 0x7 avoids * resetting the board and zeros out the other fields. */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); /* * Now that polling has been disabled, we have to wait 1 ms * before resetting the board. 
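 * (DELAY() takes its argument in microseconds, so the DELAY(1000)
 * that follows is the required 1 ms pause.)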
*/ DELAY(1000); /* Reset the DMA unit */ if (full) { WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); DELAY(1000); } else { WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET); hifn_reset_puc(sc); } KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!")); bzero(sc->sc_dma, sizeof(*sc->sc_dma)); /* Bring dma unit out of reset */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); hifn_puc_wait(sc); hifn_set_retry(sc); if (sc->sc_flags & HIFN_IS_7811) { for (reg = 0; reg < 1000; reg++) { if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) & HIFN_MIPSRST_CRAMINIT) break; DELAY(1000); } if (reg == 1000) printf(": cram init timeout\n"); } } static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt) { int i; u_int32_t v; for (i = 0; i < cnt; i++) { /* get the parity */ v = a & 0x80080125; v ^= v >> 16; v ^= v >> 8; v ^= v >> 4; v ^= v >> 2; v ^= v >> 1; a = (v & 1) ^ (a << 1); } return a; } struct pci2id { u_short pci_vendor; u_short pci_prod; char card_id[13]; }; static struct pci2id pci2id[] = { { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { /* * Other vendors share this PCI ID as well, such as * http://www.powercrypt.com, and obviously they also * use the same key. */ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, }; /* * Checks to see if crypto is already enabled. If crypto isn't enable, * "hifn_enable_crypto" is called to enable it. The check is important, * as enabling crypto twice will lock the board. */ static int hifn_enable_crypto(struct hifn_softc *sc) { u_int32_t dmacfg, ramcfg, encl, addr, i; char *offtbl = NULL; for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) { if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) && pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) { offtbl = pci2id[i].card_id; break; } } if (offtbl == NULL) { device_printf(sc->sc_dev, "Unknown card!\n"); return (1); } ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG); dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG); /* * The RAM config register's encrypt level bit needs to be set before * every read performed on the encryption level register. */ WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; /* * Make sure we don't re-unlock. Two unlocks kills chip until the * next reboot. 
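 *
 * The unlock sequence below reads HIFN_UNLOCK_SECRET1, primes
 * HIFN_UNLOCK_SECRET2 with zero, and then walks the 13-byte card_id
 * through hifn_next_signature(), writing each intermediate value to
 * HIFN_UNLOCK_SECRET2 with a delay between writes.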
*/ if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) { #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "Strong crypto already enabled!\n"); #endif goto report; } if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) { #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "Unknown encryption level 0x%x\n", encl); #endif return 1; } WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); DELAY(1000); addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1); DELAY(1000); WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0); DELAY(1000); for (i = 0; i <= 12; i++) { addr = hifn_next_signature(addr, offtbl[i] + 0x101); WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr); DELAY(1000); } WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; #ifdef HIFN_DEBUG if (hifn_debug) { if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2) device_printf(sc->sc_dev, "Engine is permanently " "locked until next system reset!\n"); else device_printf(sc->sc_dev, "Engine enabled " "successfully!\n"); } #endif report: WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg); WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg); switch (encl) { case HIFN_PUSTAT_ENA_1: case HIFN_PUSTAT_ENA_2: break; case HIFN_PUSTAT_ENA_0: default: device_printf(sc->sc_dev, "disabled"); break; } return 0; } /* * Give initial values to the registers listed in the "Register Space" * section of the HIFN Software Development reference manual. */ static void hifn_init_pci_registers(struct hifn_softc *sc) { /* write fixed values needed by the Initialization registers */ WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD); WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER); /* write all 4 ring address registers */ WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); DELAY(2000); /* write status register */ WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST | HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST | HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST | HIFN_DMACSR_S_WAIT | HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST | HIFN_DMACSR_C_WAIT | HIFN_DMACSR_ENGINE | ((sc->sc_flags & HIFN_HAS_PUBLIC) ? HIFN_DMACSR_PUBDONE : 0) | ((sc->sc_flags & HIFN_IS_7811) ? HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0)); sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0; sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT | HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER | HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT | ((sc->sc_flags & HIFN_IS_7811) ? HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0); sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES | HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 | (sc->sc_drammodel ? 
HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM)); WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST | ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) | ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL)); } /* * The maximum number of sessions supported by the card * is dependent on the amount of context ram, which * encryption algorithms are enabled, and how compression * is configured. This should be configured before this * routine is called. */ static void hifn_sessions(struct hifn_softc *sc) { u_int32_t pucnfg; int ctxsize; pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG); if (pucnfg & HIFN_PUCNFG_COMPSING) { if (pucnfg & HIFN_PUCNFG_ENCCNFG) ctxsize = 128; else ctxsize = 512; sc->sc_maxses = 1 + ((sc->sc_ramsize - 32768) / ctxsize); } else sc->sc_maxses = sc->sc_ramsize / 16384; if (sc->sc_maxses > 2048) sc->sc_maxses = 2048; } /* * Determine ram type (sram or dram). Board should be just out of a reset * state when this is called. */ static int hifn_ramtype(struct hifn_softc *sc) { u_int8_t data[8], dataexpect[8]; int i; for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = 0x55; if (hifn_writeramaddr(sc, 0, data)) return (-1); if (hifn_readramaddr(sc, 0, data)) return (-1); if (bcmp(data, dataexpect, sizeof(data)) != 0) { sc->sc_drammodel = 1; return (0); } for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = 0xaa; if (hifn_writeramaddr(sc, 0, data)) return (-1); if (hifn_readramaddr(sc, 0, data)) return (-1); if (bcmp(data, dataexpect, sizeof(data)) != 0) { sc->sc_drammodel = 1; return (0); } return (0); } #define HIFN_SRAM_MAX (32 << 20) #define HIFN_SRAM_STEP_SIZE 16384 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE) static int hifn_sramsize(struct hifn_softc *sc) { u_int32_t a; u_int8_t data[8]; u_int8_t dataexpect[sizeof(data)]; int32_t i; for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = i ^ 0x5a; for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) { a = i * HIFN_SRAM_STEP_SIZE; bcopy(&i, data, sizeof(i)); hifn_writeramaddr(sc, a, data); } for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) { a = i * HIFN_SRAM_STEP_SIZE; bcopy(&i, dataexpect, sizeof(i)); if (hifn_readramaddr(sc, a, data) < 0) return (0); if (bcmp(data, dataexpect, sizeof(data)) != 0) return (0); sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE; } return (0); } /* * XXX For dram boards, one should really try all of the * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG * is already set up correctly. 
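 *
 * For reference, the decoding below computes 1 << ((cnfg >> 13) + 18),
 * so a field value of 0 yields 256KB and each increment doubles the
 * reported size (1 -> 512KB, 2 -> 1MB, 3 -> 2MB, ...).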
*/ static int hifn_dramsize(struct hifn_softc *sc) { u_int32_t cnfg; cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) & HIFN_PUCNFG_DRAMMASK; sc->sc_ramsize = 1 << ((cnfg >> 13) + 18); return (0); } static void hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp) { struct hifn_dma *dma = sc->sc_dma; if (dma->cmdi == HIFN_D_CMD_RSIZE) { dma->cmdi = 0; dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *cmdp = dma->cmdi++; dma->cmdk = dma->cmdi; if (dma->srci == HIFN_D_SRC_RSIZE) { dma->srci = 0; dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *srcp = dma->srci++; dma->srck = dma->srci; if (dma->dsti == HIFN_D_DST_RSIZE) { dma->dsti = 0; dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *dstp = dma->dsti++; dma->dstk = dma->dsti; if (dma->resi == HIFN_D_RES_RSIZE) { dma->resi = 0; dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *resp = dma->resi++; dma->resk = dma->resi; } static int hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) { struct hifn_dma *dma = sc->sc_dma; hifn_base_command_t wc; const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; int r, cmdi, resi, srci, dsti; wc.masks = htole16(3 << 13); wc.session_num = htole16(addr >> 14); wc.total_source_count = htole16(8); wc.total_dest_count = htole16(addr & 0x3fff); hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); /* build write command */ bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc; bcopy(data, &dma->test_src, sizeof(dma->test_src)); dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_src)); dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_dst)); dma->cmdr[cmdi].l = htole32(16 | masks); dma->srcr[srci].l = htole32(8 | masks); dma->dstr[dsti].l = htole32(4 | masks); dma->resr[resi].l = htole32(4 | masks); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); for (r = 10000; r >= 0; r--) { DELAY(10); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) break; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } if (r == 0) { device_printf(sc->sc_dev, "writeramaddr -- " "result[%d](addr %d) still valid\n", resi, addr); r = -1; return (-1); } else r = 0; WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); return (r); } static int hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) { struct hifn_dma *dma = sc->sc_dma; hifn_base_command_t rc; const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; int r, cmdi, srci, dsti, resi; rc.masks = htole16(2 << 13); rc.session_num = htole16(addr >> 14); rc.total_source_count = htole16(addr & 0x3fff); rc.total_dest_count = htole16(8); 
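	/*
	 * NB: the base command set up above encodes a RAM read; it is
	 * distinguished from the write by the high bits of 'masks'
	 * (2 << 13 here versus 3 << 13 in hifn_writeramaddr).  The upper
	 * address bits ride in 'session_num', the low 14 address bits in
	 * the source count, and the 8-byte transfer into test_dst is
	 * described by the destination count.
	 */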
hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc; dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_src)); dma->test_src = 0; dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_dst)); dma->test_dst = 0; dma->cmdr[cmdi].l = htole32(8 | masks); dma->srcr[srci].l = htole32(8 | masks); dma->dstr[dsti].l = htole32(8 | masks); dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); for (r = 10000; r >= 0; r--) { DELAY(10); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) break; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } if (r == 0) { device_printf(sc->sc_dev, "readramaddr -- " "result[%d](addr %d) still valid\n", resi, addr); r = -1; } else { r = 0; bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); } WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); return (r); } /* * Initialize the descriptor rings. */ static void hifn_init_dma(struct hifn_softc *sc) { struct hifn_dma *dma = sc->sc_dma; int i; hifn_set_retry(sc); /* initialize static pointer values */ for (i = 0; i < HIFN_D_CMD_RSIZE; i++) dma->cmdr[i].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, command_bufs[i][0])); for (i = 0; i < HIFN_D_RES_RSIZE; i++) dma->resr[i].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, result_bufs[i][0])); dma->cmdr[HIFN_D_CMD_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); dma->srcr[HIFN_D_SRC_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); dma->dstr[HIFN_D_DST_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); dma->resr[HIFN_D_RES_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; } /* * Writes out the raw command buffer space. Returns the * command buffer size. 
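 *
 * The pieces are emitted in the order the code below writes them and
 * are present only when the corresponding mask bits request them:
 *
 *	base cmd | mac cmd | crypt cmd | mac key | crypt key | iv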
*/ static u_int hifn_write_command(struct hifn_command *cmd, u_int8_t *buf) { u_int8_t *buf_pos; hifn_base_command_t *base_cmd; hifn_mac_command_t *mac_cmd; hifn_crypt_command_t *cry_cmd; int using_mac, using_crypt, len; u_int32_t dlen, slen; buf_pos = buf; using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC; using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT; base_cmd = (hifn_base_command_t *)buf_pos; base_cmd->masks = htole16(cmd->base_masks); slen = cmd->src_mapsize; if (cmd->sloplen) dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t); else dlen = cmd->dst_mapsize; base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO); base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO); dlen >>= 16; slen >>= 16; base_cmd->session_num = htole16(cmd->session_num | ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) | ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M)); buf_pos += sizeof(hifn_base_command_t); if (using_mac) { mac_cmd = (hifn_mac_command_t *)buf_pos; dlen = cmd->maccrd->crd_len; mac_cmd->source_count = htole16(dlen & 0xffff); dlen >>= 16; mac_cmd->masks = htole16(cmd->mac_masks | ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M)); mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip); mac_cmd->reserved = 0; buf_pos += sizeof(hifn_mac_command_t); } if (using_crypt) { cry_cmd = (hifn_crypt_command_t *)buf_pos; dlen = cmd->enccrd->crd_len; cry_cmd->source_count = htole16(dlen & 0xffff); dlen >>= 16; cry_cmd->masks = htole16(cmd->cry_masks | ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M)); cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip); cry_cmd->reserved = 0; buf_pos += sizeof(hifn_crypt_command_t); } if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) { bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH); buf_pos += HIFN_MAC_KEY_LENGTH; } if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) { switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { case HIFN_CRYPT_CMD_ALG_3DES: bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH); buf_pos += HIFN_3DES_KEY_LENGTH; break; case HIFN_CRYPT_CMD_ALG_DES: bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH); buf_pos += cmd->cklen; break; case HIFN_CRYPT_CMD_ALG_RC4: len = 256; do { int clen; clen = MIN(cmd->cklen, len); bcopy(cmd->ck, buf_pos, clen); len -= clen; buf_pos += clen; } while (len > 0); bzero(buf_pos, 4); buf_pos += 4; break; } } if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) { bcopy(cmd->iv, buf_pos, HIFN_IV_LENGTH); buf_pos += HIFN_IV_LENGTH; } if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) { bzero(buf_pos, 8); buf_pos += 8; } return (buf_pos - buf); } static int hifn_dmamap_aligned(struct hifn_operand *op) { int i; for (i = 0; i < op->nsegs; i++) { if (op->segs[i].ds_addr & 3) return (0); if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3)) return (0); } return (1); } static int hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd) { struct hifn_dma *dma = sc->sc_dma; struct hifn_operand *dst = &cmd->dst; u_int32_t p, l; int idx, used = 0, i; idx = dma->dsti; for (i = 0; i < dst->nsegs - 1; i++) { dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; if (++idx == HIFN_D_DST_RSIZE) { dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); idx = 0; } } if (cmd->sloplen 
== 0) { p = dst->segs[i].ds_addr; l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | dst->segs[i].ds_len; } else { p = sc->sc_dma_physaddr + offsetof(struct hifn_dma, slop[cmd->slopidx]); l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | sizeof(u_int32_t); if ((dst->segs[i].ds_len - cmd->sloplen) != 0) { dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_MASKDONEIRQ | (dst->segs[i].ds_len - cmd->sloplen)); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; if (++idx == HIFN_D_DST_RSIZE) { dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); idx = 0; } } } dma->dstr[idx].p = htole32(p); dma->dstr[idx].l = htole32(l); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; if (++idx == HIFN_D_DST_RSIZE) { dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); idx = 0; } dma->dsti = idx; dma->dstu += used; return (idx); } static int hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd) { struct hifn_dma *dma = sc->sc_dma; struct hifn_operand *src = &cmd->src; int idx, i; u_int32_t last = 0; idx = dma->srci; for (i = 0; i < src->nsegs; i++) { if (i == src->nsegs - 1) last = HIFN_D_LAST; dma->srcr[idx].p = htole32(src->segs[i].ds_addr); dma->srcr[idx].l = htole32(src->segs[i].ds_len | HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last); HIFN_SRCR_SYNC(sc, idx, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); if (++idx == HIFN_D_SRC_RSIZE) { dma->srcr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); idx = 0; } } dma->srci = idx; dma->srcu += src->nsegs; return (idx); } static void hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error) { struct hifn_operand *op = arg; KASSERT(nsegs <= MAX_SCATTER, ("hifn_op_cb: too many DMA segments (%u > %u) " "returned when mapping operand", nsegs, MAX_SCATTER)); op->mapsize = mapsize; op->nsegs = nsegs; bcopy(seg, op->segs, nsegs * sizeof (seg[0])); } static int hifn_crypto( struct hifn_softc *sc, struct hifn_command *cmd, struct cryptop *crp, int hint) { struct hifn_dma *dma = sc->sc_dma; u_int32_t cmdlen; int cmdi, resi, err = 0; /* * need 1 cmd, and 1 res * * NB: check this first since it's easy. 
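 *
 * If either ring is exhausted we return ERESTART so the request is
 * requeued by the crypto layer and resubmitted once descriptors free up.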
*/ if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE || (dma->resu + 1) > HIFN_D_RES_RSIZE) { #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "cmd/result exhaustion, cmdu %u resu %u\n", dma->cmdu, dma->resu); } #endif hifnstats.hst_nomem_cr++; return (ERESTART); } if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) { hifnstats.hst_nomem_map++; return (ENOMEM); } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map, cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_load++; err = ENOMEM; goto err_srcmap1; } } else if (crp->crp_flags & CRYPTO_F_IOV) { if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map, cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_load++; err = ENOMEM; goto err_srcmap1; } } else { err = EINVAL; goto err_srcmap1; } if (hifn_dmamap_aligned(&cmd->src)) { cmd->sloplen = cmd->src_mapsize & 3; cmd->dst = cmd->src; } else { if (crp->crp_flags & CRYPTO_F_IOV) { err = EINVAL; goto err_srcmap; } else if (crp->crp_flags & CRYPTO_F_IMBUF) { int totlen, len; struct mbuf *m, *m0, *mlast; KASSERT(cmd->dst_m == cmd->src_m, ("hifn_crypto: dst_m initialized improperly")); hifnstats.hst_unaligned++; /* * Source is not aligned on a longword boundary. * Copy the data to insure alignment. If we fail * to allocate mbufs or clusters while doing this * we return ERESTART so the operation is requeued * at the crypto later, but only if there are * ops already posted to the hardware; otherwise we * have no guarantee that we'll be re-entered. */ totlen = cmd->src_mapsize; if (cmd->src_m->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m0, M_DONTWAIT, MT_DATA); if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) { m_free(m0); m0 = NULL; } } else { len = MLEN; MGET(m0, M_DONTWAIT, MT_DATA); } if (m0 == NULL) { hifnstats.hst_nomem_mbuf++; err = dma->cmdu ? ERESTART : ENOMEM; goto err_srcmap; } if (totlen >= MINCLSIZE) { MCLGET(m0, M_DONTWAIT); if ((m0->m_flags & M_EXT) == 0) { hifnstats.hst_nomem_mcl++; err = dma->cmdu ? ERESTART : ENOMEM; m_freem(m0); goto err_srcmap; } len = MCLBYTES; } totlen -= len; m0->m_pkthdr.len = m0->m_len = len; mlast = m0; while (totlen > 0) { MGET(m, M_DONTWAIT, MT_DATA); if (m == NULL) { hifnstats.hst_nomem_mbuf++; err = dma->cmdu ? ERESTART : ENOMEM; m_freem(m0); goto err_srcmap; } len = MLEN; if (totlen >= MINCLSIZE) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { hifnstats.hst_nomem_mcl++; err = dma->cmdu ? 
ERESTART : ENOMEM; mlast->m_next = m; m_freem(m0); goto err_srcmap; } len = MCLBYTES; } m->m_len = len; m0->m_pkthdr.len += len; totlen -= len; mlast->m_next = m; mlast = m; } cmd->dst_m = m0; } } if (cmd->dst_map == NULL) { if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) { hifnstats.hst_nomem_map++; err = ENOMEM; goto err_srcmap; } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_map++; err = ENOMEM; goto err_dstmap1; } } else if (crp->crp_flags & CRYPTO_F_IOV) { if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map, cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_load++; err = ENOMEM; goto err_dstmap1; } } } #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n", READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER), dma->cmdu, dma->srcu, dma->dstu, dma->resu, cmd->src_nsegs, cmd->dst_nsegs); } #endif if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_PREREAD); } /* * need N src, and N dst */ if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE || (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) { #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "src/dst exhaustion, srcu %u+%u dstu %u+%u\n", dma->srcu, cmd->src_nsegs, dma->dstu, cmd->dst_nsegs); } #endif hifnstats.hst_nomem_sd++; err = ERESTART; goto err_dstmap; } if (dma->cmdi == HIFN_D_CMD_RSIZE) { dma->cmdi = 0; dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } cmdi = dma->cmdi++; cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]); HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE); /* .p for command/result already set */ dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); dma->cmdu++; if (sc->sc_c_busy == 0) { WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); sc->sc_c_busy = 1; } /* * We don't worry about missing an interrupt (which a "command wait" * interrupt salvages us from), unless there is more than one command * in the queue. */ if (dma->cmdu > 1) { sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); } hifnstats.hst_ipackets++; hifnstats.hst_ibytes += cmd->src_mapsize; hifn_dmamap_load_src(sc, cmd); if (sc->sc_s_busy == 0) { WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA); sc->sc_s_busy = 1; } /* * Unlike other descriptors, we don't mask done interrupt from * result descriptor. 
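 *
 * The one exception is batching: when the caller hints that more
 * requests are coming (CRYPTO_HINT_MORE) and we are still under
 * hifn_maxbatch, the done interrupt is masked on this result
 * descriptor as well so completions get coalesced.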
*/ #ifdef HIFN_DEBUG if (hifn_debug) printf("load res\n"); #endif if (dma->resi == HIFN_D_RES_RSIZE) { dma->resi = 0; dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } resi = dma->resi++; KASSERT(dma->hifn_commands[resi] == NULL, ("hifn_crypto: command slot %u busy", resi)); dma->hifn_commands[resi] = cmd; HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) { dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); sc->sc_curbatch++; if (sc->sc_curbatch > hifnstats.hst_maxbatch) hifnstats.hst_maxbatch = sc->sc_curbatch; hifnstats.hst_totbatch++; } else { dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_VALID | HIFN_D_LAST); sc->sc_curbatch = 0; } HIFN_RESR_SYNC(sc, resi, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); dma->resu++; if (sc->sc_r_busy == 0) { WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA); sc->sc_r_busy = 1; } if (cmd->sloplen) cmd->slopidx = resi; hifn_dmamap_load_dst(sc, cmd); if (sc->sc_d_busy == 0) { WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA); sc->sc_d_busy = 1; } #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "command: stat %8x ier %8x\n", READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER)); } #endif sc->sc_active = 5; KASSERT(err == 0, ("hifn_crypto: success with error %u", err)); return (err); /* success */ err_dstmap: if (cmd->src_map != cmd->dst_map) bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); err_dstmap1: if (cmd->src_map != cmd->dst_map) bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); err_srcmap: if (crp->crp_flags & CRYPTO_F_IMBUF) { if (cmd->src_m != cmd->dst_m) m_freem(cmd->dst_m); } bus_dmamap_unload(sc->sc_dmat, cmd->src_map); err_srcmap1: bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); return (err); } static void hifn_tick(void* vsc) { struct hifn_softc *sc = vsc; HIFN_LOCK(sc); if (sc->sc_active == 0) { struct hifn_dma *dma = sc->sc_dma; u_int32_t r = 0; if (dma->cmdu == 0 && sc->sc_c_busy) { sc->sc_c_busy = 0; r |= HIFN_DMACSR_C_CTRL_DIS; } if (dma->srcu == 0 && sc->sc_s_busy) { sc->sc_s_busy = 0; r |= HIFN_DMACSR_S_CTRL_DIS; } if (dma->dstu == 0 && sc->sc_d_busy) { sc->sc_d_busy = 0; r |= HIFN_DMACSR_D_CTRL_DIS; } if (dma->resu == 0 && sc->sc_r_busy) { sc->sc_r_busy = 0; r |= HIFN_DMACSR_R_CTRL_DIS; } if (r) WRITE_REG_1(sc, HIFN_1_DMA_CSR, r); } else sc->sc_active--; HIFN_UNLOCK(sc); callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); } static void hifn_intr(void *arg) { struct hifn_softc *sc = arg; struct hifn_dma *dma; u_int32_t dmacsr, restart; int i, u; HIFN_LOCK(sc); dma = sc->sc_dma; dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR); #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n", dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier, dma->cmdi, dma->srci, dma->dsti, dma->resi, dma->cmdk, dma->srck, dma->dstk, dma->resk, dma->cmdu, dma->srcu, dma->dstu, dma->resu); } #endif /* Nothing in the DMA unit interrupted */ if ((dmacsr & sc->sc_dmaier) == 0) { hifnstats.hst_noirq++; HIFN_UNLOCK(sc); return; } WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier); if ((sc->sc_flags & HIFN_HAS_PUBLIC) && (dmacsr & HIFN_DMACSR_PUBDONE)) WRITE_REG_1(sc, HIFN_1_PUB_STATUS, READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE); restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER); if (restart) 
device_printf(sc->sc_dev, "overrun %x\n", dmacsr); if (sc->sc_flags & HIFN_IS_7811) { if (dmacsr & HIFN_DMACSR_ILLR) device_printf(sc->sc_dev, "illegal read\n"); if (dmacsr & HIFN_DMACSR_ILLW) device_printf(sc->sc_dev, "illegal write\n"); } restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); if (restart) { device_printf(sc->sc_dev, "abort, resetting.\n"); hifnstats.hst_abort++; hifn_abort(sc); HIFN_UNLOCK(sc); return; } if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) { /* * If no slots to process and we receive a "waiting on * command" interrupt, we disable the "waiting on command" * (by clearing it). */ sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); } /* clear the rings */ i = dma->resk; u = dma->resu; while (u != 0) { HIFN_RESR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->resr[i].l & htole32(HIFN_D_VALID)) { HIFN_RESR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } if (i != HIFN_D_RES_RSIZE) { struct hifn_command *cmd; u_int8_t *macbuf = NULL; HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD); cmd = dma->hifn_commands[i]; KASSERT(cmd != NULL, ("hifn_intr: null command slot %u", i)); dma->hifn_commands[i] = NULL; if (cmd->base_masks & HIFN_BASE_CMD_MAC) { macbuf = dma->result_bufs[i]; macbuf += 12; } hifn_callback(sc, cmd, macbuf); hifnstats.hst_opackets++; u--; } if (++i == (HIFN_D_RES_RSIZE + 1)) i = 0; } dma->resk = i; dma->resu = u; i = dma->srck; u = dma->srcu; while (u != 0) { if (i == HIFN_D_SRC_RSIZE) i = 0; HIFN_SRCR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->srcr[i].l & htole32(HIFN_D_VALID)) { HIFN_SRCR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } i++, u--; } dma->srck = i; dma->srcu = u; i = dma->cmdk; u = dma->cmdu; while (u != 0) { HIFN_CMDR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) { HIFN_CMDR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } if (i != HIFN_D_CMD_RSIZE) { u--; HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE); } if (++i == (HIFN_D_CMD_RSIZE + 1)) i = 0; } dma->cmdk = i; dma->cmdu = u; if (sc->sc_needwakeup) { /* XXX check high watermark */ int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "wakeup crypto (%x) u %d/%d/%d/%d\n", sc->sc_needwakeup, dma->cmdu, dma->srcu, dma->dstu, dma->resu); #endif sc->sc_needwakeup &= ~wakeup; crypto_unblock(sc->sc_cid, wakeup); } HIFN_UNLOCK(sc); } /* * Allocate a new 'session' and return an encoded session id. 'sidp' * contains our registration id, and should contain an encoded session * id on successful allocation. */ static int hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri) { struct cryptoini *c; struct hifn_softc *sc = arg; int i, mac = 0, cry = 0; KASSERT(sc != NULL, ("hifn_newsession: null softc")); if (sidp == NULL || cri == NULL || sc == NULL) return (EINVAL); for (i = 0; i < sc->sc_maxses; i++) if (sc->sc_sessions[i].hs_state == HS_STATE_FREE) break; if (i == sc->sc_maxses) return (ENOMEM); for (c = cri; c != NULL; c = c->cri_next) { switch (c->cri_alg) { case CRYPTO_MD5: case CRYPTO_SHA1: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: if (mac) return (EINVAL); mac = 1; break; case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: /* XXX this may read fewer, does it matter? 
*/ read_random(sc->sc_sessions[i].hs_iv, HIFN_IV_LENGTH); /*FALLTHROUGH*/ case CRYPTO_ARC4: if (cry) return (EINVAL); cry = 1; break; default: return (EINVAL); } } if (mac == 0 && cry == 0) return (EINVAL); *sidp = HIFN_SID(device_get_unit(sc->sc_dev), i); sc->sc_sessions[i].hs_state = HS_STATE_USED; return (0); } /* * Deallocate a session. * XXX this routine should run a zero'd mac/encrypt key into context ram. * XXX to blow away any keys already stored there. */ static int hifn_freesession(void *arg, u_int64_t tid) { struct hifn_softc *sc = arg; int session; u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; KASSERT(sc != NULL, ("hifn_freesession: null softc")); if (sc == NULL) return (EINVAL); session = HIFN_SESSION(sid); if (session >= sc->sc_maxses) return (EINVAL); bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session])); return (0); } static int hifn_process(void *arg, struct cryptop *crp, int hint) { struct hifn_softc *sc = arg; struct hifn_command *cmd = NULL; int session, err; struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; if (crp == NULL || crp->crp_callback == NULL) { hifnstats.hst_invalid++; return (EINVAL); } session = HIFN_SESSION(crp->crp_sid); if (sc == NULL || session >= sc->sc_maxses) { err = EINVAL; goto errout; } cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO); if (cmd == NULL) { hifnstats.hst_nomem++; err = ENOMEM; goto errout; } if (crp->crp_flags & CRYPTO_F_IMBUF) { cmd->src_m = (struct mbuf *)crp->crp_buf; cmd->dst_m = (struct mbuf *)crp->crp_buf; } else if (crp->crp_flags & CRYPTO_F_IOV) { cmd->src_io = (struct uio *)crp->crp_buf; cmd->dst_io = (struct uio *)crp->crp_buf; } else { err = EINVAL; goto errout; /* XXX we don't handle contiguous buffers! */ } crd1 = crp->crp_desc; if (crd1 == NULL) { err = EINVAL; goto errout; } crd2 = crd1->crd_next; if (crd2 == NULL) { if (crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_SHA1 || crd1->crd_alg == CRYPTO_MD5) { maccrd = crd1; enccrd = NULL; } else if (crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_ARC4) { if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0) cmd->base_masks |= HIFN_BASE_CMD_DECODE; maccrd = NULL; enccrd = crd1; } else { err = EINVAL; goto errout; } } else { if ((crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_MD5 || crd1->crd_alg == CRYPTO_SHA1) && (crd2->crd_alg == CRYPTO_DES_CBC || crd2->crd_alg == CRYPTO_3DES_CBC || crd2->crd_alg == CRYPTO_ARC4) && ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { cmd->base_masks = HIFN_BASE_CMD_DECODE; maccrd = crd1; enccrd = crd2; } else if ((crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_ARC4 || crd1->crd_alg == CRYPTO_3DES_CBC) && (crd2->crd_alg == CRYPTO_MD5_HMAC || crd2->crd_alg == CRYPTO_SHA1_HMAC || crd2->crd_alg == CRYPTO_MD5 || crd2->crd_alg == CRYPTO_SHA1) && (crd1->crd_flags & CRD_F_ENCRYPT)) { enccrd = crd1; maccrd = crd2; } else { /* * We cannot order the 7751 as requested */ err = EINVAL; goto errout; } } if (enccrd) { cmd->enccrd = enccrd; cmd->base_masks |= HIFN_BASE_CMD_CRYPT; switch (enccrd->crd_alg) { case CRYPTO_ARC4: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4; if ((enccrd->crd_flags & CRD_F_ENCRYPT) != sc->sc_sessions[session].hs_prev_op) sc->sc_sessions[session].hs_state = HS_STATE_USED; break; case CRYPTO_DES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; case CRYPTO_3DES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES 
| HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; default: err = EINVAL; goto errout; } if (enccrd->crd_alg != CRYPTO_ARC4) { if (enccrd->crd_flags & CRD_F_ENCRYPT) { if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, cmd->iv, HIFN_IV_LENGTH); else bcopy(sc->sc_sessions[session].hs_iv, cmd->iv, HIFN_IV_LENGTH); if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { if (crp->crp_flags & CRYPTO_F_IMBUF) m_copyback(cmd->src_m, enccrd->crd_inject, HIFN_IV_LENGTH, cmd->iv); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copyback(cmd->src_io, enccrd->crd_inject, HIFN_IV_LENGTH, cmd->iv); } } else { if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, cmd->iv, HIFN_IV_LENGTH); else if (crp->crp_flags & CRYPTO_F_IMBUF) m_copydata(cmd->src_m, enccrd->crd_inject, HIFN_IV_LENGTH, cmd->iv); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copydata(cmd->src_io, enccrd->crd_inject, HIFN_IV_LENGTH, cmd->iv); } } cmd->ck = enccrd->crd_key; cmd->cklen = enccrd->crd_klen >> 3; if (sc->sc_sessions[session].hs_state == HS_STATE_USED) cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; } if (maccrd) { cmd->maccrd = maccrd; cmd->base_masks |= HIFN_BASE_CMD_MAC; switch (maccrd->crd_alg) { case CRYPTO_MD5: cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | HIFN_MAC_CMD_POS_IPSEC; break; case CRYPTO_MD5_HMAC: cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; break; case CRYPTO_SHA1: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | HIFN_MAC_CMD_POS_IPSEC; break; case CRYPTO_SHA1_HMAC: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; break; } if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC || maccrd->crd_alg == CRYPTO_MD5_HMAC) && sc->sc_sessions[session].hs_state == HS_STATE_USED) { cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3); bzero(cmd->mac + (maccrd->crd_klen >> 3), HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3)); } } cmd->crp = crp; cmd->session_num = session; cmd->softc = sc; err = hifn_crypto(sc, cmd, crp, hint); if (!err) { if (enccrd) sc->sc_sessions[session].hs_prev_op = enccrd->crd_flags & CRD_F_ENCRYPT; if (sc->sc_sessions[session].hs_state == HS_STATE_USED) sc->sc_sessions[session].hs_state = HS_STATE_KEY; return 0; } else if (err == ERESTART) { /* * There weren't enough resources to dispatch the request * to the part. Notify the caller so they'll requeue this * request and resubmit it again soon. */ #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "requeue request\n"); #endif free(cmd, M_DEVBUF); sc->sc_needwakeup |= CRYPTO_SYMQ; return (err); } errout: if (cmd != NULL) free(cmd, M_DEVBUF); if (err == EINVAL) hifnstats.hst_invalid++; else hifnstats.hst_nomem++; crp->crp_etype = err; crypto_done(crp); return (err); } static void hifn_abort(struct hifn_softc *sc) { struct hifn_dma *dma = sc->sc_dma; struct hifn_command *cmd; struct cryptop *crp; int i, u; i = dma->resk; u = dma->resu; while (u != 0) { cmd = dma->hifn_commands[i]; KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i)); dma->hifn_commands[i] = NULL; crp = cmd->crp; if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) { /* Salvage what we can. 
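 * Descriptors the chip has already completed (HIFN_D_VALID cleared)
 * are delivered normally through hifn_callback(); anything still
 * pending is unmapped and failed with ENOMEM, and the board is then
 * reset and reinitialized below.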
*/ u_int8_t *macbuf; if (cmd->base_masks & HIFN_BASE_CMD_MAC) { macbuf = dma->result_bufs[i]; macbuf += 12; } else macbuf = NULL; hifnstats.hst_opackets++; hifn_callback(sc, cmd, macbuf); } else { if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_POSTREAD); } if (cmd->src_m != cmd->dst_m) { m_freem(cmd->src_m); crp->crp_buf = (caddr_t)cmd->dst_m; } /* non-shared buffers cannot be restarted */ if (cmd->src_map != cmd->dst_map) { /* * XXX should be EAGAIN, delayed until * after the reset. */ crp->crp_etype = ENOMEM; bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); } else crp->crp_etype = ENOMEM; bus_dmamap_unload(sc->sc_dmat, cmd->src_map); bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); free(cmd, M_DEVBUF); if (crp->crp_etype != EAGAIN) crypto_done(crp); } if (++i == HIFN_D_RES_RSIZE) i = 0; u--; } dma->resk = i; dma->resu = u; /* Force upload of key next time */ for (i = 0; i < sc->sc_maxses; i++) if (sc->sc_sessions[i].hs_state == HS_STATE_KEY) sc->sc_sessions[i].hs_state = HS_STATE_USED; hifn_reset_board(sc, 1); hifn_init_dma(sc); hifn_init_pci_registers(sc); } static void hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf) { struct hifn_dma *dma = sc->sc_dma; struct cryptop *crp = cmd->crp; struct cryptodesc *crd; struct mbuf *m; int totlen, i, u; if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_POSTREAD); } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (cmd->src_m != cmd->dst_m) { crp->crp_buf = (caddr_t)cmd->dst_m; totlen = cmd->src_mapsize; for (m = cmd->dst_m; m != NULL; m = m->m_next) { if (totlen < m->m_len) { m->m_len = totlen; totlen = 0; } else totlen -= m->m_len; } cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len; m_freem(cmd->src_m); } } if (cmd->sloplen != 0) { if (crp->crp_flags & CRYPTO_F_IMBUF) m_copyback((struct mbuf *)crp->crp_buf, cmd->src_mapsize - cmd->sloplen, cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copyback((struct uio *)crp->crp_buf, cmd->src_mapsize - cmd->sloplen, cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]); } i = dma->dstk; u = dma->dstu; while (u != 0) { if (i == HIFN_D_DST_RSIZE) i = 0; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } i++, u--; } dma->dstk = i; dma->dstu = u; hifnstats.hst_obytes += cmd->dst_mapsize; if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) == HIFN_BASE_CMD_CRYPT) { for (crd = crp->crp_desc; crd; crd = crd->crd_next) { if (crd->crd_alg != CRYPTO_DES_CBC && crd->crd_alg != CRYPTO_3DES_CBC) continue; if (crp->crp_flags & CRYPTO_F_IMBUF) m_copydata((struct mbuf *)crp->crp_buf, crd->crd_skip + crd->crd_len - HIFN_IV_LENGTH, HIFN_IV_LENGTH, cmd->softc->sc_sessions[cmd->session_num].hs_iv); else if (crp->crp_flags & CRYPTO_F_IOV) { cuio_copydata((struct uio *)crp->crp_buf, crd->crd_skip + crd->crd_len - HIFN_IV_LENGTH, HIFN_IV_LENGTH, cmd->softc->sc_sessions[cmd->session_num].hs_iv); } break; } } if 
(macbuf != NULL) { for (crd = crp->crp_desc; crd; crd = crd->crd_next) { int len; if (crd->crd_alg == CRYPTO_MD5) len = 16; else if (crd->crd_alg == CRYPTO_SHA1) len = 20; else if (crd->crd_alg == CRYPTO_MD5_HMAC || crd->crd_alg == CRYPTO_SHA1_HMAC) len = 12; else continue; if (crp->crp_flags & CRYPTO_F_IMBUF) m_copyback((struct mbuf *)crp->crp_buf, crd->crd_inject, len, macbuf); else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac) bcopy((caddr_t)macbuf, crp->crp_mac, len); break; } } if (cmd->src_map != cmd->dst_map) { bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); } bus_dmamap_unload(sc->sc_dmat, cmd->src_map); bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); free(cmd, M_DEVBUF); crypto_done(crp); } /* * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0 * and Group 1 registers; avoid conditions that could create * burst writes by doing a read in between the writes. * * NB: The read we interpose is always to the same register; * we do this because reading from an arbitrary (e.g. last) * register may not always work. */ static void hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) { if (sc->sc_flags & HIFN_IS_7811) { if (sc->sc_bar0_lastreg == reg - 4) bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG); sc->sc_bar0_lastreg = reg; } bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); } static void hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) { if (sc->sc_flags & HIFN_IS_7811) { if (sc->sc_bar1_lastreg == reg - 4) bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); sc->sc_bar1_lastreg = reg; } bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); } diff --git a/sys/dev/hifn/hifn7751var.h b/sys/dev/hifn/hifn7751var.h index e7ff4a30b258..9d58f5eceff2 100644 --- a/sys/dev/hifn/hifn7751var.h +++ b/sys/dev/hifn/hifn7751var.h @@ -1,356 +1,361 @@ /* $FreeBSD$ */ /* $OpenBSD: hifn7751var.h,v 1.42 2002/04/08 17:49:42 jason Exp $ */ /* * Invertex AEON / Hifn 7751 driver * Copyright (c) 1999 Invertex Inc. All rights reserved. * Copyright (c) 1999 Theo de Raadt * Copyright (c) 2000-2001 Network Security Technologies, Inc. * http://www.netsec.net * * Please send any comments, feedback, bug-fixes, or feature requests to * software@invertex.com. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. * */ #ifndef __HIFN7751VAR_H__ #define __HIFN7751VAR_H__ #ifdef _KERNEL /* * Some configurable values for the driver. By default command+result * descriptor rings are the same size. The src+dst descriptor rings * are sized at 3.5x the number of potential commands. Slower parts * (e.g. 7951) tend to run out of src descriptors; faster parts (7811) * src+cmd/result descriptors. It's not clear that increasing the size * of the descriptor rings helps performance significantly as other * factors tend to come into play (e.g. copying misaligned packets). */ #define HIFN_D_CMD_RSIZE 24 /* command descriptors */ #define HIFN_D_SRC_RSIZE ((HIFN_D_CMD_RSIZE * 7) / 2) /* source descriptors */ #define HIFN_D_RES_RSIZE HIFN_D_CMD_RSIZE /* result descriptors */ #define HIFN_D_DST_RSIZE HIFN_D_SRC_RSIZE /* destination descriptors */ /* * Length values for cryptography */ #define HIFN_DES_KEY_LENGTH 8 #define HIFN_3DES_KEY_LENGTH 24 #define HIFN_MAX_CRYPT_KEY_LENGTH HIFN_3DES_KEY_LENGTH #define HIFN_IV_LENGTH 8 /* * Length values for authentication */ #define HIFN_MAC_KEY_LENGTH 64 #define HIFN_MD5_LENGTH 16 #define HIFN_SHA1_LENGTH 20 #define HIFN_MAC_TRUNC_LENGTH 12 #define MAX_SCATTER 64 /* * Data structure to hold all 4 rings and any other ring related data. */ struct hifn_dma { /* * Descriptor rings. We add +1 to the size to accomidate the * jump descriptor. */ struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1]; struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1]; struct hifn_desc dstr[HIFN_D_DST_RSIZE+1]; struct hifn_desc resr[HIFN_D_RES_RSIZE+1]; struct hifn_command *hifn_commands[HIFN_D_RES_RSIZE]; u_char command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND]; u_char result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT]; u_int32_t slop[HIFN_D_CMD_RSIZE]; u_int64_t test_src, test_dst; /* * Our current positions for insertion and removal from the desriptor * rings. */ int cmdi, srci, dsti, resi; volatile int cmdu, srcu, dstu, resu; int cmdk, srck, dstk, resk; }; struct hifn_session { int hs_state; int hs_prev_op; /* XXX collapse into hs_flags? 
*/ u_int8_t hs_iv[HIFN_IV_LENGTH]; }; #define HIFN_RING_SYNC(sc, r, i, f) \ bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) #define HIFN_CMDR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), cmdr, (i), (f)) #define HIFN_RESR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), resr, (i), (f)) #define HIFN_SRCR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), srcr, (i), (f)) #define HIFN_DSTR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), dstr, (i), (f)) #define HIFN_CMD_SYNC(sc, i, f) \ bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) #define HIFN_RES_SYNC(sc, i, f) \ bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) /* We use a state machine to on sessions */ #define HS_STATE_FREE 0 /* unused session entry */ #define HS_STATE_USED 1 /* allocated, but key not on card */ #define HS_STATE_KEY 2 /* allocated and key is on card */ +struct rndstate_test; + /* * Holds data specific to a single HIFN board. */ struct hifn_softc { device_t sc_dev; /* device backpointer */ struct mtx sc_mtx; /* per-instance lock */ bus_dma_tag_t sc_dmat; /* parent DMA tag decriptor */ struct resource *sc_bar0res; bus_space_handle_t sc_sh0; /* bar0 bus space handle */ bus_space_tag_t sc_st0; /* bar0 bus space tag */ bus_size_t sc_bar0_lastreg;/* bar0 last reg written */ struct resource *sc_bar1res; bus_space_handle_t sc_sh1; /* bar1 bus space handle */ bus_space_tag_t sc_st1; /* bar1 bus space tag */ bus_size_t sc_bar1_lastreg;/* bar1 last reg written */ struct resource *sc_irq; void *sc_intrhand; /* interrupt handle */ u_int32_t sc_dmaier; u_int32_t sc_drammodel; /* 1=dram, 0=sram */ struct hifn_dma *sc_dma; bus_dmamap_t sc_dmamap; bus_dma_segment_t sc_dmasegs[1]; bus_addr_t sc_dma_physaddr;/* physical address of sc_dma */ int sc_dmansegs; int32_t sc_cid; int sc_maxses; int sc_ramsize; int sc_flags; #define HIFN_HAS_RNG 0x1 /* includes random number generator */ #define HIFN_HAS_PUBLIC 0x2 /* includes public key support */ #define HIFN_IS_7811 0x4 /* Hifn 7811 part */ struct callout sc_rngto; /* for polling RNG */ struct callout sc_tickto; /* for managing DMA */ int sc_rngfirst; int sc_rnghz; /* RNG polling frequency */ + struct rndtest_state *sc_rndtest; /* RNG test state */ + void (*sc_harvest)(struct rndtest_state *, + void *, u_int); int sc_c_busy; /* command ring busy */ int sc_s_busy; /* source data ring busy */ int sc_d_busy; /* destination data ring busy */ int sc_r_busy; /* result ring busy */ int sc_active; /* for initial countdown */ int sc_needwakeup; /* ops q'd wating on resources */ int sc_curbatch; /* # ops submitted w/o int */ int sc_suspended; struct hifn_session sc_sessions[2048]; }; #define HIFN_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define HIFN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) /* * hifn_command_t * * This is the control structure used to pass commands to hifn_encrypt(). * * flags * ----- * Flags is the bitwise "or" values for command configuration. A single * encrypt direction needs to be set: * * HIFN_ENCODE or HIFN_DECODE * * To use cryptography, a single crypto algorithm must be included: * * HIFN_CRYPT_3DES or HIFN_CRYPT_DES * * To use authentication is used, a single MAC algorithm must be included: * * HIFN_MAC_MD5 or HIFN_MAC_SHA1 * * By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash. * If the value below is set, hash values are truncated or assumed * truncated to 12 bytes: * * HIFN_MAC_TRUNC * * Keys for encryption and authentication can be sent as part of a command, * or the last key value used with a particular session can be retrieved * and used again if either of these flags are not specified. 
* * HIFN_CRYPT_NEW_KEY, HIFN_MAC_NEW_KEY * * session_num * ----------- * A number between 0 and 2048 (for DRAM models) or a number between * 0 and 768 (for SRAM models). Those who don't want to use session * numbers should leave value at zero and send a new crypt key and/or * new MAC key on every command. If you use session numbers and * don't send a key with a command, the last key sent for that same * session number will be used. * * Warning: Using session numbers and multiboard at the same time * is currently broken. * * mbuf * ---- * Either fill in the mbuf pointer and npa=0 or * fill packp[] and packl[] and set npa to > 0 * * mac_header_skip * --------------- * The number of bytes of the source_buf that are skipped over before * authentication begins. This must be a number between 0 and 2^16-1 * and can be used by IPsec implementers to skip over IP headers. * *** Value ignored if authentication not used *** * * crypt_header_skip * ----------------- * The number of bytes of the source_buf that are skipped over before * the cryptographic operation begins. This must be a number between 0 * and 2^16-1. For IPsec, this number will always be 8 bytes larger * than the auth_header_skip (to skip over the ESP header). * *** Value ignored if cryptography not used *** * */ struct hifn_operand { union { struct mbuf *m; struct uio *io; } u; bus_dmamap_t map; bus_size_t mapsize; int nsegs; bus_dma_segment_t segs[MAX_SCATTER]; }; struct hifn_command { u_int16_t session_num; u_int16_t base_masks, cry_masks, mac_masks; u_int8_t iv[HIFN_IV_LENGTH], *ck, mac[HIFN_MAC_KEY_LENGTH]; int cklen; int sloplen, slopidx; struct hifn_operand src; struct hifn_operand dst; struct hifn_softc *softc; struct cryptop *crp; struct cryptodesc *enccrd, *maccrd; }; #define src_m src.u.m #define src_io src.u.io #define src_map src.map #define src_mapsize src.mapsize #define src_segs src.segs #define src_nsegs src.nsegs #define dst_m dst.u.m #define dst_io dst.u.io #define dst_map dst.map #define dst_mapsize dst.mapsize #define dst_segs dst.segs #define dst_nsegs dst.nsegs /* * Return values for hifn_crypto() */ #define HIFN_CRYPTO_SUCCESS 0 #define HIFN_CRYPTO_BAD_INPUT (-1) #define HIFN_CRYPTO_RINGS_FULL (-2) /************************************************************************** * * Function: hifn_crypto * * Purpose: Called by external drivers to begin an encryption on the * HIFN board. * * Blocking/Non-blocking Issues * ============================ * The driver cannot block in hifn_crypto (no calls to tsleep) currently. * hifn_crypto() returns HIFN_CRYPTO_RINGS_FULL if there is not enough * room in any of the rings for the request to proceed. * * Return Values * ============= * 0 for success, negative values on error * * Defines for negative error codes are: * * HIFN_CRYPTO_BAD_INPUT : The passed in command had invalid settings. * HIFN_CRYPTO_RINGS_FULL : All DMA rings were full and non-blocking * behaviour was requested. 
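 *
 * Illustrative caller sketch only (documentation, not driver code),
 * using the return convention described above:
 *
 *	error = hifn_crypto(sc, cmd, crp, hint);
 *	if (error == HIFN_CRYPTO_RINGS_FULL)
 *		defer and resubmit the command later;
 *	else if (error == HIFN_CRYPTO_BAD_INPUT)
 *		reject the request;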
* *************************************************************************/ /* * Convert back and forth from 'sid' to 'card' and 'session' */ #define HIFN_CARD(sid) (((sid) & 0xf0000000) >> 28) #define HIFN_SESSION(sid) ((sid) & 0x000007ff) #define HIFN_SID(crd,ses) (((crd) << 28) | ((ses) & 0x7ff)) #endif /* _KERNEL */ struct hifn_stats { u_int64_t hst_ibytes; u_int64_t hst_obytes; u_int32_t hst_ipackets; u_int32_t hst_opackets; u_int32_t hst_invalid; u_int32_t hst_nomem; /* malloc or one of hst_nomem_* */ u_int32_t hst_abort; u_int32_t hst_noirq; /* IRQ for no reason */ u_int32_t hst_totbatch; /* ops submitted w/o interrupt */ u_int32_t hst_maxbatch; /* max ops submitted together */ u_int32_t hst_unaligned; /* unaligned src caused copy */ /* * The following divides hst_nomem into more specific buckets. */ u_int32_t hst_nomem_map; /* bus_dmamap_create failed */ u_int32_t hst_nomem_load; /* bus_dmamap_load_* failed */ u_int32_t hst_nomem_mbuf; /* MGET* failed */ u_int32_t hst_nomem_mcl; /* MCLGET* failed */ u_int32_t hst_nomem_cr; /* out of command/result descriptor */ u_int32_t hst_nomem_sd; /* out of src/dst descriptors */ }; #endif /* __HIFN7751VAR_H__ */ diff --git a/sys/dev/ubsec/ubsec.c b/sys/dev/ubsec/ubsec.c index 278f2bba7438..c0916530e159 100644 --- a/sys/dev/ubsec/ubsec.c +++ b/sys/dev/ubsec/ubsec.c @@ -1,2832 +1,2857 @@ /* $FreeBSD$ */ /* $OpenBSD: ubsec.c,v 1.115 2002/09/24 18:33:26 jason Exp $ */ /* * Copyright (c) 2000 Jason L. Wright (jason@thought.net) * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org) * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com) * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Jason L. Wright * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
* */ -#define UBSEC_DEBUG - /* * uBsec 5[56]01, 58xx hardware crypto accelerator */ +#include "opt_ubsec.h" + #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* grr, #defines for gratuitous incompatibility in queue.h */ #define SIMPLEQ_HEAD STAILQ_HEAD #define SIMPLEQ_ENTRY STAILQ_ENTRY #define SIMPLEQ_INIT STAILQ_INIT #define SIMPLEQ_INSERT_TAIL STAILQ_INSERT_TAIL #define SIMPLEQ_EMPTY STAILQ_EMPTY #define SIMPLEQ_FIRST STAILQ_FIRST #define SIMPLEQ_REMOVE_HEAD STAILQ_REMOVE_HEAD_UNTIL #define SIMPLEQ_FOREACH STAILQ_FOREACH /* ditto for endian.h */ #define letoh16(x) le16toh(x) #define letoh32(x) le32toh(x) +#ifdef UBSEC_RNDTEST +#include +#endif #include #include /* * Prototypes and count for the pci_device structure */ static int ubsec_probe(device_t); static int ubsec_attach(device_t); static int ubsec_detach(device_t); static int ubsec_suspend(device_t); static int ubsec_resume(device_t); static void ubsec_shutdown(device_t); static device_method_t ubsec_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ubsec_probe), DEVMETHOD(device_attach, ubsec_attach), DEVMETHOD(device_detach, ubsec_detach), DEVMETHOD(device_suspend, ubsec_suspend), DEVMETHOD(device_resume, ubsec_resume), DEVMETHOD(device_shutdown, ubsec_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), { 0, 0 } }; static driver_t ubsec_driver = { "ubsec", ubsec_methods, sizeof (struct ubsec_softc) }; static devclass_t ubsec_devclass; DRIVER_MODULE(ubsec, pci, ubsec_driver, ubsec_devclass, 0, 0); MODULE_DEPEND(ubsec, crypto, 1, 1, 1); +#ifdef UBSEC_RNDTEST +MODULE_DEPEND(ubsec, rndtest, 1, 1, 1); +#endif static void ubsec_intr(void *); static int ubsec_newsession(void *, u_int32_t *, struct cryptoini *); static int ubsec_freesession(void *, u_int64_t); static int ubsec_process(void *, struct cryptop *, int); static void ubsec_callback(struct ubsec_softc *, struct ubsec_q *); static void ubsec_feed(struct ubsec_softc *); static void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int); static void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *); static int ubsec_feed2(struct ubsec_softc *); static void ubsec_rng(void *); static int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t, struct ubsec_dma_alloc *, int); #define ubsec_dma_sync(_dma, _flags) \ bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags)) static void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *); static int ubsec_dmamap_aligned(struct ubsec_operand *op); static void ubsec_reset_board(struct ubsec_softc *sc); static void ubsec_init_board(struct ubsec_softc *sc); static void ubsec_init_pciregs(device_t dev); static void ubsec_totalreset(struct ubsec_softc *sc); static int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q); static int ubsec_kprocess(void*, struct cryptkop *, int); static int ubsec_kprocess_modexp_hw(struct ubsec_softc *, struct cryptkop *, int); static int ubsec_kprocess_modexp_sw(struct ubsec_softc *, struct cryptkop *, int); static int ubsec_kprocess_rsapriv(struct ubsec_softc *, struct cryptkop *, int); static void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *); static int ubsec_ksigbits(struct crparam *); static void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int); static void ubsec_kshift_l(u_int, u_int8_t *, 
u_int, u_int8_t *, u_int); SYSCTL_NODE(_hw, OID_AUTO, ubsec, CTLFLAG_RD, 0, "Broadcom driver parameters"); #ifdef UBSEC_DEBUG static void ubsec_dump_pb(volatile struct ubsec_pktbuf *); static void ubsec_dump_mcr(struct ubsec_mcr *); static void ubsec_dump_ctx2(struct ubsec_ctx_keyop *); static int ubsec_debug = 0; SYSCTL_INT(_hw_ubsec, OID_AUTO, debug, CTLFLAG_RW, &ubsec_debug, 0, "control debugging msgs"); #endif #define READ_REG(sc,r) \ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r)) #define WRITE_REG(sc,reg,val) \ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) #define SWAP32(x) (x) = htole32(ntohl((x))) #define HTOLE32(x) (x) = htole32(x) struct ubsec_stats ubsecstats; SYSCTL_STRUCT(_hw_ubsec, OID_AUTO, stats, CTLFLAG_RD, &ubsecstats, ubsec_stats, "driver statistics"); static int ubsec_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL && (pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5501 || pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601)) return (0); if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5801 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 )) return (0); return (ENXIO); } static const char* ubsec_partname(struct ubsec_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_BROADCOM: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_BROADCOM_5801: return "Broadcom 5801"; case PCI_PRODUCT_BROADCOM_5802: return "Broadcom 5802"; case PCI_PRODUCT_BROADCOM_5805: return "Broadcom 5805"; case PCI_PRODUCT_BROADCOM_5820: return "Broadcom 5820"; case PCI_PRODUCT_BROADCOM_5821: return "Broadcom 5821"; case PCI_PRODUCT_BROADCOM_5822: return "Broadcom 5822"; case PCI_PRODUCT_BROADCOM_5823: return "Broadcom 5823"; } return "Broadcom unknown-part"; case PCI_VENDOR_BLUESTEEL: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_BLUESTEEL_5601: return "Bluesteel 5601"; } return "Bluesteel unknown-part"; } return "Unknown-vendor unknown-part"; } +static void +default_harvest(struct rndtest_state *rsp, void *buf, u_int count) +{ + random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE); +} + static int ubsec_attach(device_t dev) { struct ubsec_softc *sc = device_get_softc(dev); struct ubsec_dma *dmap; u_int32_t cmd, i; int rid; KASSERT(sc != NULL, ("ubsec_attach: null software carrier!")); bzero(sc, sizeof (*sc)); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "crypto driver", MTX_DEF); SIMPLEQ_INIT(&sc->sc_queue); SIMPLEQ_INIT(&sc->sc_qchip); SIMPLEQ_INIT(&sc->sc_queue2); SIMPLEQ_INIT(&sc->sc_qchip2); SIMPLEQ_INIT(&sc->sc_q2free); /* XXX handle power management */ sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR; if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL && pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601) sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG; if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805)) sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG; if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820) sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY; if 
(pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 )) { /* NB: the 5821/5822 defines some additional status bits */ sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY; sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY; } cmd = pci_read_config(dev, PCIR_COMMAND, 4); cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, cmd, 4); cmd = pci_read_config(dev, PCIR_COMMAND, 4); if (!(cmd & PCIM_CMD_MEMEN)) { device_printf(dev, "failed to enable memory mapping\n"); goto bad; } if (!(cmd & PCIM_CMD_BUSMASTEREN)) { device_printf(dev, "failed to enable bus mastering\n"); goto bad; } /* * Setup memory-mapping of PCI registers. */ rid = BS_BAR; sc->sc_sr = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->sc_sr == NULL) { device_printf(dev, "cannot map register space\n"); goto bad; } sc->sc_st = rman_get_bustag(sc->sc_sr); sc->sc_sh = rman_get_bushandle(sc->sc_sr); /* * Arrange interrupt line. */ rid = 0; sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto bad1; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is mapped appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET, ubsec_intr, sc, &sc->sc_ih)) { device_printf(dev, "could not establish interrupt\n"); goto bad2; } sc->sc_cid = crypto_get_driverid(0); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto bad3; } /* * Setup DMA descriptor area. */ if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ 0x3ffff, /* maxsize */ UBS_MAX_SCATTER, /* nsegments */ 0xffff, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ &sc->sc_dmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto bad4; } SIMPLEQ_INIT(&sc->sc_freequeue); dmap = sc->sc_dmaa; for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) { struct ubsec_q *q; q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q), M_DEVBUF, M_NOWAIT); if (q == NULL) { device_printf(dev, "cannot allocate queue buffers\n"); break; } if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk), &dmap->d_alloc, 0)) { device_printf(dev, "cannot allocate dma buffers\n"); free(q, M_DEVBUF); break; } dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr; q->q_dma = dmap; sc->sc_queuea[i] = q; SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); } device_printf(sc->sc_dev, "%s\n", ubsec_partname(sc)); crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0, ubsec_newsession, ubsec_freesession, ubsec_process, sc); crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0, ubsec_newsession, ubsec_freesession, ubsec_process, sc); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0, ubsec_newsession, ubsec_freesession, ubsec_process, sc); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0, ubsec_newsession, ubsec_freesession, ubsec_process, sc); /* * Reset Broadcom chip */ ubsec_reset_board(sc); /* * Init Broadcom specific PCI settings */ ubsec_init_pciregs(dev); /* * Init Broadcom chip */ ubsec_init_board(sc); #ifndef UBSEC_NO_RNG if (sc->sc_flags & UBS_FLAGS_RNG) { sc->sc_statmask |= BS_STAT_MCR2_DONE; +#ifdef UBSEC_RNDTEST + sc->sc_rndtest = rndtest_attach(dev); + 
if (sc->sc_rndtest) + sc->sc_harvest = rndtest_harvest; + else + sc->sc_harvest = default_harvest; +#else + sc->sc_harvest = default_harvest; +#endif if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &sc->sc_rng.rng_q.q_mcr, 0)) goto skip_rng; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass), &sc->sc_rng.rng_q.q_ctx, 0)) { ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); goto skip_rng; } if (ubsec_dma_malloc(sc, sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) { ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); goto skip_rng; } if (hz >= 100) sc->sc_rnghz = hz / 100; else sc->sc_rnghz = 1; /* NB: 1 means the callout runs w/o Giant locked */ callout_init(&sc->sc_rngto, 1); callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); skip_rng: ; } #endif /* UBSEC_NO_RNG */ if (sc->sc_flags & UBS_FLAGS_KEY) { sc->sc_statmask |= BS_STAT_MCR2_DONE; crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0, ubsec_kprocess, sc); #if 0 crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0, ubsec_kprocess, sc); #endif } return (0); bad4: crypto_unregister_all(sc->sc_cid); bad3: bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bad2: bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bad1: bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); bad: mtx_destroy(&sc->sc_mtx); return (ENXIO); } /* * Detach a device that successfully probed. */ static int ubsec_detach(device_t dev) { struct ubsec_softc *sc = device_get_softc(dev); KASSERT(sc != NULL, ("ubsec_detach: null software carrier")); /* XXX wait/abort active ops */ UBSEC_LOCK(sc); callout_stop(&sc->sc_rngto); crypto_unregister_all(sc->sc_cid); +#ifdef UBSEC_RNDTEST + if (sc->sc_rndtest) + rndtest_detach(sc->sc_rndtest); +#endif + while (!SIMPLEQ_EMPTY(&sc->sc_freequeue)) { struct ubsec_q *q; q = SIMPLEQ_FIRST(&sc->sc_freequeue); SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q, q_next); ubsec_dma_free(sc, &q->q_dma->d_alloc); free(q, M_DEVBUF); } #ifndef UBSEC_NO_RNG if (sc->sc_flags & UBS_FLAGS_RNG) { ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); ubsec_dma_free(sc, &sc->sc_rng.rng_buf); } #endif /* UBSEC_NO_RNG */ bus_generic_detach(dev); bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bus_dma_tag_destroy(sc->sc_dmat); bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); UBSEC_UNLOCK(sc); mtx_destroy(&sc->sc_mtx); return (0); } /* * Stop all chip i/o so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void ubsec_shutdown(device_t dev) { #ifdef notyet ubsec_stop(device_get_softc(dev)); #endif } /* * Device suspend routine. 
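(Editor's aside, not part of this change: the hunks above are the heart of the rndtest hook-up. At attach time sc_harvest is pointed either at rndtest_harvest(), when the UBSEC_RNDTEST option is configured and rndtest_attach() succeeds, or at the local default_harvest() wrapper around random_harvest(); the RNG completion path then simply calls through the pointer without caring which backend is present. A minimal sketch of that call, with a hypothetical helper name:)

/* Illustrative only -- this mirrors the call made from ubsec_callback2(). */
static void
ubsec_harvest_rng_output(struct ubsec_softc *sc, void *buf, u_int nbytes)
{
	/* Either rndtest_harvest() or default_harvest(), chosen at attach. */
	(*sc->sc_harvest)(sc->sc_rndtest, buf, nbytes);
}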
*/ static int ubsec_suspend(device_t dev) { struct ubsec_softc *sc = device_get_softc(dev); KASSERT(sc != NULL, ("ubsec_suspend: null software carrier")); #ifdef notyet /* XXX stop the device and save PCI settings */ #endif sc->sc_suspended = 1; return (0); } static int ubsec_resume(device_t dev) { struct ubsec_softc *sc = device_get_softc(dev); KASSERT(sc != NULL, ("ubsec_resume: null software carrier")); #ifdef notyet /* XXX retore PCI settings and start the device */ #endif sc->sc_suspended = 0; return (0); } /* * UBSEC Interrupt routine */ static void ubsec_intr(void *arg) { struct ubsec_softc *sc = arg; volatile u_int32_t stat; struct ubsec_q *q; struct ubsec_dma *dmap; int npkts = 0, i; UBSEC_LOCK(sc); stat = READ_REG(sc, BS_STAT); stat &= sc->sc_statmask; if (stat == 0) { UBSEC_UNLOCK(sc); return; } WRITE_REG(sc, BS_STAT, stat); /* IACK */ /* * Check to see if we have any packets waiting for us */ if ((stat & BS_STAT_MCR1_DONE)) { while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { q = SIMPLEQ_FIRST(&sc->sc_qchip); dmap = q->q_dma; if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0) break; SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q, q_next); npkts = q->q_nstacked_mcrs; sc->sc_nqchip -= 1+npkts; /* * search for further sc_qchip ubsec_q's that share * the same MCR, and complete them too, they must be * at the top. */ for (i = 0; i < npkts; i++) { if(q->q_stacked_mcr[i]) { ubsec_callback(sc, q->q_stacked_mcr[i]); } else { break; } } ubsec_callback(sc, q); } /* * Don't send any more packet to chip if there has been * a DMAERR. */ if (!(stat & BS_STAT_DMAERR)) ubsec_feed(sc); } /* * Check to see if we have any key setups/rng's waiting for us */ if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) && (stat & BS_STAT_MCR2_DONE)) { struct ubsec_q2 *q2; struct ubsec_mcr *mcr; while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) { q2 = SIMPLEQ_FIRST(&sc->sc_qchip2); ubsec_dma_sync(&q2->q_mcr, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr; if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) { ubsec_dma_sync(&q2->q_mcr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); break; } SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, q2, q_next); ubsec_callback2(sc, q2); /* * Don't send any more packet to chip if there has been * a DMAERR. */ if (!(stat & BS_STAT_DMAERR)) ubsec_feed2(sc); } } /* * Check to see if we got any DMA Error */ if (stat & BS_STAT_DMAERR) { #ifdef UBSEC_DEBUG if (ubsec_debug) { volatile u_int32_t a = READ_REG(sc, BS_ERR); printf("dmaerr %s@%08x\n", (a & BS_ERR_READ) ? "read" : "write", a & BS_ERR_ADDR); } #endif /* UBSEC_DEBUG */ ubsecstats.hst_dmaerr++; ubsec_totalreset(sc); ubsec_feed(sc); } if (sc->sc_needwakeup) { /* XXX check high watermark */ int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); #ifdef UBSEC_DEBUG if (ubsec_debug) device_printf(sc->sc_dev, "wakeup crypto (%x)\n", sc->sc_needwakeup); #endif /* UBSEC_DEBUG */ sc->sc_needwakeup &= ~wakeup; crypto_unblock(sc->sc_cid, wakeup); } UBSEC_UNLOCK(sc); } /* * ubsec_feed() - aggregate and post requests to chip */ static void ubsec_feed(struct ubsec_softc *sc) { struct ubsec_q *q, *q2; int npkts, i; void *v; u_int32_t stat; /* * Decide how many ops to combine in a single MCR. We cannot * aggregate more than UBS_MAX_AGGR because this is the number * of slots defined in the data structure. Note that * aggregation only happens if ops are marked batch'able. 
* Aggregating ops reduces the number of interrupts to the host * but also (potentially) increases the latency for processing * completed ops as we only get an interrupt when all aggregated * ops have completed. */ if (sc->sc_nqueue == 0) return; if (sc->sc_nqueue > 1) { npkts = 0; SIMPLEQ_FOREACH(q, &sc->sc_queue, q_next) { npkts++; if ((q->q_crp->crp_flags & CRYPTO_F_BATCH) == 0) break; } } else npkts = 1; /* * Check device status before going any further. */ if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) { if (stat & BS_STAT_DMAERR) { ubsec_totalreset(sc); ubsecstats.hst_dmaerr++; } else ubsecstats.hst_mcr1full++; return; } if (sc->sc_nqueue > ubsecstats.hst_maxqueue) ubsecstats.hst_maxqueue = sc->sc_nqueue; if (npkts > UBS_MAX_AGGR) npkts = UBS_MAX_AGGR; if (npkts < 2) /* special case 1 op */ goto feed1; ubsecstats.hst_totbatch += npkts-1; #ifdef UBSEC_DEBUG if (ubsec_debug) printf("merging %d records\n", npkts); #endif /* UBSEC_DEBUG */ q = SIMPLEQ_FIRST(&sc->sc_queue); SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q, q_next); --sc->sc_nqueue; bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE); if (q->q_dst_map != NULL) bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD); q->q_nstacked_mcrs = npkts - 1; /* Number of packets stacked */ for (i = 0; i < q->q_nstacked_mcrs; i++) { q2 = SIMPLEQ_FIRST(&sc->sc_queue); bus_dmamap_sync(sc->sc_dmat, q2->q_src_map, BUS_DMASYNC_PREWRITE); if (q2->q_dst_map != NULL) bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map, BUS_DMASYNC_PREREAD); SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q2, q_next); --sc->sc_nqueue; v = (void*)(((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) - sizeof(struct ubsec_mcr_add)); bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add)); q->q_stacked_mcr[i] = q2; } q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts); SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); sc->sc_nqchip += npkts; if (sc->sc_nqchip > ubsecstats.hst_maxqchip) ubsecstats.hst_maxqchip = sc->sc_nqchip; ubsec_dma_sync(&q->q_dma->d_alloc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_mcr)); return; feed1: q = SIMPLEQ_FIRST(&sc->sc_queue); bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE); if (q->q_dst_map != NULL) bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD); ubsec_dma_sync(&q->q_dma->d_alloc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_mcr)); #ifdef UBSEC_DEBUG if (ubsec_debug) printf("feed1: q->chip %p %08x stat %08x\n", q, (u_int32_t)vtophys(&q->q_dma->d_dma->d_mcr), stat); #endif /* UBSEC_DEBUG */ SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q, q_next); --sc->sc_nqueue; SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); sc->sc_nqchip++; if (sc->sc_nqchip > ubsecstats.hst_maxqchip) ubsecstats.hst_maxqchip = sc->sc_nqchip; return; } /* * Allocate a new 'session' and return an encoded session id. 'sidp' * contains our registration id, and should contain an encoded session * id on successful allocation. 
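(Editor's aside, not part of this change: the encoded session id is built and taken apart with the driver's UBSEC_SID()/UBSEC_SESSION() macros, in the same spirit as the HIFN_CARD()/HIFN_SESSION()/HIFN_SID() macros shown earlier for the hifn driver. A small illustrative sketch with made-up helper names:)

/* Editor's sketch: how the sid returned by ubsec_newsession() round-trips. */
static u_int32_t
example_encode_sid(device_t dev, int sesn)
{
	/* ubsec_newsession() does this before returning through *sidp. */
	return (UBSEC_SID(device_get_unit(dev), sesn));
}

static int
example_session_index(u_int64_t crp_sid)
{
	/* ubsec_process() and ubsec_freesession() recover the index this way. */
	return (UBSEC_SESSION((u_int32_t)crp_sid));
}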
*/ static int ubsec_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri) { struct cryptoini *c, *encini = NULL, *macini = NULL; struct ubsec_softc *sc = arg; struct ubsec_session *ses = NULL; MD5_CTX md5ctx; SHA1_CTX sha1ctx; int i, sesn; KASSERT(sc != NULL, ("ubsec_newsession: null softc")); if (sidp == NULL || cri == NULL || sc == NULL) return (EINVAL); for (c = cri; c != NULL; c = c->cri_next) { if (c->cri_alg == CRYPTO_MD5_HMAC || c->cri_alg == CRYPTO_SHA1_HMAC) { if (macini) return (EINVAL); macini = c; } else if (c->cri_alg == CRYPTO_DES_CBC || c->cri_alg == CRYPTO_3DES_CBC) { if (encini) return (EINVAL); encini = c; } else return (EINVAL); } if (encini == NULL && macini == NULL) return (EINVAL); if (sc->sc_sessions == NULL) { ses = sc->sc_sessions = (struct ubsec_session *)malloc( sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT); if (ses == NULL) return (ENOMEM); sesn = 0; sc->sc_nsessions = 1; } else { for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { if (sc->sc_sessions[sesn].ses_used == 0) { ses = &sc->sc_sessions[sesn]; break; } } if (ses == NULL) { sesn = sc->sc_nsessions; ses = (struct ubsec_session *)malloc((sesn + 1) * sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT); if (ses == NULL) return (ENOMEM); bcopy(sc->sc_sessions, ses, sesn * sizeof(struct ubsec_session)); bzero(sc->sc_sessions, sesn * sizeof(struct ubsec_session)); free(sc->sc_sessions, M_DEVBUF); sc->sc_sessions = ses; ses = &sc->sc_sessions[sesn]; sc->sc_nsessions++; } } bzero(ses, sizeof(struct ubsec_session)); ses->ses_used = 1; if (encini) { /* get an IV, network byte order */ /* XXX may read fewer than requested */ read_random(ses->ses_iv, sizeof(ses->ses_iv)); /* Go ahead and compute key in ubsec's byte order */ if (encini->cri_alg == CRYPTO_DES_CBC) { bcopy(encini->cri_key, &ses->ses_deskey[0], 8); bcopy(encini->cri_key, &ses->ses_deskey[2], 8); bcopy(encini->cri_key, &ses->ses_deskey[4], 8); } else bcopy(encini->cri_key, ses->ses_deskey, 24); SWAP32(ses->ses_deskey[0]); SWAP32(ses->ses_deskey[1]); SWAP32(ses->ses_deskey[2]); SWAP32(ses->ses_deskey[3]); SWAP32(ses->ses_deskey[4]); SWAP32(ses->ses_deskey[5]); } if (macini) { for (i = 0; i < macini->cri_klen / 8; i++) macini->cri_key[i] ^= HMAC_IPAD_VAL; if (macini->cri_alg == CRYPTO_MD5_HMAC) { MD5Init(&md5ctx); MD5Update(&md5ctx, macini->cri_key, macini->cri_klen / 8); MD5Update(&md5ctx, hmac_ipad_buffer, HMAC_BLOCK_LEN - (macini->cri_klen / 8)); bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state)); } else { SHA1Init(&sha1ctx); SHA1Update(&sha1ctx, macini->cri_key, macini->cri_klen / 8); SHA1Update(&sha1ctx, hmac_ipad_buffer, HMAC_BLOCK_LEN - (macini->cri_klen / 8)); bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32)); } for (i = 0; i < macini->cri_klen / 8; i++) macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL); if (macini->cri_alg == CRYPTO_MD5_HMAC) { MD5Init(&md5ctx); MD5Update(&md5ctx, macini->cri_key, macini->cri_klen / 8); MD5Update(&md5ctx, hmac_opad_buffer, HMAC_BLOCK_LEN - (macini->cri_klen / 8)); bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state)); } else { SHA1Init(&sha1ctx); SHA1Update(&sha1ctx, macini->cri_key, macini->cri_klen / 8); SHA1Update(&sha1ctx, hmac_opad_buffer, HMAC_BLOCK_LEN - (macini->cri_klen / 8)); bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32)); } for (i = 0; i < macini->cri_klen / 8; i++) macini->cri_key[i] ^= HMAC_OPAD_VAL; } *sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn); return (0); } /* * Deallocate a session. 
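(Editor's aside on the session setup above, not part of this change: ubsec_newsession() precomputes the HMAC inner and outer hash states in the usual RFC 2104 fashion -- XOR the key with the ipad byte, hash one block and save the intermediate state as ses_hminner, repeat with the opad byte for ses_hmouter, then restore the key. A reduced sketch of just the MD5 inner half, with a hypothetical helper name:)

/* Illustrative only: the ipad-side precompute for MD5-HMAC. */
static void
example_hmac_md5_inner(u_int8_t *key, int klen, u_int32_t state[4])
{
	MD5_CTX ctx;
	int i;

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_IPAD_VAL;
	MD5Init(&ctx);
	MD5Update(&ctx, key, klen);
	MD5Update(&ctx, hmac_ipad_buffer, HMAC_BLOCK_LEN - klen);
	bcopy(ctx.state, state, sizeof(ctx.state));	/* becomes ses_hminner */
	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_IPAD_VAL;		/* undo, leaving the key intact */
}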
*/ static int ubsec_freesession(void *arg, u_int64_t tid) { struct ubsec_softc *sc = arg; int session; u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; KASSERT(sc != NULL, ("ubsec_freesession: null softc")); if (sc == NULL) return (EINVAL); session = UBSEC_SESSION(sid); if (session >= sc->sc_nsessions) return (EINVAL); bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session])); return (0); } static void ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error) { struct ubsec_operand *op = arg; KASSERT(nsegs <= UBS_MAX_SCATTER, ("Too many DMA segments returned when mapping operand")); #ifdef UBSEC_DEBUG if (ubsec_debug) printf("ubsec_op_cb: mapsize %u nsegs %d\n", (u_int) mapsize, nsegs); #endif op->mapsize = mapsize; op->nsegs = nsegs; bcopy(seg, op->segs, nsegs * sizeof (seg[0])); } static int ubsec_process(void *arg, struct cryptop *crp, int hint) { struct ubsec_q *q = NULL; int err = 0, i, j, nicealign; struct ubsec_softc *sc = arg; struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; int encoffset = 0, macoffset = 0, cpskip, cpoffset; int sskip, dskip, stheend, dtheend; int16_t coffset; struct ubsec_session *ses; struct ubsec_pktctx ctx; struct ubsec_dma *dmap = NULL; if (crp == NULL || crp->crp_callback == NULL || sc == NULL) { ubsecstats.hst_invalid++; return (EINVAL); } if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) { ubsecstats.hst_badsession++; return (EINVAL); } UBSEC_LOCK(sc); if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) { ubsecstats.hst_queuefull++; sc->sc_needwakeup |= CRYPTO_SYMQ; UBSEC_UNLOCK(sc); return (ERESTART); } q = SIMPLEQ_FIRST(&sc->sc_freequeue); SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q, q_next); UBSEC_UNLOCK(sc); dmap = q->q_dma; /* Save dma pointer */ bzero(q, sizeof(struct ubsec_q)); bzero(&ctx, sizeof(ctx)); q->q_sesn = UBSEC_SESSION(crp->crp_sid); q->q_dma = dmap; ses = &sc->sc_sessions[q->q_sesn]; if (crp->crp_flags & CRYPTO_F_IMBUF) { q->q_src_m = (struct mbuf *)crp->crp_buf; q->q_dst_m = (struct mbuf *)crp->crp_buf; } else if (crp->crp_flags & CRYPTO_F_IOV) { q->q_src_io = (struct uio *)crp->crp_buf; q->q_dst_io = (struct uio *)crp->crp_buf; } else { ubsecstats.hst_badflags++; err = EINVAL; goto errout; /* XXX we don't handle contiguous blocks! 
*/ } bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr)); dmap->d_dma->d_mcr.mcr_pkts = htole16(1); dmap->d_dma->d_mcr.mcr_flags = 0; q->q_crp = crp; crd1 = crp->crp_desc; if (crd1 == NULL) { ubsecstats.hst_nodesc++; err = EINVAL; goto errout; } crd2 = crd1->crd_next; if (crd2 == NULL) { if (crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC) { maccrd = crd1; enccrd = NULL; } else if (crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC) { maccrd = NULL; enccrd = crd1; } else { ubsecstats.hst_badalg++; err = EINVAL; goto errout; } } else { if ((crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC) && (crd2->crd_alg == CRYPTO_DES_CBC || crd2->crd_alg == CRYPTO_3DES_CBC) && ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { maccrd = crd1; enccrd = crd2; } else if ((crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC) && (crd2->crd_alg == CRYPTO_MD5_HMAC || crd2->crd_alg == CRYPTO_SHA1_HMAC) && (crd1->crd_flags & CRD_F_ENCRYPT)) { enccrd = crd1; maccrd = crd2; } else { /* * We cannot order the ubsec as requested */ ubsecstats.hst_badalg++; err = EINVAL; goto errout; } } if (enccrd) { encoffset = enccrd->crd_skip; ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES); if (enccrd->crd_flags & CRD_F_ENCRYPT) { q->q_flags |= UBSEC_QFLAGS_COPYOUTIV; if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, ctx.pc_iv, 8); else { ctx.pc_iv[0] = ses->ses_iv[0]; ctx.pc_iv[1] = ses->ses_iv[1]; } if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { if (crp->crp_flags & CRYPTO_F_IMBUF) m_copyback(q->q_src_m, enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copyback(q->q_src_io, enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv); } } else { ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND); if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, ctx.pc_iv, 8); else if (crp->crp_flags & CRYPTO_F_IMBUF) m_copydata(q->q_src_m, enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copydata(q->q_src_io, enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv); } ctx.pc_deskey[0] = ses->ses_deskey[0]; ctx.pc_deskey[1] = ses->ses_deskey[1]; ctx.pc_deskey[2] = ses->ses_deskey[2]; ctx.pc_deskey[3] = ses->ses_deskey[3]; ctx.pc_deskey[4] = ses->ses_deskey[4]; ctx.pc_deskey[5] = ses->ses_deskey[5]; SWAP32(ctx.pc_iv[0]); SWAP32(ctx.pc_iv[1]); } if (maccrd) { macoffset = maccrd->crd_skip; if (maccrd->crd_alg == CRYPTO_MD5_HMAC) ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5); else ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1); for (i = 0; i < 5; i++) { ctx.pc_hminner[i] = ses->ses_hminner[i]; ctx.pc_hmouter[i] = ses->ses_hmouter[i]; HTOLE32(ctx.pc_hminner[i]); HTOLE32(ctx.pc_hmouter[i]); } } if (enccrd && maccrd) { /* * ubsec cannot handle packets where the end of encryption * and authentication are not the same, or where the * encrypted part begins before the authenticated part. 
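 *
 * (Editor's worked example, with hypothetical numbers, of a layout that
 * satisfies these rules -- an ESP-style packet whose 8-byte header is
 * authenticated but not encrypted:
 *
 *	maccrd->crd_skip = 20;	maccrd->crd_len = 108;	ends at byte 128
 *	enccrd->crd_skip = 28;	enccrd->crd_len = 100;	ends at byte 128
 *
 * which gives sskip = 20, cpskip = dskip = 28, stheend = 108,
 * dtheend = 100, coffset = 8 and ctx.pc_offset = htole16(8 >> 2);
 * both of the checks below then pass.)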
*/ if ((encoffset + enccrd->crd_len) != (macoffset + maccrd->crd_len)) { ubsecstats.hst_lenmismatch++; err = EINVAL; goto errout; } if (enccrd->crd_skip < maccrd->crd_skip) { ubsecstats.hst_skipmismatch++; err = EINVAL; goto errout; } sskip = maccrd->crd_skip; cpskip = dskip = enccrd->crd_skip; stheend = maccrd->crd_len; dtheend = enccrd->crd_len; coffset = enccrd->crd_skip - maccrd->crd_skip; cpoffset = cpskip + dtheend; #ifdef UBSEC_DEBUG if (ubsec_debug) { printf("mac: skip %d, len %d, inject %d\n", maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject); printf("enc: skip %d, len %d, inject %d\n", enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject); printf("src: skip %d, len %d\n", sskip, stheend); printf("dst: skip %d, len %d\n", dskip, dtheend); printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n", coffset, stheend, cpskip, cpoffset); } #endif } else { cpskip = dskip = sskip = macoffset + encoffset; dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len; cpoffset = cpskip + dtheend; coffset = 0; } ctx.pc_offset = htole16(coffset >> 2); if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_src_map)) { ubsecstats.hst_nomap++; err = ENOMEM; goto errout; } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map, q->q_src_m, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); q->q_src_map = NULL; ubsecstats.hst_noload++; err = ENOMEM; goto errout; } } else if (crp->crp_flags & CRYPTO_F_IOV) { if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map, q->q_src_io, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); q->q_src_map = NULL; ubsecstats.hst_noload++; err = ENOMEM; goto errout; } } nicealign = ubsec_dmamap_aligned(&q->q_src); dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend); #ifdef UBSEC_DEBUG if (ubsec_debug) printf("src skip: %d nicealign: %u\n", sskip, nicealign); #endif for (i = j = 0; i < q->q_src_nsegs; i++) { struct ubsec_pktbuf *pb; bus_size_t packl = q->q_src_segs[i].ds_len; bus_addr_t packp = q->q_src_segs[i].ds_addr; if (sskip >= packl) { sskip -= packl; continue; } packl -= sskip; packp += sskip; sskip = 0; if (packl > 0xfffc) { err = EIO; goto errout; } if (j == 0) pb = &dmap->d_dma->d_mcr.mcr_ipktbuf; else pb = &dmap->d_dma->d_sbuf[j - 1]; pb->pb_addr = htole32(packp); if (stheend) { if (packl > stheend) { pb->pb_len = htole32(stheend); stheend = 0; } else { pb->pb_len = htole32(packl); stheend -= packl; } } else pb->pb_len = htole32(packl); if ((i + 1) == q->q_src_nsegs) pb->pb_next = 0; else pb->pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_sbuf[j])); j++; } if (enccrd == NULL && maccrd != NULL) { dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0; dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0; dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_macbuf[0])); #ifdef UBSEC_DEBUG if (ubsec_debug) printf("opkt: %x %x %x\n", dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr, dmap->d_dma->d_mcr.mcr_opktbuf.pb_len, dmap->d_dma->d_mcr.mcr_opktbuf.pb_next); #endif } else { if (crp->crp_flags & CRYPTO_F_IOV) { if (!nicealign) { ubsecstats.hst_iovmisaligned++; err = EINVAL; goto errout; } if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_dst_map)) { ubsecstats.hst_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map, q->q_dst_io, ubsec_op_cb, &q->q_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); q->q_dst_map = 
NULL; ubsecstats.hst_noload++; err = ENOMEM; goto errout; } } else if (crp->crp_flags & CRYPTO_F_IMBUF) { if (nicealign) { q->q_dst = q->q_src; } else { int totlen, len; struct mbuf *m, *top, **mp; ubsecstats.hst_unaligned++; totlen = q->q_src_mapsize; if (q->q_src_m->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m, M_DONTWAIT, MT_DATA); if (m && !m_dup_pkthdr(m, q->q_src_m, M_DONTWAIT)) { m_free(m); m = NULL; } } else { len = MLEN; MGET(m, M_DONTWAIT, MT_DATA); } if (m == NULL) { ubsecstats.hst_nombuf++; err = sc->sc_nqueue ? ERESTART : ENOMEM; goto errout; } if (totlen >= MINCLSIZE) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { m_free(m); ubsecstats.hst_nomcl++; err = sc->sc_nqueue ? ERESTART : ENOMEM; goto errout; } len = MCLBYTES; } m->m_len = len; top = NULL; mp = ⊤ while (totlen > 0) { if (top) { MGET(m, M_DONTWAIT, MT_DATA); if (m == NULL) { m_freem(top); ubsecstats.hst_nombuf++; err = sc->sc_nqueue ? ERESTART : ENOMEM; goto errout; } len = MLEN; } if (top && totlen >= MINCLSIZE) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { *mp = m; m_freem(top); ubsecstats.hst_nomcl++; err = sc->sc_nqueue ? ERESTART : ENOMEM; goto errout; } len = MCLBYTES; } m->m_len = len = min(totlen, len); totlen -= len; *mp = m; mp = &m->m_next; } q->q_dst_m = top; ubsec_mcopy(q->q_src_m, q->q_dst_m, cpskip, cpoffset); if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_dst_map) != 0) { ubsecstats.hst_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_dst_map, q->q_dst_m, ubsec_op_cb, &q->q_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); q->q_dst_map = NULL; ubsecstats.hst_noload++; err = ENOMEM; goto errout; } } } else { ubsecstats.hst_badflags++; err = EINVAL; goto errout; } #ifdef UBSEC_DEBUG if (ubsec_debug) printf("dst skip: %d\n", dskip); #endif for (i = j = 0; i < q->q_dst_nsegs; i++) { struct ubsec_pktbuf *pb; bus_size_t packl = q->q_dst_segs[i].ds_len; bus_addr_t packp = q->q_dst_segs[i].ds_addr; if (dskip >= packl) { dskip -= packl; continue; } packl -= dskip; packp += dskip; dskip = 0; if (packl > 0xfffc) { err = EIO; goto errout; } if (j == 0) pb = &dmap->d_dma->d_mcr.mcr_opktbuf; else pb = &dmap->d_dma->d_dbuf[j - 1]; pb->pb_addr = htole32(packp); if (dtheend) { if (packl > dtheend) { pb->pb_len = htole32(dtheend); dtheend = 0; } else { pb->pb_len = htole32(packl); dtheend -= packl; } } else pb->pb_len = htole32(packl); if ((i + 1) == q->q_dst_nsegs) { if (maccrd) pb->pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_macbuf[0])); else pb->pb_next = 0; } else pb->pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_dbuf[j])); j++; } } dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_ctx)); if (sc->sc_flags & UBS_FLAGS_LONGCTX) { struct ubsec_pktctx_long *ctxl; ctxl = (struct ubsec_pktctx_long *)(dmap->d_alloc.dma_vaddr + offsetof(struct ubsec_dmachunk, d_ctx)); /* transform small context into long context */ ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long)); ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC); ctxl->pc_flags = ctx.pc_flags; ctxl->pc_offset = ctx.pc_offset; for (i = 0; i < 6; i++) ctxl->pc_deskey[i] = ctx.pc_deskey[i]; for (i = 0; i < 5; i++) ctxl->pc_hminner[i] = ctx.pc_hminner[i]; for (i = 0; i < 5; i++) ctxl->pc_hmouter[i] = ctx.pc_hmouter[i]; ctxl->pc_iv[0] = ctx.pc_iv[0]; ctxl->pc_iv[1] = ctx.pc_iv[1]; } else bcopy(&ctx, dmap->d_alloc.dma_vaddr + offsetof(struct ubsec_dmachunk, d_ctx), 
sizeof(struct ubsec_pktctx)); UBSEC_LOCK(sc); SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next); sc->sc_nqueue++; ubsecstats.hst_ipackets++; ubsecstats.hst_ibytes += dmap->d_alloc.dma_size; if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= UBS_MAX_AGGR) ubsec_feed(sc); UBSEC_UNLOCK(sc); return (0); errout: if (q != NULL) { if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) m_freem(q->q_dst_m); if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); } if (q->q_src_map != NULL) { bus_dmamap_unload(sc->sc_dmat, q->q_src_map); bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); } UBSEC_LOCK(sc); SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); UBSEC_UNLOCK(sc); } if (err != ERESTART) { crp->crp_etype = err; crypto_done(crp); } else { sc->sc_needwakeup |= CRYPTO_SYMQ; } return (err); } static void ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q) { struct cryptop *crp = (struct cryptop *)q->q_crp; struct cryptodesc *crd; struct ubsec_dma *dmap = q->q_dma; ubsecstats.hst_opackets++; ubsecstats.hst_obytes += dmap->d_alloc.dma_size; ubsec_dma_sync(&dmap->d_alloc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); } bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_dmat, q->q_src_map); bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) { m_freem(q->q_src_m); crp->crp_buf = (caddr_t)q->q_dst_m; } ubsecstats.hst_obytes += ((struct mbuf *)crp->crp_buf)->m_len; /* copy out IV for future use */ if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) { for (crd = crp->crp_desc; crd; crd = crd->crd_next) { if (crd->crd_alg != CRYPTO_DES_CBC && crd->crd_alg != CRYPTO_3DES_CBC) continue; if (crp->crp_flags & CRYPTO_F_IMBUF) m_copydata((struct mbuf *)crp->crp_buf, crd->crd_skip + crd->crd_len - 8, 8, (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv); else if (crp->crp_flags & CRYPTO_F_IOV) { cuio_copydata((struct uio *)crp->crp_buf, crd->crd_skip + crd->crd_len - 8, 8, (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv); } break; } } for (crd = crp->crp_desc; crd; crd = crd->crd_next) { if (crd->crd_alg != CRYPTO_MD5_HMAC && crd->crd_alg != CRYPTO_SHA1_HMAC) continue; if (crp->crp_flags & CRYPTO_F_IMBUF) m_copyback((struct mbuf *)crp->crp_buf, crd->crd_inject, 12, (caddr_t)dmap->d_dma->d_macbuf); else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac) bcopy((caddr_t)dmap->d_dma->d_macbuf, crp->crp_mac, 12); break; } SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); crypto_done(crp); } static void ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset) { int i, j, dlen, slen; caddr_t dptr, sptr; j = 0; sptr = srcm->m_data; slen = srcm->m_len; dptr = dstm->m_data; dlen = dstm->m_len; while (1) { for (i = 0; i < min(slen, dlen); i++) { if (j < hoffset || j >= toffset) *dptr++ = *sptr++; slen--; dlen--; j++; } if (slen == 0) { srcm = srcm->m_next; if (srcm == NULL) return; sptr = srcm->m_data; slen = srcm->m_len; } if (dlen == 0) { dstm = dstm->m_next; if (dstm == NULL) return; dptr = dstm->m_data; dlen = dstm->m_len; } } } /* * feed the key generator, must be called at splimp() or higher. 
*/ static int ubsec_feed2(struct ubsec_softc *sc) { struct ubsec_q2 *q; while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) { if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL) break; q = SIMPLEQ_FIRST(&sc->sc_queue2); ubsec_dma_sync(&q->q_mcr, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_PREWRITE); WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr); SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, q, q_next); --sc->sc_nqueue2; SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next); } return (0); } /* * Callback for handling random numbers */ static void ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q) { struct cryptkop *krp; struct ubsec_ctx_keyop *ctx; ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr; ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_POSTWRITE); switch (q->q_type) { #ifndef UBSEC_NO_RNG case UBS_CTXOP_RNGBYPASS: { struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q; ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_POSTREAD); - random_harvest(rng->rng_buf.dma_vaddr, - UBSEC_RNG_BUFSIZ*sizeof (u_int32_t), - UBSEC_RNG_BUFSIZ*sizeof (u_int32_t)*NBBY, 0, - RANDOM_PURE); + (*sc->sc_harvest)(sc->sc_rndtest, + rng->rng_buf.dma_vaddr, + UBSEC_RNG_BUFSIZ*sizeof (u_int32_t)); rng->rng_used = 0; callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); break; } #endif case UBS_CTXOP_MODEXP: { struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; u_int rlen, clen; krp = me->me_krp; rlen = (me->me_modbits + 7) / 8; clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8; ubsec_dma_sync(&me->me_M, BUS_DMASYNC_POSTWRITE); ubsec_dma_sync(&me->me_E, BUS_DMASYNC_POSTWRITE); ubsec_dma_sync(&me->me_C, BUS_DMASYNC_POSTREAD); ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_POSTWRITE); if (clen < rlen) krp->krp_status = E2BIG; else { if (sc->sc_flags & UBS_FLAGS_HWNORM) { bzero(krp->krp_param[krp->krp_iparams].crp_p, (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8); bcopy(me->me_C.dma_vaddr, krp->krp_param[krp->krp_iparams].crp_p, (me->me_modbits + 7) / 8); } else ubsec_kshift_l(me->me_shiftbits, me->me_C.dma_vaddr, me->me_normbits, krp->krp_param[krp->krp_iparams].crp_p, krp->krp_param[krp->krp_iparams].crp_nbits); } crypto_kdone(krp); /* bzero all potentially sensitive data */ bzero(me->me_E.dma_vaddr, me->me_E.dma_size); bzero(me->me_M.dma_vaddr, me->me_M.dma_size); bzero(me->me_C.dma_vaddr, me->me_C.dma_size); bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); /* Can't free here, so put us on the free list. */ SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next); break; } case UBS_CTXOP_RSAPRIV: { struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; u_int len; krp = rp->rpr_krp; ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_POSTWRITE); ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_POSTREAD); len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8; bcopy(rp->rpr_msgout.dma_vaddr, krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len); crypto_kdone(krp); bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); bzero(rp->rpr_q.q_ctx.dma_vaddr, rp->rpr_q.q_ctx.dma_size); /* Can't free here, so put us on the free list. 
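 * (Editor's note, not part of this change: both key-op completion paths
 * defer their teardown by parking the ubsec_q2 on sc_q2free, presumably
 * because this callback runs from the interrupt path.  The list is
 * drained the next time a key operation is submitted, at the top of
 * ubsec_kprocess():
 *
 *	while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) {
 *		q = SIMPLEQ_FIRST(&sc->sc_q2free);
 *		SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, q, q_next);
 *		ubsec_kfree(sc, q);
 *	}
 *
 * exactly as the existing code in ubsec_kprocess() below does.)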
*/ SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next); break; } default: device_printf(sc->sc_dev, "unknown ctx op: %x\n", letoh16(ctx->ctx_op)); break; } } #ifndef UBSEC_NO_RNG static void ubsec_rng(void *vsc) { struct ubsec_softc *sc = vsc; struct ubsec_q2_rng *rng = &sc->sc_rng; struct ubsec_mcr *mcr; struct ubsec_ctx_rngbypass *ctx; UBSEC_LOCK(sc); if (rng->rng_used) { UBSEC_UNLOCK(sc); return; } sc->sc_nqueue2++; if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE) goto out; mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr; ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr; mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr); mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0; mcr->mcr_ipktbuf.pb_len = 0; mcr->mcr_reserved = mcr->mcr_pktlen = 0; mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr); mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) & UBS_PKTBUF_LEN); mcr->mcr_opktbuf.pb_next = 0; ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass)); ctx->rbp_op = htole16(UBS_CTXOP_RNGBYPASS); rng->rng_q.q_type = UBS_CTXOP_RNGBYPASS; ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_PREREAD); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next); rng->rng_used = 1; ubsec_feed2(sc); ubsecstats.hst_rng++; UBSEC_UNLOCK(sc); return; out: /* * Something weird happened, generate our own call back. */ sc->sc_nqueue2--; UBSEC_UNLOCK(sc); callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); } #endif /* UBSEC_NO_RNG */ static void ubsec_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static int ubsec_dma_malloc( struct ubsec_softc *sc, bus_size_t size, struct ubsec_dma_alloc *dma, int mapflags ) { int r; /* XXX could specify sc_dmat as parent but that just adds overhead */ r = bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ &dma->dma_tag); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dma_tag_create failed; error %u\n", r); goto fail_0; } r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dmamap_create failed; error %u\n", r); goto fail_1; } r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dmammem_alloc failed; size %zu, error %u\n", size, r); goto fail_2; } r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, ubsec_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dmamap_load failed; error %u\n", r); goto fail_3; } dma->dma_size = size; return (0); fail_3: bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); fail_1: bus_dmamap_destroy(dma->dma_tag, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_map = NULL; dma->dma_tag = NULL; return (r); } static void ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma) { bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dmamap_destroy(dma->dma_tag, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); } /* * 
Resets the board. Values in the registers are left as is * from the reset (i.e. initial values are assigned elsewhere). */ static void ubsec_reset_board(struct ubsec_softc *sc) { volatile u_int32_t ctrl; ctrl = READ_REG(sc, BS_CTRL); ctrl |= BS_CTRL_RESET; WRITE_REG(sc, BS_CTRL, ctrl); /* * Wait approx. 30 PCI clocks = 900 ns = 0.9 us */ DELAY(10); } /* * Init Broadcom registers */ static void ubsec_init_board(struct ubsec_softc *sc) { u_int32_t ctrl; ctrl = READ_REG(sc, BS_CTRL); ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64); ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT; if (sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) ctrl |= BS_CTRL_MCR2INT; else ctrl &= ~BS_CTRL_MCR2INT; if (sc->sc_flags & UBS_FLAGS_HWNORM) ctrl &= ~BS_CTRL_SWNORM; WRITE_REG(sc, BS_CTRL, ctrl); } /* * Init Broadcom PCI registers */ static void ubsec_init_pciregs(device_t dev) { #if 0 u_int32_t misc; misc = pci_conf_read(pc, pa->pa_tag, BS_RTY_TOUT); misc = (misc & ~(UBS_PCI_RTY_MASK << UBS_PCI_RTY_SHIFT)) | ((UBS_DEF_RTY & 0xff) << UBS_PCI_RTY_SHIFT); misc = (misc & ~(UBS_PCI_TOUT_MASK << UBS_PCI_TOUT_SHIFT)) | ((UBS_DEF_TOUT & 0xff) << UBS_PCI_TOUT_SHIFT); pci_conf_write(pc, pa->pa_tag, BS_RTY_TOUT, misc); #endif /* * This will set the cache line size to 1, which will * force the BCM58xx chip just to do burst read/writes. * Cache line read/writes are too slow */ pci_write_config(dev, PCIR_CACHELNSZ, UBS_DEF_CACHELINE, 1); } /* * Clean up after a chip crash. * It is assumed that the caller is in splimp() */ static void ubsec_cleanchip(struct ubsec_softc *sc) { struct ubsec_q *q; while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { q = SIMPLEQ_FIRST(&sc->sc_qchip); SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q, q_next); ubsec_free_q(sc, q); } sc->sc_nqchip = 0; } /* * free a ubsec_q * It is assumed that the caller is within splimp(). */ static int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q) { struct ubsec_q *q2; struct cryptop *crp; int npkts; int i; npkts = q->q_nstacked_mcrs; for (i = 0; i < npkts; i++) { if(q->q_stacked_mcr[i]) { q2 = q->q_stacked_mcr[i]; if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m)) m_freem(q2->q_dst_m); crp = (struct cryptop *)q2->q_crp; SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next); crp->crp_etype = EFAULT; crypto_done(crp); } else { break; } } /* * Free header MCR */ if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) m_freem(q->q_dst_m); crp = (struct cryptop *)q->q_crp; SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); crp->crp_etype = EFAULT; crypto_done(crp); return(0); } /* * Routine to reset the chip and clean up.
* It is assumed that the caller is in splimp() */ static void ubsec_totalreset(struct ubsec_softc *sc) { ubsec_reset_board(sc); ubsec_init_board(sc); ubsec_cleanchip(sc); } static int ubsec_dmamap_aligned(struct ubsec_operand *op) { int i; for (i = 0; i < op->nsegs; i++) { if (op->segs[i].ds_addr & 3) return (0); if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3)) return (0); } return (1); } static void ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q) { switch (q->q_type) { case UBS_CTXOP_MODEXP: { struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; ubsec_dma_free(sc, &me->me_q.q_mcr); ubsec_dma_free(sc, &me->me_q.q_ctx); ubsec_dma_free(sc, &me->me_M); ubsec_dma_free(sc, &me->me_E); ubsec_dma_free(sc, &me->me_C); ubsec_dma_free(sc, &me->me_epb); free(me, M_DEVBUF); break; } case UBS_CTXOP_RSAPRIV: { struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; ubsec_dma_free(sc, &rp->rpr_q.q_mcr); ubsec_dma_free(sc, &rp->rpr_q.q_ctx); ubsec_dma_free(sc, &rp->rpr_msgin); ubsec_dma_free(sc, &rp->rpr_msgout); free(rp, M_DEVBUF); break; } default: device_printf(sc->sc_dev, "invalid kfree 0x%x\n", q->q_type); break; } } static int ubsec_kprocess(void *arg, struct cryptkop *krp, int hint) { struct ubsec_softc *sc = arg; int r; if (krp == NULL || krp->krp_callback == NULL) return (EINVAL); while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) { struct ubsec_q2 *q; q = SIMPLEQ_FIRST(&sc->sc_q2free); SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, q, q_next); ubsec_kfree(sc, q); } switch (krp->krp_op) { case CRK_MOD_EXP: if (sc->sc_flags & UBS_FLAGS_HWNORM) r = ubsec_kprocess_modexp_hw(sc, krp, hint); else r = ubsec_kprocess_modexp_sw(sc, krp, hint); break; case CRK_MOD_EXP_CRT: return (ubsec_kprocess_rsapriv(sc, krp, hint)); default: device_printf(sc->sc_dev, "kprocess: invalid op 0x%x\n", krp->krp_op); krp->krp_status = EOPNOTSUPP; crypto_kdone(krp); return (0); } return (0); /* silence compiler */ } /* * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization) */ static int ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) { struct ubsec_q2_modexp *me; struct ubsec_mcr *mcr; struct ubsec_ctx_modexp *ctx; struct ubsec_pktbuf *epb; int err = 0; u_int nbits, normbits, mbits, shiftbits, ebits; me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); if (me == NULL) { err = ENOMEM; goto errout; } bzero(me, sizeof *me); me->me_krp = krp; me->me_q.q_type = UBS_CTXOP_MODEXP; nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); if (nbits <= 512) normbits = 512; else if (nbits <= 768) normbits = 768; else if (nbits <= 1024) normbits = 1024; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) normbits = 1536; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) normbits = 2048; else { err = E2BIG; goto errout; } shiftbits = normbits - nbits; me->me_modbits = nbits; me->me_shiftbits = shiftbits; me->me_normbits = normbits; /* Sanity check: result bits must be >= true modulus bits. 
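 * (Editor's note with a hypothetical worked example, not part of this
 * change: the chip works on operands normalized to one of the fixed
 * widths chosen above -- 512, 768, 1024 and, with UBS_FLAGS_BIGKEY,
 * 1536 or 2048 bits.  For, say, a 1000-bit modulus:
 *
 *	nbits = 1000, normbits = 1024, shiftbits = 1024 - 1000 = 24
 *
 * so M, E and N are shifted left by 24 bits into 1024-bit buffers via
 * ubsec_kshift_r(), and ubsec_callback2() later shifts the result back
 * down with ubsec_kshift_l() when hardware normalization is not used.)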
*/ if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { err = ERANGE; goto errout; } if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &me->me_q.q_mcr, 0)) { err = ENOMEM; goto errout; } mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), &me->me_q.q_ctx, 0)) { err = ENOMEM; goto errout; } mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); if (mbits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { err = ENOMEM; goto errout; } ubsec_kshift_r(shiftbits, krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits, me->me_M.dma_vaddr, normbits); if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { err = ENOMEM; goto errout; } bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); if (ebits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { err = ENOMEM; goto errout; } ubsec_kshift_r(shiftbits, krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits, me->me_E.dma_vaddr, normbits); if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), &me->me_epb, 0)) { err = ENOMEM; goto errout; } epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; epb->pb_addr = htole32(me->me_E.dma_paddr); epb->pb_next = 0; epb->pb_len = htole32(normbits / 8); #ifdef UBSEC_DEBUG if (ubsec_debug) { printf("Epb "); ubsec_dump_pb(epb); } #endif mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); mcr->mcr_reserved = 0; mcr->mcr_pktlen = 0; mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); mcr->mcr_opktbuf.pb_next = 0; mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); #ifdef DIAGNOSTIC /* Misaligned output buffer will hang the chip. */ if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) panic("%s: modexp invalid addr 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_addr)); if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) panic("%s: modexp invalid len 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_len)); #endif ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; bzero(ctx, sizeof(*ctx)); ubsec_kshift_r(shiftbits, krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits, ctx->me_N, normbits); ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); ctx->me_op = htole16(UBS_CTXOP_MODEXP); ctx->me_E_len = htole16(nbits); ctx->me_N_len = htole16(nbits); #ifdef UBSEC_DEBUG if (ubsec_debug) { ubsec_dump_mcr(mcr); ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); } #endif /* * ubsec_feed2 will sync mcr and ctx, we just need to sync * everything else. */ ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); /* Enqueue and we're done... 
*/ UBSEC_LOCK(sc); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); ubsec_feed2(sc); ubsecstats.hst_modexp++; UBSEC_UNLOCK(sc); return (0); errout: if (me != NULL) { if (me->me_q.q_mcr.dma_map != NULL) ubsec_dma_free(sc, &me->me_q.q_mcr); if (me->me_q.q_ctx.dma_map != NULL) { bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); ubsec_dma_free(sc, &me->me_q.q_ctx); } if (me->me_M.dma_map != NULL) { bzero(me->me_M.dma_vaddr, me->me_M.dma_size); ubsec_dma_free(sc, &me->me_M); } if (me->me_E.dma_map != NULL) { bzero(me->me_E.dma_vaddr, me->me_E.dma_size); ubsec_dma_free(sc, &me->me_E); } if (me->me_C.dma_map != NULL) { bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ubsec_dma_free(sc, &me->me_C); } if (me->me_epb.dma_map != NULL) ubsec_dma_free(sc, &me->me_epb); free(me, M_DEVBUF); } krp->krp_status = err; crypto_kdone(krp); return (0); } /* * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization) */ static int ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) { struct ubsec_q2_modexp *me; struct ubsec_mcr *mcr; struct ubsec_ctx_modexp *ctx; struct ubsec_pktbuf *epb; int err = 0; u_int nbits, normbits, mbits, shiftbits, ebits; me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); if (me == NULL) { err = ENOMEM; goto errout; } bzero(me, sizeof *me); me->me_krp = krp; me->me_q.q_type = UBS_CTXOP_MODEXP; nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); if (nbits <= 512) normbits = 512; else if (nbits <= 768) normbits = 768; else if (nbits <= 1024) normbits = 1024; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) normbits = 1536; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) normbits = 2048; else { err = E2BIG; goto errout; } shiftbits = normbits - nbits; /* XXX ??? */ me->me_modbits = nbits; me->me_shiftbits = shiftbits; me->me_normbits = normbits; /* Sanity check: result bits must be >= true modulus bits. 
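 * Unlike the software-normalization path above, parts with
 * UBS_FLAGS_HWNORM take the operands unshifted: M, E and N are simply
 * zero-padded to normbits/8 bytes and me_E_len carries the true
 * exponent width (ebits) rather than nbits.
 *
 * The repeated zero-pad-and-copy below could be expressed as a helper;
 * this is a hypothetical sketch only, not part of the driver:
 *
 *	static void
 *	ubsec_kcopy_pad(struct crparam *cr, void *dst, u_int dstbytes)
 *	{
 *		u_int nbytes = (ubsec_ksigbits(cr) + 7) / 8;
 *
 *		bzero(dst, dstbytes);
 *		bcopy(cr->crp_p, dst, nbytes);
 *	}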
*/ if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { err = ERANGE; goto errout; } if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &me->me_q.q_mcr, 0)) { err = ENOMEM; goto errout; } mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), &me->me_q.q_ctx, 0)) { err = ENOMEM; goto errout; } mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); if (mbits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { err = ENOMEM; goto errout; } bzero(me->me_M.dma_vaddr, normbits / 8); bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p, me->me_M.dma_vaddr, (mbits + 7) / 8); if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { err = ENOMEM; goto errout; } bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); if (ebits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { err = ENOMEM; goto errout; } bzero(me->me_E.dma_vaddr, normbits / 8); bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p, me->me_E.dma_vaddr, (ebits + 7) / 8); if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), &me->me_epb, 0)) { err = ENOMEM; goto errout; } epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; epb->pb_addr = htole32(me->me_E.dma_paddr); epb->pb_next = 0; epb->pb_len = htole32((ebits + 7) / 8); #ifdef UBSEC_DEBUG if (ubsec_debug) { printf("Epb "); ubsec_dump_pb(epb); } #endif mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); mcr->mcr_reserved = 0; mcr->mcr_pktlen = 0; mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); mcr->mcr_opktbuf.pb_next = 0; mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); #ifdef DIAGNOSTIC /* Misaligned output buffer will hang the chip. */ if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) panic("%s: modexp invalid addr 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_addr)); if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) panic("%s: modexp invalid len 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_len)); #endif ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; bzero(ctx, sizeof(*ctx)); bcopy(krp->krp_param[UBS_MODEXP_PAR_N].crp_p, ctx->me_N, (nbits + 7) / 8); ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); ctx->me_op = htole16(UBS_CTXOP_MODEXP); ctx->me_E_len = htole16(ebits); ctx->me_N_len = htole16(nbits); #ifdef UBSEC_DEBUG if (ubsec_debug) { ubsec_dump_mcr(mcr); ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); } #endif /* * ubsec_feed2 will sync mcr and ctx, we just need to sync * everything else. */ ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); /* Enqueue and we're done... 
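 * For context, a key-op consumer reaches this code through the
 * opencrypto framework: it fills in a struct cryptkop (three input
 * parameters M, E, N and one output parameter C for CRK_MOD_EXP) and
 * hands it to crypto_kdispatch(), which eventually calls
 * ubsec_kprocess() above.  An illustrative sketch, with placeholder
 * names (m_bytes, m_bits, my_modexp_done), not taken from this driver:
 *
 *	krp->krp_op = CRK_MOD_EXP;
 *	krp->krp_iparams = 3;
 *	krp->krp_oparams = 1;
 *	krp->krp_param[UBS_MODEXP_PAR_M].crp_p = m_bytes;
 *	krp->krp_param[UBS_MODEXP_PAR_M].crp_nbits = m_bits;
 *	krp->krp_callback = my_modexp_done;
 *	error = crypto_kdispatch(krp);
 *
 * The E, N and C parameters are filled in the same way; the callback
 * fires from crypto_kdone() once the completion path has copied the
 * result into UBS_MODEXP_PAR_C and set krp_status.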
*/ UBSEC_LOCK(sc); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); ubsec_feed2(sc); UBSEC_UNLOCK(sc); return (0); errout: if (me != NULL) { if (me->me_q.q_mcr.dma_map != NULL) ubsec_dma_free(sc, &me->me_q.q_mcr); if (me->me_q.q_ctx.dma_map != NULL) { bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); ubsec_dma_free(sc, &me->me_q.q_ctx); } if (me->me_M.dma_map != NULL) { bzero(me->me_M.dma_vaddr, me->me_M.dma_size); ubsec_dma_free(sc, &me->me_M); } if (me->me_E.dma_map != NULL) { bzero(me->me_E.dma_vaddr, me->me_E.dma_size); ubsec_dma_free(sc, &me->me_E); } if (me->me_C.dma_map != NULL) { bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ubsec_dma_free(sc, &me->me_C); } if (me->me_epb.dma_map != NULL) ubsec_dma_free(sc, &me->me_epb); free(me, M_DEVBUF); } krp->krp_status = err; crypto_kdone(krp); return (0); } static int ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, int hint) { struct ubsec_q2_rsapriv *rp = NULL; struct ubsec_mcr *mcr; struct ubsec_ctx_rsapriv *ctx; int err = 0; u_int padlen, msglen; msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]); padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]); if (msglen > padlen) padlen = msglen; if (padlen <= 256) padlen = 256; else if (padlen <= 384) padlen = 384; else if (padlen <= 512) padlen = 512; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768) padlen = 768; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024) padlen = 1024; else { err = E2BIG; goto errout; } if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) { err = E2BIG; goto errout; } if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) { err = E2BIG; goto errout; } if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) { err = E2BIG; goto errout; } rp = (struct ubsec_q2_rsapriv *)malloc(sizeof *rp, M_DEVBUF, M_NOWAIT); if (rp == NULL) return (ENOMEM); bzero(rp, sizeof *rp); rp->rpr_krp = krp; rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &rp->rpr_q.q_mcr, 0)) { err = ENOMEM; goto errout; } mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv), &rp->rpr_q.q_ctx, 0)) { err = ENOMEM; goto errout; } ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr; bzero(ctx, sizeof *ctx); /* Copy in p */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p, &ctx->rpr_buf[0 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8); /* Copy in q */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p, &ctx->rpr_buf[1 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8); /* Copy in dp */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p, &ctx->rpr_buf[2 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8); /* Copy in dq */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p, &ctx->rpr_buf[3 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8); /* Copy in pinv */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p, &ctx->rpr_buf[4 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8); msglen = padlen * 2; /* Copy in input message (aligned buffer/length). */ if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) { /* Is this likely? 
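 * (The input of a CRT private-key operation is taken mod n = p * q, so
 * it can never meaningfully exceed msglen = 2 * padlen bits; an
 * oversized msgin therefore indicates a confused caller and is rejected
 * with E2BIG rather than silently truncated.)
 *
 * The fixed-stride packing of p, q, dp, dq and pinv above, each padded
 * to padlen/8 bytes inside rpr_buf, could be factored into a helper;
 * a hypothetical sketch only, not part of the driver:
 *
 *	static void
 *	ubsec_rsapriv_pack(struct ubsec_ctx_rsapriv *ctx,
 *	    struct crparam *par, int slot, u_int padlen)
 *	{
 *		bcopy(par->crp_p, &ctx->rpr_buf[slot * (padlen / 8)],
 *		    (par->crp_nbits + 7) / 8);
 *	}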
*/ err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) { err = ENOMEM; goto errout; } bzero(rp->rpr_msgin.dma_vaddr, (msglen + 7) / 8); bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p, rp->rpr_msgin.dma_vaddr, (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8); /* Prepare space for output message (aligned buffer/length). */ if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) { /* Is this likely? */ err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) { err = ENOMEM; goto errout; } bzero(rp->rpr_msgout.dma_vaddr, (msglen + 7) / 8); mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr); mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr); mcr->mcr_ipktbuf.pb_next = 0; mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size); mcr->mcr_reserved = 0; mcr->mcr_pktlen = htole16(msglen); mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr); mcr->mcr_opktbuf.pb_next = 0; mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size); #ifdef DIAGNOSTIC if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) { panic("%s: rsapriv: invalid msgin %x(0x%jx)", device_get_nameunit(sc->sc_dev), rp->rpr_msgin.dma_paddr, (uintmax_t)rp->rpr_msgin.dma_size); } if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) { panic("%s: rsapriv: invalid msgout %x(0x%jx)", device_get_nameunit(sc->sc_dev), rp->rpr_msgout.dma_paddr, (uintmax_t)rp->rpr_msgout.dma_size); } #endif ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8)); ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV); ctx->rpr_q_len = htole16(padlen); ctx->rpr_p_len = htole16(padlen); /* * ubsec_feed2 will sync mcr and ctx, we just need to sync * everything else. */ ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_PREREAD); /* Enqueue and we're done... */ UBSEC_LOCK(sc); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next); ubsec_feed2(sc); ubsecstats.hst_modexpcrt++; UBSEC_UNLOCK(sc); return (0); errout: if (rp != NULL) { if (rp->rpr_q.q_mcr.dma_map != NULL) ubsec_dma_free(sc, &rp->rpr_q.q_mcr); if (rp->rpr_msgin.dma_map != NULL) { bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); ubsec_dma_free(sc, &rp->rpr_msgin); } if (rp->rpr_msgout.dma_map != NULL) { bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); ubsec_dma_free(sc, &rp->rpr_msgout); } free(rp, M_DEVBUF); } krp->krp_status = err; crypto_kdone(krp); return (0); } #ifdef UBSEC_DEBUG static void ubsec_dump_pb(volatile struct ubsec_pktbuf *pb) { printf("addr 0x%x (0x%x) next 0x%x\n", pb->pb_addr, pb->pb_len, pb->pb_next); } static void ubsec_dump_ctx2(struct ubsec_ctx_keyop *c) { printf("CTX (0x%x):\n", c->ctx_len); switch (letoh16(c->ctx_op)) { case UBS_CTXOP_RNGBYPASS: case UBS_CTXOP_RNGSHA1: break; case UBS_CTXOP_MODEXP: { struct ubsec_ctx_modexp *cx = (void *)c; int i, len; printf(" Elen %u, Nlen %u\n", letoh16(cx->me_E_len), letoh16(cx->me_N_len)); len = (cx->me_N_len + 7)/8; for (i = 0; i < len; i++) printf("%s%02x", (i == 0) ? 
" N: " : ":", cx->me_N[i]); printf("\n"); break; } default: printf("unknown context: %x\n", c->ctx_op); } printf("END CTX\n"); } static void ubsec_dump_mcr(struct ubsec_mcr *mcr) { volatile struct ubsec_mcr_add *ma; int i; printf("MCR:\n"); printf(" pkts: %u, flags 0x%x\n", letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags)); ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp; for (i = 0; i < letoh16(mcr->mcr_pkts); i++) { printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i, letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen), letoh16(ma->mcr_reserved)); printf(" %d: ipkt ", i); ubsec_dump_pb(&ma->mcr_ipktbuf); printf(" %d: opkt ", i); ubsec_dump_pb(&ma->mcr_opktbuf); ma++; } printf("END MCR\n"); } #endif /* UBSEC_DEBUG */ /* * Return the number of significant bits of a big number. */ static int ubsec_ksigbits(struct crparam *cr) { u_int plen = (cr->crp_nbits + 7) / 8; int i, sig = plen * 8; u_int8_t c, *p = cr->crp_p; for (i = plen - 1; i >= 0; i--) { c = p[i]; if (c != 0) { while ((c & 0x80) == 0) { sig--; c <<= 1; } break; } sig -= 8; } return (sig); } static void ubsec_kshift_r( u_int shiftbits, u_int8_t *src, u_int srcbits, u_int8_t *dst, u_int dstbits) { u_int slen, dlen; int i, si, di, n; slen = (srcbits + 7) / 8; dlen = (dstbits + 7) / 8; for (i = 0; i < slen; i++) dst[i] = src[i]; for (i = 0; i < dlen - slen; i++) dst[slen + i] = 0; n = shiftbits / 8; if (n != 0) { si = dlen - n - 1; di = dlen - 1; while (si >= 0) dst[di--] = dst[si--]; while (di >= 0) dst[di--] = 0; } n = shiftbits % 8; if (n != 0) { for (i = dlen - 1; i > 0; i--) dst[i] = (dst[i] << n) | (dst[i - 1] >> (8 - n)); dst[0] = dst[0] << n; } } static void ubsec_kshift_l( u_int shiftbits, u_int8_t *src, u_int srcbits, u_int8_t *dst, u_int dstbits) { int slen, dlen, i, n; slen = (srcbits + 7) / 8; dlen = (dstbits + 7) / 8; n = shiftbits / 8; for (i = 0; i < slen; i++) dst[i] = src[i + n]; for (i = 0; i < dlen - slen; i++) dst[slen + i] = 0; n = shiftbits % 8; if (n != 0) { for (i = 0; i < (dlen - 1); i++) dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n)); dst[dlen - 1] = dst[dlen - 1] >> n; } } diff --git a/sys/dev/ubsec/ubsecvar.h b/sys/dev/ubsec/ubsecvar.h index 588dec981666..9c5b3ddea014 100644 --- a/sys/dev/ubsec/ubsecvar.h +++ b/sys/dev/ubsec/ubsecvar.h @@ -1,255 +1,260 @@ /* $FreeBSD$ */ /* $OpenBSD: ubsecvar.h,v 1.35 2002/09/24 18:33:26 jason Exp $ */ /* * Copyright (c) 2000 Theo de Raadt * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. * */ /* Maximum queue length */ #ifndef UBS_MAX_NQUEUE #define UBS_MAX_NQUEUE 60 #endif #define UBS_MAX_SCATTER 64 /* Maximum scatter/gather depth */ #ifndef UBS_MAX_AGGR #define UBS_MAX_AGGR 5 /* Maximum aggregation count */ #endif #define UBSEC_CARD(sid) (((sid) & 0xf0000000) >> 28) #define UBSEC_SESSION(sid) ( (sid) & 0x0fffffff) #define UBSEC_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff)) #define UBS_DEF_RTY 0xff /* PCI Retry Timeout */ #define UBS_DEF_TOUT 0xff /* PCI TRDY Timeout */ #define UBS_DEF_CACHELINE 0x01 /* Cache Line setting */ #ifdef _KERNEL struct ubsec_dma_alloc { u_int32_t dma_paddr; caddr_t dma_vaddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; bus_size_t dma_size; int dma_nseg; }; struct ubsec_q2 { SIMPLEQ_ENTRY(ubsec_q2) q_next; struct ubsec_dma_alloc q_mcr; struct ubsec_dma_alloc q_ctx; u_int q_type; }; struct ubsec_q2_rng { struct ubsec_q2 rng_q; struct ubsec_dma_alloc rng_buf; int rng_used; }; /* C = (M ^ E) mod N */ #define UBS_MODEXP_PAR_M 0 #define UBS_MODEXP_PAR_E 1 #define UBS_MODEXP_PAR_N 2 #define UBS_MODEXP_PAR_C 3 struct ubsec_q2_modexp { struct ubsec_q2 me_q; struct cryptkop * me_krp; struct ubsec_dma_alloc me_M; struct ubsec_dma_alloc me_E; struct ubsec_dma_alloc me_C; struct ubsec_dma_alloc me_epb; int me_modbits; int me_shiftbits; int me_normbits; }; #define UBS_RSAPRIV_PAR_P 0 #define UBS_RSAPRIV_PAR_Q 1 #define UBS_RSAPRIV_PAR_DP 2 #define UBS_RSAPRIV_PAR_DQ 3 #define UBS_RSAPRIV_PAR_PINV 4 #define UBS_RSAPRIV_PAR_MSGIN 5 #define UBS_RSAPRIV_PAR_MSGOUT 6 struct ubsec_q2_rsapriv { struct ubsec_q2 rpr_q; struct cryptkop * rpr_krp; struct ubsec_dma_alloc rpr_msgin; struct ubsec_dma_alloc rpr_msgout; }; #define UBSEC_RNG_BUFSIZ 16 /* measured in 32bit words */ struct ubsec_dmachunk { struct ubsec_mcr d_mcr; struct ubsec_mcr_add d_mcradd[UBS_MAX_AGGR-1]; struct ubsec_pktbuf d_sbuf[UBS_MAX_SCATTER-1]; struct ubsec_pktbuf d_dbuf[UBS_MAX_SCATTER-1]; u_int32_t d_macbuf[5]; union { struct ubsec_pktctx_long ctxl; struct ubsec_pktctx ctx; } d_ctx; }; struct ubsec_dma { SIMPLEQ_ENTRY(ubsec_dma) d_next; struct ubsec_dmachunk *d_dma; struct ubsec_dma_alloc d_alloc; }; #define UBS_FLAGS_KEY 0x01 /* has key accelerator */ #define UBS_FLAGS_LONGCTX 0x02 /* uses long ipsec ctx */ #define UBS_FLAGS_BIGKEY 0x04 /* 2048bit keys */ #define UBS_FLAGS_HWNORM 0x08 /* hardware normalization */ #define UBS_FLAGS_RNG 0x10 /* hardware rng */ struct ubsec_operand { union { struct mbuf *m; struct uio *io; } u; bus_dmamap_t map; bus_size_t mapsize; int nsegs; bus_dma_segment_t segs[UBS_MAX_SCATTER]; }; struct ubsec_q { SIMPLEQ_ENTRY(ubsec_q) q_next; int q_nstacked_mcrs; struct ubsec_q *q_stacked_mcr[UBS_MAX_AGGR-1]; struct cryptop *q_crp; struct ubsec_dma *q_dma; struct ubsec_operand q_src; struct ubsec_operand q_dst; int q_sesn; int q_flags; }; #define 
q_src_m q_src.u.m #define q_src_io q_src.u.io #define q_src_map q_src.map #define q_src_nsegs q_src.nsegs #define q_src_segs q_src.segs #define q_src_mapsize q_src.mapsize #define q_dst_m q_dst.u.m #define q_dst_io q_dst.u.io #define q_dst_map q_dst.map #define q_dst_nsegs q_dst.nsegs #define q_dst_segs q_dst.segs #define q_dst_mapsize q_dst.mapsize +struct rndstate_test; + struct ubsec_softc { device_t sc_dev; /* device backpointer */ struct mtx sc_mtx; /* per-driver lock */ struct resource *sc_irq; void *sc_ih; /* interrupt handler cookie */ bus_space_handle_t sc_sh; /* memory handle */ bus_space_tag_t sc_st; /* memory tag */ struct resource *sc_sr; /* memory resource */ bus_dma_tag_t sc_dmat; /* dma tag */ int sc_flags; /* device specific flags */ int sc_suspended; int sc_needwakeup; /* notify crypto layer */ u_int32_t sc_statmask; /* interrupt status mask */ int32_t sc_cid; /* crypto tag */ SIMPLEQ_HEAD(,ubsec_q) sc_queue; /* packet queue, mcr1 */ int sc_nqueue; /* count enqueued, mcr1 */ SIMPLEQ_HEAD(,ubsec_q) sc_qchip; /* on chip, mcr1 */ int sc_nqchip; /* count on chip, mcr1 */ SIMPLEQ_HEAD(,ubsec_q) sc_freequeue; /* list of free queue elements */ SIMPLEQ_HEAD(,ubsec_q2) sc_queue2; /* packet queue, mcr2 */ int sc_nqueue2; /* count enqueued, mcr2 */ SIMPLEQ_HEAD(,ubsec_q2) sc_qchip2; /* on chip, mcr2 */ int sc_nsessions; /* # of sessions */ struct ubsec_session *sc_sessions; /* sessions */ struct callout sc_rngto; /* rng timeout */ int sc_rnghz; /* rng poll time */ struct ubsec_q2_rng sc_rng; + struct rndtest_state *sc_rndtest; /* RNG test state */ + void (*sc_harvest)(struct rndtest_state *, + void *, u_int); struct ubsec_dma sc_dmaa[UBS_MAX_NQUEUE]; struct ubsec_q *sc_queuea[UBS_MAX_NQUEUE]; SIMPLEQ_HEAD(,ubsec_q2) sc_q2free; /* free list */ }; #define UBSEC_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define UBSEC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define UBSEC_QFLAGS_COPYOUTIV 0x1 struct ubsec_session { u_int32_t ses_used; u_int32_t ses_deskey[6]; /* 3DES key */ u_int32_t ses_hminner[5]; /* hmac inner state */ u_int32_t ses_hmouter[5]; /* hmac outer state */ u_int32_t ses_iv[2]; /* [3]DES iv */ }; #endif /* _KERNEL */ struct ubsec_stats { u_int64_t hst_ibytes; u_int64_t hst_obytes; u_int32_t hst_ipackets; u_int32_t hst_opackets; u_int32_t hst_invalid; /* invalid argument */ u_int32_t hst_badsession; /* invalid session id */ u_int32_t hst_badflags; /* flags indicate !(mbuf | uio) */ u_int32_t hst_nodesc; /* op submitted w/o descriptors */ u_int32_t hst_badalg; /* unsupported algorithm */ u_int32_t hst_nomem; u_int32_t hst_queuefull; u_int32_t hst_dmaerr; u_int32_t hst_mcrerr; u_int32_t hst_nodmafree; u_int32_t hst_lenmismatch; /* enc/auth lengths different */ u_int32_t hst_skipmismatch; /* enc part begins before auth part */ u_int32_t hst_iovmisaligned; /* iov op not aligned */ u_int32_t hst_noirq; /* IRQ for no reason */ u_int32_t hst_unaligned; /* unaligned src caused copy */ u_int32_t hst_nomap; /* bus_dmamap_create failed */ u_int32_t hst_noload; /* bus_dmamap_load_* failed */ u_int32_t hst_nombuf; /* MGET* failed */ u_int32_t hst_nomcl; /* MCLGET* failed */ u_int32_t hst_totbatch; /* ops submitted w/o interrupt */ u_int32_t hst_maxbatch; /* max ops submitted together */ u_int32_t hst_maxqueue; /* max ops queued for submission */ u_int32_t hst_maxqchip; /* max mcr1 ops out for processing */ u_int32_t hst_mcr1full; /* MCR1 too busy to take ops */ u_int32_t hst_rng; /* RNG requests */ u_int32_t hst_modexp; /* MOD EXP requests */ u_int32_t hst_modexpcrt; /* MOD EXP CRT requests */ };
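
/*
 * Note on the key-parameter helpers in ubsec.c (explanatory only):
 * crparam big numbers are little-endian byte arrays.  ubsec_ksigbits()
 * returns the index of the highest set bit plus one, e.g. for the
 * two-byte value { 0x00, 0x03 } (0x0300 = 768) it returns 10.
 * ubsec_kshift_r() zero-extends such an array to dstbits and shifts it
 * towards the most significant end; with shiftbits = normbits - nbits
 * this places the top significant bit at position normbits - 1, which
 * is the "software normalization" required by pre-UBS_FLAGS_HWNORM
 * parts.  ubsec_kshift_l() performs the inverse shift.  Mirroring the
 * modexp setup code:
 *
 *	nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]);
 *	ubsec_kshift_r(normbits - nbits,
 *	    krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits, dst, normbits);
 */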