Index: head/sys/amd64/conf/GENERIC
===================================================================
--- head/sys/amd64/conf/GENERIC	(revision 159569)
+++ head/sys/amd64/conf/GENERIC	(revision 159570)
@@ -1,287 +1,288 @@
#
# GENERIC -- Generic kernel configuration file for FreeBSD/amd64
#
# For more information on this file, please read the handbook section on
# Kernel Configuration Files:
#
#    http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html
#
# The handbook is also available locally in /usr/share/doc/handbook
# if you've installed the doc distribution, otherwise always see the
# FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the
# latest information.
#
# An exhaustive list of options and more detailed explanations of the
# device lines is also present in the ../../conf/NOTES and NOTES files.
# If you are in doubt as to the purpose or necessity of a line, check first
# in NOTES.
#
# $FreeBSD$

cpu		HAMMER
ident		GENERIC

# To statically compile in device wiring instead of /boot/device.hints
#hints		"GENERIC.hints"		# Default places to look for devices.

makeoptions	DEBUG=-g		# Build kernel with gdb(1) debug symbols

#options 	SCHED_ULE		# ULE scheduler
options 	SCHED_4BSD		# 4BSD scheduler
+#options 	SCHED_CORE		# CORE scheduler
options 	PREEMPTION		# Enable kernel thread preemption
options 	INET			# InterNETworking
options 	INET6			# IPv6 communications protocols
options 	FFS			# Berkeley Fast Filesystem
options 	SOFTUPDATES		# Enable FFS soft updates support
options 	UFS_ACL			# Support for access control lists
options 	UFS_DIRHASH		# Improve performance on big directories
options 	MD_ROOT			# MD is a potential root device
options 	NFSCLIENT		# Network Filesystem Client
options 	NFSSERVER		# Network Filesystem Server
options 	NFS_ROOT		# NFS usable as /, requires NFSCLIENT
options 	NTFS			# NT File System
options 	MSDOSFS			# MSDOS Filesystem
options 	CD9660			# ISO 9660 Filesystem
options 	PROCFS			# Process filesystem (requires PSEUDOFS)
options 	PSEUDOFS		# Pseudo-filesystem framework
options 	GEOM_GPT		# GUID Partition Tables.
options 	COMPAT_43		# Needed by COMPAT_LINUX32
options 	COMPAT_43TTY		# BSD 4.3 TTY compat [KEEP THIS!]
options 	COMPAT_IA32		# Compatible with i386 binaries
options 	COMPAT_FREEBSD4		# Compatible with FreeBSD4
options 	COMPAT_FREEBSD5		# Compatible with FreeBSD5
options 	SCSI_DELAY=5000		# Delay (in ms) before probing SCSI
options 	KTRACE			# ktrace(1) support
options 	SYSVSHM			# SYSV-style shared memory
options 	SYSVMSG			# SYSV-style message queues
options 	SYSVSEM			# SYSV-style semaphores
options 	_KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions
options 	KBD_INSTALL_CDEV	# install a CDEV entry in /dev
options 	ADAPTIVE_GIANT		# Giant mutex is adaptive.
options 	STOP_NMI		# Stop CPUS using NMI instead of IPI

# Debugging for use in -current
options 	KDB			# Enable kernel debugger support.
options 	DDB			# Support DDB.
options 	GDB			# Support remote GDB.
options 	INVARIANTS		# Enable calls of extra sanity checking
options 	INVARIANT_SUPPORT	# Extra sanity checks of internal structures, required by INVARIANTS
options 	WITNESS			# Enable checks to detect deadlocks and cycles
options 	WITNESS_SKIPSPIN	# Don't run witness on spinlocks for speed

# Make an SMP-capable kernel by default
options 	SMP			# Symmetric MultiProcessor Kernel

# Linux 32-bit ABI support
options 	COMPAT_LINUX32		# Compatible with i386 linux binaries
options 	LINPROCFS		# Cannot be a module yet.
options 	LINSYSFS

# Bus support.
device acpi device pci # Floppy drives device fdc # ATA and ATAPI devices device ata device atadisk # ATA disk drives device ataraid # ATA RAID drives device atapicd # ATAPI CDROM drives device atapifd # ATAPI floppy drives device atapist # ATAPI tape drives options ATA_STATIC_ID # Static device numbering # SCSI Controllers device ahc # AHA2940 and onboard AIC7xxx devices options AHC_REG_PRETTY_PRINT # Print register bitfields in debug # output. Adds ~128k to driver. device ahd # AHA39320/29320 and onboard AIC79xx devices options AHD_REG_PRETTY_PRINT # Print register bitfields in debug # output. Adds ~215k to driver. device amd # AMD 53C974 (Tekram DC-390(T)) device isp # Qlogic family #device ispfw # Firmware for QLogic HBAs- normally a module device mpt # LSI-Logic MPT-Fusion #device ncr # NCR/Symbios Logic device sym # NCR/Symbios Logic (newer chipsets + those of `ncr') device trm # Tekram DC395U/UW/F DC315U adapters device adv # Advansys SCSI adapters device adw # Advansys wide SCSI adapters device aic # Adaptec 15[012]x SCSI adapters, AIC-6[23]60. device bt # Buslogic/Mylex MultiMaster SCSI adapters # SCSI peripherals device scbus # SCSI bus (required for SCSI) device ch # SCSI media changers device da # Direct Access (disks) device sa # Sequential Access (tape etc) device cd # CD device pass # Passthrough device (direct SCSI access) device ses # SCSI Environmental Services (and SAF-TE) # RAID controllers interfaced to the SCSI subsystem device amr # AMI MegaRAID device arcmsr # Areca SATA II RAID device ciss # Compaq Smart RAID 5* device dpt # DPT Smartcache III, IV - See NOTES for options device hptmv # Highpoint RocketRAID 182x device rr232x # Highpoint RocketRAID 232x device iir # Intel Integrated RAID device ips # IBM (Adaptec) ServeRAID device mly # Mylex AcceleRAID/eXtremeRAID device twa # 3ware 9000 series PATA/SATA RAID # RAID controllers device aac # Adaptec FSA RAID device aacp # SCSI passthrough for aac (requires CAM) device ida # Compaq Smart RAID device mfi # LSI MegaRAID SAS device mlx # Mylex DAC960 family #XXX pointer/int warnings #device pst # Promise Supertrak SX6000 device twe # 3ware ATA RAID # atkbdc0 controls both the keyboard and the PS/2 mouse device atkbdc # AT keyboard controller device atkbd # AT keyboard device psm # PS/2 mouse device kbdmux # keyboard multiplexer device vga # VGA video card driver device splash # Splash screen and screen saver support # syscons is the default console driver, resembling an SCO console device sc device agp # support several AGP chipsets # PCCARD (PCMCIA) support # PCMCIA and cardbus bridge support device cbb # cardbus (yenta) bridge device pccard # PC Card (16-bit) bus device cardbus # CardBus (32-bit) bus # Serial (COM) ports device sio # 8250, 16[45]50 based serial ports device uart # Generic UART driver # Parallel port device ppc device ppbus # Parallel port bus (required) device lpt # Printer device plip # TCP/IP over parallel device ppi # Parallel port interface device #device vpo # Requires scbus and da # If you've got a "dumb" serial or parallel PCI card that is # supported by the puc(4) glue driver, uncomment the following # line to enable it (connects to sio, uart and/or ppc drivers): #device puc # PCI Ethernet NICs. 
device de # DEC/Intel DC21x4x (``Tulip'') device em # Intel PRO/1000 adapter Gigabit Ethernet Card device ixgb # Intel PRO/10GbE Ethernet Card device le # AMD Am7900 LANCE and Am79C9xx PCnet device txp # 3Com 3cR990 (``Typhoon'') device vx # 3Com 3c590, 3c595 (``Vortex'') # PCI Ethernet NICs that use the common MII bus controller code. # NOTE: Be sure to keep the 'device miibus' line in order to use these NICs! device miibus # MII bus support device bce # Broadcom BCM5706/BCM5708 Gigabit Ethernet device bfe # Broadcom BCM440x 10/100 Ethernet device bge # Broadcom BCM570xx Gigabit Ethernet device dc # DEC/Intel 21143 and various workalikes device fxp # Intel EtherExpress PRO/100B (82557, 82558) device lge # Level 1 LXT1001 gigabit Ethernet device nge # NatSemi DP83820 gigabit Ethernet device nve # nVidia nForce MCP on-board Ethernet Networking device pcn # AMD Am79C97x PCI 10/100 (precedence over 'le') device re # RealTek 8139C+/8169/8169S/8110S device rl # RealTek 8129/8139 device sf # Adaptec AIC-6915 (``Starfire'') device sis # Silicon Integrated Systems SiS 900/SiS 7016 device sk # SysKonnect SK-984x & SK-982x gigabit Ethernet device ste # Sundance ST201 (D-Link DFE-550TX) device ti # Alteon Networks Tigon I/II gigabit Ethernet device tl # Texas Instruments ThunderLAN device tx # SMC EtherPower II (83c170 ``EPIC'') device vge # VIA VT612x gigabit Ethernet device vr # VIA Rhine, Rhine II device wb # Winbond W89C840F device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'') # ISA Ethernet NICs. pccard NICs included. device cs # Crystal Semiconductor CS89x0 NIC # 'device ed' requires 'device miibus' device ed # NE[12]000, SMC Ultra, 3c503, DS8390 cards device ex # Intel EtherExpress Pro/10 and Pro/10+ device ep # Etherlink III based cards device fe # Fujitsu MB8696x based cards device sn # SMC's 9000 series of Ethernet chips device xe # Xircom pccard Ethernet # Wireless NIC cards device wlan # 802.11 support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support device wlan_tkip # 802.11 TKIP support device an # Aironet 4500/4800 802.11 wireless NICs. device ath # Atheros pci/cardbus NIC's device ath_hal # Atheros HAL (Hardware Access Layer) device ath_rate_sample # SampleRate tx rate control for ath device awi # BayStack 660 and others device ral # Ralink Technology RT2500 wireless NICs. device wi # WaveLAN/Intersil/Symbol 802.11 wireless NICs. # Pseudo devices. device loop # Network loopback device random # Entropy device device ether # Ethernet support device sl # Kernel SLIP device ppp # Kernel PPP device tun # Packet tunnel. device pty # Pseudo-ttys (telnet etc) device md # Memory "disks" device gif # IPv6 and IPv4 tunneling device faith # IPv6-to-IPv4 relaying (translation) # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. 
device		bpf		# Berkeley packet filter

# USB support
device		uhci		# UHCI PCI->USB interface
device		ohci		# OHCI PCI->USB interface
device		ehci		# EHCI PCI->USB interface (USB 2.0)
device		usb		# USB Bus (required)
#device		udbp		# USB Double Bulk Pipe devices
device		ugen		# Generic
device		uhid		# "Human Interface Devices"
device		ukbd		# Keyboard
device		ulpt		# Printer
device		umass		# Disks/Mass storage - Requires scbus and da
device		ums		# Mouse
device		ural		# Ralink Technology RT2500USB wireless NICs
device		urio		# Diamond Rio 500 MP3 player
device		uscanner	# Scanners
# USB Ethernet, requires miibus
device		aue		# ADMtek USB Ethernet
device		axe		# ASIX Electronics USB Ethernet
device		cdce		# Generic USB over Ethernet
device		cue		# CATC USB Ethernet
device		kue		# Kawasaki LSI USB Ethernet
device		rue		# RealTek RTL8150 USB Ethernet

# FireWire support
device		firewire	# FireWire bus code
device		sbp		# SCSI over FireWire (Requires scbus and da)
device		fwe		# Ethernet over FireWire (non-standard!)

Index: head/sys/conf/NOTES
===================================================================
--- head/sys/conf/NOTES	(revision 159569)
+++ head/sys/conf/NOTES	(revision 159570)
@@ -1,2624 +1,2625 @@
# $FreeBSD$
#
# NOTES -- Lines that can be cut/pasted into kernel and hints configs.
#
# Lines that begin with 'device', 'options', 'machine', 'ident', 'maxusers',
# 'makeoptions', 'hints', etc. go into the kernel configuration that you
# run config(8) with.
#
# Lines that begin with 'hint.' are NOT for config(8), they go into your
# hints file.  See /boot/device.hints and/or the 'hints' config(8) directive.
#
# Please use ``make LINT'' to create an old-style LINT file if you want to
# do kernel test-builds.
#
# This file contains machine independent kernel configuration notes.  For
# machine dependent notes, look in /sys/<arch>/conf/NOTES.
#

#
# NOTES conventions and style guide:
#
# Large block comments should begin and end with a line containing only a
# comment character.
#
# To describe a particular object, a block comment (if it exists) should
# come first.  Next should come device, options, and hints lines in that
# order.  All device and option lines must be described by a comment that
# doesn't just expand the device or option name.  Use only a concise
# comment on the same line if possible.  Very detailed descriptions of
# devices and subsystems belong in man pages.
#
# A space followed by a tab separates 'options' from an option name.  Two
# spaces followed by a tab separate 'device' from a device name.  Comments
# after an option or device should use one space after the comment character.
# To comment out a negative option that disables code and thus should not be
# enabled for LINT builds, precede 'options' with "#!".
#

#
# This is the ``identification'' of the kernel.  Usually this should
# be the same as the name of your kernel.
#
ident		LINT

#
# The `maxusers' parameter controls the static sizing of a number of
# internal system tables by a formula defined in subr_param.c.
# Omitting this parameter or setting it to 0 will cause the system to
# auto-size based on physical memory.
#
maxusers	10

#
# The `makeoptions' parameter allows variables to be passed to the
# generated Makefile in the build area.
#
# CONF_CFLAGS gives some extra compiler flags that are added to ${CFLAGS}
# after most other flags.  Here we use it to inhibit use of non-optimal
# gcc builtin functions (e.g., memcmp).
#
# DEBUG happens to be magic.
# The following is equivalent to 'config -g KERNELNAME' and creates # 'kernel.debug' compiled with -g debugging as well as a normal # 'kernel'. Use 'make install.debug' to install the debug kernel # but that isn't normally necessary as the debug symbols are not loaded # by the kernel and are not useful there anyway. # # KERNEL can be overridden so that you can change the default name of your # kernel. # # MODULES_OVERRIDE can be used to limit modules built to a specific list. # makeoptions CONF_CFLAGS=-fno-builtin #Don't allow use of memcmp, etc. #makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols #makeoptions KERNEL=foo #Build kernel "foo" and install "/foo" # Only build ext2fs module plus those parts of the sound system I need. #makeoptions MODULES_OVERRIDE="ext2fs sound/sound sound/driver/maestro3" makeoptions DESTDIR=/tmp # # FreeBSD processes are subject to certain limits to their consumption # of system resources. See getrlimit(2) for more details. Each # resource limit has two values, a "soft" limit and a "hard" limit. # The soft limits can be modified during normal system operation, but # the hard limits are set at boot time. Their default values are # in sys//include/vmparam.h. There are two ways to change them: # # 1. Set the values at kernel build time. The options below are one # way to allow that limit to grow to 1GB. They can be increased # further by changing the parameters: # # 2. In /boot/loader.conf, set the tunables kern.maxswzone, # kern.maxbcache, kern.maxtsiz, kern.dfldsiz, kern.maxdsiz, # kern.dflssiz, kern.maxssiz and kern.sgrowsiz. # # The options in /boot/loader.conf override anything in the kernel # configuration file. See the function init_param1 in # sys/kern/subr_param.c for more details. # options MAXDSIZ=(1024UL*1024*1024) options MAXSSIZ=(128UL*1024*1024) options DFLDSIZ=(1024UL*1024*1024) # # BLKDEV_IOSIZE sets the default block size used in user block # device I/O. Note that this value will be overridden by the label # when specifying a block device from a label with a non-0 # partition blocksize. The default is PAGE_SIZE. # options BLKDEV_IOSIZE=8192 # Options for the VM subsystem # Deprecated options supported for backwards compatibility #options PQ_NOOPT # No coloring # This allows you to actually store this configuration file into # the kernel binary itself, where it may be later read by saying: # strings -n 3 /boot/kernel/kernel | sed -n 's/^___//p' > MYKERNEL # options INCLUDE_CONFIG_FILE # Include this file in kernel options GEOM_AES # Don't use, use GEOM_BDE options GEOM_APPLE # Apple partitioning options GEOM_BDE # Disk encryption. options GEOM_BSD # BSD disklabels options GEOM_CONCAT # Disk concatenation. options GEOM_ELI # Disk encryption. options GEOM_FOX # Redundant path mitigation options GEOM_GATE # Userland services. options GEOM_GPT # GPT partitioning options GEOM_LABEL # Providers labelization. options GEOM_MBR # DOS/MBR partitioning options GEOM_MIRROR # Disk mirroring. options GEOM_NOP # Test class. options GEOM_PC98 # NEC PC9800 partitioning options GEOM_RAID3 # RAID3 functionality. options GEOM_SHSEC # Shared secret. options GEOM_STRIPE # Disk striping. options GEOM_SUNLABEL # Sun/Solaris partitioning options GEOM_UZIP # Read-only compressed disks options GEOM_VOL # Volume names from UFS superblock options GEOM_ZERO # Peformance testing helper. 
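#
# A rough usage sketch (assuming GEOM_LABEL, or the geom_label.ko module, and
# a spare provider such as /dev/da1 -- the names here are illustrative only):
# a provider can be labelled from userland with glabel(8) and then mounted by
# name, e.g.
#
#	glabel label -v data /dev/da1
#	mount /dev/label/data /mnt
#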
# # The root device and filesystem type can be compiled in; # this provides a fallback option if the root device cannot # be correctly guessed by the bootstrap code, or an override if # the RB_DFLTROOT flag (-r) is specified when booting the kernel. # options ROOTDEVNAME=\"ufs:da0s2e\" ##################################################################### # Scheduler options: # # Specifying one of SCHED_4BSD or SCHED_ULE is mandatory. These options # select which scheduler is compiled in. # # SCHED_4BSD is the historical, proven, BSD scheduler. It has a global run # queue and no cpu affinity which makes it suboptimal for SMP. It has very # good interactivity and priority selection. # # SCHED_ULE is a new scheduler that has been designed for SMP and has some # advantages for UP as well. It is intended to replace the 4BSD scheduler # over time. # options SCHED_4BSD +#options SCHED_CORE #options SCHED_ULE ##################################################################### # SMP OPTIONS: # # SMP enables building of a Symmetric MultiProcessor Kernel. # Mandatory: options SMP # Symmetric MultiProcessor Kernel # ADAPTIVE_MUTEXES changes the behavior of blocking mutexes to spin # if the thread that currently owns the mutex is executing on another # CPU. This behaviour is enabled by default, so this option can be used # to disable it. options NO_ADAPTIVE_MUTEXES # ADAPTIVE_GIANT causes the Giant lock to also be made adaptive when # running without NO_ADAPTIVE_MUTEXES. Normally, because Giant is assumed # to be held for extended periods, contention on Giant will cause a thread # to sleep rather than spinning. options ADAPTIVE_GIANT # MUTEX_NOINLINE forces mutex operations to call functions to perform each # operation rather than inlining the simple cases. This can be used to # shrink the size of the kernel text segment. Note that this behavior is # already implied by the INVARIANT_SUPPORT, INVARIANTS, KTR, MUTEX_PROFILING, # and WITNESS options. options MUTEX_NOINLINE # MUTEX_WAKE_ALL changes the mutex unlock algorithm to wake all waiters # when a contested mutex is released rather than just awaking the highest # priority waiter. options MUTEX_WAKE_ALL # RWLOCK_NOINLINE forces rwlock operations to call functions to perform each # operation rather than inlining the simple cases. This can be used to # shrink the size of the kernel text segment. Note that this behavior is # already implied by the INVARIANT_SUPPORT, INVARIANTS, KTR, MUTEX_PROFILING, # and WITNESS options. options RWLOCK_NOINLINE # SMP Debugging Options: # # PREEMPTION allows the threads that are in the kernel to be preempted # by higher priority threads. It helps with interactivity and # allows interrupt threads to run sooner rather than waiting. # WARNING! Only tested on amd64 and i386. # FULL_PREEMPTION instructs the kernel to preempt non-realtime kernel # threads. Its sole use is to expose race conditions and other # bugs during development. Enabling this option will reduce # performance and increase the frequency of kernel panics by # design. If you aren't sure that you need it then you don't. # Relies on the PREEMPTION option. DON'T TURN THIS ON. # MUTEX_DEBUG enables various extra assertions in the mutex code. # SLEEPQUEUE_PROFILING enables rudimentary profiling of the hash table # used to hold active sleep queues. # TURNSTILE_PROFILING enables rudimentary profiling of the hash table # used to hold active lock queues. # WITNESS enables the witness code which detects deadlocks and cycles # during locking operations. 
# WITNESS_KDB causes the witness code to drop into the kernel debugger if # a lock hierarchy violation occurs or if locks are held when going to # sleep. # WITNESS_SKIPSPIN disables the witness checks on spin mutexes. options PREEMPTION options FULL_PREEMPTION options MUTEX_DEBUG options WITNESS options WITNESS_KDB options WITNESS_SKIPSPIN # MUTEX_PROFILING - Profiling mutual exclusion locks (mutexes). See # MUTEX_PROFILING(9) for details. options MUTEX_PROFILING # Set the number of buffers and the hash size. The hash size MUST be larger # than the number of buffers. Hash size should be prime. options MPROF_BUFFERS="1536" options MPROF_HASH_SIZE="1543" # Profiling for internal hash tables. options SLEEPQUEUE_PROFILING options TURNSTILE_PROFILING ##################################################################### # COMPATIBILITY OPTIONS # # Implement system calls compatible with 4.3BSD and older versions of # FreeBSD. You probably do NOT want to remove this as much current code # still relies on the 4.3 emulation. Note that some architectures that # are supported by FreeBSD do not include support for certain important # aspects of this compatibility option, namely those related to the # signal delivery mechanism. # options COMPAT_43 # Old tty interface. options COMPAT_43TTY # Enable FreeBSD4 compatibility syscalls options COMPAT_FREEBSD4 # Enable FreeBSD5 compatibility syscalls options COMPAT_FREEBSD5 # # These three options provide support for System V Interface # Definition-style interprocess communication, in the form of shared # memory, semaphores, and message queues, respectively. # options SYSVSHM options SYSVSEM options SYSVMSG ##################################################################### # DEBUGGING OPTIONS # # Compile with kernel debugger related code. # options KDB # # Print a stack trace of the current thread on the console for a panic. # options KDB_TRACE # # Don't enter the debugger for a panic. Intended for unattended operation # where you may want to enter the debugger from the console, but still want # the machine to recover from a panic. # options KDB_UNATTENDED # # Enable the ddb debugger backend. # options DDB # # Print the numerical value of symbols in addition to the symbolic # representation. # options DDB_NUMSYM # # Enable the remote gdb debugger backend. # options GDB # # SYSCTL_DEBUG enables a 'sysctl' debug tree that can be used to dump the # contents of the registered sysctl nodes on the console. It is disabled by # default because it generates excessively verbose consol output that can # interfere with serial console operation. # options SYSCTL_DEBUG # # DEBUG_MEMGUARD builds and enables memguard(9), a replacement allocator # for the kernel used to detect modify-after-free scenarios. See the # memguard(9) man page for more information on usage. # options DEBUG_MEMGUARD # # DEBUG_REDZONE enables buffer underflows and buffer overflows detection for # malloc(9). # options DEBUG_REDZONE # # KTRACE enables the system-call tracing facility ktrace(2). To be more # SMP-friendly, KTRACE uses a worker thread to process most trace events # asynchronously to the thread generating the event. This requires a # pre-allocated store of objects representing trace events. The # KTRACE_REQUEST_POOL option specifies the initial size of this store. # The size of the pool can be adjusted both at boottime and runtime via # the kern.ktrace_request_pool tunable and sysctl. 
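#
# As a sketch only (the tunable name is taken from the text above): the pool
# could also be resized without a rebuild, e.g. from /boot/loader.conf:
#
#	kern.ktrace_request_pool="256"
#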
# options KTRACE #kernel tracing options KTRACE_REQUEST_POOL=101 # # KTR is a kernel tracing mechanism imported from BSD/OS. Currently # it has no userland interface aside from a few sysctl's. It is # enabled with the KTR option. KTR_ENTRIES defines the number of # entries in the circular trace buffer; it must be a power of two. # KTR_COMPILE defines the mask of events to compile into the kernel as # defined by the KTR_* constants in . KTR_MASK defines the # initial value of the ktr_mask variable which determines at runtime # what events to trace. KTR_CPUMASK determines which CPU's log # events, with bit X corresponding to cpu X. KTR_VERBOSE enables # dumping of KTR events to the console by default. This functionality # can be toggled via the debug.ktr_verbose sysctl and defaults to off # if KTR_VERBOSE is not defined. # options KTR options KTR_ENTRIES=1024 options KTR_COMPILE=(KTR_INTR|KTR_PROC) options KTR_MASK=KTR_INTR options KTR_CPUMASK=0x3 options KTR_VERBOSE # # ALQ(9) is a facilty for the asynchronous queuing of records from the kernel # to a vnode, and is employed by services such as KTR(4) to produce trace # files based on a kernel event stream. Records are written asynchronously # in a worker thread. # options ALQ options KTR_ALQ # # The INVARIANTS option is used in a number of source files to enable # extra sanity checking of internal structures. This support is not # enabled by default because of the extra time it would take to check # for these conditions, which can only occur as a result of # programming errors. # options INVARIANTS # # The INVARIANT_SUPPORT option makes us compile in support for # verifying some of the internal structures. It is a prerequisite for # 'INVARIANTS', as enabling 'INVARIANTS' will make these functions be # called. The intent is that you can set 'INVARIANTS' for single # source files (by changing the source file or specifying it on the # command line) if you have 'INVARIANT_SUPPORT' enabled. Also, if you # wish to build a kernel module with 'INVARIANTS', then adding # 'INVARIANT_SUPPORT' to your kernel will provide all the necessary # infrastructure without the added overhead. # options INVARIANT_SUPPORT # # The DIAGNOSTIC option is used to enable extra debugging information # from some parts of the kernel. As this makes everything more noisy, # it is disabled by default. # options DIAGNOSTIC # # REGRESSION causes optional kernel interfaces necessary only for regression # testing to be enabled. These interfaces may constitute security risks # when enabled, as they permit processes to easily modify aspects of the # run-time environment to reproduce unlikely or unusual (possibly normally # impossible) scenarios. # options REGRESSION # # RESTARTABLE_PANICS allows one to continue from a panic as if it were # a call to the debugger to continue from a panic as instead. It is only # useful if a kernel debugger is present. To restart from a panic, reset # the panicstr variable to NULL and continue execution. This option is # for development use only and should NOT be used in production systems # to "workaround" a panic. # #options RESTARTABLE_PANICS # # This option let some drivers co-exist that can't co-exist in a running # system. This is used to be able to compile all kernel code in one go for # quality assurance purposes (like this file, which the option takes it name # from.) 
# options COMPILING_LINT ##################################################################### # PERFORMANCE MONITORING OPTIONS # # The hwpmc driver that allows the use of in-CPU performance monitoring # counters for performance monitoring. The base kernel needs to configured # with the 'options' line, while the hwpmc device can be either compiled # in or loaded as a loadable kernel module. # # Additional configuration options may be required on specific architectures, # please see hwpmc(4). device hwpmc # Driver (also a loadable module) options HWPMC_HOOKS # Other necessary kernel hooks ##################################################################### # NETWORKING OPTIONS # # Protocol families: # Only the INET (Internet) family is officially supported in FreeBSD. # options INET #Internet communications protocols options INET6 #IPv6 communications protocols options IPSEC #IP security options IPSEC_ESP #IP security (crypto; define w/ IPSEC) options IPSEC_DEBUG #debug for IP security # # Set IPSEC_FILTERGIF to force packets coming through a gif tunnel # to be processed by any configured packet filtering (ipfw, ipf). # The default is that packets coming from a tunnel are _not_ processed; # they are assumed trusted. # # IPSEC history is preserved for such packets, and can be filtered # using ipfw(8)'s 'ipsec' keyword, when this option is enabled. # #options IPSEC_FILTERGIF #filter ipsec packets from a tunnel #options FAST_IPSEC #new IPsec (cannot define w/ IPSEC) options IPX #IPX/SPX communications protocols options IPXIP #IPX in IP encapsulation (not available) options NCP #NetWare Core protocol options NETATALK #Appletalk communications protocols options NETATALKDEBUG #Appletalk debugging # # SMB/CIFS requester # NETSMB enables support for SMB protocol, it requires LIBMCHAIN and LIBICONV # options. options NETSMB #SMB/CIFS requester # mchain library. It can be either loaded as KLD or compiled into kernel options LIBMCHAIN # libalias library, performing NAT options LIBALIAS # altq(9). Enable the base part of the hooks with the ALTQ option. # Individual disciplines must be built into the base system and can not be # loaded as modules at this point. ALTQ requires a stable TSC so if yours is # broken or changes with CPU throttling then you must also have the ALTQ_NOPCC # option. options ALTQ options ALTQ_CBQ # Class Bases Queueing options ALTQ_RED # Random Early Detection options ALTQ_RIO # RED In/Out options ALTQ_HFSC # Hierarchical Packet Scheduler options ALTQ_CDNR # Traffic conditioner options ALTQ_PRIQ # Priority Queueing options ALTQ_NOPCC # Required if the TSC is unusable options ALTQ_DEBUG # netgraph(4). Enable the base netgraph code with the NETGRAPH option. # Individual node types can be enabled with the corresponding option # listed below; however, this is not strictly necessary as netgraph # will automatically load the corresponding KLD module if the node type # is not already compiled into the kernel. Each type below has a # corresponding man page, e.g., ng_async(8). 
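#
# A small runtime sketch (assuming ngctl(8) from the base system is
# available): the resulting graph can be inspected from userland, e.g.
#
#	ngctl list		# show the nodes currently in the graph
#	ngctl types		# show the node types that are loaded
#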
options NETGRAPH # netgraph(4) system options NETGRAPH_DEBUG # enable extra debugging, this # affects netgraph(4) and nodes # Node types options NETGRAPH_ASYNC options NETGRAPH_ATMLLC options NETGRAPH_ATM_ATMPIF options NETGRAPH_BLUETOOTH # ng_bluetooth(4) options NETGRAPH_BLUETOOTH_BT3C # ng_bt3c(4) options NETGRAPH_BLUETOOTH_H4 # ng_h4(4) options NETGRAPH_BLUETOOTH_HCI # ng_hci(4) options NETGRAPH_BLUETOOTH_L2CAP # ng_l2cap(4) options NETGRAPH_BLUETOOTH_SOCKET # ng_btsocket(4) options NETGRAPH_BLUETOOTH_UBT # ng_ubt(4) options NETGRAPH_BLUETOOTH_UBTBCMFW # ubtbcmfw(4) options NETGRAPH_BPF options NETGRAPH_BRIDGE options NETGRAPH_CISCO options NETGRAPH_DEVICE options NETGRAPH_ECHO options NETGRAPH_EIFACE options NETGRAPH_ETHER options NETGRAPH_FEC options NETGRAPH_FRAME_RELAY options NETGRAPH_GIF options NETGRAPH_GIF_DEMUX options NETGRAPH_HOLE options NETGRAPH_IFACE options NETGRAPH_IP_INPUT options NETGRAPH_IPFW options NETGRAPH_KSOCKET options NETGRAPH_L2TP options NETGRAPH_LMI # MPPC compression requires proprietary files (not included) #options NETGRAPH_MPPC_COMPRESSION options NETGRAPH_MPPC_ENCRYPTION options NETGRAPH_NETFLOW options NETGRAPH_NAT options NETGRAPH_ONE2MANY options NETGRAPH_PPP options NETGRAPH_PPPOE options NETGRAPH_PPTPGRE options NETGRAPH_RFC1490 options NETGRAPH_SOCKET options NETGRAPH_SPLIT options NETGRAPH_SPPP options NETGRAPH_TCPMSS options NETGRAPH_TEE options NETGRAPH_TTY options NETGRAPH_UI options NETGRAPH_VJC # NgATM - Netgraph ATM options NGATM_ATM options NGATM_ATMBASE options NGATM_SSCOP options NGATM_SSCFU options NGATM_UNI options NGATM_CCATM device mn # Munich32x/Falc54 Nx64kbit/sec cards. # # Network interfaces: # The `loop' device is MANDATORY when networking is enabled. # The `ether' device provides generic code to handle # Ethernets; it is MANDATORY when an Ethernet device driver is # configured or token-ring is enabled. # The `vlan' device implements the VLAN tagging of Ethernet frames # according to IEEE 802.1Q. It requires `device miibus'. # The `wlan' device provides generic code to support 802.11 # drivers, including host AP mode; it is MANDATORY for the wi, # ath, and awi drivers and will eventually be required by all 802.11 drivers. # The `wlan_wep', `wlan_tkip', and `wlan_ccmp' devices provide # support for WEP, TKIP, and AES-CCMP crypto protocols optionally # used with 802.11 devices that depend on the `wlan' module. # The `wlan_xauth' device provides support for external (i.e. user-mode) # authenticators for use with 802.11 drivers that use the `wlan' # module and support 802.1x and/or WPA security protocols. # The `wlan_acl' device provides a MAC-based access control mechanism # for use with 802.11 drivers operating in ap mode and using the # `wlan' module. # The `fddi' device provides generic code to support FDDI. # The `arcnet' device provides generic code to support Arcnet. # The `sppp' device serves a similar role for certain types # of synchronous PPP links (like `cx', `ar'). # The `sl' device implements the Serial Line IP (SLIP) service. # The `ppp' device implements the Point-to-Point Protocol. # The `bpf' device enables the Berkeley Packet Filter. Be # aware of the legal and administrative consequences of enabling this # option. The number of devices determines the maximum number of # simultaneous BPF clients programs runnable. DHCP requires bpf. # The `disc' device implements a minimal network interface, # which throws away all packets sent and never receives any. It is # included for testing purposes. 
This shows up as the `ds' interface. # The `tap' device is a pty-like virtual Ethernet interface # The `tun' device implements (user-)ppp and nos-tun # The `gif' device implements IPv6 over IP4 tunneling, # IPv4 over IPv6 tunneling, IPv4 over IPv4 tunneling and # IPv6 over IPv6 tunneling. # The `gre' device implements two types of IP4 over IP4 tunneling: # GRE and MOBILE, as specified in the RFC1701 and RFC2004. # The XBONEHACK option allows the same pair of addresses to be configured on # multiple gif interfaces. # The `faith' device captures packets sent to it and diverts them # to the IPv4/IPv6 translation daemon. # The `stf' device implements 6to4 encapsulation. # The `ef' device provides support for multiple ethernet frame types # specified via ETHER_* options. See ef(4) for details. # # The pf packet filter consists of three devices: # The `pf' device provides /dev/pf and the firewall code itself. # The `pflog' device provides the pflog0 interface which logs packets. # The `pfsync' device provides the pfsync0 interface used for # synchronization of firewall state tables (over the net). # # The PPP_BSDCOMP option enables support for compress(1) style entire # packet compression, the PPP_DEFLATE is for zlib/gzip style compression. # PPP_FILTER enables code for filtering the ppp data stream and selecting # events for resetting the demand dial activity timer - requires bpf. # See pppd(8) for more details. # device ether #Generic Ethernet device vlan #VLAN support (needs miibus) device wlan #802.11 support device wlan_wep #802.11 WEP support device wlan_ccmp #802.11 CCMP support device wlan_tkip #802.11 TKIP support device wlan_xauth #802.11 external authenticator support device wlan_acl #802.11 MAC ACL support device token #Generic TokenRing device fddi #Generic FDDI device arcnet #Generic Arcnet device sppp #Generic Synchronous PPP device loop #Network loopback device device bpf #Berkeley packet filter device disc #Discard device (ds0, ds1, etc) device tap #Virtual Ethernet driver device tun #Tunnel driver (ppp(8), nos-tun(8)) device sl #Serial Line IP device gre #IP over IP tunneling device if_bridge #Bridge interface device pf #PF OpenBSD packet-filter firewall device pflog #logging support interface for PF device pfsync #synchronization interface for PF device carp #Common Address Redundancy Protocol device ppp #Point-to-point protocol options PPP_BSDCOMP #PPP BSD-compress support options PPP_DEFLATE #PPP zlib/deflate/gzip support options PPP_FILTER #enable bpf filtering (needs bpf) device ef # Multiple ethernet frames support options ETHER_II # enable Ethernet_II frame options ETHER_8023 # enable Ethernet_802.3 (Novell) frame options ETHER_8022 # enable Ethernet_802.2 frame options ETHER_SNAP # enable Ethernet_802.2/SNAP frame # for IPv6 device gif #IPv6 and IPv4 tunneling options XBONEHACK device faith #for IPv6 and IPv4 translation device stf #6to4 IPv6 over IPv4 encapsulation # # Internet family options: # # MROUTING enables the kernel multicast packet forwarder, which works # with mrouted(8). # # PIM enables Protocol Independent Multicast in the kernel. # Requires MROUTING enabled. # # IPFIREWALL enables support for IP firewall construction, in # conjunction with the `ipfw' program. IPFIREWALL_VERBOSE sends # logged packets to the system logger. IPFIREWALL_VERBOSE_LIMIT # limits the number of times a matching entry can be logged. 
# # WARNING: IPFIREWALL defaults to a policy of "deny ip from any to any" # and if you do not add other rules during startup to allow access, # YOU WILL LOCK YOURSELF OUT. It is suggested that you set firewall_type=open # in /etc/rc.conf when first enabling this feature, then refining the # firewall rules in /etc/rc.firewall after you've tested that the new kernel # feature works properly. # # IPFIREWALL_DEFAULT_TO_ACCEPT causes the default rule (at boot) to # allow everything. Use with care, if a cracker can crash your # firewall machine, they can get to your protected machines. However, # if you are using it as an as-needed filter for specific problems as # they arise, then this may be for you. Changing the default to 'allow' # means that you won't get stuck if the kernel and /sbin/ipfw binary get # out of sync. # # IPDIVERT enables the divert IP sockets, used by ``ipfw divert''. It # depends on IPFIREWALL if compiled into the kernel. # # IPFIREWALL_FORWARD enables changing of the packet destination either # to do some sort of policy routing or transparent proxying. Used by # ``ipfw forward''. # # IPFIREWALL_FORWARD_EXTENDED enables full packet destination changing # including redirecting packets to local IP addresses and ports. All # redirections apply to locally generated packets too. Because of this # great care is required when crafting the ruleset. # # IPSTEALTH enables code to support stealth forwarding (i.e., forwarding # packets without touching the ttl). This can be useful to hide firewalls # from traceroute and similar tools. # # TCPDEBUG enables code which keeps traces of the TCP state machine # for sockets with the SO_DEBUG option set, which can then be examined # using the trpt(8) utility. # options MROUTING # Multicast routing options PIM # Protocol Independent Multicast options IPFIREWALL #firewall options IPFIREWALL_VERBOSE #enable logging to syslogd(8) options IPFIREWALL_VERBOSE_LIMIT=100 #limit verbosity options IPFIREWALL_DEFAULT_TO_ACCEPT #allow everything by default options IPFIREWALL_FORWARD #packet destination changes options IPFIREWALL_FORWARD_EXTENDED #all packet dest changes options IPDIVERT #divert sockets options IPFILTER #ipfilter support options IPFILTER_LOG #ipfilter logging options IPFILTER_LOOKUP #ipfilter pools options IPFILTER_DEFAULT_BLOCK #block all packets by default options IPSTEALTH #support for stealth forwarding options TCPDEBUG # The MBUF_STRESS_TEST option enables options which create # various random failures / extreme cases related to mbuf # functions. See mbuf(9) for a list of available test cases. options MBUF_STRESS_TEST # Statically Link in accept filters options ACCEPT_FILTER_DATA options ACCEPT_FILTER_HTTP # TCP_DROP_SYNFIN adds support for ignoring TCP packets with SYN+FIN. This # prevents nmap et al. from identifying the TCP/IP stack, but breaks support # for RFC1644 extensions and is not recommended for web servers. # options TCP_DROP_SYNFIN #drop TCP packets with SYN+FIN # TCP_SIGNATURE adds support for RFC 2385 (TCP-MD5) digests. These are # carried in TCP option 19. This option is commonly used to protect # TCP sessions (e.g. BGP) where IPSEC is not available nor desirable. # This is enabled on a per-socket basis using the TCP_MD5SIG socket option. # This requires the use of 'device crypto', 'options FAST_IPSEC' or 'options # IPSEC', and 'device cryptodev'. #options TCP_SIGNATURE #include support for RFC 2385 # DUMMYNET enables the "dummynet" bandwidth limiter. You need IPFIREWALL # as well. 
See dummynet(4) and ipfw(8) for more info. When you run # DUMMYNET it is advisable to also have "options HZ=1000" to achieve a # smoother scheduling of the traffic. options DUMMYNET # Zero copy sockets support. This enables "zero copy" for sending and # receiving data via a socket. The send side works for any type of NIC, # the receive side only works for NICs that support MTUs greater than the # page size of your architecture and that support header splitting. See # zero_copy(9) for more details. options ZERO_COPY_SOCKETS # # ATM (HARP version) options # # ATM_CORE includes the base ATM functionality code. This must be included # for ATM support. # # ATM_IP includes support for running IP over ATM. # # At least one (and usually only one) of the following signalling managers # must be included (note that all signalling managers include PVC support): # ATM_SIGPVC includes support for the PVC-only signalling manager `sigpvc'. # ATM_SPANS includes support for the `spans' signalling manager, which runs # the FORE Systems's proprietary SPANS signalling protocol. # ATM_UNI includes support for the `uni30' and `uni31' signalling managers, # which run the ATM Forum UNI 3.x signalling protocols. # # The `hfa' driver provides support for the FORE Systems, Inc. # PCA-200E ATM PCI Adapter. # # The `harp' pseudo-driver makes all NATM interface drivers available to HARP. # options ATM_CORE #core ATM protocol family options ATM_IP #IP over ATM support options ATM_SIGPVC #SIGPVC signalling manager options ATM_SPANS #SPANS signalling manager options ATM_UNI #UNI signalling manager device hfa #FORE PCA-200E ATM PCI device harp #Pseudo-interface for NATM ##################################################################### # FILESYSTEM OPTIONS # # Only the root, /usr, and /tmp filesystems need be statically # compiled; everything else will be automatically loaded at mount # time. (Exception: the UFS family--- FFS --- cannot # currently be demand-loaded.) Some people still prefer to statically # compile other filesystems as well. # # NB: The NULL, PORTAL, UMAP and UNION filesystems are known to be # buggy, and WILL panic your system if you attempt to do anything with # them. They are included here as an incentive for some enterprising # soul to sit down and fix them. # # One of these is mandatory: options FFS #Fast filesystem options NFSCLIENT #Network File System client # The rest are optional: options CD9660 #ISO 9660 filesystem options FDESCFS #File descriptor filesystem options HPFS #OS/2 File system options MSDOSFS #MS DOS File System (FAT, FAT32) options NFSSERVER #Network File System server options NTFS #NT File System options NULLFS #NULL filesystem # Broken (depends on NCP): #options NWFS #NetWare filesystem options PORTALFS #Portal filesystem options PROCFS #Process filesystem (requires PSEUDOFS) options PSEUDOFS #Pseudo-filesystem framework options PSEUDOFS_TRACE #Debugging support for PSEUDOFS options SMBFS #SMB/CIFS filesystem options UDF #Universal Disk Format # Broken (seriously (functionally) broken): #options UMAPFS #UID map filesystem options UNIONFS #Union filesystem # The xFS_ROOT options REQUIRE the associated ``options xFS'' options NFS_ROOT #NFS usable as root device # Soft updates is a technique for improving filesystem speed and # making abrupt shutdown less risky. # options SOFTUPDATES # Extended attributes allow additional data to be associated with files, # and is used for ACLs, Capabilities, and MAC labels. # See src/sys/ufs/ufs/README.extattr for more information. 
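#
# A brief userland sketch (the file name is only an example): once extended
# attributes are available they can be written and read back with
# setextattr(8) and getextattr(8), e.g.
#
#	setextattr user comment "scratch data" /tmp/somefile
#	getextattr user comment /tmp/somefile
#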
options UFS_EXTATTR options UFS_EXTATTR_AUTOSTART # Access Control List support for UFS filesystems. The current ACL # implementation requires extended attribute support, UFS_EXTATTR, # for the underlying filesystem. # See src/sys/ufs/ufs/README.acls for more information. options UFS_ACL # Directory hashing improves the speed of operations on very large # directories at the expense of some memory. options UFS_DIRHASH # Make space in the kernel for a root filesystem on a md device. # Define to the number of kilobytes to reserve for the filesystem. options MD_ROOT_SIZE=10 # Make the md device a potential root device, either with preloaded # images of type mfs_root or md_root. options MD_ROOT # Disk quotas are supported when this option is enabled. options QUOTA #enable disk quotas # If you are running a machine just as a fileserver for PC and MAC # users, using SAMBA or Netatalk, you may consider setting this option # and keeping all those users' directories on a filesystem that is # mounted with the suiddir option. This gives new files the same # ownership as the directory (similar to group). It's a security hole # if you let these users run programs, so confine it to file-servers # (but it'll save you lots of headaches in those cases). Root owned # directories are exempt and X bits are cleared. The suid bit must be # set on the directory as well; see chmod(1) PC owners can't see/set # ownerships so they keep getting their toes trodden on. This saves # you all the support calls as the filesystem it's used on will act as # they expect: "It's my dir so it must be my file". # options SUIDDIR # NFS options: options NFS_MINATTRTIMO=3 # VREG attrib cache timeout in sec options NFS_MAXATTRTIMO=60 options NFS_MINDIRATTRTIMO=30 # VDIR attrib cache timeout in sec options NFS_MAXDIRATTRTIMO=60 options NFS_GATHERDELAY=10 # Default write gather delay (msec) options NFS_WDELAYHASHSIZ=16 # and with this options NFS_DEBUG # Enable NFS Debugging # Coda stuff: options CODA #CODA filesystem. device vcoda #coda minicache <-> venus comm. # Use the old Coda 5.x venus<->kernel interface instead of the new # realms-aware 6.x protocol. #options CODA_COMPAT_5 # # Add support for the EXT2FS filesystem of Linux fame. Be a bit # careful with this - the ext2fs code has a tendency to lag behind # changes and not be exercised very much, so mounting read/write could # be dangerous (and even mounting read only could result in panics.) # options EXT2FS # # Add support for the ReiserFS filesystem (used in Linux). Currently, # this is limited to read-only access. # options REISERFS # # Add support for the SGI XFS filesystem. Currently, # this is limited to read-only access. # options XFS # Use real implementations of the aio_* system calls. There are numerous # stability and security issues in the current aio code that make it # unsuitable for inclusion on machines with untrusted local users. options VFS_AIO # Cryptographically secure random number generator; /dev/random device random # The system memory devices; /dev/mem, /dev/kmem device mem # Optional character code conversion support with LIBICONV. # Each option requires their base file system and LIBICONV. options CD9660_ICONV options MSDOSFS_ICONV options NTFS_ICONV options UDF_ICONV # Experimental support for large MS-DOS filesystems. 
# # WARNING: This uses at least 32 bytes of kernel memory (which is not # reclaimed until the FS is unmounted) for each file on disk to map # between the 32-bit inode numbers used by VFS and the 64-bit pseudo-inode # numbers used internally by msdosfs. This is only safe to use in certain # controlled situations (e.g. read-only FS with less than 1 million files). # Since the mappings do not persist across unmounts (or reboots), these # filesystems are not suitable for exporting through NFS, or any other # application that requires fixed inode numbers. options MSDOSFS_LARGE ##################################################################### # POSIX P1003.1B # Real time extensions added in the 1993 Posix # _KPOSIX_PRIORITY_SCHEDULING: Build in _POSIX_PRIORITY_SCHEDULING options _KPOSIX_PRIORITY_SCHEDULING # p1003_1b_semaphores are very experimental, # user should be ready to assist in debugging if problems arise. options P1003_1B_SEMAPHORES # POSIX message queue options P1003_1B_MQUEUE ##################################################################### # SECURITY POLICY PARAMETERS # Support for BSM audit options AUDIT # Support for Mandatory Access Control (MAC): options MAC options MAC_BIBA options MAC_BSDEXTENDED options MAC_DEBUG options MAC_IFOFF options MAC_LOMAC options MAC_MLS options MAC_NONE options MAC_PARTITION options MAC_PORTACL options MAC_SEEOTHERUIDS options MAC_STUB options MAC_TEST ##################################################################### # CLOCK OPTIONS # The granularity of operation is controlled by the kernel option HZ whose # default value (100) means a granularity of 10ms (1s/HZ). # Some subsystems, such as DUMMYNET, might benefit from a smaller # granularity such as 1ms or less, for a smoother scheduling of packets. # Consider, however, that reducing the granularity too much might # cause excessive overhead in clock interrupt processing, # potentially causing ticks to be missed and thus actually reducing # the accuracy of operation. options HZ=100 # Enable support for the kernel PLL to use an external PPS signal, # under supervision of [x]ntpd(8) # More info in ntpd documentation: http://www.eecis.udel.edu/~ntp options PPS_SYNC ##################################################################### # SCSI DEVICES # SCSI DEVICE CONFIGURATION # The SCSI subsystem consists of the `base' SCSI code, a number of # high-level SCSI device `type' drivers, and the low-level host-adapter # device drivers. The host adapters are listed in the ISA and PCI # device configuration sections below. # # It is possible to wire down your SCSI devices so that a given bus, # target, and LUN always come on line as the same device unit. In # earlier versions the unit numbers were assigned in the order that # the devices were probed on the SCSI bus. This means that if you # removed a disk drive, you may have had to rewrite your /etc/fstab # file, and also that you had to be careful when adding a new disk # as it may have been probed earlier and moved your device configuration # around. (See also option GEOM_VOL for a different solution to this # problem.) # This old behavior is maintained as the default behavior. The unit # assignment begins with the first non-wired down unit for a device # type. For example, if you wire a disk as "da3" then the first # non-wired disk will be assigned da4. 
# The syntax for wiring down devices is: hint.scbus.0.at="ahc0" hint.scbus.1.at="ahc1" hint.scbus.1.bus="0" hint.scbus.3.at="ahc2" hint.scbus.3.bus="0" hint.scbus.2.at="ahc2" hint.scbus.2.bus="1" hint.da.0.at="scbus0" hint.da.0.target="0" hint.da.0.unit="0" hint.da.1.at="scbus3" hint.da.1.target="1" hint.da.2.at="scbus2" hint.da.2.target="3" hint.sa.1.at="scbus1" hint.sa.1.target="6" # "units" (SCSI logical unit number) that are not specified are # treated as if specified as LUN 0. # All SCSI devices allocate as many units as are required. # The ch driver drives SCSI Media Changer ("jukebox") devices. # # The da driver drives SCSI Direct Access ("disk") and Optical Media # ("WORM") devices. # # The sa driver drives SCSI Sequential Access ("tape") devices. # # The cd driver drives SCSI Read Only Direct Access ("cd") devices. # # The ses driver drives SCSI Environment Services ("ses") and # SAF-TE ("SCSI Accessible Fault-Tolerant Enclosure") devices. # # The pt driver drives SCSI Processor devices. # # # Target Mode support is provided here but also requires that a SIM # (SCSI Host Adapter Driver) provide support as well. # # The targ driver provides target mode support as a Processor type device. # It exists to give the minimal context necessary to respond to Inquiry # commands. There is a sample user application that shows how the rest # of the command support might be done in /usr/share/examples/scsi_target. # # The targbh driver provides target mode support and exists to respond # to incoming commands that do not otherwise have a logical unit assigned # to them. # # The "unknown" device (uk? in pre-2.0.5) is now part of the base SCSI # configuration as the "pass" driver. device scbus #base SCSI code device ch #SCSI media changers device da #SCSI direct access devices (aka disks) device sa #SCSI tapes device cd #SCSI CD-ROMs device ses #SCSI Environmental Services (and SAF-TE) device pt #SCSI processor device targ #SCSI Target Mode Code device targbh #SCSI Target Mode Blackhole Device device pass #CAM passthrough driver # CAM OPTIONS: # debugging options: # -- NOTE -- If you specify one of the bus/target/lun options, you must # specify them all! # CAMDEBUG: When defined enables debugging macros # CAM_DEBUG_BUS: Debug the given bus. Use -1 to debug all busses. # CAM_DEBUG_TARGET: Debug the given target. Use -1 to debug all targets. # CAM_DEBUG_LUN: Debug the given lun. Use -1 to debug all luns. # CAM_DEBUG_FLAGS: OR together CAM_DEBUG_INFO, CAM_DEBUG_TRACE, # CAM_DEBUG_SUBTRACE, and CAM_DEBUG_CDB # # CAM_MAX_HIGHPOWER: Maximum number of concurrent high power (start unit) cmds # CAM_NEW_TRAN_CODE: this is the new transport layer code that will be switched # to soon # SCSI_NO_SENSE_STRINGS: When defined disables sense descriptions # SCSI_NO_OP_STRINGS: When defined disables opcode descriptions # SCSI_DELAY: The number of MILLISECONDS to freeze the SIM (scsi adapter) # queue after a bus reset, and the number of milliseconds to # freeze the device queue after a bus device reset. This # can be changed at boot and runtime with the # kern.cam.scsi_delay tunable/sysctl. 
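#
# For example (using the tunable/sysctl named above), the delay could be cut
# to two seconds either at boot or on a running system:
#
#	kern.cam.scsi_delay="2000"	(in /boot/loader.conf)
#	sysctl kern.cam.scsi_delay=2000
#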
options CAMDEBUG options CAM_DEBUG_BUS=-1 options CAM_DEBUG_TARGET=-1 options CAM_DEBUG_LUN=-1 options CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) options CAM_MAX_HIGHPOWER=4 options SCSI_NO_SENSE_STRINGS options SCSI_NO_OP_STRINGS options SCSI_DELAY=5000 # Be pessimistic about Joe SCSI device # Options for the CAM CDROM driver: # CHANGER_MIN_BUSY_SECONDS: Guaranteed minimum time quantum for a changer LUN # CHANGER_MAX_BUSY_SECONDS: Maximum time quantum per changer LUN, only # enforced if there is I/O waiting for another LUN # The compiled in defaults for these variables are 2 and 10 seconds, # respectively. # # These can also be changed on the fly with the following sysctl variables: # kern.cam.cd.changer.min_busy_seconds # kern.cam.cd.changer.max_busy_seconds # options CHANGER_MIN_BUSY_SECONDS=2 options CHANGER_MAX_BUSY_SECONDS=10 # Options for the CAM sequential access driver: # SA_IO_TIMEOUT: Timeout for read/write/wfm operations, in minutes # SA_SPACE_TIMEOUT: Timeout for space operations, in minutes # SA_REWIND_TIMEOUT: Timeout for rewind operations, in minutes # SA_ERASE_TIMEOUT: Timeout for erase operations, in minutes # SA_1FM_AT_EOD: Default to model which only has a default one filemark at EOT. options SA_IO_TIMEOUT=4 options SA_SPACE_TIMEOUT=60 options SA_REWIND_TIMEOUT=(2*60) options SA_ERASE_TIMEOUT=(4*60) options SA_1FM_AT_EOD # Optional timeout for the CAM processor target (pt) device # This is specified in seconds. The default is 60 seconds. options SCSI_PT_DEFAULT_TIMEOUT=60 # Optional enable of doing SES passthrough on other devices (e.g., disks) # # Normally disabled because a lot of newer SCSI disks report themselves # as having SES capabilities, but this can then clot up attempts to build # build a topology with the SES device that's on the box these drives # are in.... options SES_ENABLE_PASSTHROUGH ##################################################################### # MISCELLANEOUS DEVICES AND OPTIONS # The `pty' device usually turns out to be ``effectively mandatory'', # as it is required for `telnetd', `rlogind', `screen', `emacs', and # `xterm', among others. device pty #Pseudo ttys device nmdm #back-to-back tty devices device md #Memory/malloc disk device snp #Snoop device - to look at pty/vty/etc.. device ccd #Concatenated disk driver device firmware #firmware(9) support # Kernel side iconv library options LIBICONV # Size of the kernel message buffer. Should be N * pagesize. options MSGBUF_SIZE=40960 # Maximum size of a tty or pty input buffer. options TTYHOG=8193 ##################################################################### # HARDWARE DEVICE CONFIGURATION # For ISA the required hints are listed. # EISA, MCA, PCI and pccard are self identifying buses, so no hints # are needed. # # Mandatory devices: # # The keyboard controller; it controls the keyboard and the PS/2 mouse. device atkbdc hint.atkbdc.0.at="isa" hint.atkbdc.0.port="0x060" # The AT keyboard device atkbd hint.atkbd.0.at="atkbdc" hint.atkbd.0.irq="1" # Options for atkbd: options ATKBD_DFLT_KEYMAP # specify the built-in keymap makeoptions ATKBD_DFLT_KEYMAP=jp.106 # These options are valid for other keyboard drivers as well. 
options KBD_DISABLE_KEYMAP_LOAD # refuse to load a keymap options KBD_INSTALL_CDEV # install a CDEV entry in /dev # `flags' for atkbd: # 0x01 Force detection of keyboard, else we always assume a keyboard # 0x02 Don't reset keyboard, useful for some newer ThinkPads # 0x03 Force detection and avoid reset, might help with certain # dockingstations # 0x04 Old-style (XT) keyboard support, useful for older ThinkPads # PS/2 mouse device psm hint.psm.0.at="atkbdc" hint.psm.0.irq="12" # Options for psm: options PSM_HOOKRESUME #hook the system resume event, useful #for some laptops options PSM_RESETAFTERSUSPEND #reset the device at the resume event # Video card driver for VGA adapters. device vga hint.vga.0.at="isa" # Options for vga: # Try the following option if the mouse pointer is not drawn correctly # or font does not seem to be loaded properly. May cause flicker on # some systems. options VGA_ALT_SEQACCESS # If you can dispense with some vga driver features, you may want to # use the following options to save some memory. #options VGA_NO_FONT_LOADING # don't save/load font #options VGA_NO_MODE_CHANGE # don't change video modes # Older video cards may require this option for proper operation. options VGA_SLOW_IOACCESS # do byte-wide i/o's to TS and GDC regs # The following option probably won't work with the LCD displays. options VGA_WIDTH90 # support 90 column modes options FB_DEBUG # Frame buffer debugging device splash # Splash screen and screen saver support # Various screen savers. device blank_saver device daemon_saver device dragon_saver device fade_saver device fire_saver device green_saver device logo_saver device rain_saver device snake_saver device star_saver device warp_saver # The syscons console driver (sco color console compatible). device sc hint.sc.0.at="isa" options MAXCONS=16 # number of virtual consoles options SC_ALT_MOUSE_IMAGE # simplified mouse cursor in text mode options SC_DFLT_FONT # compile font in makeoptions SC_DFLT_FONT=cp850 options SC_DISABLE_KDBKEY # disable `debug' key options SC_DISABLE_REBOOT # disable reboot key sequence options SC_HISTORY_SIZE=200 # number of history buffer lines options SC_MOUSE_CHAR=0x3 # char code for text mode mouse cursor options SC_PIXEL_MODE # add support for the raster text mode # The following options will let you change the default colors of syscons. options SC_NORM_ATTR=(FG_GREEN|BG_BLACK) options SC_NORM_REV_ATTR=(FG_YELLOW|BG_GREEN) options SC_KERNEL_CONS_ATTR=(FG_RED|BG_BLACK) options SC_KERNEL_CONS_REV_ATTR=(FG_BLACK|BG_RED) # The following options will let you change the default behaviour of # cut-n-paste feature options SC_CUT_SPACES2TABS # convert leading spaces into tabs options SC_CUT_SEPCHARS=\"x09\" # set of characters that delimit words # (default is single space - \"x20\") # If you have a two button mouse, you may want to add the following option # to use the right button of the mouse to paste text. options SC_TWOBUTTON_MOUSE # You can selectively disable features in syscons. options SC_NO_CUTPASTE options SC_NO_FONT_LOADING options SC_NO_HISTORY options SC_NO_SYSMOUSE options SC_NO_SUSPEND_VTYSWITCH # `flags' for sc # 0x80 Put the video card in the VESA 800x600 dots, 16 color mode # 0x100 Probe for a keyboard device periodically if one is not present # # Optional devices: # # # SCSI host adapters: # # adv: All Narrow SCSI bus AdvanSys controllers. # adw: Second Generation AdvanSys controllers including the ADV940UW. 
# aha: Adaptec 154x/1535/1640 # ahb: Adaptec 174x EISA controllers # ahc: Adaptec 274x/284x/2910/293x/294x/394x/3950x/3960x/398X/4944/ # 19160x/29160x, aic7770/aic78xx # ahd: Adaptec 29320/39320 Controllers. # aic: Adaptec 6260/6360, APA-1460 (PC Card), NEC PC9801-100 (C-BUS) # amd: Support for the AMD 53C974 SCSI host adapter chip as found on devices # such as the Tekram DC-390(T). # bt: Most Buslogic controllers: including BT-445, BT-54x, BT-64x, BT-74x, # BT-75x, BT-946, BT-948, BT-956, BT-958, SDC3211B, SDC3211F, SDC3222F # esp: NCR53c9x. Only for SBUS hardware right now. # isp: Qlogic ISP 1020, 1040 and 1040B PCI SCSI host adapters, # ISP 1240 Dual Ultra SCSI, ISP 1080 and 1280 (Dual) Ultra2, # ISP 12160 Ultra3 SCSI, # Qlogic ISP 2100 and ISP 2200 1Gb Fibre Channel host adapters. # Qlogic ISP 2300 and ISP 2312 2Gb Fibre Channel host adapters. # Qlogic ISP 2322 and ISP 6322 2Gb Fibre Channel host adapters. # ispfw: Firmware module for Qlogic host adapters # mpt: LSI-Logic MPT/Fusion 53c1020 or 53c1030 Ultra4 # or FC9x9 Fibre Channel host adapters. # ncr: NCR 53C810, 53C825 self-contained SCSI host adapters. # sym: Symbios/Logic 53C8XX family of PCI-SCSI I/O processors: # 53C810, 53C810A, 53C815, 53C825, 53C825A, 53C860, 53C875, # 53C876, 53C885, 53C895, 53C895A, 53C896, 53C897, 53C1510D, # 53C1010-33, 53C1010-66. # trm: Tekram DC395U/UW/F DC315U adapters. # wds: WD7000 # # Note that the order is important in order for Buslogic ISA/EISA cards to be # probed correctly. # device bt hint.bt.0.at="isa" hint.bt.0.port="0x330" device adv hint.adv.0.at="isa" device adw device aha hint.aha.0.at="isa" device aic hint.aic.0.at="isa" device ahb device ahc device ahd device amd device esp device isp hint.isp.0.disable="1" hint.isp.0.role="3" hint.isp.0.prefer_iomap="1" hint.isp.0.prefer_memmap="1" hint.isp.0.fwload_disable="1" hint.isp.0.ignore_nvram="1" hint.isp.0.fullduplex="1" hint.isp.0.topology="lport" hint.isp.0.topology="nport" hint.isp.0.topology="lport-only" hint.isp.0.topology="nport-only" # we can't get u_int64_t types, nor can we get strings if it's got # a leading 0x, hence this silly dodge. hint.isp.0.portwnn="w50000000aaaa0000" hint.isp.0.nodewnn="w50000000aaaa0001" device ispfw device mpt device ncr device sym device trm device wds hint.wds.0.at="isa" hint.wds.0.port="0x350" hint.wds.0.irq="11" hint.wds.0.drq="6" # The aic7xxx driver will attempt to use memory mapped I/O for all PCI # controllers that have it configured only if this option is set. Unfortunately, # this doesn't work on some motherboards, which prevents it from being the # default. options AHC_ALLOW_MEMIO # Dump the contents of the ahc controller configuration PROM. options AHC_DUMP_EEPROM # Bitmap of units to enable targetmode operations. options AHC_TMODE_ENABLE # Compile in Aic7xxx Debugging code. options AHC_DEBUG # Aic7xxx driver debugging options. See sys/dev/aic7xxx/aic7xxx.h options AHC_DEBUG_OPTS # Print register bitfields in debug output. Adds ~128k to driver # See ahc(4). options AHC_REG_PRETTY_PRINT # Compile in aic79xx debugging code. options AHD_DEBUG # Aic79xx driver debugging options. Adds ~215k to driver. See ahd(4). options AHD_DEBUG_OPTS=0xFFFFFFFF # Print human-readable register definitions when debugging options AHD_REG_PRETTY_PRINT # Bitmap of units to enable targetmode operations. options AHD_TMODE_ENABLE # The adw driver will attempt to use memory mapped I/O for all PCI # controllers that have it configured only if this option is set. 
options ADW_ALLOW_MEMIO # Options used in dev/isp/ (Qlogic SCSI/FC driver). # # ISP_TARGET_MODE - enable target mode operation # options ISP_TARGET_MODE=1 # # ISP_DEFAULT_ROLES - default role (none, target, init, both) # options ISP_DEFAULT_ROLES=3 # Options used in dev/sym/ (Symbios SCSI driver). #options SYM_SETUP_LP_PROBE_MAP #-Low Priority Probe Map (bits) # Allows the ncr to take precedence # 1 (1<<0) -> 810a, 860 # 2 (1<<1) -> 825a, 875, 885, 895 # 4 (1<<2) -> 895a, 896, 1510d #options SYM_SETUP_SCSI_DIFF #-HVD support for 825a, 875, 885 # disabled:0 (default), enabled:1 #options SYM_SETUP_PCI_PARITY #-PCI parity checking # disabled:0, enabled:1 (default) #options SYM_SETUP_MAX_LUN #-Number of LUNs supported # default:8, range:[1..64] # The 'dpt' driver provides support for old DPT controllers (http://www.dpt.com/). # These have hardware RAID-{0,1,5} support, and do multi-initiator I/O. # The DPT controllers are commonly re-licensed under other brand-names - # some controllers by Olivetti, Dec, HP, AT&T, SNI, AST, Alphatronic, NEC and # Compaq are actually DPT controllers. # # See src/sys/dev/dpt for debugging and other subtle options. # DPT_MEASURE_PERFORMANCE Enables a set of (semi)invasive metrics. Various # instruments are enabled. The tools in # /usr/sbin/dpt_* assume these to be enabled. # DPT_HANDLE_TIMEOUTS Normally device timeouts are handled by the DPT. # If you want the driver to handle timeouts, enable # this option. If your system is very busy, this # option will create more trouble than it solves. # DPT_TIMEOUT_FACTOR Used to compute the excessive amount of time to # wait when timing out with the above option. # DPT_DEBUG_xxxx These are controllable from sys/dev/dpt/dpt.h # DPT_LOST_IRQ When enabled, will try, once per second, to catch # any interrupt that got lost. Seems to help in some # DPT-firmware/Motherboard combinations. Minimal # cost, great benefit. # DPT_RESET_HBA Make "reset" actually reset the controller # instead of fudging it. Only enable this if you # are 100% certain you need it. device dpt # DPT options #!CAM# options DPT_MEASURE_PERFORMANCE #!CAM# options DPT_HANDLE_TIMEOUTS options DPT_TIMEOUT_FACTOR=4 options DPT_LOST_IRQ options DPT_RESET_HBA # # Compaq "CISS" RAID controllers (SmartRAID 5* series) # These controllers have a SCSI-like interface, and require the # CAM infrastructure. # device ciss # # Intel Integrated RAID controllers. # This driver was developed and is maintained by Intel. Contacts # at Intel for this driver are # "Kannanthanam, Boji T" and # "Leubner, Achim" . # device iir # # Mylex AcceleRAID and eXtremeRAID controllers with v6 and later # firmware. These controllers have a SCSI-like interface, and require # the CAM infrastructure. # device mly # # Compaq Smart RAID, Mylex DAC960 and AMI MegaRAID controllers. Only # one entry is needed; the code will find and configure all supported # controllers. # device ida # Compaq Smart RAID device mlx # Mylex DAC960 device amr # AMI MegaRAID device mfi # LSI MegaRAID SAS # # 3ware ATA RAID # device twe # 3ware ATA RAID # # The 'ATA' driver supports all ATA and ATAPI devices, including PC Card # devices. You only need one "device ata" for it to find all # PCI and PC Card ATA/ATAPI devices on modern machines.
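# As a rough sketch, a typical one-disk machine with an ATAPI CD-ROM
# therefore needs only the controller driver plus the per-device-type
# drivers it actually uses:
#	device ata
#	device atadisk
#	device atapicd
# The atapicam device listed below is optional; note that, as stated
# there, it additionally requires CAM (scbus and pass) so the emulated
# SCSI devices have something to attach to.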
device ata device atadisk # ATA disk drives device ataraid # ATA RAID drives device atapicd # ATAPI CDROM drives device atapifd # ATAPI floppy drives device atapist # ATAPI tape drives device atapicam # emulate ATAPI devices as SCSI ditto via CAM # needs CAM to be present (scbus & pass) # # For older non-PCI, non-PnPBIOS systems, these are the hints lines to add: hint.ata.0.at="isa" hint.ata.0.port="0x1f0" hint.ata.0.irq="14" hint.ata.1.at="isa" hint.ata.1.port="0x170" hint.ata.1.irq="15" # # The following options are valid on the ATA driver: # # ATA_STATIC_ID: controller numbering is static ie depends on location # else the device numbers are dynamically allocated. options ATA_STATIC_ID # # Standard floppy disk controllers and floppy tapes, supports # the Y-E DATA External FDD (PC Card) # device fdc hint.fdc.0.at="isa" hint.fdc.0.port="0x3F0" hint.fdc.0.irq="6" hint.fdc.0.drq="2" # # FDC_DEBUG enables floppy debugging. Since the debug output is huge, you # gotta turn it actually on by setting the variable fd_debug with DDB, # however. options FDC_DEBUG # # Activate this line if you happen to have an Insight floppy tape. # Probing them proved to be dangerous for people with floppy disks only, # so it's "hidden" behind a flag: #hint.fdc.0.flags="1" # Specify floppy devices hint.fd.0.at="fdc0" hint.fd.0.drive="0" hint.fd.1.at="fdc0" hint.fd.1.drive="1" # # sio: serial ports (see sio(4)), including support for various # PC Card devices, such as Modem and NICs (see etc/defaults/pccard.conf) # device sio hint.sio.0.at="isa" hint.sio.0.port="0x3F8" hint.sio.0.flags="0x10" hint.sio.0.irq="4" # Options for sio: options COM_ESP # Code for Hayes ESP. options COM_MULTIPORT # Code for some cards with shared IRQs. options CONSPEED=115200 # Speed for serial console # (default 9600). # `flags' specific to sio(4). See below for flags used by both sio(4) and # uart(4). # 0x20 force this unit to be the console (unless there is another # higher priority console). This replaces the COMCONSOLE option. # 0x40 reserve this unit for low level console operations. Do not # access the device in any normal way. # PnP `flags' # 0x1 disable probing of this device. Used to prevent your modem # from being attached as a PnP modem. # Other flags for sio that aren't documented in the man page. # 0x20000 enable hardware RTS/CTS and larger FIFOs. Only works for # ST16650A-compatible UARTs. # # uart: newbusified driver for serial interfaces. It consolidates the sio(4), # sab(4) and zs(4) drivers. # device uart # Options for uart(4) options UART_PPS_ON_CTS # Do time pulse capturing using CTS # instead of DCD. # The following hint should only be used for pure ISA devices. It is not # needed otherwise. Use of hints is strongly discouraged. hint.uart.0.at="isa" # The following 3 hints are used when the UART is a system device (i.e., a # console or debug port), but only on platforms that don't have any other # means to pass the information to the kernel. The unit number of the hint # is only used to bundle the hints together. There is no relation to the # unit number of the probed UART. hint.uart.0.port="0x3f8" hint.uart.0.flags="0x10" hint.uart.0.baud="115200" # `flags' for serial drivers that support consoles like sio(4) and uart(4): # 0x10 enable console support for this unit. Other console flags # (if applicable) are ignored unless this is set. Enabling # console support does not make the unit the preferred console. # Boot with -h or set boot_serial=YES in the loader. 
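# As a sketch, a serial console on sio0 usually combines the console flag
# shown above with the loader settings just mentioned; the console knob
# shown here is an assumption for a standard serial console setup (see
# loader.conf(5)), adjust to taste:
#	hint.sio.0.flags="0x10"		(in /boot/device.hints)
#	boot_serial="YES"		(in /boot/loader.conf)
#	console="comconsole"		(in /boot/loader.conf)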
For sio(4) # specifically, the 0x20 flag can also be set (see above). # Currently, at most one unit can have console support; the # first one (in config file order) with this flag set is # preferred. Setting this flag for sio0 gives the old behaviour. # 0x80 use this port for serial line gdb support in ddb. Also known # as debug port. # # Options for serial drivers that support consoles: options BREAK_TO_DEBUGGER # A BREAK on a serial console goes to # ddb, if available. # Solaris implements a new BREAK which is initiated by a character # sequence CR ~ ^b which is similar to a familiar pattern used on # Sun servers by the Remote Console. options ALT_BREAK_TO_DEBUGGER # Serial Communications Controller # Supports the Siemens SAB 82532 and Zilog Z8530 multi-channel # communications controllers. device scc # PCI Universal Communications driver # Supports various multi port PCI I/O cards. device puc # # Network interfaces: # # MII bus support is required for some PCI 10/100 ethernet NICs, # namely those which use MII-compliant transceivers or implement # transceiver control interfaces that operate like an MII. Adding # "device miibus0" to the kernel config pulls in support for # the generic miibus API and all of the PHY drivers, including a # generic one for PHYs that aren't specifically handled by an # individual driver. device miibus # an: Aironet 4500/4800 802.11 wireless adapters. Supports the PCMCIA, # PCI and ISA varieties. # awi: Support for IEEE 802.11 PC Card devices using the AMD Am79C930 and # Harris (Intersil) Chipset with PCnetMobile firmware by AMD. # bge: Support for gigabit ethernet adapters based on the Broadcom # BCM570x family of controllers, including the 3Com 3c996-T, # the Netgear GA302T, the SysKonnect SK-9D21 and SK-9D41, and # the embedded gigE NICs on Dell PowerEdge 2550 servers. # cm: Arcnet SMC COM90c26 / SMC COM90c56 # (and SMC COM90c66 in '56 compatibility mode) adapters. # cnw: Xircom CNW/Netware Airsurfer PC Card adapter # dc: Support for PCI fast ethernet adapters based on the DEC/Intel 21143 # and various workalikes including: # the ADMtek AL981 Comet and AN985 Centaur, the ASIX Electronics # AX88140A and AX88141, the Davicom DM9100 and DM9102, the Lite-On # 82c168 and 82c169 PNIC, the Lite-On/Macronix LC82C115 PNIC II # and the Macronix 98713/98713A/98715/98715A/98725 PMAC. This driver # replaces the old al, ax, dm, pn and mx drivers. List of brands: # Digital DE500-BA, Kingston KNE100TX, D-Link DFE-570TX, SOHOware SFA110, # SVEC PN102-TX, CNet Pro110B, 120A, and 120B, Compex RL100-TX, # LinkSys LNE100TX, LNE100TX V2.0, Jaton XpressNet, Alfa Inc GFC2204, # KNE110TX. # de: Digital Equipment DC21040 # em: Intel Pro/1000 Gigabit Ethernet 82542, 82543, 82544 based adapters. # ep: 3Com 3C509, 3C529, 3C556, 3C562D, 3C563D, 3C572, 3C574X, 3C579, 3C589 # and PC Card devices using these chipsets. # ex: Intel EtherExpress Pro/10 and other i82595-based adapters, # Olicom Ethernet PC Card devices. # fe: Fujitsu MB86960A/MB86965A Ethernet # fea: DEC DEFEA EISA FDDI adapter # fpa: Support for the Digital DEFPA PCI FDDI. `device fddi' is also needed. # fxp: Intel EtherExpress Pro/100B # (hint of prefer_iomap can be done to prefer I/O instead of Mem mapping) # hme: Sun HME (Happy Meal Ethernet) # le: AMD Am7900 LANCE and Am79C9xx PCnet # lge: Support for PCI gigabit ethernet adapters based on the Level 1 # LXT1001 NetCellerator chipset. This includes the D-Link DGE-500SX, # SMC TigerCard 1000 (SMC9462SX), and some Addtron cards. 
# lmc: Support for the LMC/SBE wide-area network interface cards. # my: Myson Fast Ethernet (MTD80X, MTD89X) # nge: Support for PCI gigabit ethernet adapters based on the National # Semiconductor DP83820 and DP83821 chipset. This includes the # SMC EZ Card 1000 (SMC9462TX), D-Link DGE-500T, Asante FriendlyNet # GigaNIX 1000TA and 1000TPC, the Addtron AEG320T, the Surecom # EP-320G-TX and the Netgear GA622T. # pcn: Support for PCI fast ethernet adapters based on the AMD Am79c97x # PCnet-FAST, PCnet-FAST+, PCnet-FAST III, PCnet-PRO and PCnet-Home # chipsets. These can also be handled by the le(4) driver if the # pcn(4) driver is left out of the kernel. The le(4) driver does not # support the additional features like the MII bus and burst mode of # the PCnet-FAST and greater chipsets though. # rl: Support for PCI fast ethernet adapters based on the RealTek 8129/8139 # chipset. Note that the RealTek driver defaults to using programmed # I/O to do register accesses because memory mapped mode seems to cause # severe lockups on SMP hardware. This driver also supports the # Accton EN1207D `Cheetah' adapter, which uses a chip called # the MPX 5030/5038, which is either a RealTek in disguise or a # RealTek workalike. Note that the D-Link DFE-530TX+ uses the RealTek # chipset and is supported by this driver, not the 'vr' driver. # sf: Support for Adaptec Duralink PCI fast ethernet adapters based on the # Adaptec AIC-6915 "starfire" controller. # This includes dual and quad port cards, as well as one 100baseFX card. # Most of these are 64-bit PCI devices, except for one single port # card which is 32-bit. # sis: Support for NICs based on the Silicon Integrated Systems SiS 900, # SiS 7016 and NS DP83815 PCI fast ethernet controller chips. # sbsh: Support for Granch SBNI16 SHDSL modem PCI adapters # sk: Support for the SysKonnect SK-984x series PCI gigabit ethernet NICs. # This includes the SK-9841 and SK-9842 single port cards (single mode # and multimode fiber) and the SK-9843 and SK-9844 dual port cards # (also single mode and multimode). # The driver will autodetect the number of ports on the card and # attach each one as a separate network interface. # sn: Support for ISA and PC Card Ethernet devices using the # SMC91C90/92/94/95 chips. # ste: Sundance Technologies ST201 PCI fast ethernet controller, includes # the D-Link DFE-550TX. # ti: Support for PCI gigabit ethernet NICs based on the Alteon Networks # Tigon 1 and Tigon 2 chipsets. This includes the Alteon AceNIC, the # 3Com 3c985, the Netgear GA620 and various others. Note that you will # probably want to bump up NMBCLUSTERS a lot to use this driver. # tl: Support for the Texas Instruments TNETE100 series 'ThunderLAN' # cards and integrated ethernet controllers. This includes several # Compaq Netelligent 10/100 cards and the built-in ethernet controllers # in several Compaq Prosignia, Proliant and Deskpro systems. It also # supports several Olicom 10Mbps and 10/100 boards. # tx: SMC 9432 TX, BTX and FTX cards. (SMC EtherPower II series) # txp: Support for 3Com 3cR990 cards with the "Typhoon" chipset # vr: Support for various fast ethernet adapters based on the VIA # Technologies VT3043 `Rhine I' and VT86C100A `Rhine II' chips, # including the D-Link DFE530TX (see 'rl' for DFE530TX+), the Hawking # Technologies PN102TX, and the AOpen/Acer ALN-320. # vx: 3Com 3C590 and 3C595 # wb: Support for fast ethernet adapters based on the Winbond W89C840F chip. # Note: this is not the same as the Winbond W89C940F, which is a # NE2000 clone. 
# wi: Lucent WaveLAN/IEEE 802.11 PCMCIA adapters. Note: this supports both # the PCMCIA and ISA cards: the ISA card is really a PCMCIA to ISA # bridge with a PCMCIA adapter plugged into it. # xe: Xircom/Intel EtherExpress Pro100/16 PC Card ethernet controller, # Accton Fast EtherCard-16, Compaq Netelligent 10/100 PC Card, # Toshiba 10/100 Ethernet PC Card, Xircom 16-bit Ethernet + Modem 56 # xl: Support for the 3Com 3c900, 3c905, 3c905B and 3c905C (Fast) # Etherlink XL cards and integrated controllers. This includes the # integrated 3c905B-TX chips in certain Dell Optiplex and Dell # Precision desktop machines and the integrated 3c905-TX chips # in Dell Latitude laptop docking stations. # Also supported: 3Com 3c980(C)-TX, 3Com 3cSOHO100-TX, 3Com 3c450-TX # Order for ISA/EISA devices is important here device cm hint.cm.0.at="isa" hint.cm.0.port="0x2e0" hint.cm.0.irq="9" hint.cm.0.maddr="0xdc000" device ep device ex device fe hint.fe.0.at="isa" hint.fe.0.port="0x300" device fea device sn hint.sn.0.at="isa" hint.sn.0.port="0x300" hint.sn.0.irq="10" device an device awi device cnw device wi device xe # PCI Ethernet NICs that use the common MII bus controller code. device dc # DEC/Intel 21143 and various workalikes device fxp # Intel EtherExpress PRO/100B (82557, 82558) hint.fxp.0.prefer_iomap="0" device hme # Sun HME (Happy Meal Ethernet) device my # Myson Fast Ethernet (MTD80X, MTD89X) device rl # RealTek 8129/8139 device pcn # AMD Am79C97x PCI 10/100 NICs device sf # Adaptec AIC-6915 (``Starfire'') device sbsh # Granch SBNI16 SHDSL modem device sis # Silicon Integrated Systems SiS 900/SiS 7016 device ste # Sundance ST201 (D-Link DFE-550TX) device tl # Texas Instruments ThunderLAN device tx # SMC EtherPower II (83c170 ``EPIC'') device vr # VIA Rhine, Rhine II device wb # Winbond W89C840F device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'') # PCI Ethernet NICs. device de # DEC/Intel DC21x4x (``Tulip'') device le # AMD Am7900 LANCE and Am79C9xx PCnet device txp # 3Com 3cR990 (``Typhoon'') device vx # 3Com 3c590, 3c595 (``Vortex'') # PCI Gigabit & FDDI NICs. device bge device lge device nge device sk device ti device fpa # PCI WAN adapters. device lmc # Use "private" jumbo buffers allocated exclusively for the ti(4) driver. # This option is incompatible with the TI_JUMBO_HDRSPLIT option below. #options TI_PRIVATE_JUMBOS # Turn on the header splitting option for the ti(4) driver firmware. This # only works for Tigon II chips, and has no effect for Tigon I chips. options TI_JUMBO_HDRSPLIT # These two options allow manipulating the mbuf cluster size and mbuf size, # respectively. Be very careful with NIC driver modules when changing # these from their default values, because that can potentially cause a # mismatch between the mbuf size assumed by the kernel and the mbuf size # assumed by a module. The only driver that currently has the ability to # detect a mismatch is ti(4). options MCLSHIFT=12 # mbuf cluster shift in bits, 12 == 4KB options MSIZE=512 # mbuf size in bytes # # ATM related options (Cranor version) # (note: this driver cannot be used with the HARP ATM stack) # # The `en' device provides support for Efficient Networks (ENI) # ENI-155 PCI midway cards, and the Adaptec 155Mbps PCI ATM cards (ANA-59x0). # # The `hatm' device provides support for Fore/Marconi HE155 and HE622 # ATM PCI cards. # # The `fatm' device provides support for Fore PCA200E ATM PCI cards. 
# # The `patm' device provides support for IDT77252 based cards like # ProSum's ProATM-155 and ProATM-25 and IDT's evaluation boards. # # atm device provides generic atm functions and is required for # atm devices. # NATM enables the netnatm protocol family that can be used to # bypass TCP/IP. # # utopia provides the access to the ATM PHY chips and is required for en, # hatm and fatm. # # the current driver supports only PVC operations (no atm-arp, no multicast). # for more details, please read the original documents at # http://www.ccrc.wustl.edu/pub/chuck/tech/bsdatm/bsdatm.html # device atm device en device fatm #Fore PCA200E device hatm #Fore/Marconi HE155/622 device patm #IDT77252 cards (ProATM and IDT) device utopia #ATM PHY driver options NATM #native ATM options LIBMBPOOL #needed by patm, iatm # # Sound drivers # # sound: The generic sound driver. # device sound # # snd_*: Device-specific drivers. # # The flags of the device tell the driver a bit more info about the # device that normally is obtained through the PnP interface. # bit 2..0 secondary DMA channel; # bit 4 set if the board uses two dma channels; # bit 15..8 board type, overrides autodetection; leave it # zero if you don't know what to put in (and you don't, # since this is unsupported at the moment...). # # snd_ad1816: Analog Devices AD1816 ISA PnP/non-PnP. # snd_als4000: Avance Logic ALS4000 PCI. # snd_atiixp: ATI IXP 200/300/400 PCI. # snd_au88x0: Aureal Vortex 1/2/Advantage PCI. This driver # lacks support for playback and recording. # snd_audiocs: Crystal Semiconductor CS4231 SBus/EBus. Only # for sparc64. # snd_cmi: CMedia CMI8338/CMI8738 PCI. # snd_cs4281: Crystal Semiconductor CS4281 PCI. # snd_csa: Crystal Semiconductor CS461x/428x PCI. (except # 4281) # snd_ds1: Yamaha DS-1 PCI. # snd_emu10k1: Creative EMU10K1 PCI and EMU10K2 (Audigy) PCI. # snd_es137x: Ensoniq AudioPCI ES137x PCI. # snd_ess: Ensoniq ESS ISA PnP/non-PnP, to be used in # conjunction with snd_sbc. # snd_fm801: Forte Media FM801 PCI. # snd_gusc: Gravis UltraSound ISA PnP/non-PnP. # snd_ich: Intel ICH PCI and some more audio controllers # embedded in a chipset, for example nVidia # nForce controllers. # snd_maestro: ESS Technology Maestro-1/2x PCI. # snd_maestro3: ESS Technology Maestro-3/Allegro PCI. # snd_mss: Microsoft Sound System ISA PnP/non-PnP. # snd_neomagic: Neomagic 256 AV/ZX PCI. # snd_sb16: Creative SoundBlaster16, to be used in # conjunction with snd_sbc. # snd_sb8: Creative SoundBlaster (pre-16), to be used in # conjunction with snd_sbc. # snd_sbc: Creative SoundBlaster ISA PnP/non-PnP. # Supports ESS and Avance ISA chips as well. # snd_solo: ESS Solo-1x PCI. # snd_t4dwave: Trident 4DWave DX/NX PCI, Sis 7018 PCI and Acer Labs # M5451 PCI. # snd_via8233: VIA VT8233x PCI. # snd_via82c686: VIA VT82C686A PCI. # snd_vibes: S3 Sonicvibes PCI. # snd_uaudio: USB audio.
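# As a minimal sketch, a machine whose audio is an Intel ICH-family (or
# similar chipset-embedded) controller usually needs just the generic
# driver plus the one matching bridge driver from the list above, e.g.:
#	device sound
#	device snd_ich
# The full set of bridge drivers follows.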
device snd_ad1816 device snd_als4000 device snd_atiixp #device snd_au88x0 #device snd_audiocs device snd_cmi device snd_cs4281 device snd_csa device snd_ds1 device snd_emu10k1 device snd_es137x device snd_ess device snd_fm801 device snd_gusc device snd_ich device snd_maestro device snd_maestro3 device snd_mss device snd_neomagic device snd_sb16 device snd_sb8 device snd_sbc device snd_solo device snd_t4dwave device snd_via8233 device snd_via82c686 device snd_vibes device snd_uaudio # For non-pnp sound cards: hint.pcm.0.at="isa" hint.pcm.0.irq="10" hint.pcm.0.drq="1" hint.pcm.0.flags="0x0" hint.sbc.0.at="isa" hint.sbc.0.port="0x220" hint.sbc.0.irq="5" hint.sbc.0.drq="1" hint.sbc.0.flags="0x15" hint.gusc.0.at="isa" hint.gusc.0.port="0x220" hint.gusc.0.irq="5" hint.gusc.0.drq="1" hint.gusc.0.flags="0x13" # # IEEE-488 hardware: # pcii: PCIIA cards (uPD7210 based isa cards) # tnt4882: National Instruments PCI-GPIB card. device pcii hint.pcii.0.at="isa" hint.pcii.0.port="0x2e1" hint.pcii.0.irq="5" hint.pcii.0.drq="1" device tnt4882 # # Miscellaneous hardware: # # scd: Sony CD-ROM using proprietary (non-ATAPI) interface # mcd: Mitsumi CD-ROM using proprietary (non-ATAPI) interface # bktr: Brooktree bt848/848a/849a/878/879 video capture and TV Tuner board # cy: Cyclades serial driver # joy: joystick (including IO DATA PCJOY PC Card joystick) # rc: RISCom/8 multiport card # rp: Comtrol Rocketport(ISA/PCI) - single card # si: Specialix SI/XIO 4-32 port terminal multiplexor # nmdm: nullmodem terminal driver (see nmdm(4)) # Notes on the Comtrol Rocketport driver: # # The exact values used for rp0 depend on how many boards you have # in the system. The manufacturer's sample configs are listed as: # # device rp # core driver support # # Comtrol Rocketport ISA single card # hint.rp.0.at="isa" # hint.rp.0.port="0x280" # # If instead you have two ISA cards, one installed at 0x100 and the # second installed at 0x180, then you should add the following to # your kernel probe hints: # hint.rp.0.at="isa" # hint.rp.0.port="0x100" # hint.rp.1.at="isa" # hint.rp.1.port="0x180" # # For 4 ISA cards, it might be something like this: # hint.rp.0.at="isa" # hint.rp.0.port="0x180" # hint.rp.1.at="isa" # hint.rp.1.port="0x100" # hint.rp.2.at="isa" # hint.rp.2.port="0x340" # hint.rp.3.at="isa" # hint.rp.3.port="0x240" # # For PCI cards, you need no hints. # Mitsumi CD-ROM device mcd hint.mcd.0.at="isa" hint.mcd.0.port="0x300" # for the Sony CDU31/33A CDROM device scd hint.scd.0.at="isa" hint.scd.0.port="0x230" device joy # PnP aware, hints for nonpnp only hint.joy.0.at="isa" hint.joy.0.port="0x201" device rc hint.rc.0.at="isa" hint.rc.0.port="0x220" hint.rc.0.irq="12" device rp hint.rp.0.at="isa" hint.rp.0.port="0x280" device si options SI_DEBUG hint.si.0.at="isa" hint.si.0.maddr="0xd0000" hint.si.0.irq="12" device nmdm # # The 'bktr' device is a PCI video capture device using the Brooktree # bt848/bt848a/bt849a/bt878/bt879 chipset. When used with a TV Tuner it forms a # TV card, e.g. Miro PC/TV, Hauppauge WinCast/TV WinTV, VideoLogic Captivator, # Intel Smart Video III, AverMedia, IMS Turbo, FlyVideo. 
# # options OVERRIDE_CARD=xxx # options OVERRIDE_TUNER=xxx # options OVERRIDE_MSP=1 # options OVERRIDE_DBX=1 # These options can be used to override the auto detection # The current values for xxx are found in src/sys/dev/bktr/bktr_card.h # Using sysctl(8) run-time overrides on a per-card basis can be made # # options BROOKTREE_SYSTEM_DEFAULT=BROOKTREE_PAL # or # options BROOKTREE_SYSTEM_DEFAULT=BROOKTREE_NTSC # Specifies the default video capture mode. # This is required for Dual Crystal (28&35Mhz) boards where PAL is used # to prevent hangs during initialisation, e.g. VideoLogic Captivator PCI. # # options BKTR_USE_PLL # This is required for PAL or SECAM boards with a 28Mhz crystal and no 35Mhz # crystal, e.g. some new Bt878 cards. # # options BKTR_GPIO_ACCESS # This enable IOCTLs which give user level access to the GPIO port. # # options BKTR_NO_MSP_RESET # Prevents the MSP34xx reset. Good if you initialise the MSP in another OS first # # options BKTR_430_FX_MODE # Switch Bt878/879 cards into Intel 430FX chipset compatibility mode. # # options BKTR_SIS_VIA_MODE # Switch Bt878/879 cards into SIS/VIA chipset compatibility mode which is # needed for some old SiS and VIA chipset motherboards. # This also allows Bt878/879 chips to work on old OPTi (<1997) chipset # motherboards and motherboards with bad or incomplete PCI 2.1 support. # As a rough guess, old = before 1998 # # options BKTR_NEW_MSP34XX_DRIVER # Use new, more complete initialization scheme for the msp34* soundchip. # Should fix stereo autodetection if the old driver does only output # mono sound. # # options BKTR_USE_FREEBSD_SMBUS # Compile with FreeBSD SMBus implementation # # Brooktree driver has been ported to the new I2C framework. Thus, # you'll need to have the following 3 lines in the kernel config. # device smbus # device iicbus # device iicbb # device iicsmb # The iic and smb devices are only needed if you want to control other # I2C slaves connected to the external connector of some cards. # device bktr # # PC Card/PCMCIA and Cardbus # # pccbb: pci/cardbus bridge implementing YENTA interface # pccard: pccard slots # cardbus: cardbus slots device cbb device pccard device cardbus # # SMB bus # # System Management Bus support is provided by the 'smbus' device. # Access to the SMBus device is via the 'smb' device (/dev/smb*), # which is a child of the 'smbus' device. # # Supported devices: # smb standard I/O through /dev/smb* # # Supported SMB interfaces: # iicsmb I2C to SMB bridge with any iicbus interface # bktr brooktree848 I2C hardware interface # intpm Intel PIIX4 (82371AB, 82443MX) Power Management Unit # alpm Acer Aladdin-IV/V/Pro2 Power Management Unit # ichsmb Intel ICH SMBus controller chips (82801AA, 82801AB, 82801BA) # viapm VIA VT82C586B/596B/686A and VT8233 Power Management Unit # amdpm AMD 756 Power Management Unit # amdsmb AMD 8111 SMBus 2.0 Controller # nfpm NVIDIA nForce Power Management Unit # nfsmb NVIDIA nForce2/3/4 MCP SMBus 2.0 Controller # device smbus # Bus support, required for smb below. device intpm device alpm device ichsmb device viapm device amdpm device amdsmb device nfpm device nfsmb device smb # # I2C Bus # # Philips i2c bus support is provided by the `iicbus' device. # # Supported devices: # ic i2c network interface # iic i2c standard io # iicsmb i2c to smb bridge. Allow i2c i/o with smb commands. 
# # Supported interfaces: # bktr brooktree848 I2C software interface # # Other: # iicbb generic I2C bit-banging code (needed by lpbb, bktr) # device iicbus # Bus support, required for ic/iic/iicsmb below. device iicbb device ic device iic device iicsmb # smb over i2c bridge # Parallel-Port Bus # # Parallel port bus support is provided by the `ppbus' device. # Multiple devices may be attached to the parallel port, devices # are automatically probed and attached when found. # # Supported devices: # vpo Iomega Zip Drive # Requires SCSI disk support ('scbus' and 'da'), best # performance is achieved with ports in EPP 1.9 mode. # lpt Parallel Printer # plip Parallel network interface # ppi General-purpose I/O ("Geek Port") + IEEE1284 I/O # pps Pulse per second Timing Interface # lpbb Philips official parallel port I2C bit-banging interface # # Supported interfaces: # ppc ISA-bus parallel port interfaces. # options PPC_PROBE_CHIPSET # Enable chipset specific detection # (see flags in ppc(4)) options DEBUG_1284 # IEEE1284 signaling protocol debug options PERIPH_1284 # Makes your computer act as an IEEE1284 # compliant peripheral options DONTPROBE_1284 # Avoid boot detection of PnP parallel devices options VP0_DEBUG # ZIP/ZIP+ debug options LPT_DEBUG # Printer driver debug options PPC_DEBUG # Parallel chipset level debug options PLIP_DEBUG # Parallel network IP interface debug options PCFCLOCK_VERBOSE # Verbose pcfclock driver options PCFCLOCK_MAX_RETRIES=5 # Maximum read tries (default 10) device ppc hint.ppc.0.at="isa" hint.ppc.0.irq="7" device ppbus device vpo device lpt device plip device ppi device pps device lpbb device pcfclock # Kernel BOOTP support options BOOTP # Use BOOTP to obtain IP address/hostname # Requires NFSCLIENT and NFS_ROOT options BOOTP_NFSROOT # NFS mount root filesystem using BOOTP info options BOOTP_NFSV3 # Use NFS v3 to NFS mount root options BOOTP_COMPAT # Workaround for broken bootp daemons. options BOOTP_WIRED_TO=fxp0 # Use interface fxp0 for BOOTP # # Add software watchdog routines. # options SW_WATCHDOG # # Disable swapping of stack pages. This option removes all # code which actually performs swapping, so it's not possible to turn # it back on at run-time. # # This is sometimes usable for systems which don't have any swap space # (see also sysctls "vm.defer_swapspace_pageouts" and # "vm.disable_swapspace_pageouts") # #options NO_SWAPPING # Set the number of sf_bufs to allocate. sf_bufs are virtual buffers # for sendfile(2) that are used to map file VM pages, and normally # default to a quantity that is roughly 16*MAXUSERS+512. You would # typically want about 4 of these for each simultaneous file send. # options NSFBUFS=1024 # # Enable extra debugging code for locks. This stores the filename and # line of whatever acquired the lock in the lock itself, and change a # number of function calls to pass around the relevant data. This is # not at all useful unless you are debugging lock code. Also note # that it is likely to break e.g. fstat(1) unless you recompile your # userland with -DDEBUG_LOCKS as well. 
# options DEBUG_LOCKS ##################################################################### # USB support # UHCI controller device uhci # OHCI controller device ohci # EHCI controller device ehci # SL811 Controller device slhci # General USB code (mandatory for USB) device usb # # USB Double Bulk Pipe devices device udbp # USB Fm Radio device ufm # Generic USB device driver device ugen # Human Interface Device (anything with buttons and dials) device uhid # USB keyboard device ukbd # USB printer device ulpt # USB Iomega Zip 100 Drive (Requires scbus and da) device umass # USB support for Belkin F5U109 and Magic Control Technology serial adapters device umct # USB modem support device umodem # USB mouse device ums # Diamond Rio 500 Mp3 player device urio # USB scanners device uscanner # # USB serial support device ucom # USB support for Belkin F5U103 and compatible serial adapters device ubsa # USB support for BWCT console serial adapters device ubser # USB support for serial adapters based on the FT8U100AX and FT8U232AM device uftdi # USB support for Prolific PL-2303 serial adapters device uplcom # USB Visor and Palm devices device uvisor # USB serial support for DDI pocket's PHS device uvscom # # ADMtek USB ethernet. Supports the LinkSys USB100TX, # the Billionton USB100, the Melco LU-ATX, the D-Link DSB-650TX # and the SMC 2202USB. Also works with the ADMtek AN986 Pegasus # eval board. device aue # ASIX Electronics AX88172 USB 2.0 ethernet driver. Used in the # LinkSys USB200M and various other adapters. device axe # # Devices which communicate using Ethernet over USB, particularly # Communication Device Class (CDC) Ethernet specification. Supports # Sharp Zaurus PDAs, some DOCSIS cable modems and so on. device cdce # # CATC USB-EL1201A USB ethernet. Supports the CATC Netmate # and Netmate II, and the Belkin F5U111. device cue # # Kawasaki LSI ethernet. Supports the LinkSys USB10T, # Entrega USB-NET-E45, Peracom Ethernet Adapter, the # 3Com 3c19250, the ADS Technologies USB-10BT, the ATen UC10T, # the Netgear EA101, the D-Link DSB-650, the SMC 2102USB # and 2104USB, and the Corega USB-T. device kue # # RealTek RTL8150 USB to fast ethernet. Supports the Melco LUA-KTX # and the GREEN HOUSE GH-USB100B. device rue # # Davicom DM9601E USB to fast ethernet. Supports the Corega FEther USB-TXC. device udav # debugging options for the USB subsystem # options USB_DEBUG # options for ukbd: options UKBD_DFLT_KEYMAP # specify the built-in keymap makeoptions UKBD_DFLT_KEYMAP=it.iso # options for uplcom: options UPLCOM_INTR_INTERVAL=100 # interrupt pipe interval # in milliseconds # options for uvscom: options UVSCOM_DEFAULT_OPKTSIZE=8 # default output packet size options UVSCOM_INTR_INTERVAL=100 # interrupt pipe interval # in milliseconds ##################################################################### # FireWire support device firewire # FireWire bus code device sbp # SCSI over Firewire (Requires scbus and da) device sbp_targ # SBP-2 Target mode (Requires scbus and targ) device fwe # Ethernet over FireWire (non-standard!) 
device fwip # IP over FireWire (rfc2734 and rfc3146) ##################################################################### # dcons support (Dumb Console Device) device dcons # dumb console driver device dcons_crom # FireWire attachment options DCONS_BUF_SIZE=16384 # buffer size options DCONS_POLL_HZ=100 # polling rate options DCONS_FORCE_CONSOLE=0 # force to be the primary console options DCONS_FORCE_GDB=1 # force to be the gdb device ##################################################################### # crypto subsystem # # This is a port of the openbsd crypto framework. Include this when # configuring FAST_IPSEC and when you have a h/w crypto device to accelerate # user applications that link to openssl. # # Drivers are ports from openbsd with some simple enhancements that have # been fed back to openbsd. device crypto # core crypto support device cryptodev # /dev/crypto for access to h/w device rndtest # FIPS 140-2 entropy tester device hifn # Hifn 7951, 7781, etc. options HIFN_DEBUG # enable debugging support: hw.hifn.debug options HIFN_RNDTEST # enable rndtest support device ubsec # Broadcom 5501, 5601, 58xx options UBSEC_DEBUG # enable debugging support: hw.ubsec.debug options UBSEC_RNDTEST # enable rndtest support ##################################################################### # # Embedded system options: # # An embedded system might want to run something other than init. options INIT_PATH=/sbin/init:/stand/sysinstall # Debug options options BUS_DEBUG # enable newbus debugging options DEBUG_VFS_LOCKS # enable vfs lock debugging options SOCKBUF_DEBUG # enable sockbuf last record/mb tail checking # # Verbose SYSINIT # # Make the SYSINIT process performed by mi_startup() verbose. This is very # useful when porting to a new architecture. If DDB is also enabled, this # will print function names instead of addresses. options VERBOSE_SYSINIT ##################################################################### # SYSV IPC KERNEL PARAMETERS # # Maximum number of entries in a semaphore map. options SEMMAP=31 # Maximum number of System V semaphores that can be used on the system at # one time. options SEMMNI=11 # Total number of semaphores system wide options SEMMNS=61 # Total number of undo structures in system options SEMMNU=31 # Maximum number of System V semaphores that can be used by a single process # at one time. options SEMMSL=61 # Maximum number of operations that can be outstanding on a single System V # semaphore at one time. options SEMOPM=101 # Maximum number of undo operations that can be outstanding on a single # System V semaphore at one time. options SEMUME=11 # Maximum number of shared memory pages system wide. options SHMALL=1025 # Maximum size, in bytes, of a single System V shared memory region. options SHMMAX=(SHMMAXPGS*PAGE_SIZE+1) options SHMMAXPGS=1025 # Minimum size, in bytes, of a single System V shared memory region. options SHMMIN=2 # Maximum number of shared memory regions that can be used on the system # at one time. options SHMMNI=33 # Maximum number of System V shared memory regions that can be attached to # a single process at one time. options SHMSEG=9 # Set the amount of time (in seconds) the system will wait before # rebooting automatically when a kernel panic occurs. If set to (-1), # the system will wait indefinitely until a key is pressed on the # console. options PANIC_REBOOT_WAIT_TIME=16 # Attempt to bypass the buffer cache and put data directly into the # userland buffer for read operation when O_DIRECT flag is set on the # file. 
Both offset and length of the read operation must be # multiples of the physical media sector size. # options DIRECTIO # Specify a lower limit for the number of swap I/O buffers. They are # (among other things) used when bypassing the buffer cache due to # DIRECTIO kernel option enabled and O_DIRECT flag set on file. # options NSWBUF_MIN=120 ##################################################################### # More undocumented options for linting. # Note that documenting these are not considered an affront. options CAM_DEBUG_DELAY # VFS cluster debugging. options CLUSTERDEBUG options DEBUG # Kernel filelock debugging. options LOCKF_DEBUG # System V compatible message queues # Please note that the values provided here are used to test kernel # building. The defaults in the sources provide almost the same numbers. # MSGSSZ must be a power of 2 between 8 and 1024. options MSGMNB=2049 # Max number of chars in queue options MSGMNI=41 # Max number of message queue identifiers options MSGSEG=2049 # Max number of message segments options MSGSSZ=16 # Size of a message segment options MSGTQL=41 # Max number of messages in system options NBUF=512 # Number of buffer headers options NMBCLUSTERS=1024 # Number of mbuf clusters options SCSI_NCR_DEBUG options SCSI_NCR_MAX_SYNC=10000 options SCSI_NCR_MAX_WIDE=1 options SCSI_NCR_MYADDR=7 options SC_DEBUG_LEVEL=5 # Syscons debug level options SC_RENDER_DEBUG # syscons rendering debugging options SHOW_BUSYBUFS # List buffers that prevent root unmount options SLIP_IFF_OPTS options VFS_BIO_DEBUG # VFS buffer I/O debugging options KSTACK_MAX_PAGES=32 # Maximum pages to give the kernel stack # Adaptec Array Controller driver options options AAC_DEBUG # Debugging levels: # 0 - quiet, only emit warnings # 1 - noisy, emit major function # points and things done # 2 - extremely noisy, emit trace # items in loops, etc. # Yet more undocumented options for linting. # BKTR_ALLOC_PAGES has no effect except to cause warnings, and # BROOKTREE_ALLOC_PAGES hasn't actually been superseded by it, since the # driver still mostly spells this option BROOKTREE_ALLOC_PAGES. ##options BKTR_ALLOC_PAGES=(217*4+1) options BROOKTREE_ALLOC_PAGES=(217*4+1) options MAXFILES=999 options NDEVFSINO=1025 options NDEVFSOVERFLOW=32769 # Yet more undocumented options for linting. options VGA_DEBUG Index: head/sys/conf/files =================================================================== --- head/sys/conf/files (revision 159569) +++ head/sys/conf/files (revision 159570) @@ -1,2069 +1,2070 @@ # $FreeBSD$ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. 
# acpi_quirks.h optional acpi \ dependency "$S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \ compile-with "${AWK} -f $S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \ no-obj no-implicit-rule before-depend \ clean "acpi_quirks.h" aicasm optional ahc | ahd \ dependency "$S/dev/aic7xxx/aicasm/*.[chyl]" \ compile-with "CC='${CC}' ${MAKE} -f $S/dev/aic7xxx/aicasm/Makefile MAKESRCPATH=$S/dev/aic7xxx/aicasm" \ no-obj no-implicit-rule \ clean "aicasm* y.tab.h" aic7xxx_seq.h optional ahc \ compile-with "./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic7xxx_seq.h -r aic7xxx_reg.h -p aic7xxx_reg_print.c -i $S/dev/aic7xxx/aic7xxx_osm.h $S/dev/aic7xxx/aic7xxx.seq" \ no-obj no-implicit-rule before-depend local \ clean "aic7xxx_seq.h" \ dependency "$S/dev/aic7xxx/aic7xxx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm" aic7xxx_reg.h optional ahc \ compile-with "./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic7xxx_seq.h -r aic7xxx_reg.h -p aic7xxx_reg_print.c -i $S/dev/aic7xxx/aic7xxx_osm.h $S/dev/aic7xxx/aic7xxx.seq" \ no-obj no-implicit-rule before-depend local \ clean "aic7xxx_reg.h" \ dependency "$S/dev/aic7xxx/aic7xxx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm" aic7xxx_reg_print.c optional ahc \ compile-with "./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic7xxx_seq.h -r aic7xxx_reg.h -p aic7xxx_reg_print.c -i $S/dev/aic7xxx/aic7xxx_osm.h $S/dev/aic7xxx/aic7xxx.seq" \ no-obj no-implicit-rule local \ clean "aic7xxx_reg_print.c" \ dependency "$S/dev/aic7xxx/aic7xxx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm" aic7xxx_reg_print.o optional ahc ahc_reg_pretty_print \ compile-with "${NORMAL_C}" \ no-implicit-rule local aic79xx_seq.h optional ahd pci \ compile-with "./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic79xx_seq.h -r aic79xx_reg.h -p aic79xx_reg_print.c -i $S/dev/aic7xxx/aic79xx_osm.h $S/dev/aic7xxx/aic79xx.seq" \ no-obj no-implicit-rule before-depend local \ clean "aic79xx_seq.h" \ dependency "$S/dev/aic7xxx/aic79xx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm" aic79xx_reg.h optional ahd pci \ compile-with "./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic79xx_seq.h -r aic79xx_reg.h -p aic79xx_reg_print.c -i $S/dev/aic7xxx/aic79xx_osm.h $S/dev/aic7xxx/aic79xx.seq" \ no-obj no-implicit-rule before-depend local \ clean "aic79xx_reg.h" \ dependency "$S/dev/aic7xxx/aic79xx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm" aic79xx_reg_print.c optional ahd pci \ compile-with "./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic79xx_seq.h -r aic79xx_reg.h -p aic79xx_reg_print.c -i $S/dev/aic7xxx/aic79xx_osm.h $S/dev/aic7xxx/aic79xx.seq" \ no-obj no-implicit-rule local \ clean "aic79xx_reg_print.c" \ dependency "$S/dev/aic7xxx/aic79xx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm" aic79xx_reg_print.o optional ahd pci ahd_reg_pretty_print \ compile-with "${NORMAL_C}" \ no-implicit-rule local emu10k1-alsa%diked.h optional snd_emu10k1 pci \ dependency "$S/tools/emu10k1-mkalsa.sh $S/gnu/dev/sound/pci/emu10k1-alsa.h" \ compile-with "CC='${CC}' AWK=${AWK} sh $S/tools/emu10k1-mkalsa.sh $S/gnu/dev/sound/pci/emu10k1-alsa.h emu10k1-alsa%diked.h" \ no-obj no-implicit-rule before-depend \ clean "emu10k1-alsa%diked.h" miidevs.h optional miibus | mii \ dependency "$S/tools/miidevs2h.awk $S/dev/mii/miidevs" \ compile-with "${AWK} -f $S/tools/miidevs2h.awk $S/dev/mii/miidevs" \ no-obj no-implicit-rule before-depend \ clean "miidevs.h" pccarddevs.h standard \ dependency "$S/tools/pccarddevs2h.awk $S/dev/pccard/pccarddevs" \ compile-with "${AWK} -f 
$S/tools/pccarddevs2h.awk $S/dev/pccard/pccarddevs" \ no-obj no-implicit-rule before-depend \ clean "pccarddevs.h" usbdevs.h optional usb \ dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \ compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -h" \ no-obj no-implicit-rule before-depend \ clean "usbdevs.h" usbdevs_data.h optional usb \ dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \ compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -d" \ no-obj no-implicit-rule before-depend \ clean "usbdevs_data.h" cam/cam.c optional scbus cam/cam_periph.c optional scbus cam/cam_queue.c optional scbus cam/cam_sim.c optional scbus cam/cam_xpt.c optional scbus cam/scsi/scsi_all.c optional scbus cam/scsi/scsi_cd.c optional cd cam/scsi/scsi_ch.c optional ch cam/scsi/scsi_da.c optional da cam/scsi/scsi_low.c optional ct | ncv | nsp | stg cam/scsi/scsi_low_pisa.c optional ct | ncv | nsp | stg cam/scsi/scsi_pass.c optional pass cam/scsi/scsi_pt.c optional pt cam/scsi/scsi_sa.c optional sa cam/scsi/scsi_ses.c optional ses cam/scsi/scsi_targ_bh.c optional targbh cam/scsi/scsi_target.c optional targ coda/coda_fbsd.c optional vcoda coda/coda_namecache.c optional vcoda coda/coda_psdev.c optional vcoda coda/coda_subr.c optional vcoda coda/coda_venus.c optional vcoda coda/coda_vfsops.c optional vcoda coda/coda_vnops.c optional vcoda contrib/altq/altq/altq_cbq.c optional altq \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/altq/altq/altq_cdnr.c optional altq contrib/altq/altq/altq_hfsc.c optional altq \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/altq/altq/altq_priq.c optional altq \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/altq/altq/altq_red.c optional altq \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/altq/altq/altq_rio.c optional altq \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/altq/altq/altq_rmclass.c optional altq contrib/altq/altq/altq_subr.c optional altq \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/dev/acpica/dbcmds.c optional acpi acpi_debug contrib/dev/acpica/dbdisply.c optional acpi acpi_debug contrib/dev/acpica/dbexec.c optional acpi acpi_debug contrib/dev/acpica/dbfileio.c optional acpi acpi_debug contrib/dev/acpica/dbhistry.c optional acpi acpi_debug contrib/dev/acpica/dbinput.c optional acpi acpi_debug contrib/dev/acpica/dbstats.c optional acpi acpi_debug contrib/dev/acpica/dbutils.c optional acpi acpi_debug contrib/dev/acpica/dbxface.c optional acpi acpi_debug contrib/dev/acpica/dmbuffer.c optional acpi acpi_debug contrib/dev/acpica/dmnames.c optional acpi acpi_debug contrib/dev/acpica/dmopcode.c optional acpi acpi_debug contrib/dev/acpica/dmobject.c optional acpi acpi_debug contrib/dev/acpica/dmresrc.c optional acpi acpi_debug contrib/dev/acpica/dmresrcl.c optional acpi acpi_debug contrib/dev/acpica/dmresrcs.c optional acpi acpi_debug contrib/dev/acpica/dmutils.c optional acpi acpi_debug contrib/dev/acpica/dmwalk.c optional acpi acpi_debug contrib/dev/acpica/dsfield.c optional acpi contrib/dev/acpica/dsinit.c optional acpi contrib/dev/acpica/dsmethod.c optional acpi contrib/dev/acpica/dsmthdat.c optional acpi contrib/dev/acpica/dsobject.c optional acpi contrib/dev/acpica/dsopcode.c optional acpi contrib/dev/acpica/dsutils.c optional acpi contrib/dev/acpica/dswexec.c optional acpi contrib/dev/acpica/dswload.c optional acpi contrib/dev/acpica/dswscope.c optional acpi contrib/dev/acpica/dswstate.c optional acpi contrib/dev/acpica/evevent.c optional acpi contrib/dev/acpica/evgpe.c optional acpi 
contrib/dev/acpica/evgpeblk.c optional acpi contrib/dev/acpica/evmisc.c optional acpi contrib/dev/acpica/evregion.c optional acpi contrib/dev/acpica/evrgnini.c optional acpi contrib/dev/acpica/evsci.c optional acpi contrib/dev/acpica/evxface.c optional acpi contrib/dev/acpica/evxfevnt.c optional acpi contrib/dev/acpica/evxfregn.c optional acpi contrib/dev/acpica/exconfig.c optional acpi contrib/dev/acpica/exconvrt.c optional acpi contrib/dev/acpica/excreate.c optional acpi contrib/dev/acpica/exdump.c optional acpi contrib/dev/acpica/exfield.c optional acpi contrib/dev/acpica/exfldio.c optional acpi contrib/dev/acpica/exmisc.c optional acpi contrib/dev/acpica/exmutex.c optional acpi contrib/dev/acpica/exnames.c optional acpi contrib/dev/acpica/exoparg1.c optional acpi contrib/dev/acpica/exoparg2.c optional acpi contrib/dev/acpica/exoparg3.c optional acpi contrib/dev/acpica/exoparg6.c optional acpi contrib/dev/acpica/exprep.c optional acpi contrib/dev/acpica/exregion.c optional acpi contrib/dev/acpica/exresnte.c optional acpi contrib/dev/acpica/exresolv.c optional acpi contrib/dev/acpica/exresop.c optional acpi contrib/dev/acpica/exstore.c optional acpi contrib/dev/acpica/exstoren.c optional acpi contrib/dev/acpica/exstorob.c optional acpi contrib/dev/acpica/exsystem.c optional acpi contrib/dev/acpica/exutils.c optional acpi contrib/dev/acpica/hwacpi.c optional acpi contrib/dev/acpica/hwgpe.c optional acpi contrib/dev/acpica/hwregs.c optional acpi contrib/dev/acpica/hwsleep.c optional acpi contrib/dev/acpica/hwtimer.c optional acpi contrib/dev/acpica/nsaccess.c optional acpi contrib/dev/acpica/nsalloc.c optional acpi contrib/dev/acpica/nsdump.c optional acpi contrib/dev/acpica/nseval.c optional acpi contrib/dev/acpica/nsinit.c optional acpi contrib/dev/acpica/nsload.c optional acpi contrib/dev/acpica/nsnames.c optional acpi contrib/dev/acpica/nsobject.c optional acpi contrib/dev/acpica/nsparse.c optional acpi contrib/dev/acpica/nssearch.c optional acpi contrib/dev/acpica/nsutils.c optional acpi contrib/dev/acpica/nswalk.c optional acpi contrib/dev/acpica/nsxfeval.c optional acpi contrib/dev/acpica/nsxfname.c optional acpi contrib/dev/acpica/nsxfobj.c optional acpi contrib/dev/acpica/psargs.c optional acpi contrib/dev/acpica/psloop.c optional acpi contrib/dev/acpica/psopcode.c optional acpi contrib/dev/acpica/psparse.c optional acpi contrib/dev/acpica/psscope.c optional acpi contrib/dev/acpica/pstree.c optional acpi contrib/dev/acpica/psutils.c optional acpi contrib/dev/acpica/pswalk.c optional acpi contrib/dev/acpica/psxface.c optional acpi contrib/dev/acpica/rsaddr.c optional acpi contrib/dev/acpica/rscalc.c optional acpi contrib/dev/acpica/rscreate.c optional acpi contrib/dev/acpica/rsdump.c optional acpi contrib/dev/acpica/rsinfo.c optional acpi contrib/dev/acpica/rsio.c optional acpi contrib/dev/acpica/rsirq.c optional acpi contrib/dev/acpica/rslist.c optional acpi contrib/dev/acpica/rsmemory.c optional acpi contrib/dev/acpica/rsmisc.c optional acpi contrib/dev/acpica/rsutils.c optional acpi contrib/dev/acpica/rsxface.c optional acpi contrib/dev/acpica/tbconvrt.c optional acpi contrib/dev/acpica/tbget.c optional acpi contrib/dev/acpica/tbgetall.c optional acpi contrib/dev/acpica/tbinstal.c optional acpi contrib/dev/acpica/tbrsdt.c optional acpi contrib/dev/acpica/tbutils.c optional acpi contrib/dev/acpica/tbxface.c optional acpi contrib/dev/acpica/tbxfroot.c optional acpi contrib/dev/acpica/utalloc.c optional acpi contrib/dev/acpica/utcache.c optional acpi \ compile-with "${NORMAL_C} 
-DACPI_USE_LOCAL_CACHE" contrib/dev/acpica/utclib.c optional acpi contrib/dev/acpica/utcopy.c optional acpi contrib/dev/acpica/utdebug.c optional acpi contrib/dev/acpica/utdelete.c optional acpi contrib/dev/acpica/uteval.c optional acpi contrib/dev/acpica/utglobal.c optional acpi contrib/dev/acpica/utinit.c optional acpi contrib/dev/acpica/utmath.c optional acpi contrib/dev/acpica/utmisc.c optional acpi contrib/dev/acpica/utmutex.c optional acpi contrib/dev/acpica/utobject.c optional acpi contrib/dev/acpica/utstate.c optional acpi contrib/dev/acpica/utxface.c optional acpi contrib/dev/ath/freebsd/ah_osdep.c optional ath_hal \ compile-with "${NORMAL_C} -I$S/contrib/dev/ath/freebsd" contrib/ipfilter/netinet/fil.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_auth.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_fil_freebsd.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_frag.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_log.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_nat.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_proxy.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_state.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_lookup.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_pool.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_htable.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_sync.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/mlfk_ipl.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ngatm/netnatm/api/cc_conn.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_data.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_dump.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_port.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_sig.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_user.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/unisap.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/misc/straddr.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/misc/unimsg_common.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/traffic.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/uni_ie.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/uni_msg.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/saal/saal_sscfu.c optional ngatm_sscfu \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/saal/saal_sscop.c optional 
ngatm_sscop \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_call.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_coord.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_party.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_print.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_reset.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_uni.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_unimsgcpy.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_verify.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/pf/net/if_pflog.c optional pflog \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/if_pfsync.c optional pfsync \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_if.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_subr.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_ioctl.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_norm.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_table.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_osfp.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/netinet/in4_cksum.c optional pf inet crypto/blowfish/bf_ecb.c optional ipsec ipsec_esp crypto/blowfish/bf_skey.c optional crypto | ipsec ipsec_esp crypto/des/des_ecb.c optional crypto | ipsec ipsec_esp | netsmb crypto/des/des_setkey.c optional crypto | ipsec ipsec_esp | netsmb crypto/rc4/rc4.c optional netgraph_mppc_encryption crypto/rijndael/rijndael-alg-fst.c optional crypto | geom_bde | \ ipsec | random | wlan_ccmp crypto/rijndael/rijndael-api-fst.c optional geom_bde | random crypto/rijndael/rijndael-api.c optional crypto | ipsec | wlan_ccmp crypto/sha1.c optional carp | crypto | ipsec | \ netgraph_mppc_encryption crypto/sha2/sha2.c optional crypto | geom_bde | ipsec | random ddb/db_access.c optional ddb ddb/db_break.c optional ddb ddb/db_command.c optional ddb ddb/db_examine.c optional ddb ddb/db_expr.c optional ddb ddb/db_input.c optional ddb ddb/db_lex.c optional ddb ddb/db_main.c optional ddb ddb/db_output.c optional ddb ddb/db_print.c optional ddb ddb/db_ps.c optional ddb ddb/db_run.c optional ddb ddb/db_sym.c optional ddb ddb/db_thread.c optional ddb ddb/db_variables.c optional ddb ddb/db_watch.c optional ddb ddb/db_write_cmd.c optional ddb #dev/dpt/dpt_control.c optional dpt dev/aac/aac.c optional aac dev/aac/aac_cam.c optional aacp aac dev/aac/aac_debug.c optional aac dev/aac/aac_disk.c optional aac dev/aac/aac_linux.c optional aac compat_linux dev/aac/aac_pci.c optional aac pci dev/acpi_support/acpi_asus.c optional acpi_asus acpi dev/acpi_support/acpi_fujitsu.c optional acpi_fujitsu acpi dev/acpi_support/acpi_ibm.c optional acpi_ibm acpi dev/acpi_support/acpi_panasonic.c optional acpi_panasonic acpi dev/acpi_support/acpi_sony.c optional acpi_sony acpi dev/acpi_support/acpi_toshiba.c optional acpi_toshiba acpi dev/acpica/Osd/OsdDebug.c optional acpi dev/acpica/Osd/OsdHardware.c optional acpi dev/acpica/Osd/OsdInterrupt.c optional acpi 
dev/acpica/Osd/OsdMemory.c optional acpi dev/acpica/Osd/OsdSchedule.c optional acpi dev/acpica/Osd/OsdStream.c optional acpi dev/acpica/Osd/OsdSynch.c optional acpi dev/acpica/Osd/OsdTable.c optional acpi dev/acpica/acpi.c optional acpi dev/acpica/acpi_acad.c optional acpi dev/acpica/acpi_battery.c optional acpi dev/acpica/acpi_button.c optional acpi dev/acpica/acpi_cmbat.c optional acpi dev/acpica/acpi_cpu.c optional acpi dev/acpica/acpi_ec.c optional acpi dev/acpica/acpi_hpet.c optional acpi dev/acpica/acpi_isab.c optional acpi isa dev/acpica/acpi_lid.c optional acpi dev/acpica/acpi_package.c optional acpi dev/acpica/acpi_pci.c optional acpi pci dev/acpica/acpi_pci_link.c optional acpi pci dev/acpica/acpi_pcib.c optional acpi pci dev/acpica/acpi_pcib_acpi.c optional acpi pci dev/acpica/acpi_pcib_pci.c optional acpi pci dev/acpica/acpi_perf.c optional acpi dev/acpica/acpi_powerres.c optional acpi dev/acpica/acpi_quirk.c optional acpi dev/acpica/acpi_resource.c optional acpi dev/acpica/acpi_smbat.c optional acpi dev/acpica/acpi_thermal.c optional acpi dev/acpica/acpi_throttle.c optional acpi dev/acpica/acpi_timer.c optional acpi dev/acpica/acpi_video.c optional acpi_video acpi dev/acpica/acpi_dock.c optional acpi_dock acpi dev/adlink/adlink.c optional adlink dev/advansys/adv_eisa.c optional adv eisa dev/advansys/adv_pci.c optional adv pci dev/advansys/advansys.c optional adv dev/advansys/advlib.c optional adv dev/advansys/advmcode.c optional adv dev/advansys/adw_pci.c optional adw pci dev/advansys/adwcam.c optional adw dev/advansys/adwlib.c optional adw dev/advansys/adwmcode.c optional adw dev/aha/aha.c optional aha dev/aha/aha_isa.c optional aha isa dev/aha/aha_mca.c optional aha mca dev/ahb/ahb.c optional ahb eisa dev/aic/aic.c optional aic dev/aic/aic_pccard.c optional aic pccard dev/aic7xxx/ahc_eisa.c optional ahc eisa dev/aic7xxx/ahc_isa.c optional ahc isa dev/aic7xxx/ahc_pci.c optional ahc pci dev/aic7xxx/ahd_pci.c optional ahd pci dev/aic7xxx/aic7770.c optional ahc dev/aic7xxx/aic79xx.c optional ahd pci dev/aic7xxx/aic79xx_osm.c optional ahd pci dev/aic7xxx/aic79xx_pci.c optional ahd pci dev/aic7xxx/aic7xxx.c optional ahc dev/aic7xxx/aic7xxx_93cx6.c optional ahc dev/aic7xxx/aic7xxx_osm.c optional ahc dev/aic7xxx/aic7xxx_pci.c optional ahc pci dev/amd/amd.c optional amd dev/amr/amr.c optional amr dev/amr/amr_cam.c optional amr dev/amr/amr_disk.c optional amr dev/amr/amr_linux.c optional amr compat_linux dev/amr/amr_pci.c optional amr pci dev/an/if_an.c optional an dev/an/if_an_isa.c optional an isa dev/an/if_an_pccard.c optional an pccard dev/an/if_an_pci.c optional an pci dev/asr/asr.c optional asr pci dev/ata/ata_if.m optional ata dev/ata/ata-all.c optional ata dev/ata/ata-card.c optional ata pccard dev/ata/ata-cbus.c optional ata pc98 dev/ata/ata-chipset.c optional ata pci dev/ata/ata-disk.c optional atadisk dev/ata/ata-dma.c optional ata pci dev/ata/ata-isa.c optional ata isa dev/ata/ata-lowlevel.c optional ata dev/ata/ata-pci.c optional ata pci dev/ata/ata-queue.c optional ata dev/ata/ata-raid.c optional ataraid dev/ata/ata-usb.c optional atausb dev/ata/atapi-cam.c optional atapicam dev/ata/atapi-cd.c optional atapicd dev/ata/atapi-fd.c optional atapifd dev/ata/atapi-tape.c optional atapist dev/ath/ath_rate/amrr/amrr.c optional ath_rate_amrr dev/ath/ath_rate/onoe/onoe.c optional ath_rate_onoe \ compile-with "${NORMAL_C} -I$S/contrib/dev/ath/freebsd" dev/ath/ath_rate/sample/sample.c optional ath_rate_sample \ compile-with "${NORMAL_C} -I$S/contrib/dev/ath/freebsd" 
dev/ath/if_ath.c optional ath \ compile-with "${NORMAL_C} -I$S/contrib/dev/ath/freebsd" dev/ath/if_ath_pci.c optional ath pci \ compile-with "${NORMAL_C} -I$S/contrib/dev/ath/freebsd" dev/awi/am79c930.c optional awi dev/awi/awi.c optional awi dev/awi/if_awi_pccard.c optional awi pccard dev/bce/if_bce.c optional bce dev/bfe/if_bfe.c optional bfe dev/bge/if_bge.c optional bge dev/bktr/bktr_audio.c optional bktr pci dev/bktr/bktr_card.c optional bktr pci dev/bktr/bktr_core.c optional bktr pci dev/bktr/bktr_i2c.c optional bktr pci smbus dev/bktr/bktr_os.c optional bktr pci dev/bktr/bktr_tuner.c optional bktr pci dev/bktr/msp34xx.c optional bktr pci dev/buslogic/bt.c optional bt dev/buslogic/bt_eisa.c optional bt eisa dev/buslogic/bt_isa.c optional bt isa dev/buslogic/bt_mca.c optional bt mca dev/buslogic/bt_pci.c optional bt pci dev/cardbus/cardbus.c optional cardbus dev/cardbus/cardbus_cis.c optional cardbus dev/cardbus/cardbus_device.c optional cardbus dev/ciss/ciss.c optional ciss dev/cm/smc90cx6.c optional cm dev/cnw/if_cnw.c optional cnw pccard dev/cpufreq/ichss.c optional cpufreq dev/cs/if_cs.c optional cs dev/cs/if_cs_isa.c optional cs isa dev/cs/if_cs_pccard.c optional cs pccard dev/cy/cy.c optional cy dev/cy/cy_isa.c optional cy isa dev/cy/cy_pci.c optional cy pci dev/dc/if_dc.c optional dc pci dev/dc/dcphy.c optional dc pci dev/dc/pnphy.c optional dc pci dev/dcons/dcons.c optional dcons dev/dcons/dcons_crom.c optional dcons_crom dev/dcons/dcons_os.c optional dcons dev/de/if_de.c optional de pci dev/digi/CX.c optional digi_CX dev/digi/CX_PCI.c optional digi_CX_PCI dev/digi/EPCX.c optional digi_EPCX dev/digi/EPCX_PCI.c optional digi_EPCX_PCI dev/digi/Xe.c optional digi_Xe dev/digi/Xem.c optional digi_Xem dev/digi/Xr.c optional digi_Xr dev/digi/digi.c optional digi dev/digi/digi_isa.c optional digi isa dev/digi/digi_pci.c optional digi pci dev/dpt/dpt_eisa.c optional dpt eisa dev/dpt/dpt_pci.c optional dpt pci dev/dpt/dpt_scsi.c optional dpt dev/drm/ati_pcigart.c optional drm dev/drm/drm_agpsupport.c optional drm dev/drm/drm_auth.c optional drm dev/drm/drm_bufs.c optional drm dev/drm/drm_context.c optional drm dev/drm/drm_dma.c optional drm dev/drm/drm_drawable.c optional drm dev/drm/drm_drv.c optional drm dev/drm/drm_fops.c optional drm dev/drm/drm_ioctl.c optional drm dev/drm/drm_irq.c optional drm dev/drm/drm_lock.c optional drm dev/drm/drm_memory.c optional drm dev/drm/drm_pci.c optional drm dev/drm/drm_scatter.c optional drm dev/drm/drm_sysctl.c optional drm dev/drm/drm_vm.c optional drm dev/drm/i915_dma.c optional i915drm dev/drm/i915_drv.c optional i915drm dev/drm/i915_irq.c optional i915drm dev/drm/i915_mem.c optional i915drm dev/drm/mach64_dma.c optional mach64drm dev/drm/mach64_drv.c optional mach64drm dev/drm/mach64_irq.c optional mach64drm dev/drm/mach64_state.c optional mach64drm dev/drm/mga_dma.c optional mgadrm dev/drm/mga_drv.c optional mgadrm dev/drm/mga_irq.c optional mgadrm dev/drm/mga_state.c optional mgadrm \ compile-with "${NORMAL_C} -finline-limit=13500" dev/drm/mga_warp.c optional mgadrm dev/drm/r128_cce.c optional r128drm dev/drm/r128_drv.c optional r128drm dev/drm/r128_irq.c optional r128drm dev/drm/r128_state.c optional r128drm \ compile-with "${NORMAL_C} -finline-limit=13500" dev/drm/r300_cmdbuf.c optional radeondrm dev/drm/radeon_cp.c optional radeondrm dev/drm/radeon_drv.c optional radeondrm dev/drm/radeon_irq.c optional radeondrm dev/drm/radeon_mem.c optional radeondrm dev/drm/radeon_state.c optional radeondrm dev/drm/savage_bci.c optional savagedrm 
dev/drm/savage_drv.c optional savagedrm dev/drm/savage_state.c optional savagedrm dev/drm/sis_drv.c optional sisdrm dev/drm/sis_ds.c optional sisdrm dev/drm/sis_mm.c optional sisdrm dev/drm/tdfx_drv.c optional tdfxdrm dev/ed/if_ed.c optional ed dev/ed/if_ed_novell.c optional ed dev/ed/if_ed_rtl80x9.c optional ed dev/ed/if_ed_pccard.c optional ed pccard dev/ed/if_ed_pci.c optional ed pci dev/eisa/eisa_if.m standard dev/eisa/eisaconf.c optional eisa dev/em/if_em.c optional em dev/em/if_em_hw.c optional em dev/en/if_en_pci.c optional en pci dev/en/midway.c optional en dev/ep/if_ep.c optional ep dev/ep/if_ep_eisa.c optional ep eisa dev/ep/if_ep_isa.c optional ep isa dev/ep/if_ep_mca.c optional ep mca dev/ep/if_ep_pccard.c optional ep pccard dev/esp/ncr53c9x.c optional esp dev/ex/if_ex.c optional ex dev/ex/if_ex_isa.c optional ex isa dev/ex/if_ex_pccard.c optional ex pccard dev/exca/exca.c optional cbb dev/fatm/if_fatm.c optional fatm pci dev/fe/if_fe.c optional fe dev/fe/if_fe_pccard.c optional fe pccard dev/firewire/firewire.c optional firewire dev/firewire/fwcrom.c optional firewire dev/firewire/fwdev.c optional firewire dev/firewire/fwdma.c optional firewire dev/firewire/fwmem.c optional firewire dev/firewire/fwohci.c optional firewire dev/firewire/fwohci_pci.c optional firewire pci dev/firewire/if_fwe.c optional fwe dev/firewire/if_fwip.c optional fwip dev/firewire/sbp.c optional sbp dev/firewire/sbp_targ.c optional sbp_targ dev/fxp/if_fxp.c optional fxp dev/gem/if_gem.c optional gem dev/gem/if_gem_pci.c optional gem pci dev/harp/if_harp.c optional harp pci dev/hatm/if_hatm.c optional hatm pci dev/hatm/if_hatm_intr.c optional hatm pci dev/hatm/if_hatm_ioctl.c optional hatm pci dev/hatm/if_hatm_rx.c optional hatm pci dev/hatm/if_hatm_tx.c optional hatm pci dev/hfa/fore_buffer.c optional hfa dev/hfa/fore_command.c optional hfa dev/hfa/fore_globals.c optional hfa dev/hfa/fore_if.c optional hfa dev/hfa/fore_init.c optional hfa dev/hfa/fore_intr.c optional hfa dev/hfa/fore_output.c optional hfa dev/hfa/fore_receive.c optional hfa dev/hfa/fore_stats.c optional hfa dev/hfa/fore_timer.c optional hfa dev/hfa/fore_transmit.c optional hfa dev/hfa/fore_vcm.c optional hfa #dev/hfa/hfa_eisa.c optional hfa eisa dev/hfa/hfa_freebsd.c optional hfa dev/hfa/hfa_pci.c optional hfa pci #dev/hfa/hfa_sbus.c optional hfa sbus dev/hifn/hifn7751.c optional hifn dev/hme/if_hme.c optional hme dev/hme/if_hme_pci.c optional hme pci dev/hme/if_hme_sbus.c optional hme sbus dev/hwpmc/hwpmc_logging.c optional hwpmc dev/hwpmc/hwpmc_mod.c optional hwpmc dev/ichsmb/ichsmb.c optional ichsmb dev/ichsmb/ichsmb_pci.c optional ichsmb pci dev/ida/ida.c optional ida dev/ida/ida_disk.c optional ida dev/ida/ida_eisa.c optional ida eisa dev/ida/ida_pci.c optional ida pci dev/ie/if_ie.c optional ie isa nowerror dev/ie/if_ie_isa.c optional ie isa dev/ieee488/ibfoo.c optional pcii | tnt4882 dev/ieee488/pcii.c optional pcii dev/ieee488/tnt4882.c optional tnt4882 dev/ieee488/upd7210.c optional pcii | tnt4882 dev/iicbus/if_ic.c optional ic dev/iicbus/iic.c optional iic dev/iicbus/iicbb.c optional iicbb dev/iicbus/iicbb_if.m optional iicbb dev/iicbus/iicbus.c optional iicbus dev/iicbus/iicbus_if.m optional iicbus dev/iicbus/iiconf.c optional iicbus dev/iicbus/iicsmb.c optional iicsmb \ dependency "iicbus_if.h" dev/iir/iir.c optional iir dev/iir/iir_ctrl.c optional iir dev/iir/iir_pci.c optional iir pci dev/ips/ips.c optional ips dev/ips/ips_commands.c optional ips dev/ips/ips_disk.c optional ips dev/ips/ips_ioctl.c optional ips 
dev/ips/ips_pci.c optional ips pci dev/ipw/if_ipw.c optional ipw dev/isp/isp.c optional isp dev/isp/isp_freebsd.c optional isp dev/isp/isp_library.c optional isp dev/isp/isp_pci.c optional isp pci dev/isp/isp_sbus.c optional isp sbus dev/isp/isp_target.c optional isp dev/ispfw/ispfw.c optional ispfw dev/iwi/if_iwi.c optional iwi dev/ixgb/if_ixgb.c optional ixgb dev/ixgb/ixgb_ee.c optional ixgb dev/ixgb/ixgb_hw.c optional ixgb dev/joy/joy.c optional joy dev/joy/joy_isa.c optional joy isa dev/joy/joy_pccard.c optional joy pccard dev/kbdmux/kbdmux.c optional kbdmux dev/le/am7990.c optional le dev/le/am79900.c optional le dev/le/if_le_pci.c optional le pci dev/le/lance.c optional le dev/led/led.c standard dev/lge/if_lge.c optional lge dev/lmc/if_lmc.c optional lmc dev/mc146818/mc146818.c optional mc146818 dev/mca/mca_bus.c optional mca dev/mcd/mcd.c optional mcd isa nowerror dev/mcd/mcd_isa.c optional mcd isa nowerror dev/md/md.c optional md dev/mem/memdev.c optional mem dev/mfi/mfi.c optional mfi dev/mfi/mfi_pci.c optional mfi pci dev/mfi/mfi_disk.c optional mfi dev/mfi/mfi_linux.c optional mfi compat_linux dev/mii/acphy.c optional miibus | acphy dev/mii/amphy.c optional miibus | amphy dev/mii/bmtphy.c optional miibus | bmtphy dev/mii/brgphy.c optional miibus | brgphy dev/mii/ciphy.c optional miibus | ciphy dev/mii/e1000phy.c optional miibus | e1000phy # XXX only xl cards? dev/mii/exphy.c optional miibus | exphy # XXX only fxp cards? dev/mii/inphy.c optional miibus | inphy dev/mii/lxtphy.c optional miibus | lxtphy dev/mii/mii.c optional miibus | mii dev/mii/mii_physubr.c optional miibus | mii dev/mii/miibus_if.m optional miibus | mii dev/mii/mlphy.c optional miibus | mlphy dev/mii/nsgphy.c optional miibus | nsgphy dev/mii/nsphy.c optional miibus | nsphy dev/mii/pnaphy.c optional miibus | pnaphy dev/mii/qsphy.c optional miibus | qsphy dev/mii/rgephy.c optional miibus | rgephy # XXX rl and re only? dev/mii/rlphy.c optional miibus | rlphy # XXX rue only? 
dev/mii/ruephy.c optional miibus | ruephy dev/mii/tdkphy.c optional miibus | tdkphy dev/mii/tlphy.c optional miibus | tlphy dev/mii/ukphy.c optional miibus | mii dev/mii/ukphy_subr.c optional miibus | mii dev/mii/xmphy.c optional miibus | xmphy dev/mk48txx/mk48txx.c optional mk48txx dev/mlx/mlx.c optional mlx dev/mlx/mlx_disk.c optional mlx dev/mlx/mlx_pci.c optional mlx pci dev/mly/mly.c optional mly dev/mpt/mpt.c optional mpt dev/mpt/mpt_cam.c optional mpt dev/mpt/mpt_debug.c optional mpt dev/mpt/mpt_pci.c optional mpt pci dev/mpt/mpt_raid.c optional mpt dev/my/if_my.c optional my dev/ncv/ncr53c500.c optional ncv dev/ncv/ncr53c500_pccard.c optional ncv pccard dev/nge/if_nge.c optional nge dev/nmdm/nmdm.c optional nmdm dev/nsp/nsp.c optional nsp dev/nsp/nsp_pccard.c optional nsp pccard dev/null/null.c standard dev/patm/if_patm.c optional patm pci dev/patm/if_patm_attach.c optional patm pci dev/patm/if_patm_intr.c optional patm pci dev/patm/if_patm_ioctl.c optional patm pci dev/patm/if_patm_rtables.c optional patm pci dev/patm/if_patm_rx.c optional patm pci dev/patm/if_patm_tx.c optional patm pci dev/pbio/pbio.c optional pbio isa dev/pccard/card_if.m standard dev/pccard/pccard.c optional pccard dev/pccard/pccard_cis.c optional pccard dev/pccard/pccard_cis_quirks.c optional pccard dev/pccard/pccard_device.c optional pccard dev/pccard/power_if.m standard dev/pccbb/pccbb.c optional cbb dev/pccbb/pccbb_isa.c optional cbb isa dev/pccbb/pccbb_pci.c optional cbb pci dev/pcf/pcf.c optional pcf dev/pci/eisa_pci.c optional pci eisa dev/pci/fixup_pci.c optional pci dev/pci/hostb_pci.c optional pci dev/pci/ignore_pci.c optional pci dev/pci/isa_pci.c optional pci isa dev/pci/pci.c optional pci dev/pci/pci_if.m standard dev/pci/pci_pci.c optional pci dev/pci/pci_user.c optional pci dev/pci/pcib_if.m standard dev/pci/vga_pci.c optional pci dev/pdq/if_fea.c optional fea eisa dev/pdq/if_fpa.c optional fpa pci dev/pdq/pdq.c optional nowerror fea eisa | fpa pci dev/pdq/pdq_ifsubr.c optional nowerror fea eisa | fpa pci dev/ppbus/if_plip.c optional plip dev/ppbus/immio.c optional vpo dev/ppbus/lpbb.c optional lpbb dev/ppbus/lpt.c optional lpt dev/ppbus/pcfclock.c optional pcfclock dev/ppbus/ppb_1284.c optional ppbus dev/ppbus/ppb_base.c optional ppbus dev/ppbus/ppb_msq.c optional ppbus dev/ppbus/ppbconf.c optional ppbus dev/ppbus/ppbus_if.m optional ppbus dev/ppbus/ppi.c optional ppi dev/ppbus/pps.c optional pps dev/ppbus/vpo.c optional vpo dev/ppbus/vpoio.c optional vpo dev/ppc/ppc.c optional ppc dev/ppc/ppc_acpi.c optional ppc acpi dev/ppc/ppc_isa.c optional ppc isa dev/ppc/ppc_pci.c optional ppc pci dev/ppc/ppc_puc.c optional ppc puc dev/pst/pst-iop.c optional pst dev/pst/pst-pci.c optional pst pci dev/pst/pst-raid.c optional pst dev/puc/puc.c optional puc dev/puc/puc_cfg.c optional puc dev/puc/puc_pccard.c optional puc pccard dev/puc/puc_pci.c optional puc pci dev/puc/pucdata.c optional puc pci dev/ral/rt2560.c optional ral dev/ral/rt2661.c optional ral dev/ral/if_ralrate.c optional ral dev/ral/if_ral_pci.c optional ral pci dev/random/harvest.c standard dev/random/hash.c optional random dev/random/probe.c optional random dev/random/randomdev.c optional random dev/random/randomdev_soft.c optional random dev/random/yarrow.c optional random dev/ray/if_ray.c optional ray pccard dev/rc/rc.c optional rc dev/re/if_re.c optional re dev/rndtest/rndtest.c optional rndtest dev/rp/rp.c optional rp dev/rp/rp_isa.c optional rp isa dev/rp/rp_pci.c optional rp pci dev/safe/safe.c optional safe dev/sbsh/if_sbsh.c optional 
sbsh dev/scc/scc_if.m optional scc dev/scc/scc_bfe_ebus.c optional scc ebus dev/scc/scc_bfe_sbus.c optional scc fhc | scc sbus dev/scc/scc_core.c optional scc dev/scc/scc_dev_sab82532.c optional scc dev/scc/scc_dev_z8530.c optional scc dev/scd/scd.c optional scd isa dev/scd/scd_isa.c optional scd isa dev/si/si.c optional si dev/si/si2_z280.c optional si dev/si/si3_t225.c optional si dev/si/si_eisa.c optional si eisa dev/si/si_isa.c optional si isa dev/si/si_pci.c optional si pci dev/sio/sio_pccard.c optional sio pccard dev/sio/sio_pci.c optional sio pci dev/sio/sio_puc.c optional sio puc dev/sk/if_sk.c optional sk pci dev/smbus/smb.c optional smb dev/smbus/smbconf.c optional smbus dev/smbus/smbus.c optional smbus dev/smbus/smbus_if.m optional smbus dev/sn/if_sn.c optional sn dev/sn/if_sn_isa.c optional sn isa dev/sn/if_sn_pccard.c optional sn pccard dev/snp/snp.c optional snp dev/sound/isa/ad1816.c optional snd_ad1816 isa dev/sound/isa/ess.c optional snd_ess isa dev/sound/isa/gusc.c optional snd_gusc isa dev/sound/isa/mss.c optional snd_mss isa dev/sound/isa/sb16.c optional snd_sb16 isa dev/sound/isa/sb8.c optional snd_sb8 isa dev/sound/isa/sbc.c optional snd_sbc isa dev/sound/isa/sndbuf_dma.c optional sound isa dev/sound/pci/als4000.c optional snd_als4000 pci dev/sound/pci/atiixp.c optional snd_atiixp pci #dev/sound/pci/au88x0.c optional snd_au88x0 pci dev/sound/pci/cmi.c optional snd_cmi pci dev/sound/pci/cs4281.c optional snd_cs4281 pci dev/sound/pci/csa.c optional snd_csa pci \ warning "kernel contains GPL contaminated csaimg.h header" dev/sound/pci/csapcm.c optional snd_csa pci dev/sound/pci/ds1.c optional snd_ds1 pci dev/sound/pci/emu10k1.c optional snd_emu10k1 pci \ dependency "emu10k1-alsa%diked.h" \ warning "kernel contains GPL contaminated emu10k1 headers" dev/sound/pci/es137x.c optional snd_es137x pci dev/sound/pci/fm801.c optional snd_fm801 pci dev/sound/pci/ich.c optional snd_ich pci dev/sound/pci/maestro.c optional snd_maestro pci dev/sound/pci/maestro3.c optional snd_maestro3 pci \ warning "kernel contains GPL contaminated maestro3 headers" dev/sound/pci/neomagic.c optional snd_neomagic pci dev/sound/pci/solo.c optional snd_solo pci dev/sound/pci/t4dwave.c optional snd_t4dwave pci dev/sound/pci/via8233.c optional snd_via8233 pci dev/sound/pci/via82c686.c optional snd_via82c686 pci dev/sound/pci/vibes.c optional snd_vibes pci dev/sound/pcm/ac97.c optional sound dev/sound/pcm/ac97_if.m optional sound dev/sound/pcm/ac97_patch.c optional sound dev/sound/pcm/buffer.c optional sound dev/sound/pcm/channel.c optional sound dev/sound/pcm/channel_if.m optional sound dev/sound/pcm/dsp.c optional sound dev/sound/pcm/fake.c optional sound dev/sound/pcm/feeder.c optional sound dev/sound/pcm/feeder_fmt.c optional sound dev/sound/pcm/feeder_if.m optional sound dev/sound/pcm/feeder_rate.c optional sound dev/sound/pcm/feeder_volume.c optional sound dev/sound/pcm/mixer.c optional sound dev/sound/pcm/mixer_if.m optional sound dev/sound/pcm/sndstat.c optional sound dev/sound/pcm/sound.c optional sound dev/sound/pcm/vchan.c optional sound #dev/sound/usb/upcm.c optional snd_upcm usb dev/sound/usb/uaudio.c optional snd_uaudio usb dev/sound/usb/uaudio_pcm.c optional snd_uaudio usb dev/sound/midi/midi.c optional sound dev/sound/midi/mpu401.c optional sound dev/sound/midi/mpu_if.m optional sound dev/sound/midi/mpufoi_if.m optional sound dev/sound/midi/sequencer.c optional sound dev/sound/midi/synth_if.m optional sound dev/sr/if_sr.c optional sr dev/sr/if_sr_pci.c optional sr pci dev/stg/tmc18c30.c 
optional stg dev/stg/tmc18c30_isa.c optional stg isa dev/stg/tmc18c30_pccard.c optional stg pccard dev/stg/tmc18c30_pci.c optional stg pci dev/stg/tmc18c30_subr.c optional stg dev/streams/streams.c optional streams dev/sym/sym_hipd.c optional sym \ dependency "$S/dev/sym/sym_{conf,defs}.h" dev/syscons/blank/blank_saver.c optional blank_saver dev/syscons/daemon/daemon_saver.c optional daemon_saver dev/syscons/dragon/dragon_saver.c optional dragon_saver dev/syscons/fade/fade_saver.c optional fade_saver dev/syscons/fire/fire_saver.c optional fire_saver dev/syscons/green/green_saver.c optional green_saver dev/syscons/logo/logo.c optional logo_saver dev/syscons/logo/logo_saver.c optional logo_saver dev/syscons/rain/rain_saver.c optional rain_saver dev/syscons/snake/snake_saver.c optional snake_saver dev/syscons/star/star_saver.c optional star_saver dev/syscons/warp/warp_saver.c optional warp_saver dev/tdfx/tdfx_linux.c optional tdfx_linux tdfx compat_linux dev/tdfx/tdfx_pci.c optional tdfx pci dev/ti/if_ti.c optional ti pci dev/trm/trm.c optional trm dev/twa/tw_cl_fwimg.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_init.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_intr.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_io.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_misc.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_osl_cam.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_osl_freebsd.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twe/twe.c optional twe dev/twe/twe_freebsd.c optional twe dev/tx/if_tx.c optional tx dev/txp/if_txp.c optional txp dev/uart/uart_bus_acpi.c optional uart acpi #dev/uart/uart_bus_cbus.c optional uart cbus dev/uart/uart_bus_ebus.c optional uart ebus dev/uart/uart_bus_isa.c optional uart isa dev/uart/uart_bus_pccard.c optional uart pccard dev/uart/uart_bus_pci.c optional uart pci dev/uart/uart_bus_puc.c optional uart puc dev/uart/uart_bus_scc.c optional uart scc dev/uart/uart_core.c optional uart dev/uart/uart_dbg.c optional uart gdb dev/uart/uart_dev_ns8250.c optional uart uart_ns8250 dev/uart/uart_dev_sab82532.c optional uart uart_sab82532 dev/uart/uart_dev_z8530.c optional uart uart_z8530 dev/uart/uart_if.m optional uart dev/uart/uart_subr.c optional uart dev/uart/uart_tty.c optional uart dev/ubsec/ubsec.c optional ubsec # # USB support dev/usb/ehci.c optional ehci dev/usb/ehci_pci.c optional ehci pci dev/usb/hid.c optional usb dev/usb/if_aue.c optional aue dev/usb/if_axe.c optional axe dev/usb/if_cdce.c optional cdce dev/usb/if_cue.c optional cue dev/usb/if_kue.c optional kue dev/usb/if_ural.c optional ural dev/usb/if_rue.c optional rue dev/usb/if_udav.c optional udav dev/usb/ohci.c optional ohci dev/usb/ohci_pci.c optional ohci pci dev/usb/sl811hs.c optional slhci dev/usb/slhci_pccard.c optional slhci pccard dev/usb/ubsa.c optional ubsa ucom dev/usb/ubser.c optional ubser dev/usb/ucom.c optional ucom dev/usb/ucycom.c optional ucycom ucom dev/usb/udbp.c optional udbp dev/usb/ufoma.c optional ufoma ucom dev/usb/ufm.c optional ufm dev/usb/uftdi.c optional uftdi ucom dev/usb/ugen.c optional ugen dev/usb/uhci.c optional uhci dev/usb/uhci_pci.c optional uhci pci dev/usb/uhid.c optional uhid dev/usb/uhub.c optional usb dev/usb/ukbd.c optional ukbd dev/usb/ulpt.c optional ulpt dev/usb/umass.c optional umass dev/usb/umct.c optional umct dev/usb/umodem.c optional umodem dev/usb/ums.c optional ums dev/usb/uplcom.c optional 
uplcom ucom dev/usb/urio.c optional urio dev/usb/usb.c optional usb dev/usb/usb_ethersubr.c optional usb dev/usb/usb_if.m optional usb dev/usb/usb_mem.c optional usb dev/usb/usb_quirks.c optional usb dev/usb/usb_subr.c optional usb dev/usb/usbdi.c optional usb dev/usb/usbdi_util.c optional usb dev/usb/uscanner.c optional uscanner dev/usb/uvisor.c optional uvisor ucom dev/usb/uvscom.c optional uvscom ucom dev/utopia/idtphy.c optional utopia dev/utopia/suni.c optional utopia dev/utopia/utopia.c optional utopia dev/vge/if_vge.c optional vge dev/vkbd/vkbd.c optional vkbd dev/vx/if_vx.c optional vx dev/vx/if_vx_eisa.c optional vx eisa dev/vx/if_vx_pci.c optional vx pci dev/watchdog/watchdog.c standard dev/wds/wd7000.c optional wds isa dev/wi/if_wi.c optional wi dev/wi/if_wi_pccard.c optional wi pccard dev/wi/if_wi_pci.c optional wi pci dev/wl/if_wl.c optional wl isa dev/xe/if_xe.c optional xe dev/xe/if_xe_pccard.c optional xe pccard fs/deadfs/dead_vnops.c standard fs/devfs/devfs_devs.c standard fs/devfs/devfs_rule.c standard fs/devfs/devfs_vfsops.c standard fs/devfs/devfs_vnops.c standard fs/fdescfs/fdesc_vfsops.c optional fdescfs fs/fdescfs/fdesc_vnops.c optional fdescfs fs/fifofs/fifo_vnops.c standard fs/hpfs/hpfs_alsubr.c optional hpfs fs/hpfs/hpfs_lookup.c optional hpfs fs/hpfs/hpfs_subr.c optional hpfs fs/hpfs/hpfs_vfsops.c optional hpfs fs/hpfs/hpfs_vnops.c optional hpfs fs/msdosfs/msdosfs_conv.c optional msdosfs fs/msdosfs/msdosfs_denode.c optional msdosfs fs/msdosfs/msdosfs_fat.c optional msdosfs fs/msdosfs/msdosfs_fileno.c optional msdosfs_large fs/msdosfs/msdosfs_iconv.c optional msdosfs_iconv fs/msdosfs/msdosfs_lookup.c optional msdosfs fs/msdosfs/msdosfs_vfsops.c optional msdosfs fs/msdosfs/msdosfs_vnops.c optional msdosfs fs/ntfs/ntfs_compr.c optional ntfs fs/ntfs/ntfs_iconv.c optional ntfs_iconv fs/ntfs/ntfs_ihash.c optional ntfs fs/ntfs/ntfs_subr.c optional ntfs fs/ntfs/ntfs_vfsops.c optional ntfs fs/ntfs/ntfs_vnops.c optional ntfs fs/nullfs/null_subr.c optional nullfs fs/nullfs/null_vfsops.c optional nullfs fs/nullfs/null_vnops.c optional nullfs fs/nwfs/nwfs_io.c optional nwfs fs/nwfs/nwfs_ioctl.c optional nwfs fs/nwfs/nwfs_node.c optional nwfs fs/nwfs/nwfs_subr.c optional nwfs fs/nwfs/nwfs_vfsops.c optional nwfs fs/nwfs/nwfs_vnops.c optional nwfs fs/portalfs/portal_vfsops.c optional portalfs fs/portalfs/portal_vnops.c optional portalfs fs/procfs/procfs.c optional procfs fs/procfs/procfs_ctl.c optional procfs fs/procfs/procfs_dbregs.c optional procfs fs/procfs/procfs_fpregs.c optional procfs fs/procfs/procfs_ioctl.c optional procfs fs/procfs/procfs_map.c optional procfs fs/procfs/procfs_mem.c optional procfs fs/procfs/procfs_note.c optional procfs fs/procfs/procfs_regs.c optional procfs fs/procfs/procfs_rlimit.c optional procfs fs/procfs/procfs_status.c optional procfs fs/procfs/procfs_type.c optional procfs fs/pseudofs/pseudofs.c optional pseudofs fs/pseudofs/pseudofs_fileno.c optional pseudofs fs/pseudofs/pseudofs_vncache.c optional pseudofs fs/pseudofs/pseudofs_vnops.c optional pseudofs fs/smbfs/smbfs_io.c optional smbfs fs/smbfs/smbfs_node.c optional smbfs fs/smbfs/smbfs_smb.c optional smbfs fs/smbfs/smbfs_subr.c optional smbfs fs/smbfs/smbfs_vfsops.c optional smbfs fs/smbfs/smbfs_vnops.c optional smbfs fs/udf/osta.c optional udf fs/udf/udf_iconv.c optional udf_iconv fs/udf/udf_vfsops.c optional udf fs/udf/udf_vnops.c optional udf fs/umapfs/umap_subr.c optional umapfs fs/umapfs/umap_vfsops.c optional umapfs fs/umapfs/umap_vnops.c optional umapfs fs/unionfs/union_subr.c 
optional unionfs fs/unionfs/union_vfsops.c optional unionfs fs/unionfs/union_vnops.c optional unionfs gdb/gdb_cons.c optional gdb gdb/gdb_main.c optional gdb gdb/gdb_packet.c optional gdb geom/bde/g_bde.c optional geom_bde geom/bde/g_bde_crypt.c optional geom_bde geom/bde/g_bde_lock.c optional geom_bde geom/bde/g_bde_work.c optional geom_bde geom/concat/g_concat.c optional geom_concat geom/eli/g_eli.c optional geom_eli geom/eli/g_eli_crypto.c optional geom_eli geom/eli/g_eli_ctl.c optional geom_eli geom/eli/g_eli_integrity.c optional geom_eli geom/eli/g_eli_key.c optional geom_eli geom/eli/g_eli_privacy.c optional geom_eli geom/eli/pkcs5v2.c optional geom_eli geom/gate/g_gate.c optional geom_gate geom/geom_aes.c optional geom_aes geom/geom_apple.c optional geom_apple geom/geom_bsd.c optional geom_bsd geom/geom_bsd_enc.c optional geom_bsd geom/geom_ccd.c optional ccd | geom_ccd geom/geom_ctl.c standard geom/geom_dev.c standard geom/geom_disk.c standard geom/geom_dump.c standard geom/geom_event.c standard geom/geom_fox.c optional geom_fox geom/geom_gpt.c optional geom_gpt geom/geom_io.c standard geom/geom_kern.c standard geom/geom_mbr.c optional geom_mbr geom/geom_mbr_enc.c optional geom_mbr geom/geom_pc98.c optional geom_pc98 geom/geom_pc98_enc.c optional geom_pc98 geom/geom_slice.c standard geom/geom_subr.c standard geom/geom_sunlabel.c optional geom_sunlabel geom/geom_sunlabel_enc.c optional geom_sunlabel geom/geom_vfs.c standard geom/geom_vol_ffs.c optional geom_vol geom/label/g_label.c optional geom_label geom/label/g_label_ext2fs.c optional geom_label geom/label/g_label_iso9660.c optional geom_label geom/label/g_label_msdosfs.c optional geom_label geom/label/g_label_ntfs.c optional geom_label geom/label/g_label_reiserfs.c optional geom_label geom/label/g_label_ufs.c optional geom_label geom/mirror/g_mirror.c optional geom_mirror geom/mirror/g_mirror_ctl.c optional geom_mirror geom/nop/g_nop.c optional geom_nop geom/raid3/g_raid3.c optional geom_raid3 geom/raid3/g_raid3_ctl.c optional geom_raid3 geom/shsec/g_shsec.c optional geom_shsec geom/stripe/g_stripe.c optional geom_stripe geom/uzip/g_uzip.c optional geom_uzip geom/zero/g_zero.c optional geom_zero gnu/fs/ext2fs/ext2_alloc.c optional ext2fs \ warning "kernel contains GPL contaminated ext2fs filesystem" gnu/fs/ext2fs/ext2_balloc.c optional ext2fs gnu/fs/ext2fs/ext2_bmap.c optional ext2fs gnu/fs/ext2fs/ext2_inode.c optional ext2fs gnu/fs/ext2fs/ext2_inode_cnv.c optional ext2fs gnu/fs/ext2fs/ext2_linux_balloc.c optional ext2fs gnu/fs/ext2fs/ext2_linux_ialloc.c optional ext2fs gnu/fs/ext2fs/ext2_lookup.c optional ext2fs gnu/fs/ext2fs/ext2_subr.c optional ext2fs gnu/fs/ext2fs/ext2_vfsops.c optional ext2fs gnu/fs/ext2fs/ext2_vnops.c optional ext2fs gnu/fs/reiserfs/reiserfs_hashes.c optional reiserfs \ warning "kernel contains GPL contaminated ReiserFS filesystem" gnu/fs/reiserfs/reiserfs_inode.c optional reiserfs gnu/fs/reiserfs/reiserfs_item_ops.c optional reiserfs gnu/fs/reiserfs/reiserfs_namei.c optional reiserfs gnu/fs/reiserfs/reiserfs_prints.c optional reiserfs gnu/fs/reiserfs/reiserfs_stree.c optional reiserfs gnu/fs/reiserfs/reiserfs_vfsops.c optional reiserfs gnu/fs/reiserfs/reiserfs_vnops.c optional reiserfs # # isdn4bsd device drivers # i4b/driver/i4b_trace.c optional i4btrc i4b/driver/i4b_rbch.c optional i4brbch i4b/driver/i4b_tel.c optional i4btel i4b/driver/i4b_ipr.c optional i4bipr net/slcompress.c optional i4bipr | i4bisppp i4b/driver/i4b_ctl.c optional i4bctl i4b/driver/i4b_ing.c optional i4bing i4b/driver/i4b_isppp.c 
optional i4bisppp # # isdn4bsd CAPI driver # i4b/capi/capi_l4if.c optional i4bcapi i4b/capi/capi_llif.c optional i4bcapi i4b/capi/capi_msgs.c optional i4bcapi # # isdn4bsd AVM B1/T1 CAPI driver # i4b/capi/iavc/iavc_pci.c optional iavc i4bcapi pci i4b/capi/iavc/iavc_isa.c optional iavc i4bcapi isa i4b/capi/iavc/iavc_lli.c optional iavc i4bcapi i4b/capi/iavc/iavc_card.c optional iavc i4bcapi # # isdn4bsd support # i4b/layer2/i4b_mbuf.c optional i4btrc # # isdn4bsd Q.921 handler # i4b/layer2/i4b_l2.c optional i4bq921 i4b/layer2/i4b_l2fsm.c optional i4bq921 i4b/layer2/i4b_uframe.c optional i4bq921 i4b/layer2/i4b_tei.c optional i4bq921 i4b/layer2/i4b_sframe.c optional i4bq921 i4b/layer2/i4b_iframe.c optional i4bq921 i4b/layer2/i4b_l2timer.c optional i4bq921 i4b/layer2/i4b_util.c optional i4bq921 i4b/layer2/i4b_lme.c optional i4bq921 # # isdn4bsd Q.931 handler # i4b/layer3/i4b_q931.c optional i4bq931 i4b/layer3/i4b_l3fsm.c optional i4bq931 i4b/layer3/i4b_l3timer.c optional i4bq931 i4b/layer3/i4b_l2if.c optional i4bq931 i4b/layer3/i4b_l4if.c optional i4bq931 i4b/layer3/i4b_q932fac.c optional i4bq931 # # isdn4bsd control device driver, interface to isdnd # i4b/layer4/i4b_i4bdrv.c optional i4b i4b/layer4/i4b_l4.c optional i4b i4b/layer4/i4b_l4mgmt.c optional i4b i4b/layer4/i4b_l4timer.c optional i4b # isa/isa_if.m standard isa/isa_common.c optional isa isa/isahint.c optional isa isa/orm.c optional isa isa/pnp.c optional isa isapnp isa/pnpparse.c optional isa isapnp isofs/cd9660/cd9660_bmap.c optional cd9660 isofs/cd9660/cd9660_lookup.c optional cd9660 isofs/cd9660/cd9660_node.c optional cd9660 isofs/cd9660/cd9660_rrip.c optional cd9660 isofs/cd9660/cd9660_util.c optional cd9660 isofs/cd9660/cd9660_vfsops.c optional cd9660 isofs/cd9660/cd9660_vnops.c optional cd9660 isofs/cd9660/cd9660_iconv.c optional cd9660_iconv kern/bus_if.m standard kern/clock_if.m optional genclock kern/cpufreq_if.m standard kern/device_if.m standard kern/imgact_elf.c standard kern/imgact_shell.c standard kern/inflate.c optional gzip kern/init_main.c standard kern/init_sysent.c standard kern/kern_acct.c standard kern/kern_acl.c standard kern/kern_alq.c optional alq kern/kern_clock.c standard kern/kern_condvar.c standard kern/kern_conf.c standard kern/kern_cpu.c standard kern/kern_context.c standard kern/kern_descrip.c standard kern/kern_environment.c standard kern/kern_event.c standard kern/kern_exec.c standard kern/kern_exit.c standard kern/kern_fork.c standard kern/kern_idle.c standard kern/kern_intr.c standard kern/kern_jail.c standard kern/kern_kse.c standard kern/kern_kthread.c standard kern/kern_ktr.c optional ktr kern/kern_ktrace.c standard kern/kern_linker.c standard kern/kern_lock.c standard kern/kern_lockf.c standard kern/kern_mac.c standard kern/kern_malloc.c standard kern/kern_mbuf.c standard kern/kern_mib.c standard kern/kern_module.c standard kern/kern_mtxpool.c standard kern/kern_mutex.c standard kern/kern_ntptime.c standard kern/kern_physio.c standard kern/kern_pmc.c standard kern/kern_poll.c optional device_polling kern/kern_proc.c standard kern/kern_prot.c standard kern/kern_resource.c standard kern/kern_rwlock.c standard kern/kern_sema.c standard kern/kern_shutdown.c standard kern/kern_sig.c standard kern/kern_subr.c standard kern/kern_sx.c standard kern/kern_synch.c standard kern/kern_syscalls.c standard kern/kern_sysctl.c standard kern/kern_tc.c standard kern/kern_thr.c standard kern/kern_thread.c standard kern/kern_time.c standard kern/kern_timeout.c standard kern/kern_umtx.c standard kern/kern_uuid.c 
	standard
kern/kern_xxx.c	standard
kern/link_elf.c	standard
kern/linker_if.m	standard
kern/md4c.c	optional netsmb
kern/md5c.c	standard
kern/sched_4bsd.c	optional sched_4bsd
+kern/sched_core.c	optional sched_core
kern/sched_ule.c	optional sched_ule
kern/serdev_if.m	optional puc | scc
kern/subr_autoconf.c	standard
kern/subr_blist.c	standard
kern/subr_bus.c	standard
kern/subr_clock.c	optional genclock
kern/subr_devstat.c	standard
kern/subr_disk.c	standard
kern/subr_eventhandler.c	standard
kern/subr_firmware.c	optional firmware
kern/subr_hints.c	standard
kern/subr_kdb.c	standard
kern/subr_kobj.c	standard
kern/subr_lock.c	standard
kern/subr_log.c	standard
kern/subr_mbpool.c	optional libmbpool
kern/subr_mchain.c	optional libmchain
kern/subr_module.c	standard
kern/subr_msgbuf.c	standard
kern/subr_param.c	standard
kern/subr_pcpu.c	standard
kern/subr_power.c	standard
kern/subr_prf.c	standard
kern/subr_prof.c	standard
kern/subr_rman.c	standard
kern/subr_sbuf.c	standard
kern/subr_scanf.c	standard
kern/subr_sleepqueue.c	standard
kern/subr_smp.c	standard
kern/subr_stack.c	optional ddb
kern/subr_taskqueue.c	standard
kern/subr_trap.c	standard
kern/subr_turnstile.c	standard
kern/subr_unit.c	standard
kern/subr_witness.c	optional witness
kern/sys_generic.c	standard
kern/sys_pipe.c	standard
kern/sys_process.c	standard
kern/sys_socket.c	standard
kern/syscalls.c	optional witness
kern/sysv_ipc.c	standard
kern/sysv_msg.c	optional sysvmsg
kern/sysv_sem.c	optional sysvsem
kern/sysv_shm.c	optional sysvshm
kern/tty.c	standard
kern/tty_compat.c	optional compat_43tty
kern/tty_conf.c	standard
kern/tty_cons.c	standard
kern/tty_pty.c	optional pty
kern/tty_pts.c	optional pty
kern/tty_subr.c	standard
kern/tty_tty.c	standard
kern/uipc_accf.c	optional inet
kern/uipc_cow.c	optional zero_copy_sockets
kern/uipc_domain.c	standard
kern/uipc_mbuf.c	standard
kern/uipc_mbuf2.c	standard
kern/uipc_mqueue.c	optional p1003_1b_mqueue
kern/uipc_proto.c	standard
kern/uipc_sem.c	optional p1003_1b_semaphores
kern/uipc_socket.c	standard
kern/uipc_socket2.c	standard
kern/uipc_syscalls.c	standard
kern/uipc_usrreq.c	standard
kern/vfs_aio.c	optional vfs_aio
kern/vfs_bio.c	standard
kern/vfs_cache.c	standard
kern/vfs_cluster.c	standard
kern/vfs_default.c	standard
kern/vfs_export.c	standard
kern/vfs_hash.c	standard
kern/vfs_init.c	standard
kern/vfs_lookup.c	standard
kern/vfs_mount.c	standard
kern/vfs_subr.c	standard
kern/vfs_syscalls.c	standard
kern/vfs_vnops.c	standard
#
# These files in libkern/ are those needed by all architectures. Some
# of the files in libkern/ are only needed on some architectures, e.g.,
# libkern/divdi3.c is needed by i386 but not alpha. Also, some of these
# routines may be optimized for a particular platform. In either case,
# the file should be moved to conf/files.<arch> from here.
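The kern/sched_core.c entry added above uses the same idiom as the neighbouring scheduler sources: a path tagged "optional" plus a lower-case token is compiled only when the matching kernel option is configured, while "standard" files are always built. A minimal sketch of how the token and the option relate, assuming the usual opt_sched.h declaration in sys/conf/options (that declaration is not shown in this diff):

	kern/sched_core.c	optional sched_core	# from this change: built only when the token is enabled
	SCHED_CORE		opt_sched.h		# assumed sys/conf/options line, not part of this diff

config(8) lower-cases the option name to obtain the token tested by files entries and emits the option's #define into the generated opt_sched.h header, so the build and the kernel sources both see a single scheduler selection.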
# libkern/arc4random.c standard libkern/bcd.c standard libkern/bsearch.c standard libkern/crc32.c standard libkern/fnmatch.c standard libkern/gets.c standard libkern/iconv.c optional libiconv libkern/iconv_converter_if.m optional libiconv libkern/iconv_xlat.c optional libiconv libkern/iconv_xlat16.c optional libiconv libkern/index.c standard libkern/inet_ntoa.c standard libkern/mcount.c optional profiling-routine libkern/qsort.c standard libkern/qsort_r.c standard libkern/random.c standard libkern/rindex.c standard libkern/scanc.c standard libkern/skpc.c standard libkern/strcasecmp.c standard libkern/strcat.c standard libkern/strcmp.c standard libkern/strcpy.c standard libkern/strdup.c standard libkern/strlcat.c standard libkern/strlcpy.c standard libkern/strlen.c standard libkern/strncmp.c standard libkern/strncpy.c standard libkern/strsep.c standard libkern/strspn.c standard libkern/strtol.c standard libkern/strtoq.c standard libkern/strtoul.c standard libkern/strtouq.c standard libkern/strvalid.c standard net/bpf.c standard net/bpf_jitter.c optional bpf_jitter net/bpf_filter.c optional bpf | netgraph_bpf net/bridgestp.c optional if_bridge net/bsd_comp.c optional ppp_bsdcomp net/if.c standard net/if_arcsubr.c optional arcnet net/if_atmsubr.c optional atm net/if_bridge.c optional if_bridge net/if_clone.c standard net/if_disc.c optional disc net/if_ef.c optional ef net/if_ethersubr.c optional ether net/if_faith.c optional faith net/if_fddisubr.c optional fddi net/if_fwsubr.c optional fwip net/if_gif.c optional gif net/if_gre.c optional gre net/if_iso88025subr.c optional token net/if_loop.c optional loop net/if_media.c standard net/if_mib.c standard net/if_ppp.c optional ppp net/if_sl.c optional sl net/if_spppfr.c optional i4bisppp | sppp net/if_spppsubr.c optional i4bisppp | sppp net/if_stf.c optional stf net/if_tun.c optional tun net/if_tap.c optional tap net/if_vlan.c optional vlan net/netisr.c standard net/ppp_deflate.c optional ppp_deflate net/ppp_tty.c optional ppp net/pfil.c optional ether | inet net/radix.c standard net/raw_cb.c standard net/raw_usrreq.c standard net/route.c standard net/rtsock.c standard net/slcompress.c optional netgraph_vjc | ppp | sl | sppp net/zlib.c optional crypto | geom_uzip | ipsec | \ ppp_deflate net80211/ieee80211.c optional wlan net80211/ieee80211_acl.c optional wlan_acl net80211/ieee80211_crypto.c optional wlan net80211/ieee80211_crypto_ccmp.c optional wlan_ccmp net80211/ieee80211_crypto_none.c optional wlan net80211/ieee80211_crypto_tkip.c optional wlan_tkip net80211/ieee80211_crypto_wep.c optional wlan_wep net80211/ieee80211_freebsd.c optional wlan net80211/ieee80211_input.c optional wlan net80211/ieee80211_ioctl.c optional wlan net80211/ieee80211_node.c optional wlan net80211/ieee80211_output.c optional wlan net80211/ieee80211_proto.c optional wlan net80211/ieee80211_xauth.c optional wlan_xauth netatalk/aarp.c optional netatalk netatalk/at_control.c optional netatalk netatalk/at_proto.c optional netatalk netatalk/at_rmx.c optional netatalkdebug netatalk/ddp_input.c optional netatalk netatalk/ddp_output.c optional netatalk netatalk/ddp_pcb.c optional netatalk netatalk/ddp_usrreq.c optional netatalk netatm/atm_aal5.c optional atm_core netatm/atm_cm.c optional atm_core netatm/atm_device.c optional atm_core netatm/atm_if.c optional atm_core netatm/atm_proto.c optional atm_core netatm/atm_signal.c optional atm_core netatm/atm_socket.c optional atm_core netatm/atm_subr.c optional atm_core netatm/atm_usrreq.c optional atm_core netatm/ipatm/ipatm_event.c 
optional atm_ip atm_core netatm/ipatm/ipatm_if.c optional atm_ip atm_core netatm/ipatm/ipatm_input.c optional atm_ip atm_core netatm/ipatm/ipatm_load.c optional atm_ip atm_core netatm/ipatm/ipatm_output.c optional atm_ip atm_core netatm/ipatm/ipatm_usrreq.c optional atm_ip atm_core netatm/ipatm/ipatm_vcm.c optional atm_ip atm_core netatm/sigpvc/sigpvc_if.c optional atm_sigpvc atm_core netatm/sigpvc/sigpvc_subr.c optional atm_sigpvc atm_core netatm/spans/spans_arp.c optional atm_spans atm_core \ dependency "spans_xdr.h" netatm/spans/spans_cls.c optional atm_spans atm_core netatm/spans/spans_if.c optional atm_spans atm_core netatm/spans/spans_kxdr.c optional atm_spans atm_core netatm/spans/spans_msg.c optional atm_spans atm_core netatm/spans/spans_print.c optional atm_spans atm_core netatm/spans/spans_proto.c optional atm_spans atm_core netatm/spans/spans_subr.c optional atm_spans atm_core netatm/spans/spans_util.c optional atm_spans atm_core spans_xdr.h optional atm_spans atm_core \ before-depend \ dependency "$S/netatm/spans/spans_xdr.x" \ compile-with "rpcgen -h -C $S/netatm/spans/spans_xdr.x | grep -v rpc/rpc.h > spans_xdr.h" \ clean "spans_xdr.h" \ no-obj no-implicit-rule spans_xdr.c optional atm_spans atm_core \ before-depend \ dependency "$S/netatm/spans/spans_xdr.x" \ compile-with "rpcgen -c -C $S/netatm/spans/spans_xdr.x | grep -v rpc/rpc.h > spans_xdr.c" \ clean "spans_xdr.c" \ no-obj no-implicit-rule local spans_xdr.o optional atm_spans atm_core \ dependency "$S/netatm/spans/spans_xdr.x" \ compile-with "${NORMAL_C}" \ no-implicit-rule local netatm/uni/q2110_sigaa.c optional atm_uni atm_core netatm/uni/q2110_sigcpcs.c optional atm_uni atm_core netatm/uni/q2110_subr.c optional atm_uni atm_core netatm/uni/qsaal1_sigaa.c optional atm_uni atm_core netatm/uni/qsaal1_sigcpcs.c optional atm_uni atm_core netatm/uni/qsaal1_subr.c optional atm_uni atm_core netatm/uni/sscf_uni.c optional atm_uni atm_core netatm/uni/sscf_uni_lower.c optional atm_uni atm_core netatm/uni/sscf_uni_upper.c optional atm_uni atm_core netatm/uni/sscop.c optional atm_uni atm_core netatm/uni/sscop_lower.c optional atm_uni atm_core netatm/uni/sscop_pdu.c optional atm_uni atm_core netatm/uni/sscop_sigaa.c optional atm_uni atm_core netatm/uni/sscop_sigcpcs.c optional atm_uni atm_core netatm/uni/sscop_subr.c optional atm_uni atm_core netatm/uni/sscop_timer.c optional atm_uni atm_core netatm/uni/sscop_upper.c optional atm_uni atm_core netatm/uni/uni_load.c optional atm_uni atm_core netatm/uni/uniarp.c optional atm_uni atm_core netatm/uni/uniarp_cache.c optional atm_uni atm_core netatm/uni/uniarp_input.c optional atm_uni atm_core netatm/uni/uniarp_output.c optional atm_uni atm_core netatm/uni/uniarp_timer.c optional atm_uni atm_core netatm/uni/uniarp_vcm.c optional atm_uni atm_core netatm/uni/uniip.c optional atm_uni atm_core netatm/uni/unisig_decode.c optional atm_uni atm_core netatm/uni/unisig_encode.c optional atm_uni atm_core netatm/uni/unisig_if.c optional atm_uni atm_core netatm/uni/unisig_mbuf.c optional atm_uni atm_core netatm/uni/unisig_msg.c optional atm_uni atm_core netatm/uni/unisig_print.c optional atm_uni atm_core netatm/uni/unisig_proto.c optional atm_uni atm_core netatm/uni/unisig_sigmgr_state.c optional atm_uni atm_core netatm/uni/unisig_subr.c optional atm_uni atm_core netatm/uni/unisig_util.c optional atm_uni atm_core netatm/uni/unisig_vc_state.c optional atm_uni atm_core netgraph/atm/atmpif/ng_atmpif.c optional netgraph_atm_atmpif netgraph/atm/atmpif/ng_atmpif_harp.c optional netgraph_atm_atmpif 
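The spans_xdr.h and spans_xdr.c rules above also show how this listing can describe generated sources: "dependency" names the input, "compile-with" gives the command that produces the file, "before-depend" makes it exist before dependency processing, "clean" has it removed again, and "no-obj", "no-implicit-rule" and "local" suppress the default object rules or mark the file as living in the build directory. A purely hypothetical sketch of the same pattern (none of these names exist in the tree):

	foo_defs.h	optional foo \
		before-depend \
		dependency "$S/dev/foo/foo_defs.in" \
		compile-with "awk -f $S/dev/foo/gen_defs.awk $S/dev/foo/foo_defs.in > foo_defs.h" \
		clean "foo_defs.h" \
		no-obj no-implicit-rule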
netgraph/atm/ccatm/ng_ccatm.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/ng_atm.c optional ngatm_atm netgraph/atm/ngatmbase.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/sscfu/ng_sscfu.c optional ngatm_sscfu \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/sscop/ng_sscop.c optional ngatm_sscop \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/uni/ng_uni.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/bluetooth/common/ng_bluetooth.c optional netgraph_bluetooth netgraph/bluetooth/drivers/bt3c/ng_bt3c_pccard.c optional netgraph_bluetooth_bt3c netgraph/bluetooth/drivers/h4/ng_h4.c optional netgraph_bluetooth_h4 netgraph/bluetooth/drivers/ubt/ng_ubt.c optional netgraph_bluetooth_ubt netgraph/bluetooth/drivers/ubtbcmfw/ubtbcmfw.c optional netgraph_bluetooth_ubtbcmfw netgraph/bluetooth/hci/ng_hci_cmds.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_evnt.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_main.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_misc.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_ulpi.c optional netgraph_bluetooth_hci netgraph/bluetooth/l2cap/ng_l2cap_cmds.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_evnt.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_llpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_main.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_misc.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_ulpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/socket/ng_btsocket.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_hci_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_rfcomm.c optional netgraph_bluetooth_socket netgraph/netflow/netflow.c optional netgraph_netflow netgraph/netflow/ng_netflow.c optional netgraph_netflow netgraph/ng_UI.c optional netgraph_UI netgraph/ng_async.c optional netgraph_async netgraph/ng_atmllc.c optional netgraph_atmllc netgraph/ng_base.c optional netgraph netgraph/ng_bpf.c optional netgraph_bpf netgraph/ng_bridge.c optional netgraph_bridge netgraph/ng_cisco.c optional netgraph_cisco netgraph/ng_device.c optional netgraph_device netgraph/ng_echo.c optional netgraph_echo netgraph/ng_eiface.c optional netgraph_eiface netgraph/ng_ether.c optional netgraph_ether netgraph/ng_fec.c optional netgraph_fec netgraph/ng_frame_relay.c optional netgraph_frame_relay netgraph/ng_gif.c optional netgraph_gif netgraph/ng_gif_demux.c optional netgraph_gif_demux netgraph/ng_hole.c optional netgraph_hole netgraph/ng_iface.c optional netgraph_iface netgraph/ng_ip_input.c optional netgraph_ip_input netgraph/ng_ipfw.c optional netgraph_ipfw netgraph/ng_ksocket.c optional netgraph_ksocket netgraph/ng_l2tp.c optional netgraph_l2tp netgraph/ng_lmi.c optional netgraph_lmi netgraph/ng_mppc.c optional netgraph_mppc_compression | \ netgraph_mppc_encryption netgraph/ng_nat.c optional netgraph_nat netgraph/ng_one2many.c optional netgraph_one2many netgraph/ng_parse.c optional netgraph netgraph/ng_ppp.c optional netgraph_ppp netgraph/ng_pppoe.c optional netgraph_pppoe netgraph/ng_pptpgre.c optional netgraph_pptpgre netgraph/ng_rfc1490.c 
optional netgraph_rfc1490 netgraph/ng_socket.c optional netgraph_socket netgraph/ng_split.c optional netgraph_split netgraph/ng_sppp.c optional netgraph_sppp netgraph/ng_tcpmss.c optional netgraph_tcpmss netgraph/ng_tee.c optional netgraph_tee netgraph/ng_tty.c optional netgraph_tty netgraph/ng_vjc.c optional netgraph_vjc netinet/accf_data.c optional accept_filter_data netinet/accf_http.c optional accept_filter_http netinet/if_atm.c optional atm netinet/if_ether.c optional ether netinet/igmp.c optional inet netinet/in.c optional inet netinet/ip_carp.c optional carp netinet/in_gif.c optional gif inet netinet/ip_gre.c optional gre inet netinet/ip_id.c optional inet netinet/in_pcb.c optional inet netinet/in_proto.c optional inet \ compile-with "${NORMAL_C} -I$S/contrib/pf" netinet/in_rmx.c optional inet netinet/ip_divert.c optional ipdivert netinet/ip_dummynet.c optional dummynet netinet/ip_ecn.c optional inet | inet6 netinet/ip_encap.c optional inet | inet6 netinet/ip_fastfwd.c optional inet netinet/ip_fw2.c optional ipfirewall netinet/ip_fw_pfil.c optional ipfirewall netinet/ip_icmp.c optional inet netinet/ip_input.c optional inet netinet/ip_ipsec.c optional ipsec netinet/ip_ipsec.c optional fast_ipsec netinet/ip_mroute.c optional mrouting netinet/ip_options.c optional inet netinet/ip_output.c optional inet netinet/raw_ip.c optional inet netinet/tcp_debug.c optional tcpdebug netinet/tcp_hostcache.c optional inet netinet/tcp_input.c optional inet netinet/tcp_output.c optional inet netinet/tcp_sack.c optional inet netinet/tcp_subr.c optional inet netinet/tcp_syncache.c optional inet netinet/tcp_timer.c optional inet netinet/tcp_usrreq.c optional inet netinet/udp_usrreq.c optional inet netinet/libalias/alias.c optional libalias netinet/libalias/alias_cuseeme.c optional libalias netinet/libalias/alias_db.c optional libalias netinet/libalias/alias_ftp.c optional libalias netinet/libalias/alias_irc.c optional libalias netinet/libalias/alias_nbt.c optional libalias netinet/libalias/alias_pptp.c optional libalias netinet/libalias/alias_proxy.c optional libalias netinet/libalias/alias_skinny.c optional libalias netinet/libalias/alias_smedia.c optional libalias netinet/libalias/alias_util.c optional libalias netinet6/ah_aesxcbcmac.c optional ipsec netinet6/ah_core.c optional ipsec netinet6/ah_input.c optional ipsec netinet6/ah_output.c optional ipsec netinet6/dest6.c optional inet6 netinet6/esp_aesctr.c optional ipsec ipsec_esp netinet6/esp_core.c optional ipsec ipsec_esp netinet6/esp_input.c optional ipsec ipsec_esp netinet6/esp_output.c optional ipsec ipsec_esp netinet6/esp_rijndael.c optional ipsec ipsec_esp netinet6/frag6.c optional inet6 netinet6/icmp6.c optional inet6 netinet6/in6.c optional inet6 netinet6/in6_cksum.c optional inet6 netinet6/in6_gif.c optional gif inet6 netinet6/in6_ifattach.c optional inet6 netinet6/in6_pcb.c optional inet6 netinet6/in6_proto.c optional inet6 netinet6/in6_rmx.c optional inet6 netinet6/in6_src.c optional inet6 netinet6/ip6_forward.c optional inet6 netinet6/ip6_id.c optional inet6 netinet6/ip6_input.c optional inet6 netinet6/ip6_mroute.c optional inet6 netinet6/ip6_output.c optional inet6 netinet6/ipcomp_core.c optional ipsec netinet6/ipcomp_input.c optional ipsec netinet6/ipcomp_output.c optional ipsec netinet6/ipsec.c optional ipsec netinet6/mld6.c optional inet6 netinet6/nd6.c optional inet6 netinet6/nd6_nbr.c optional inet6 netinet6/nd6_rtr.c optional inet6 netinet6/raw_ip6.c optional inet6 netinet6/route6.c optional inet6 netinet6/scope6.c optional inet6 
netinet6/udp6_output.c optional inet6 netinet6/udp6_usrreq.c optional inet6 netipsec/ipsec.c optional fast_ipsec netipsec/ipsec_input.c optional fast_ipsec netipsec/ipsec_mbuf.c optional fast_ipsec netipsec/ipsec_output.c optional fast_ipsec netipsec/key.c optional fast_ipsec netipsec/key_debug.c optional fast_ipsec netipsec/keysock.c optional fast_ipsec netipsec/xform_ah.c optional fast_ipsec netipsec/xform_esp.c optional fast_ipsec netipsec/xform_ipcomp.c optional fast_ipsec netipsec/xform_ipip.c optional fast_ipsec netipsec/xform_tcp.c optional fast_ipsec tcp_signature netipx/ipx.c optional ipx netipx/ipx_cksum.c optional ipx netipx/ipx_input.c optional ipx netipx/ipx_ip.c optional ipx netipx/ipx_outputfl.c optional ipx netipx/ipx_pcb.c optional ipx netipx/ipx_proto.c optional ipx netipx/ipx_usrreq.c optional ipx netipx/spx_debug.c optional ipx netipx/spx_usrreq.c optional ipx netkey/key.c optional ipsec netkey/key_debug.c optional ipsec netkey/keydb.c optional ipsec netkey/keysock.c optional ipsec netnatm/natm.c optional natm netnatm/natm_pcb.c optional natm netnatm/natm_proto.c optional natm netncp/ncp_conn.c optional ncp netncp/ncp_crypt.c optional ncp netncp/ncp_login.c optional ncp netncp/ncp_mod.c optional ncp netncp/ncp_ncp.c optional ncp netncp/ncp_nls.c optional ncp netncp/ncp_rq.c optional ncp netncp/ncp_sock.c optional ncp netncp/ncp_subr.c optional ncp netsmb/smb_conn.c optional netsmb netsmb/smb_crypt.c optional netsmb netsmb/smb_dev.c optional netsmb netsmb/smb_iod.c optional netsmb netsmb/smb_rq.c optional netsmb netsmb/smb_smb.c optional netsmb netsmb/smb_subr.c optional netsmb netsmb/smb_trantcp.c optional netsmb netsmb/smb_usr.c optional netsmb nfs/nfs_common.c optional nfsclient | nfsserver nfs4client/nfs4_dev.c optional nfsclient nfs4client/nfs4_idmap.c optional nfsclient nfs4client/nfs4_socket.c optional nfsclient nfs4client/nfs4_subs.c optional nfsclient nfs4client/nfs4_vfs_subs.c optional nfsclient nfs4client/nfs4_vfsops.c optional nfsclient nfs4client/nfs4_vn_subs.c optional nfsclient nfs4client/nfs4_vnops.c optional nfsclient nfsclient/bootp_subr.c optional bootp nfsclient nfsclient/krpc_subr.c optional bootp nfsclient nfsclient/nfs_bio.c optional nfsclient nfsclient/nfs_diskless.c optional nfsclient nfs_root nfsclient/nfs_node.c optional nfsclient nfsclient/nfs_socket.c optional nfsclient nfsclient/nfs_subs.c optional nfsclient nfsclient/nfs_nfsiod.c optional nfsclient nfsclient/nfs_vfsops.c optional nfsclient nfsclient/nfs_vnops.c optional nfsclient nfsclient/nfs_lock.c optional nfsclient nfsserver/nfs_serv.c optional nfsserver nfsserver/nfs_srvsock.c optional nfsserver nfsserver/nfs_srvcache.c optional nfsserver nfsserver/nfs_srvsubs.c optional nfsserver nfsserver/nfs_syscalls.c optional nfsserver # crypto support opencrypto/cast.c optional crypto | ipsec ipsec_esp opencrypto/criov.c optional crypto opencrypto/crypto.c optional crypto opencrypto/cryptodev.c optional cryptodev opencrypto/cryptosoft.c optional crypto opencrypto/deflate.c optional crypto opencrypto/rmd160.c optional crypto | ipsec opencrypto/skipjack.c optional crypto opencrypto/xform.c optional crypto pci/agp.c optional agp pci pci/agp_if.m optional agp pci pci/alpm.c optional alpm pci pci/amdpm.c optional amdpm pci | nfpm pci pci/amdsmb.c optional amdsmb pci pci/if_mn.c optional mn pci pci/if_pcn.c optional pcn pci pci/if_rl.c optional rl pci pci/if_sf.c optional sf pci pci/if_sis.c optional sis pci pci/if_ste.c optional ste pci pci/if_tl.c optional tl pci pci/if_vr.c optional vr pci 
pci/if_wb.c optional wb pci pci/if_xl.c optional xl pci pci/intpm.c optional intpm pci pci/ncr.c optional ncr pci pci/nfsmb.c optional nfsmb pci pci/viapm.c optional viapm pci pci/xrpu.c optional xrpu pci posix4/ksched.c optional _kposix_priority_scheduling posix4/p1003_1b.c standard posix4/posix4_mib.c standard rpc/rpcclnt.c optional nfsclient security/audit/audit.c optional audit security/audit/audit_arg.c optional audit security/audit/audit_bsm.c optional audit security/audit/audit_bsm_klib.c optional audit security/audit/audit_bsm_token.c optional audit security/audit/audit_pipe.c optional audit security/audit/audit_syscalls.c standard security/audit/audit_trigger.c optional audit security/audit/audit_worker.c optional audit security/mac/mac_inet.c optional mac inet security/mac/mac_label.c optional mac security/mac/mac_net.c optional mac security/mac/mac_pipe.c optional mac security/mac/mac_posix_sem.c optional mac security/mac/mac_process.c optional mac security/mac/mac_socket.c optional mac security/mac/mac_system.c optional mac security/mac/mac_sysv_msg.c optional mac security/mac/mac_sysv_sem.c optional mac security/mac/mac_sysv_shm.c optional mac security/mac/mac_vfs.c optional mac security/mac_biba/mac_biba.c optional mac_biba security/mac_bsdextended/mac_bsdextended.c optional mac_bsdextended security/mac_ifoff/mac_ifoff.c optional mac_ifoff security/mac_lomac/mac_lomac.c optional mac_lomac security/mac_mls/mac_mls.c optional mac_mls security/mac_none/mac_none.c optional mac_none security/mac_partition/mac_partition.c optional mac_partition security/mac_portacl/mac_portacl.c optional mac_portacl security/mac_seeotheruids/mac_seeotheruids.c optional mac_seeotheruids security/mac_stub/mac_stub.c optional mac_stub security/mac_test/mac_test.c optional mac_test ufs/ffs/ffs_alloc.c optional ffs ufs/ffs/ffs_balloc.c optional ffs ufs/ffs/ffs_inode.c optional ffs ufs/ffs/ffs_snapshot.c optional ffs ufs/ffs/ffs_softdep.c optional ffs ufs/ffs/ffs_subr.c optional ffs ufs/ffs/ffs_tables.c optional ffs ufs/ffs/ffs_vfsops.c optional ffs ufs/ffs/ffs_vnops.c optional ffs ufs/ffs/ffs_rawread.c optional directio ufs/ufs/ufs_acl.c optional ffs ufs/ufs/ufs_bmap.c optional ffs ufs/ufs/ufs_dirhash.c optional ffs ufs/ufs/ufs_extattr.c optional ffs ufs/ufs/ufs_inode.c optional ffs ufs/ufs/ufs_lookup.c optional ffs ufs/ufs/ufs_quota.c optional ffs ufs/ufs/ufs_vfsops.c optional ffs ufs/ufs/ufs_vnops.c optional ffs vm/default_pager.c standard vm/device_pager.c standard vm/phys_pager.c standard vm/redzone.c optional DEBUG_REDZONE vm/swap_pager.c standard vm/uma_core.c standard vm/uma_dbg.c standard vm/vm_contig.c standard vm/memguard.c optional DEBUG_MEMGUARD vm/vm_fault.c standard vm/vm_glue.c standard vm/vm_init.c standard vm/vm_kern.c standard vm/vm_map.c standard vm/vm_meter.c standard vm/vm_mmap.c standard vm/vm_object.c standard vm/vm_page.c standard vm/vm_pageout.c standard vm/vm_pageq.c standard vm/vm_pager.c standard vm/vm_unix.c standard vm/vm_zeroidle.c standard vm/vnode_pager.c standard # gnu/fs/xfs/xfs_alloc.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" \ warning "kernel contains GPL contaminated xfs filesystem" gnu/fs/xfs/xfs_alloc_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_bit.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_bmap.c optional xfs \ 
compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_bmap_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_buf_item.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_da_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_block.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_data.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_leaf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_node.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_sf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_trace.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir_leaf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_error.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_extfree_item.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_fsops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_ialloc.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_ialloc_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_inode.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_inode_item.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_iocore.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_itable.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dfrag.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_log.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_log_recover.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD 
-I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_mount.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_rename.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_ail.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_buf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_extfree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_inode.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_item.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_utils.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_vfsops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_vnodeops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_rw.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_attr_leaf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_attr.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dmops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_qmops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_iget.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_freebsd_iget.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_mountops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_vnops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_frw.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_buf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_globals.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_dmistubs.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_super.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD 
-I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_stats.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_vfs.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_vnode.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_sysctl.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_fs_subr.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_ioctl.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/debug.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/ktrace.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/mrlock.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/uuid.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/kmem.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_iomap.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_behavior.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" Index: head/sys/conf/options =================================================================== --- head/sys/conf/options (revision 159569) +++ head/sys/conf/options (revision 159570) @@ -1,724 +1,725 @@ # $FreeBSD$ # # On the handling of kernel options # # All kernel options should be listed in NOTES, with suitable # descriptions. Negative options (options that make some code not # compile) should be commented out; LINT (generated from NOTES) should # compile as much code as possible. Try to structure option-using # code so that a single option only switches code on, or only switches # code off, to make it possible to have a full compile-test. If # necessary, you can check for COMPILING_LINT to get maximum code # coverage. # # All new options shall also be listed in either "conf/options" or # "conf/options.<machine>". Options that affect a single source-file # <xxx>.[c|s] should be directed into "opt_<xxx>.h", while options # that affect multiple files should either go in "opt_global.h" if # this is a kernel-wide option (used just about everywhere), or in # "opt_<name-of-subsystem>.h" if it affects only some files. # Note that the effect of listing only an option without a # header-file-name in conf/options (and cousins) is that the last # convention is followed. # # This handling scheme is not yet fully implemented.
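[Illustrative sketch by the editor, not part of the committed file: the paragraph above describes how each option listed in this file is turned into a generated opt_*.h header. For an option such as the newly added SCHED_CORE, which is directed into "opt_sched.h", a consuming C file typically looks roughly like the fragment below. The file and variable names here are hypothetical; config(8) normally defines the option to 1 in the generated header only when the kernel configuration contains a corresponding "options" line.]

#include "opt_sched.h"	/* header generated by config(8) from this table */

#ifdef SCHED_CORE
/* Compiled only when the kernel config contains "options SCHED_CORE". */
static const char sched_choice[] = "core";
#else
static const char sched_choice[] = "other";
#endif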
# # # Format of this file: # Option name filename # # If filename is missing, the default is # opt_.h AAC_DEBUG opt_aac.h AHC_ALLOW_MEMIO opt_aic7xxx.h AHC_TMODE_ENABLE opt_aic7xxx.h AHC_DUMP_EEPROM opt_aic7xxx.h AHC_DEBUG opt_aic7xxx.h AHC_DEBUG_OPTS opt_aic7xxx.h AHC_REG_PRETTY_PRINT opt_aic7xxx.h AHD_DEBUG opt_aic79xx.h AHD_DEBUG_OPTS opt_aic79xx.h AHD_TMODE_ENABLE opt_aic79xx.h AHD_REG_PRETTY_PRINT opt_aic79xx.h ADW_ALLOW_MEMIO opt_adw.h TWA_DEBUG opt_twa.h TWA_FLASH_FIRMWARE opt_twa.h # Debugging options. DDB DDB_NUMSYM opt_ddb.h GDB GDBSPEED opt_gdb.h KDB opt_global.h KDB_TRACE opt_kdb.h KDB_UNATTENDED opt_kdb.h SYSCTL_DEBUG opt_sysctl.h # Miscellaneous options. ADAPTIVE_GIANT opt_adaptive_mutexes.h NO_ADAPTIVE_MUTEXES opt_adaptive_mutexes.h ALQ AUDIT opt_global.h CODA_COMPAT_5 opt_coda.h COMPAT_43 opt_compat.h COMPAT_43TTY opt_compat.h COMPAT_FREEBSD4 opt_compat.h COMPAT_FREEBSD5 opt_compat.h COMPILING_LINT opt_global.h CONSPEED opt_comconsole.h CY_PCI_FASTINTR DIRECTIO FULL_PREEMPTION opt_sched.h IPI_PREEMPTION opt_sched.h GEOM_AES opt_geom.h GEOM_APPLE opt_geom.h GEOM_BDE opt_geom.h GEOM_BSD opt_geom.h GEOM_CONCAT opt_geom.h GEOM_ELI opt_geom.h GEOM_FOX opt_geom.h GEOM_GATE opt_geom.h GEOM_GPT opt_geom.h GEOM_LABEL opt_geom.h GEOM_MBR opt_geom.h GEOM_MIRROR opt_geom.h GEOM_NOP opt_geom.h GEOM_PC98 opt_geom.h GEOM_RAID3 opt_geom.h GEOM_SHSEC opt_geom.h GEOM_STRIPE opt_geom.h GEOM_SUNLABEL opt_geom.h GEOM_UZIP opt_geom.h GEOM_VOL opt_geom.h GEOM_ZERO opt_geom.h KSTACK_MAX_PAGES KSTACK_PAGES KTRACE KTRACE_REQUEST_POOL opt_ktrace.h LIBICONV MAC MAC_ALWAYS_LABEL_MBUF opt_mac.h MAC_BIBA opt_dontuse.h MAC_BSDEXTENDED opt_dontuse.h MAC_DEBUG opt_mac.h MAC_IFOFF opt_dontuse.h MAC_LOMAC opt_dontuse.h MAC_MLS opt_dontuse.h MAC_NONE opt_dontuse.h MAC_PARTITION opt_dontuse.h MAC_PORTACL opt_dontuse.h MAC_SEEOTHERUIDS opt_dontuse.h MAC_STATIC opt_mac.h MAC_STUB opt_dontuse.h MAC_TEST opt_dontuse.h MD_ROOT opt_md.h MD_ROOT_SIZE opt_md.h MFI_DEBUG opt_mfi.h MFI_DECODE_LOG opt_mfi.h MPROF_BUFFERS opt_mprof.h MPROF_HASH_SIZE opt_mprof.h MUTEX_WAKE_ALL NSWBUF_MIN opt_swap.h PANIC_REBOOT_WAIT_TIME opt_panic.h PPC_DEBUG opt_ppc.h PPC_PROBE_CHIPSET opt_ppc.h PPS_SYNC opt_ntp.h PREEMPTION opt_sched.h QUOTA SCHED_4BSD opt_sched.h +SCHED_CORE opt_sched.h SCHED_ULE opt_sched.h SHOW_BUSYBUFS SLEEPQUEUE_PROFILING SLHCI_DEBUG opt_slhci.h SPX_HACK SUIDDIR MSGMNB opt_sysvipc.h MSGMNI opt_sysvipc.h MSGSEG opt_sysvipc.h MSGSSZ opt_sysvipc.h MSGTQL opt_sysvipc.h SEMMAP opt_sysvipc.h SEMMNI opt_sysvipc.h SEMMNS opt_sysvipc.h SEMMNU opt_sysvipc.h SEMMSL opt_sysvipc.h SEMOPM opt_sysvipc.h SEMUME opt_sysvipc.h SHMALL opt_sysvipc.h SHMMAX opt_sysvipc.h SHMMAXPGS opt_sysvipc.h SHMMIN opt_sysvipc.h SHMMNI opt_sysvipc.h SHMSEG opt_sysvipc.h SYSVMSG opt_sysvipc.h SYSVSEM opt_sysvipc.h SYSVSHM opt_sysvipc.h SW_WATCHDOG opt_watchdog.h TURNSTILE_PROFILING TTYHOG opt_tty.h VFS_AIO VERBOSE_SYSINIT opt_global.h WLCACHE opt_wavelan.h WLDEBUG opt_wavelan.h # POSIX kernel options P1003_1B_MQUEUE opt_posix.h P1003_1B_SEMAPHORES opt_posix.h _KPOSIX_PRIORITY_SCHEDULING opt_posix.h # Do we want the config file compiled into the kernel? INCLUDE_CONFIG_FILE opt_config.h # Options for static filesystems. These should only be used at config # time, since the corresponding lkms cannot work if there are any static # dependencies. Unusability is enforced by hiding the defines for the # options in a never-included header. 
CD9660 opt_dontuse.h CODA opt_dontuse.h EXT2FS opt_dontuse.h FDESCFS opt_dontuse.h HPFS opt_dontuse.h MSDOSFS opt_dontuse.h NTFS opt_dontuse.h NULLFS opt_dontuse.h NWFS opt_dontuse.h PORTALFS opt_dontuse.h PROCFS opt_dontuse.h PSEUDOFS opt_dontuse.h REISERFS opt_dontuse.h SMBFS opt_dontuse.h UDF opt_dontuse.h UMAPFS opt_dontuse.h UNIONFS opt_dontuse.h # Pseudofs debugging PSEUDOFS_TRACE opt_pseudofs.h # Broken - ffs_snapshot() dependency from ufs_lookup() :-( FFS opt_ffs_broken_fixme.h # These static filesystems have one slightly bogus static dependency in # sys/i386/i386/autoconf.c. If any of these filesystems are # statically compiled into the kernel, code for mounting them as root # filesystems will be enabled - but look below. NFSCLIENT opt_nfs.h NFSSERVER opt_nfs.h # filesystems and libiconv bridge CD9660_ICONV opt_dontuse.h MSDOSFS_ICONV opt_dontuse.h NTFS_ICONV opt_dontuse.h UDF_ICONV opt_dontuse.h # If you are following the conditions in the copyright, # you can enable soft-updates which will speed up a lot of things # and make the system safer from crashes at the same time. # otherwise a STUB module will be compiled in. SOFTUPDATES opt_ffs.h # On small, embedded systems, it can be useful to turn off support for # snapshots. It saves about 30-40k for a feature that would be lightly # used, if it is used at all. NO_FFS_SNAPSHOT opt_ffs.h # Enabling this option turns on support for Access Control Lists in UFS, # which can be used to support high security configurations. Depends on # UFS_EXTATTR. UFS_ACL opt_ufs.h # Enabling this option turns on support for extended attributes in UFS-based # filesystems, which can be used to support high security configurations # as well as new filesystem features. UFS_EXTATTR opt_ufs.h UFS_EXTATTR_AUTOSTART opt_ufs.h # Enable fast hash lookups for large directories on UFS-based filesystems. UFS_DIRHASH opt_ufs.h # We plan to remove the static dependences above, with a # <filesystem>_ROOT option to control if it is usable as root. This list # allows these options to be present in config files already (though # they won't make any difference yet). NFS_ROOT opt_nfsroot.h # SMB/CIFS requester NETSMB opt_netsmb.h # Experimental support for large MS-DOS filesystems; SEE WARNING IN "NOTES"! MSDOSFS_LARGE opt_msdosfs.h # Options used only in subr_param.c. HZ opt_param.h MAXFILES opt_param.h NBUF opt_param.h NMBCLUSTERS opt_param.h NSFBUFS opt_param.h VM_BCACHE_SIZE_MAX opt_param.h VM_SWZONE_SIZE_MAX opt_param.h MAXUSERS DFLDSIZ opt_param.h MAXDSIZ opt_param.h MAXSSIZ opt_param.h # Generic SCSI options. CAM_MAX_HIGHPOWER opt_cam.h CAMDEBUG opt_cam.h CAM_DEBUG_DELAY opt_cam.h CAM_DEBUG_BUS opt_cam.h CAM_DEBUG_TARGET opt_cam.h CAM_DEBUG_LUN opt_cam.h CAM_DEBUG_FLAGS opt_cam.h CAM_NEW_TRAN_CODE opt_cam.h SCSI_DELAY opt_scsi.h SCSI_NO_SENSE_STRINGS opt_scsi.h SCSI_NO_OP_STRINGS opt_scsi.h # Options used only in cam/scsi/scsi_cd.c CHANGER_MIN_BUSY_SECONDS opt_cd.h CHANGER_MAX_BUSY_SECONDS opt_cd.h # Options used only in cam/scsi/scsi_sa.c. SA_IO_TIMEOUT opt_sa.h SA_SPACE_TIMEOUT opt_sa.h SA_REWIND_TIMEOUT opt_sa.h SA_ERASE_TIMEOUT opt_sa.h SA_1FM_AT_EOD opt_sa.h # Options used only in cam/scsi/scsi_pt.c SCSI_PT_DEFAULT_TIMEOUT opt_pt.h # Options used only in cam/scsi/scsi_ses.c SES_ENABLE_PASSTHROUGH opt_ses.h # Options used in dev/sym/ (Symbios SCSI driver).
SYM_SETUP_LP_PROBE_MAP opt_sym.h #-Low Priority Probe Map (bits) # Allows the ncr to take precedence # 1 (1<<0) -> 810a, 860 # 2 (1<<1) -> 825a, 875, 885, 895 # 4 (1<<2) -> 895a, 896, 1510d SYM_SETUP_SCSI_DIFF opt_sym.h #-HVD support for 825a, 875, 885 # disabled:0 (default), enabled:1 SYM_SETUP_PCI_PARITY opt_sym.h #-PCI parity checking # disabled:0, enabled:1 (default) SYM_SETUP_MAX_LUN opt_sym.h #-Number of LUNs supported # default:8, range:[1..64] # Options used only in pci/ncr.c SCSI_NCR_DEBUG opt_ncr.h SCSI_NCR_MAX_SYNC opt_ncr.h SCSI_NCR_MAX_WIDE opt_ncr.h SCSI_NCR_MYADDR opt_ncr.h # Options used only in dev/isp/* ISP_TARGET_MODE opt_isp.h ISP_FW_CRASH_DUMP opt_isp.h ISP_DEFAULT_ROLES opt_isp.h # Options used in the 'ata' ATA/ATAPI driver ATA_STATIC_ID opt_ata.h ATA_NOPCI opt_ata.h # Net stuff. ACCEPT_FILTER_DATA ACCEPT_FILTER_HTTP ALTQ opt_global.h ALTQ_CBQ opt_altq.h ALTQ_RED opt_altq.h ALTQ_RIO opt_altq.h ALTQ_HFSC opt_altq.h ALTQ_CDNR opt_altq.h ALTQ_PRIQ opt_altq.h ALTQ_NOPCC opt_altq.h ALTQ_DEBUG opt_altq.h BOOTP opt_bootp.h BOOTP_COMPAT opt_bootp.h BOOTP_NFSROOT opt_bootp.h BOOTP_NFSV3 opt_bootp.h BOOTP_WIRED_TO opt_bootp.h DEVICE_POLLING DEV_PF opt_pf.h DEV_PFLOG opt_pf.h DEV_PFSYNC opt_pf.h ETHER_II opt_ef.h ETHER_8023 opt_ef.h ETHER_8022 opt_ef.h ETHER_SNAP opt_ef.h MROUTING opt_mrouting.h PIM opt_mrouting.h INET opt_inet.h INET6 opt_inet6.h IPSEC opt_ipsec.h IPSEC_ESP opt_ipsec.h IPSEC_DEBUG opt_ipsec.h IPSEC_FILTERGIF opt_ipsec.h FAST_IPSEC opt_ipsec.h IPDIVERT DUMMYNET opt_ipdn.h IPFILTER opt_ipfilter.h IPFILTER_LOG opt_ipfilter.h IPFILTER_LOOKUP opt_ipfilter.h IPFILTER_DEFAULT_BLOCK opt_ipfilter.h IPFIREWALL opt_ipfw.h IPFIREWALL_VERBOSE opt_ipfw.h IPFIREWALL_VERBOSE_LIMIT opt_ipfw.h IPFIREWALL_DEFAULT_TO_ACCEPT opt_ipfw.h IPFIREWALL_FORWARD opt_ipfw.h IPFIREWALL_FORWARD_EXTENDED opt_ipfw.h IPSTEALTH IPX IPXIP opt_ipx.h LIBMBPOOL LIBMCHAIN LIBALIAS MBUF_STRESS_TEST NCP NETATALK opt_atalk.h NET_WITH_GIANT opt_net.h PPP_BSDCOMP opt_ppp.h PPP_DEFLATE opt_ppp.h PPP_FILTER opt_ppp.h SLIP_IFF_OPTS opt_slip.h TCPDEBUG TCP_SIGNATURE opt_inet.h TCP_SACK_DEBUG opt_tcp_sack.h TCP_DROP_SYNFIN opt_tcp_input.h DEV_VLAN opt_vlan.h VLAN_ARRAY opt_vlan.h XBONEHACK # Netgraph(4). Use option NETGRAPH to enable the base netgraph code. # Each netgraph node type can be either be compiled into the kernel # or loaded dynamically. To get the former, include the corresponding # option below. Each type has its own man page, e.g. ng_async(4). 
NETGRAPH NETGRAPH_DEBUG opt_netgraph.h NETGRAPH_ASYNC opt_netgraph.h NETGRAPH_ATMLLC opt_netgraph.h NETGRAPH_ATM_ATMPIF opt_netgraph.h NETGRAPH_BLUETOOTH opt_netgraph.h NETGRAPH_BLUETOOTH_BT3C opt_netgraph.h NETGRAPH_BLUETOOTH_H4 opt_netgraph.h NETGRAPH_BLUETOOTH_HCI opt_netgraph.h NETGRAPH_BLUETOOTH_L2CAP opt_netgraph.h NETGRAPH_BLUETOOTH_SOCKET opt_netgraph.h NETGRAPH_BLUETOOTH_UBT opt_netgraph.h NETGRAPH_BLUETOOTH_UBTBCMFW opt_netgraph.h NETGRAPH_BPF opt_netgraph.h NETGRAPH_BRIDGE opt_netgraph.h NETGRAPH_CISCO opt_netgraph.h NETGRAPH_DEVICE opt_netgraph.h NETGRAPH_ECHO opt_netgraph.h NETGRAPH_EIFACE opt_netgraph.h NETGRAPH_ETHER opt_netgraph.h NETGRAPH_FEC opt_netgraph.h NETGRAPH_FRAME_RELAY opt_netgraph.h NETGRAPH_GIF opt_netgraph.h NETGRAPH_GIF_DEMUX opt_netgraph.h NETGRAPH_HOLE opt_netgraph.h NETGRAPH_IFACE opt_netgraph.h NETGRAPH_IP_INPUT opt_netgraph.h NETGRAPH_IPFW opt_netgraph.h NETGRAPH_KSOCKET opt_netgraph.h NETGRAPH_L2TP opt_netgraph.h NETGRAPH_LMI opt_netgraph.h # MPPC compression requires proprietary files (not included) NETGRAPH_MPPC_COMPRESSION opt_netgraph.h NETGRAPH_MPPC_ENCRYPTION opt_netgraph.h NETGRAPH_NAT opt_netgraph.h NETGRAPH_NETFLOW opt_netgraph.h NETGRAPH_ONE2MANY opt_netgraph.h NETGRAPH_PPP opt_netgraph.h NETGRAPH_PPPOE opt_netgraph.h NETGRAPH_PPTPGRE opt_netgraph.h NETGRAPH_RFC1490 opt_netgraph.h NETGRAPH_SOCKET opt_netgraph.h NETGRAPH_SPLIT opt_netgraph.h NETGRAPH_SPPP opt_netgraph.h NETGRAPH_TCPMSS opt_netgraph.h NETGRAPH_TEE opt_netgraph.h NETGRAPH_TTY opt_netgraph.h NETGRAPH_UI opt_netgraph.h NETGRAPH_VJC opt_netgraph.h # NgATM options NGATM_ATM opt_netgraph.h NGATM_ATMBASE opt_netgraph.h NGATM_SSCOP opt_netgraph.h NGATM_SSCFU opt_netgraph.h NGATM_UNI opt_netgraph.h NGATM_CCATM opt_netgraph.h # DRM options DRM_DEBUG opt_drm.h ZERO_COPY_SOCKETS opt_zero.h TI_PRIVATE_JUMBOS opt_ti.h TI_JUMBO_HDRSPLIT opt_ti.h # ATM (HARP version) ATM_CORE opt_atm.h ATM_IP opt_atm.h ATM_SIGPVC opt_atm.h ATM_SPANS opt_atm.h ATM_UNI opt_atm.h # XXX Conflict: # of devices vs network protocol (Native ATM). # This makes "atm.h" unusable. NATM # DPT driver debug flags DPT_MEASURE_PERFORMANCE opt_dpt.h DPT_HANDLE_TIMEOUTS opt_dpt.h DPT_TIMEOUT_FACTOR opt_dpt.h DPT_LOST_IRQ opt_dpt.h DPT_RESET_HBA opt_dpt.h # Misc debug flags. Most of these should probably be replaced with # 'DEBUG', and then let people recompile just the interesting modules # with 'make CC="cc -DDEBUG"'. 
CLUSTERDEBUG opt_debug_cluster.h DEBUG_1284 opt_ppb_1284.h VP0_DEBUG opt_vpo.h LPT_DEBUG opt_lpt.h PLIP_DEBUG opt_plip.h LOCKF_DEBUG opt_debug_lockf.h NETATALKDEBUG opt_atalk.h SI_DEBUG opt_debug_si.h # Fb options FB_DEBUG opt_fb.h FB_INSTALL_CDEV opt_fb.h # ppbus related options PERIPH_1284 opt_ppb_1284.h DONTPROBE_1284 opt_ppb_1284.h # smbus related options ENABLE_ALART opt_intpm.h # These cause changes all over the kernel BLKDEV_IOSIZE opt_global.h BURN_BRIDGES opt_global.h DEBUG opt_global.h DEBUG_LOCKS opt_global.h DEBUG_VFS_LOCKS opt_global.h DIAGNOSTIC opt_global.h INVARIANT_SUPPORT opt_global.h INVARIANTS opt_global.h MCLSHIFT opt_global.h MUTEX_DEBUG opt_global.h MUTEX_NOINLINE opt_global.h MUTEX_PROFILING opt_global.h MSIZE opt_global.h REGRESSION opt_global.h RESTARTABLE_PANICS opt_global.h RWLOCK_NOINLINE opt_global.h VFS_BIO_DEBUG opt_global.h # These are VM related options VM_KMEM_SIZE opt_vm.h VM_KMEM_SIZE_SCALE opt_vm.h VM_KMEM_SIZE_MAX opt_vm.h NO_SWAPPING opt_vm.h MALLOC_MAKE_FAILURES opt_vm.h MALLOC_PROFILE opt_vm.h PQ_NOOPT opt_vmpage.h # The MemGuard replacement allocator used for tamper-after-free detection DEBUG_MEMGUARD opt_vm.h # The RedZone malloc(9) protection DEBUG_REDZONE opt_vm.h # Standard SMP options SMP opt_global.h # Size of the kernel message buffer MSGBUF_SIZE opt_msgbuf.h # NFS options NFS_MINATTRTIMO opt_nfs.h NFS_MAXATTRTIMO opt_nfs.h NFS_MINDIRATTRTIMO opt_nfs.h NFS_MAXDIRATTRTIMO opt_nfs.h NFS_GATHERDELAY opt_nfs.h NFS_WDELAYHASHSIZ opt_nfs.h NFS_DEBUG opt_nfs.h # For the Bt848/Bt848A/Bt849/Bt878/Bt879 driver OVERRIDE_CARD opt_bktr.h OVERRIDE_TUNER opt_bktr.h OVERRIDE_DBX opt_bktr.h OVERRIDE_MSP opt_bktr.h BROOKTREE_SYSTEM_DEFAULT opt_bktr.h BROOKTREE_ALLOC_PAGES opt_bktr.h BKTR_OVERRIDE_CARD opt_bktr.h BKTR_OVERRIDE_TUNER opt_bktr.h BKTR_OVERRIDE_DBX opt_bktr.h BKTR_OVERRIDE_MSP opt_bktr.h BKTR_SYSTEM_DEFAULT opt_bktr.h BKTR_ALLOC_PAGES opt_bktr.h BKTR_USE_PLL opt_bktr.h BKTR_GPIO_ACCESS opt_bktr.h BKTR_NO_MSP_RESET opt_bktr.h BKTR_430_FX_MODE opt_bktr.h BKTR_SIS_VIA_MODE opt_bktr.h BKTR_USE_FREEBSD_SMBUS opt_bktr.h BKTR_NEW_MSP34XX_DRIVER opt_bktr.h # options for serial support COM_ESP opt_sio.h COM_MULTIPORT opt_sio.h BREAK_TO_DEBUGGER opt_comconsole.h ALT_BREAK_TO_DEBUGGER opt_comconsole.h # Options to support PPS UART_PPS_ON_CTS opt_uart.h # options for bus/device framework BUS_DEBUG opt_bus.h # options for USB support USB_DEBUG opt_usb.h USBVERBOSE opt_usb.h UKBD_DFLT_KEYMAP opt_ukbd.h UPLCOM_INTR_INTERVAL opt_uplcom.h UVSCOM_DEFAULT_OPKTSIZE opt_uvscom.h UVSCOM_INTR_INTERVAL opt_uvscom.h # Embedded system options INIT_PATH ROOTDEVNAME FDC_DEBUG opt_fdc.h PCFCLOCK_VERBOSE opt_pcfclock.h PCFCLOCK_MAX_RETRIES opt_pcfclock.h KTR opt_global.h KTR_ALQ opt_ktr.h KTR_MASK opt_ktr.h KTR_CPUMASK opt_ktr.h KTR_COMPILE opt_global.h KTR_ENTRIES opt_global.h KTR_VERBOSE opt_ktr.h WITNESS opt_global.h WITNESS_KDB opt_witness.h WITNESS_SKIPSPIN opt_witness.h # options for ACPI support ACPI_DEBUG opt_acpi.h ACPI_MAX_THREADS opt_acpi.h ACPI_NO_SEMAPHORES opt_acpi.h ACPICA_PEDANTIC opt_acpi.h # ISA support DEV_ISA opt_isa.h ISAPNP opt_isa.h # options for DEVFS, see sys/fs/devfs/devfs.h NDEVFSINO opt_devfs.h NDEVFSOVERFLOW opt_devfs.h # various 'device presence' options. 
DEV_BPF opt_bpf.h DEV_MCA opt_mca.h DEV_CARP opt_carp.h DEV_SPLASH opt_splash.h EISA_SLOTS opt_eisa.h # ed driver ED_HPP opt_ed.h ED_3C503 opt_ed.h ED_SIC opt_ed.h # bce driver BCE_DEBUG opt_bce.h # wi driver WI_SYMBOL_FIRMWARE opt_wi.h SOCKBUF_DEBUG opt_global.h # options for ubsec driver UBSEC_DEBUG opt_ubsec.h UBSEC_RNDTEST opt_ubsec.h UBSEC_NO_RNG opt_ubsec.h # options for hifn driver HIFN_DEBUG opt_hifn.h HIFN_RNDTEST opt_hifn.h # options for safenet driver SAFE_DEBUG opt_safe.h SAFE_NO_RNG opt_safe.h SAFE_RNDTEST opt_safe.h # syscons options MAXCONS opt_syscons.h SC_ALT_MOUSE_IMAGE opt_syscons.h SC_CUT_SPACES2TABS opt_syscons.h SC_CUT_SEPCHARS opt_syscons.h SC_DEBUG_LEVEL opt_syscons.h SC_DFLT_FONT opt_syscons.h SC_DISABLE_KDBKEY opt_syscons.h SC_DISABLE_REBOOT opt_syscons.h SC_HISTORY_SIZE opt_syscons.h SC_KERNEL_CONS_ATTR opt_syscons.h SC_KERNEL_CONS_REV_ATTR opt_syscons.h SC_MOUSE_CHAR opt_syscons.h SC_NO_CUTPASTE opt_syscons.h SC_NO_FONT_LOADING opt_syscons.h SC_NO_HISTORY opt_syscons.h SC_NO_SUSPEND_VTYSWITCH opt_syscons.h SC_NO_SYSMOUSE opt_syscons.h SC_NORM_ATTR opt_syscons.h SC_NORM_REV_ATTR opt_syscons.h SC_PIXEL_MODE opt_syscons.h SC_RENDER_DEBUG opt_syscons.h SC_TWOBUTTON_MOUSE opt_syscons.h # kbd options KBD_DISABLE_KEYMAP_LOAD opt_kbd.h KBD_INSTALL_CDEV opt_kbd.h KBD_MAXRETRY opt_kbd.h KBD_MAXWAIT opt_kbd.h KBD_RESETDELAY opt_kbd.h KBDIO_DEBUG opt_kbd.h # options for the Atheros driver ATH_DEBUG opt_ath.h ATH_TXBUF opt_ath.h ATH_RXBUF opt_ath.h ATH_DIAGAPI opt_ath.h ATH_TX99_DIAG opt_ath.h # dcons options DCONS_BUF_SIZE opt_dcons.h DCONS_POLL_HZ opt_dcons.h DCONS_FORCE_CONSOLE opt_dcons.h DCONS_FORCE_GDB opt_dcons.h # Static unit counts NI4BTRC opt_i4b.h NI4BRBCH opt_i4b.h NI4BTEL opt_i4b.h NI4BIPR opt_i4b.h NI4BING opt_i4b.h NI4BISPPP opt_i4b.h # VFS options LOOKUP_SHARED opt_vfs.h # HWPMC options HWPMC_HOOKS # XBOX options for FreeBSD/i386, but some files are MI XBOX opt_xbox.h # XFS XFS Index: head/sys/i386/conf/GENERIC =================================================================== --- head/sys/i386/conf/GENERIC (revision 159569) +++ head/sys/i386/conf/GENERIC (revision 159570) @@ -1,295 +1,296 @@ # # GENERIC -- Generic kernel configuration file for FreeBSD/i386 # # For more information on this file, please read the handbook section on # Kernel Configuration Files: # # http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ cpu I486_CPU cpu I586_CPU cpu I686_CPU ident GENERIC # To statically compile in device wiring instead of /boot/device.hints #hints "GENERIC.hints" # Default places to look for devices. 
makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols #options SCHED_ULE # ULE scheduler options SCHED_4BSD # 4BSD scheduler +#options SCHED_CORE # CORE scheduler options PREEMPTION # Enable kernel thread preemption options INET # InterNETworking options INET6 # IPv6 communications protocols options FFS # Berkeley Fast Filesystem options SOFTUPDATES # Enable FFS soft updates support options UFS_ACL # Support for access control lists options UFS_DIRHASH # Improve performance on big directories options MD_ROOT # MD is a potential root device options NFSCLIENT # Network Filesystem Client options NFSSERVER # Network Filesystem Server options NFS_ROOT # NFS usable as /, requires NFSCLIENT options MSDOSFS # MSDOS Filesystem options CD9660 # ISO 9660 Filesystem options PROCFS # Process filesystem (requires PSEUDOFS) options PSEUDOFS # Pseudo-filesystem framework options GEOM_GPT # GUID Partition Tables. options COMPAT_43 # Compatible with BSD 4.3 [KEEP THIS!] options COMPAT_43TTY # BSD 4.3 TTY compat [KEEP THIS!] options COMPAT_FREEBSD4 # Compatible with FreeBSD4 options COMPAT_FREEBSD5 # Compatible with FreeBSD5 options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI options KTRACE # ktrace(1) support options SYSVSHM # SYSV-style shared memory options SYSVMSG # SYSV-style message queues options SYSVSEM # SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions options KBD_INSTALL_CDEV # install a CDEV entry in /dev options ADAPTIVE_GIANT # Giant mutex is adaptive. options STOP_NMI # Stop CPUS using NMI instead of IPI # Debugging for use in -current options KDB # Enable kernel debugger support. options DDB # Support DDB. options GDB # Support remote GDB. options INVARIANTS # Enable calls of extra sanity checking options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS options WITNESS # Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed # To make an SMP kernel, the next two lines are needed options SMP # Symmetric MultiProcessor Kernel device apic # I/O APIC # Bus support. device eisa device pci # Floppy drives device fdc # ATA and ATAPI devices device ata device atadisk # ATA disk drives device ataraid # ATA RAID drives device atapicd # ATAPI CDROM drives device atapifd # ATAPI floppy drives device atapist # ATAPI tape drives options ATA_STATIC_ID # Static device numbering # SCSI Controllers device ahb # EISA AHA1742 family device ahc # AHA2940 and onboard AIC7xxx devices options AHC_REG_PRETTY_PRINT # Print register bitfields in debug # output. Adds ~128k to driver. device ahd # AHA39320/29320 and onboard AIC79xx devices options AHD_REG_PRETTY_PRINT # Print register bitfields in debug # output. Adds ~215k to driver. device amd # AMD 53C974 (Tekram DC-390(T)) device isp # Qlogic family #device ispfw # Firmware for QLogic HBAs- normally a module device mpt # LSI-Logic MPT-Fusion #device ncr # NCR/Symbios Logic device sym # NCR/Symbios Logic (newer chipsets + those of `ncr') device trm # Tekram DC395U/UW/F DC315U adapters device adv # Advansys SCSI adapters device adw # Advansys wide SCSI adapters device aha # Adaptec 154x SCSI adapters device aic # Adaptec 15[012]x SCSI adapters, AIC-6[23]60. 
device bt # Buslogic/Mylex MultiMaster SCSI adapters device ncv # NCR 53C500 device nsp # Workbit Ninja SCSI-3 device stg # TMC 18C30/18C50 # SCSI peripherals device scbus # SCSI bus (required for SCSI) device ch # SCSI media changers device da # Direct Access (disks) device sa # Sequential Access (tape etc) device cd # CD device pass # Passthrough device (direct SCSI access) device ses # SCSI Environmental Services (and SAF-TE) # RAID controllers interfaced to the SCSI subsystem device amr # AMI MegaRAID device arcmsr # Areca SATA II RAID device asr # DPT SmartRAID V, VI and Adaptec SCSI RAID device ciss # Compaq Smart RAID 5* device dpt # DPT Smartcache III, IV - See NOTES for options device hptmv # Highpoint RocketRAID 182x device rr232x # Highpoint RocketRAID 232x device iir # Intel Integrated RAID device ips # IBM (Adaptec) ServeRAID device mly # Mylex AcceleRAID/eXtremeRAID device twa # 3ware 9000 series PATA/SATA RAID # RAID controllers device aac # Adaptec FSA RAID device aacp # SCSI passthrough for aac (requires CAM) device ida # Compaq Smart RAID device mfi # LSI MegaRAID SAS device mlx # Mylex DAC960 family device pst # Promise Supertrak SX6000 device twe # 3ware ATA RAID # atkbdc0 controls both the keyboard and the PS/2 mouse device atkbdc # AT keyboard controller device atkbd # AT keyboard device psm # PS/2 mouse device kbdmux # keyboard multiplexer device vga # VGA video card driver device splash # Splash screen and screen saver support # syscons is the default console driver, resembling an SCO console device sc device agp # support several AGP chipsets # Power management support (see NOTES for more options) #device apm # Add suspend/resume support for the i8254. device pmtimer # PCCARD (PCMCIA) support # PCMCIA and cardbus bridge support device cbb # cardbus (yenta) bridge device pccard # PC Card (16-bit) bus device cardbus # CardBus (32-bit) bus # Serial (COM) ports device sio # 8250, 16[45]50 based serial ports device uart # Generic UART driver # Parallel port device ppc device ppbus # Parallel port bus (required) device lpt # Printer device plip # TCP/IP over parallel device ppi # Parallel port interface device #device vpo # Requires scbus and da # If you've got a "dumb" serial or parallel PCI card that is # supported by the puc(4) glue driver, uncomment the following # line to enable it (connects to sio, uart and/or ppc drivers): #device puc # PCI Ethernet NICs. device de # DEC/Intel DC21x4x (``Tulip'') device em # Intel PRO/1000 adapter Gigabit Ethernet Card device ixgb # Intel PRO/10GbE Ethernet Card device le # AMD Am7900 LANCE and Am79C9xx PCnet device txp # 3Com 3cR990 (``Typhoon'') device vx # 3Com 3c590, 3c595 (``Vortex'') # PCI Ethernet NICs that use the common MII bus controller code. # NOTE: Be sure to keep the 'device miibus' line in order to use these NICs! 
device miibus # MII bus support device bce # Broadcom BCM5706/BCM5708 Gigabit Ethernet device bfe # Broadcom BCM440x 10/100 Ethernet device bge # Broadcom BCM570xx Gigabit Ethernet device dc # DEC/Intel 21143 and various workalikes device fxp # Intel EtherExpress PRO/100B (82557, 82558) device lge # Level 1 LXT1001 gigabit Ethernet device nge # NatSemi DP83820 gigabit Ethernet device nve # nVidia nForce MCP on-board Ethernet Networking device pcn # AMD Am79C97x PCI 10/100 (precedence over 'le') device re # RealTek 8139C+/8169/8169S/8110S device rl # RealTek 8129/8139 device sf # Adaptec AIC-6915 (``Starfire'') device sis # Silicon Integrated Systems SiS 900/SiS 7016 device sk # SysKonnect SK-984x & SK-982x gigabit Ethernet device ste # Sundance ST201 (D-Link DFE-550TX) device ti # Alteon Networks Tigon I/II gigabit Ethernet device tl # Texas Instruments ThunderLAN device tx # SMC EtherPower II (83c170 ``EPIC'') device vge # VIA VT612x gigabit Ethernet device vr # VIA Rhine, Rhine II device wb # Winbond W89C840F device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'') # ISA Ethernet NICs. pccard NICs included. device cs # Crystal Semiconductor CS89x0 NIC # 'device ed' requires 'device miibus' device ed # NE[12]000, SMC Ultra, 3c503, DS8390 cards device ex # Intel EtherExpress Pro/10 and Pro/10+ device ep # Etherlink III based cards device fe # Fujitsu MB8696x based cards device ie # EtherExpress 8/16, 3C507, StarLAN 10 etc. device sn # SMC's 9000 series of Ethernet chips device xe # Xircom pccard Ethernet # Wireless NIC cards device wlan # 802.11 support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support device wlan_tkip # 802.11 TKIP support device an # Aironet 4500/4800 802.11 wireless NICs. device ath # Atheros pci/cardbus NIC's device ath_hal # Atheros HAL (Hardware Access Layer) device ath_rate_sample # SampleRate tx rate control for ath device awi # BayStack 660 and others device ral # Ralink Technology RT2500 wireless NICs. device wi # WaveLAN/Intersil/Symbol 802.11 wireless NICs. #device wl # Older non 802.11 Wavelan wireless NIC. # Pseudo devices. device loop # Network loopback device random # Entropy device device ether # Ethernet support device sl # Kernel SLIP device ppp # Kernel PPP device tun # Packet tunnel. device pty # Pseudo-ttys (telnet etc) device md # Memory "disks" device gif # IPv6 and IPv4 tunneling device faith # IPv6-to-IPv4 relaying (translation) # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. 
device bpf # Berkeley packet filter # USB support device uhci # UHCI PCI->USB interface device ohci # OHCI PCI->USB interface device ehci # EHCI PCI->USB interface (USB 2.0) device usb # USB Bus (required) #device udbp # USB Double Bulk Pipe devices device ugen # Generic device uhid # "Human Interface Devices" device ukbd # Keyboard device ulpt # Printer device umass # Disks/Mass storage - Requires scbus and da device ums # Mouse device ural # Ralink Technology RT2500USB wireless NICs device urio # Diamond Rio 500 MP3 player device uscanner # Scanners # USB Ethernet, requires miibus device aue # ADMtek USB Ethernet device axe # ASIX Electronics USB Ethernet device cdce # Generic USB over Ethernet device cue # CATC USB Ethernet device kue # Kawasaki LSI USB Ethernet device rue # RealTek RTL8150 USB Ethernet # FireWire support device firewire # FireWire bus code device sbp # SCSI over FireWire (Requires scbus and da) device fwe # Ethernet over FireWire (non-standard!) Index: head/sys/kern/kern_clock.c =================================================================== --- head/sys/kern/kern_clock.c (revision 159569) +++ head/sys/kern/kern_clock.c (revision 159570) @@ -1,579 +1,580 @@ /*- * Copyright (c) 1982, 1986, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_device_polling.h" #include "opt_hwpmc_hooks.h" #include "opt_ntp.h" #include "opt_watchdog.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef GPROF #include #endif #ifdef HWPMC_HOOKS #include #endif #ifdef DEVICE_POLLING extern void hardclock_device_poll(void); #endif /* DEVICE_POLLING */ static void initclocks(void *dummy); SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL) /* Some of these don't belong here, but it's easiest to concentrate them. */ long cp_time[CPUSTATES]; static int sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS) { int error; #ifdef SCTL_MASK32 int i; unsigned int cp_time32[CPUSTATES]; if (req->flags & SCTL_MASK32) { if (!req->oldptr) return SYSCTL_OUT(req, 0, sizeof(cp_time32)); for (i = 0; i < CPUSTATES; i++) cp_time32[i] = (unsigned int)cp_time[i]; error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32)); } else #endif { if (!req->oldptr) return SYSCTL_OUT(req, 0, sizeof(cp_time)); error = SYSCTL_OUT(req, cp_time, sizeof(cp_time)); } return error; } SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD, 0,0, sysctl_kern_cp_time, "LU", "CPU time statistics"); #ifdef SW_WATCHDOG #include static int watchdog_ticks; static int watchdog_enabled; static void watchdog_fire(void); static void watchdog_config(void *, u_int, int *); #endif /* SW_WATCHDOG */ /* * Clock handling routines. * * This code is written to operate with two timers that run independently of * each other. * * The main timer, running hz times per second, is used to trigger interval * timers, timeouts and rescheduling as needed. * * The second timer handles kernel and user profiling, * and does resource use estimation. If the second timer is programmable, * it is randomized to avoid aliasing between the two clocks. For example, * the randomization prevents an adversary from always giving up the cpu * just before its quantum expires. Otherwise, it would never accumulate * cpu ticks. The mean frequency of the second timer is stathz. * * If no second timer exists, stathz will be zero; in this case we drive * profiling and statistics off the main clock. This WILL NOT be accurate; * do not do it unless absolutely necessary. * * The statistics clock may (or may not) be run at a higher rate while * profiling. This profile clock runs at profhz. We require that profhz * be an integral multiple of stathz. * * If the statistics clock is running fast, it must be divided by the ratio * profhz/stathz for statistics. (For profiling, every tick counts.) * * Time-of-day is maintained using a "timecounter", which may or may * not be related to the hardware generating the above mentioned * interrupts. */ int stathz; int profhz; int profprocs; int ticks; int psratio; /* * Initialize clock frequencies and start both clocks running. */ /* ARGSUSED*/ static void initclocks(dummy) void *dummy; { register int i; /* * Set divisors to 1 (normal case) and let the machine-specific * code do its bit. */ cpu_initclocks(); /* * Compute profhz/stathz, and fix profhz if needed. */ i = stathz ? stathz : hz; if (profhz == 0) profhz = i; psratio = profhz / i; #ifdef SW_WATCHDOG EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0); #endif } /* * Each time the real-time timer fires, this function is called on all CPUs. 
* Note that hardclock() calls hardclock_cpu() for the boot CPU, so only * the other CPUs in the system need to call this function. */ void hardclock_cpu(int usermode) { struct pstats *pstats; struct thread *td = curthread; struct proc *p = td->td_proc; /* * Run current process's virtual and profile time, as needed. */ mtx_lock_spin_flags(&sched_lock, MTX_QUIET); + sched_tick(); if (p->p_flag & P_SA) { /* XXXKSE What to do? */ } else { pstats = p->p_stats; if (usermode && timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) && itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) { p->p_sflag |= PS_ALRMPEND; td->td_flags |= TDF_ASTPENDING; } if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) && itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) { p->p_sflag |= PS_PROFPEND; td->td_flags |= TDF_ASTPENDING; } } mtx_unlock_spin_flags(&sched_lock, MTX_QUIET); #ifdef HWPMC_HOOKS if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid))) PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL); #endif } /* * The real-time timer, interrupting hz times per second. */ void hardclock(int usermode, uintfptr_t pc) { int need_softclock = 0; hardclock_cpu(usermode); tc_ticktock(); /* * If no separate statistics clock is available, run it from here. * * XXX: this only works for UP */ if (stathz == 0) { profclock(usermode, pc); statclock(usermode); } #ifdef DEVICE_POLLING hardclock_device_poll(); /* this is very short and quick */ #endif /* DEVICE_POLLING */ /* * Process callouts at a very low cpu priority, so we don't keep the * relatively high clock interrupt priority any longer than necessary. */ mtx_lock_spin_flags(&callout_lock, MTX_QUIET); ticks++; if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) { need_softclock = 1; } else if (softticks + 1 == ticks) ++softticks; mtx_unlock_spin_flags(&callout_lock, MTX_QUIET); /* * swi_sched acquires sched_lock, so we don't want to call it with * callout_lock held; incorrect locking order. */ if (need_softclock) swi_sched(softclock_ih, 0); #ifdef SW_WATCHDOG if (watchdog_enabled > 0 && --watchdog_ticks <= 0) watchdog_fire(); #endif /* SW_WATCHDOG */ } /* * Compute number of ticks in the specified amount of time. */ int tvtohz(tv) struct timeval *tv; { register unsigned long ticks; register long sec, usec; /* * If the number of usecs in the whole seconds part of the time * difference fits in a long, then the total number of usecs will * fit in an unsigned long. Compute the total and convert it to * ticks, rounding up and adding 1 to allow for the current tick * to expire. Rounding also depends on unsigned long arithmetic * to avoid overflow. * * Otherwise, if the number of ticks in the whole seconds part of * the time difference fits in a long, then convert the parts to * ticks separately and add, using similar rounding methods and * overflow avoidance. This method would work in the previous * case but it is slightly slower and assumes that hz is integral. * * Otherwise, round the time difference down to the maximum * representable value. * * If ints have 32 bits, then the maximum value for any timeout in * 10ms ticks is 248 days. 
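 *
 * (Illustrative worked example added by the editor, not part of the
 * original comment: with hz = 100 the global "tick" is 10000 usec, so
 * a timeval of 1 second and 500 usec takes the first branch below and
 * gives (1 * 1000000 + 500 + 9999) / 10000 + 1 = 101 + 1 = 102 ticks,
 * i.e. a little over one second worth of 10 ms ticks, rounded up plus
 * one extra tick to allow for the partially elapsed current tick.)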
*/ sec = tv->tv_sec; usec = tv->tv_usec; if (usec < 0) { sec--; usec += 1000000; } if (sec < 0) { #ifdef DIAGNOSTIC if (usec > 0) { sec++; usec -= 1000000; } printf("tvotohz: negative time difference %ld sec %ld usec\n", sec, usec); #endif ticks = 1; } else if (sec <= LONG_MAX / 1000000) ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1)) / tick + 1; else if (sec <= LONG_MAX / hz) ticks = sec * hz + ((unsigned long)usec + (tick - 1)) / tick + 1; else ticks = LONG_MAX; if (ticks > INT_MAX) ticks = INT_MAX; return ((int)ticks); } /* * Start profiling on a process. * * Kernel profiling passes proc0 which never exits and hence * keeps the profile clock running constantly. */ void startprofclock(p) register struct proc *p; { /* * XXX; Right now sched_lock protects statclock(), but perhaps * it should be protected later on by a time_lock, which would * cover psdiv, etc. as well. */ PROC_LOCK_ASSERT(p, MA_OWNED); if (p->p_flag & P_STOPPROF) return; if ((p->p_flag & P_PROFIL) == 0) { mtx_lock_spin(&sched_lock); p->p_flag |= P_PROFIL; if (++profprocs == 1) cpu_startprofclock(); mtx_unlock_spin(&sched_lock); } } /* * Stop profiling on a process. */ void stopprofclock(p) register struct proc *p; { PROC_LOCK_ASSERT(p, MA_OWNED); if (p->p_flag & P_PROFIL) { if (p->p_profthreads != 0) { p->p_flag |= P_STOPPROF; while (p->p_profthreads != 0) msleep(&p->p_profthreads, &p->p_mtx, PPAUSE, "stopprof", 0); p->p_flag &= ~P_STOPPROF; } if ((p->p_flag & P_PROFIL) == 0) return; mtx_lock_spin(&sched_lock); p->p_flag &= ~P_PROFIL; if (--profprocs == 0) cpu_stopprofclock(); mtx_unlock_spin(&sched_lock); } } /* * Statistics clock. Grab profile sample, and if divider reaches 0, * do process and kernel statistics. Most of the statistics are only * used by user-level statistics programs. The main exceptions are * ke->ke_uticks, p->p_rux.rux_sticks, p->p_rux.rux_iticks, and p->p_estcpu. * This should be called by all active processors. */ void statclock(int usermode) { struct rusage *ru; struct vmspace *vm; struct thread *td; struct proc *p; long rss; td = curthread; p = td->td_proc; mtx_lock_spin_flags(&sched_lock, MTX_QUIET); if (usermode) { /* * Charge the time as appropriate. */ if (p->p_flag & P_SA) thread_statclock(1); td->td_uticks++; if (p->p_nice > NZERO) cp_time[CP_NICE]++; else cp_time[CP_USER]++; } else { /* * Came from kernel mode, so we were: * - handling an interrupt, * - doing syscall or trap work on behalf of the current * user process, or * - spinning in the idle loop. * Whichever it is, charge the time as appropriate. * Note that we charge interrupts to the current process, * regardless of whether they are ``for'' that process, * so that we know how much of its real time was spent * in ``non-process'' (i.e., interrupt) work. */ if ((td->td_pflags & TDP_ITHREAD) || td->td_intr_nesting_level >= 2) { td->td_iticks++; cp_time[CP_INTR]++; } else { if (p->p_flag & P_SA) thread_statclock(0); td->td_pticks++; td->td_sticks++; if (td != PCPU_GET(idlethread)) cp_time[CP_SYS]++; else cp_time[CP_IDLE]++; } } CTR4(KTR_SCHED, "statclock: %p(%s) prio %d stathz %d", td, td->td_proc->p_comm, td->td_priority, (stathz)?stathz:hz); sched_clock(td); /* Update resource usage integrals and maximums. 
*/ MPASS(p->p_stats != NULL); MPASS(p->p_vmspace != NULL); vm = p->p_vmspace; ru = &p->p_stats->p_ru; ru->ru_ixrss += pgtok(vm->vm_tsize); ru->ru_idrss += pgtok(vm->vm_dsize); ru->ru_isrss += pgtok(vm->vm_ssize); rss = pgtok(vmspace_resident_count(vm)); if (ru->ru_maxrss < rss) ru->ru_maxrss = rss; mtx_unlock_spin_flags(&sched_lock, MTX_QUIET); } void profclock(int usermode, uintfptr_t pc) { struct thread *td; #ifdef GPROF struct gmonparam *g; uintfptr_t i; #endif td = curthread; if (usermode) { /* * Came from user mode; CPU was in user state. * If this process is being profiled, record the tick. * if there is no related user location yet, don't * bother trying to count it. */ if (td->td_proc->p_flag & P_PROFIL) addupc_intr(td, pc, 1); } #ifdef GPROF else { /* * Kernel statistics are just like addupc_intr, only easier. */ g = &_gmonparam; if (g->state == GMON_PROF_ON && pc >= g->lowpc) { i = PC_TO_I(g, pc); if (i < g->textsize) { KCOUNT(g, i)++; } } } #endif } /* * Return information about system clocks. */ static int sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS) { struct clockinfo clkinfo; /* * Construct clockinfo structure. */ bzero(&clkinfo, sizeof(clkinfo)); clkinfo.hz = hz; clkinfo.tick = tick; clkinfo.profhz = profhz; clkinfo.stathz = stathz ? stathz : hz; return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req)); } SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0, sysctl_kern_clockrate, "S,clockinfo", "Rate and period of various kernel clocks"); #ifdef SW_WATCHDOG static void watchdog_config(void *unused __unused, u_int cmd, int *err) { u_int u; u = cmd & WD_INTERVAL; if ((cmd & WD_ACTIVE) && u >= WD_TO_1SEC) { watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz; watchdog_enabled = 1; *err = 0; } else { watchdog_enabled = 0; } } /* * Handle a watchdog timeout by dumping interrupt information and * then either dropping to DDB or panicing. */ static void watchdog_fire(void) { int nintr; u_int64_t inttotal; u_long *curintr; char *curname; curintr = intrcnt; curname = intrnames; inttotal = 0; nintr = eintrcnt - intrcnt; printf("interrupt total\n"); while (--nintr >= 0) { if (*curintr) printf("%-12s %20lu\n", curname, *curintr); curname += strlen(curname) + 1; inttotal += *curintr++; } printf("Total %20ju\n", (uintmax_t)inttotal); #ifdef KDB kdb_backtrace(); kdb_enter("watchdog timeout"); #else panic("watchdog timeout"); #endif /* KDB */ } #endif /* SW_WATCHDOG */ Index: head/sys/kern/kern_switch.c =================================================================== --- head/sys/kern/kern_switch.c (revision 159569) +++ head/sys/kern/kern_switch.c (revision 159570) @@ -1,1035 +1,1040 @@ /*- * Copyright (c) 2001 Jake Burkholder * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*** Here is the logic.. If there are N processors, then there are at most N KSEs (kernel schedulable entities) working to process threads that belong to a KSEGROUP (kg). If there are X of these KSEs actually running at the moment in question, then there are at most M (N-X) of these KSEs on the run queue, as running KSEs are not on the queue. Runnable threads are queued off the KSEGROUP in priority order. If there are M or more threads runnable, the top M threads (by priority) are 'preassigned' to the M KSEs not running. The KSEs take their priority from those threads and are put on the run queue. The last thread that had a priority high enough to have a KSE associated with it, AND IS ON THE RUN QUEUE is pointed to by kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs assigned as all the available KSEs are activly running, or because there are no threads queued, that pointer is NULL. When a KSE is removed from the run queue to become runnable, we know it was associated with the highest priority thread in the queue (at the head of the queue). If it is also the last assigned we know M was 1 and must now be 0. Since the thread is no longer queued that pointer must be removed from it. Since we know there were no more KSEs available, (M was 1 and is now 0) and since we are not FREEING our KSE but using it, we know there are STILL no more KSEs available, we can prove that the next thread in the ksegrp list will not have a KSE to assign to it, so we can show that the pointer must be made 'invalid' (NULL). The pointer exists so that when a new thread is made runnable, it can have its priority compared with the last assigned thread to see if it should 'steal' its KSE or not.. i.e. is it 'earlier' on the list than that thread or later.. If it's earlier, then the KSE is removed from the last assigned (which is now not assigned a KSE) and reassigned to the new thread, which is placed earlier in the list. The pointer is then backed up to the previous thread (which may or may not be the new thread). When a thread sleeps or is removed, the KSE becomes available and if there are queued threads that are not assigned KSEs, the highest priority one of them is assigned the KSE, which is then placed back on the run queue at the approipriate place, and the kg->kg_last_assigned pointer is adjusted down to point to it. The following diagram shows 2 KSEs and 3 threads from a single process. RUNQ: --->KSE---KSE--... (KSEs queued at priorities from threads) \ \____ \ \ KSEGROUP---thread--thread--thread (queued in priority order) \ / \_______________/ (last_assigned) The result of this scheme is that the M available KSEs are always queued at the priorities they have inherrited from the M highest priority threads for that KSEGROUP. If this situation changes, the KSEs are reassigned to keep this true. 
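(Editorial worked example, not part of the original comment: suppose N is 2,
so there are two KSEs, and three runnable threads T1 > T2 > T3 in priority
order. T1 and T2 are preassigned to the two KSEs and kg_last_assigned points
at T2. If a new thread T0 with priority between T1 and T2 becomes runnable,
it is queued ahead of T2, the KSE is taken from T2 and reassigned to T0, and
kg_last_assigned is backed up to the thread now preceding T2, in this case T0
itself, which is the "may be the new thread" case mentioned above.)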
***/ #include __FBSDID("$FreeBSD$"); #include "opt_sched.h" #ifndef KERN_SWITCH_INCLUDE #include #include #include #include #include #include #include #include #include #include #else /* KERN_SWITCH_INCLUDE */ #if defined(SMP) && (defined(__i386__) || defined(__amd64__)) #include #endif #if defined(SMP) && defined(SCHED_4BSD) #include #endif /* Uncomment this to enable logging of critical_enter/exit. */ #if 0 #define KTR_CRITICAL KTR_SCHED #else #define KTR_CRITICAL 0 #endif #ifdef FULL_PREEMPTION #ifndef PREEMPTION #error "The FULL_PREEMPTION option requires the PREEMPTION option" #endif #endif CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS); #define td_kse td_sched /* * kern.sched.preemption allows user space to determine if preemption support * is compiled in or not. It is not currently a boot or runtime flag that * can be changed. */ #ifdef PREEMPTION static int kern_sched_preemption = 1; #else static int kern_sched_preemption = 0; #endif SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD, &kern_sched_preemption, 0, "Kernel preemption enabled"); /************************************************************************ * Functions that manipulate runnability from a thread perspective. * ************************************************************************/ /* * Select the KSE that will be run next. From that find the thread, and * remove it from the KSEGRP's run queue. If there is thread clustering, * this will be what does it. */ struct thread * choosethread(void) { struct kse *ke; struct thread *td; struct ksegrp *kg; #if defined(SMP) && (defined(__i386__) || defined(__amd64__)) if (smp_active == 0 && PCPU_GET(cpuid) != 0) { /* Shutting down, run idlethread on AP's */ td = PCPU_GET(idlethread); ke = td->td_kse; CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td); ke->ke_flags |= KEF_DIDRUN; TD_SET_RUNNING(td); return (td); } #endif retry: ke = sched_choose(); if (ke) { td = ke->ke_thread; KASSERT((td->td_kse == ke), ("kse/thread mismatch")); kg = ke->ke_ksegrp; if (td->td_proc->p_flag & P_HADTHREADS) { if (kg->kg_last_assigned == td) { kg->kg_last_assigned = TAILQ_PREV(td, threadqueue, td_runq); } TAILQ_REMOVE(&kg->kg_runq, td, td_runq); } CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d", td, td->td_priority); } else { /* Simulate runq_choose() having returned the idle thread */ td = PCPU_GET(idlethread); ke = td->td_kse; CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td); } ke->ke_flags |= KEF_DIDRUN; /* * If we are in panic, only allow system threads, * plus the one we are running in, to be run. */ if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 && (td->td_flags & TDF_INPANIC) == 0)) { /* note that it is no longer on the run queue */ TD_SET_CAN_RUN(td); goto retry; } TD_SET_RUNNING(td); return (td); } /* * Given a surplus system slot, try assign a new runnable thread to it. * Called from: * sched_thread_exit() (local) * sched_switch() (local) * sched_thread_exit() (local) * remrunqueue() (local) (not at the moment) */ static void slot_fill(struct ksegrp *kg) { struct thread *td; mtx_assert(&sched_lock, MA_OWNED); while (kg->kg_avail_opennings > 0) { /* * Find the first unassigned thread */ if ((td = kg->kg_last_assigned) != NULL) td = TAILQ_NEXT(td, td_runq); else td = TAILQ_FIRST(&kg->kg_runq); /* * If we found one, send it to the system scheduler. */ if (td) { kg->kg_last_assigned = td; sched_add(td, SRQ_YIELDING); CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg); } else { /* no threads to use up the slots. 
quit now */ break; } } } #ifdef SCHED_4BSD /* * Remove a thread from its KSEGRP's run queue. * This in turn may remove it from a KSE if it was already assigned * to one, possibly causing a new thread to be assigned to the KSE * and the KSE getting a new priority. */ static void remrunqueue(struct thread *td) { struct thread *td2, *td3; struct ksegrp *kg; struct kse *ke; mtx_assert(&sched_lock, MA_OWNED); KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue")); kg = td->td_ksegrp; ke = td->td_kse; CTR1(KTR_RUNQ, "remrunqueue: td%p", td); TD_SET_CAN_RUN(td); /* * If it is not a threaded process, take the shortcut. */ if ((td->td_proc->p_flag & P_HADTHREADS) == 0) { /* remve from sys run queue and free up a slot */ sched_rem(td); return; } td3 = TAILQ_PREV(td, threadqueue, td_runq); TAILQ_REMOVE(&kg->kg_runq, td, td_runq); if (ke->ke_state == KES_ONRUNQ) { /* * This thread has been assigned to the system run queue. * We need to dissociate it and try assign the * KSE to the next available thread. Then, we should * see if we need to move the KSE in the run queues. */ sched_rem(td); td2 = kg->kg_last_assigned; KASSERT((td2 != NULL), ("last assigned has wrong value")); if (td2 == td) kg->kg_last_assigned = td3; /* slot_fill(kg); */ /* will replace it with another */ } } #endif /* * Change the priority of a thread that is on the run queue. */ void adjustrunqueue( struct thread *td, int newpri) { struct ksegrp *kg; struct kse *ke; mtx_assert(&sched_lock, MA_OWNED); KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue")); ke = td->td_kse; CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td); /* * If it is not a threaded process, take the shortcut. */ if ((td->td_proc->p_flag & P_HADTHREADS) == 0) { /* We only care about the kse in the run queue. */ td->td_priority = newpri; - if (ke->ke_rqindex != (newpri / RQ_PPQ)) { +#ifndef SCHED_CORE + if (ke->ke_rqindex != (newpri / RQ_PPQ)) +#else + if (ke->ke_rqindex != newpri) +#endif + { sched_rem(td); sched_add(td, SRQ_BORING); } return; } /* It is a threaded process */ kg = td->td_ksegrp; if (ke->ke_state == KES_ONRUNQ #ifdef SCHED_ULE || ((ke->ke_flags & KEF_ASSIGNED) != 0 && (ke->ke_flags & KEF_REMOVED) == 0) #endif ) { if (kg->kg_last_assigned == td) { kg->kg_last_assigned = TAILQ_PREV(td, threadqueue, td_runq); } sched_rem(td); } TAILQ_REMOVE(&kg->kg_runq, td, td_runq); TD_SET_CAN_RUN(td); td->td_priority = newpri; setrunqueue(td, SRQ_BORING); } /* * This function is called when a thread is about to be put on a * ksegrp run queue because it has been made runnable or its * priority has been adjusted and the ksegrp does not have a * free kse slot. It determines if a thread from the same ksegrp * should be preempted. If so, it tries to switch threads * if the thread is on the same cpu or notifies another cpu that * it should switch threads. 
*/ static void maybe_preempt_in_ksegrp(struct thread *td) #if !defined(SMP) { struct thread *running_thread; mtx_assert(&sched_lock, MA_OWNED); running_thread = curthread; if (running_thread->td_ksegrp != td->td_ksegrp) return; if (td->td_priority >= running_thread->td_priority) return; #ifdef PREEMPTION #ifndef FULL_PREEMPTION if (td->td_priority > PRI_MAX_ITHD) { running_thread->td_flags |= TDF_NEEDRESCHED; return; } #endif /* FULL_PREEMPTION */ if (running_thread->td_critnest > 1) running_thread->td_owepreempt = 1; else mi_switch(SW_INVOL, NULL); #else /* PREEMPTION */ running_thread->td_flags |= TDF_NEEDRESCHED; #endif /* PREEMPTION */ return; } #else /* SMP */ { struct thread *running_thread; int worst_pri; struct ksegrp *kg; cpumask_t cpumask,dontuse; struct pcpu *pc; struct pcpu *best_pcpu; struct thread *cputhread; mtx_assert(&sched_lock, MA_OWNED); running_thread = curthread; #if !defined(KSEG_PEEMPT_BEST_CPU) if (running_thread->td_ksegrp != td->td_ksegrp) { #endif kg = td->td_ksegrp; /* if someone is ahead of this thread, wait our turn */ if (td != TAILQ_FIRST(&kg->kg_runq)) return; worst_pri = td->td_priority; best_pcpu = NULL; dontuse = stopped_cpus | idle_cpus_mask; /* * Find a cpu with the worst priority that runs at thread from * the same ksegrp - if multiple exist give first the last run * cpu and then the current cpu priority */ SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { cpumask = pc->pc_cpumask; cputhread = pc->pc_curthread; if ((cpumask & dontuse) || cputhread->td_ksegrp != kg) continue; if (cputhread->td_priority > worst_pri) { worst_pri = cputhread->td_priority; best_pcpu = pc; continue; } if (cputhread->td_priority == worst_pri && best_pcpu != NULL && (td->td_lastcpu == pc->pc_cpuid || (PCPU_GET(cpumask) == cpumask && td->td_lastcpu != best_pcpu->pc_cpuid))) best_pcpu = pc; } /* Check if we need to preempt someone */ if (best_pcpu == NULL) return; #if defined(IPI_PREEMPTION) && defined(PREEMPTION) #if !defined(FULL_PREEMPTION) if (td->td_priority <= PRI_MAX_ITHD) #endif /* ! FULL_PREEMPTION */ { ipi_selected(best_pcpu->pc_cpumask, IPI_PREEMPT); return; } #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */ if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) { best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED; ipi_selected(best_pcpu->pc_cpumask, IPI_AST); return; } #if !defined(KSEG_PEEMPT_BEST_CPU) } #endif if (td->td_priority >= running_thread->td_priority) return; #ifdef PREEMPTION #if !defined(FULL_PREEMPTION) if (td->td_priority > PRI_MAX_ITHD) { running_thread->td_flags |= TDF_NEEDRESCHED; } #endif /* ! 
FULL_PREEMPTION */ if (running_thread->td_critnest > 1) running_thread->td_owepreempt = 1; else mi_switch(SW_INVOL, NULL); #else /* PREEMPTION */ running_thread->td_flags |= TDF_NEEDRESCHED; #endif /* PREEMPTION */ return; } #endif /* !SMP */ int limitcount; void setrunqueue(struct thread *td, int flags) { struct ksegrp *kg; struct thread *td2; struct thread *tda; CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d", td, td->td_ksegrp, td->td_proc->p_pid); CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)", td, td->td_proc->p_comm, td->td_priority, curthread, curthread->td_proc->p_comm); mtx_assert(&sched_lock, MA_OWNED); KASSERT((td->td_inhibitors == 0), ("setrunqueue: trying to run inhibitted thread")); KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), ("setrunqueue: bad thread state")); TD_SET_RUNQ(td); kg = td->td_ksegrp; if ((td->td_proc->p_flag & P_HADTHREADS) == 0) { /* * Common path optimisation: Only one of everything * and the KSE is always already attached. * Totally ignore the ksegrp run queue. */ if (kg->kg_avail_opennings != 1) { if (limitcount < 1) { limitcount++; printf("pid %d: corrected slot count (%d->1)\n", td->td_proc->p_pid, kg->kg_avail_opennings); } kg->kg_avail_opennings = 1; } sched_add(td, flags); return; } /* * If the concurrency has reduced, and we would go in the * assigned section, then keep removing entries from the * system run queue, until we are not in that section * or there is room for us to be put in that section. * What we MUST avoid is the case where there are threads of less * priority than the new one scheduled, but it can not * be scheduled itself. That would lead to a non contiguous set * of scheduled threads, and everything would break. */ tda = kg->kg_last_assigned; while ((kg->kg_avail_opennings <= 0) && (tda && (tda->td_priority > td->td_priority))) { /* * None free, but there is one we can commandeer. */ CTR2(KTR_RUNQ, "setrunqueue: kg:%p: take slot from td: %p", kg, tda); sched_rem(tda); tda = kg->kg_last_assigned = TAILQ_PREV(tda, threadqueue, td_runq); } /* * Add the thread to the ksegrp's run queue at * the appropriate place. */ TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) { if (td2->td_priority > td->td_priority) { TAILQ_INSERT_BEFORE(td2, td, td_runq); break; } } if (td2 == NULL) { /* We ran off the end of the TAILQ or it was empty. */ TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq); } /* * If we have a slot to use, then put the thread on the system * run queue and if needed, readjust the last_assigned pointer. * it may be that we need to schedule something anyhow * even if the availabel slots are -ve so that * all the items < last_assigned are scheduled. */ if (kg->kg_avail_opennings > 0) { if (tda == NULL) { /* * No pre-existing last assigned so whoever is first * gets the slot.. (maybe us) */ td2 = TAILQ_FIRST(&kg->kg_runq); kg->kg_last_assigned = td2; } else if (tda->td_priority > td->td_priority) { td2 = td; } else { /* * We are past last_assigned, so * give the next slot to whatever is next, * which may or may not be us. */ td2 = TAILQ_NEXT(tda, td_runq); kg->kg_last_assigned = td2; } sched_add(td2, flags); } else { CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d", td, td->td_ksegrp, td->td_proc->p_pid); if ((flags & SRQ_YIELDING) == 0) maybe_preempt_in_ksegrp(td); } } /* * Kernel thread preemption implementation. Critical sections mark * regions of code in which preemptions are not allowed. 
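 * As a minimal illustrative sketch (not tied to any particular caller),
 * the usual pattern is:
 *
 *	critical_enter();
 *	 ... touch per-CPU state that must not be preempted ...
 *	critical_exit();
 *
 * If a preemption is requested while td_critnest is raised,
 * td_owepreempt is recorded instead, and the deferred mi_switch()
 * happens once critical_exit() drops the nesting level back to zero.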
*/ void critical_enter(void) { struct thread *td; td = curthread; td->td_critnest++; CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td, (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest); } void critical_exit(void) { struct thread *td; td = curthread; KASSERT(td->td_critnest != 0, ("critical_exit: td_critnest == 0")); #ifdef PREEMPTION if (td->td_critnest == 1) { td->td_critnest = 0; mtx_assert(&sched_lock, MA_NOTOWNED); if (td->td_owepreempt) { td->td_critnest = 1; mtx_lock_spin(&sched_lock); td->td_critnest--; mi_switch(SW_INVOL, NULL); mtx_unlock_spin(&sched_lock); } } else #endif td->td_critnest--; CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td, (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest); } /* * This function is called when a thread is about to be put on run queue * because it has been made runnable or its priority has been adjusted. It * determines if the new thread should be immediately preempted to. If so, * it switches to it and eventually returns true. If not, it returns false * so that the caller may place the thread on an appropriate run queue. */ int maybe_preempt(struct thread *td) { #ifdef PREEMPTION struct thread *ctd; int cpri, pri; #endif mtx_assert(&sched_lock, MA_OWNED); #ifdef PREEMPTION /* * The new thread should not preempt the current thread if any of the * following conditions are true: * * - The kernel is in the throes of crashing (panicstr). * - The current thread has a higher (numerically lower) or * equivalent priority. Note that this prevents curthread from * trying to preempt to itself. * - It is too early in the boot for context switches (cold is set). * - The current thread has an inhibitor set or is in the process of * exiting. In this case, the current thread is about to switch * out anyways, so there's no point in preempting. If we did, * the current thread would not be properly resumed as well, so * just avoid that whole landmine. * - If the new thread's priority is not a realtime priority and * the current thread's priority is not an idle priority and * FULL_PREEMPTION is disabled. * * If all of these conditions are false, but the current thread is in * a nested critical section, then we have to defer the preemption * until we exit the critical section. Otherwise, switch immediately * to the new thread. */ ctd = curthread; KASSERT ((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd), ("thread has no (or wrong) sched-private part.")); KASSERT((td->td_inhibitors == 0), ("maybe_preempt: trying to run inhibitted thread")); pri = td->td_priority; cpri = ctd->td_priority; if (panicstr != NULL || pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) || td->td_kse->ke_state != KES_THREAD) return (0); #ifndef FULL_PREEMPTION if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE) return (0); #endif if (ctd->td_critnest > 1) { CTR1(KTR_PROC, "maybe_preempt: in critical section %d", ctd->td_critnest); ctd->td_owepreempt = 1; return (0); } /* * Thread is runnable but not yet put on system run queue. */ MPASS(TD_ON_RUNQ(td)); MPASS(td->td_sched->ke_state != KES_ONRUNQ); if (td->td_proc->p_flag & P_HADTHREADS) { /* * If this is a threaded process we actually ARE on the * ksegrp run queue so take it off that first. * Also undo any damage done to the last_assigned pointer. 
* XXX Fix setrunqueue so this isn't needed */ struct ksegrp *kg; kg = td->td_ksegrp; if (kg->kg_last_assigned == td) kg->kg_last_assigned = TAILQ_PREV(td, threadqueue, td_runq); TAILQ_REMOVE(&kg->kg_runq, td, td_runq); } TD_SET_RUNNING(td); CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td, td->td_proc->p_pid, td->td_proc->p_comm); mi_switch(SW_INVOL|SW_PREEMPT, td); return (1); #else return (0); #endif } #if 0 #ifndef PREEMPTION /* XXX: There should be a non-static version of this. */ static void printf_caddr_t(void *data) { printf("%s", (char *)data); } static char preempt_warning[] = "WARNING: Kernel preemption is disabled, expect reduced performance.\n"; SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t, preempt_warning) #endif #endif /************************************************************************ * SYSTEM RUN QUEUE manipulations and tests * ************************************************************************/ /* * Initialize a run structure. */ void runq_init(struct runq *rq) { int i; bzero(rq, sizeof *rq); for (i = 0; i < RQ_NQS; i++) TAILQ_INIT(&rq->rq_queues[i]); } /* * Clear the status bit of the queue corresponding to priority level pri, * indicating that it is empty. */ static __inline void runq_clrbit(struct runq *rq, int pri) { struct rqbits *rqb; rqb = &rq->rq_status; CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d", rqb->rqb_bits[RQB_WORD(pri)], rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri), RQB_BIT(pri), RQB_WORD(pri)); rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri); } /* * Find the index of the first non-empty run queue. This is done by * scanning the status bits, a set bit indicates a non-empty queue. */ static __inline int runq_findbit(struct runq *rq) { struct rqbits *rqb; int pri; int i; rqb = &rq->rq_status; for (i = 0; i < RQB_LEN; i++) if (rqb->rqb_bits[i]) { pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW); CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d", rqb->rqb_bits[i], i, pri); return (pri); } return (-1); } /* * Set the status bit of the queue corresponding to priority level pri, * indicating that it is non-empty. */ static __inline void runq_setbit(struct runq *rq, int pri) { struct rqbits *rqb; rqb = &rq->rq_status; CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d", rqb->rqb_bits[RQB_WORD(pri)], rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri), RQB_BIT(pri), RQB_WORD(pri)); rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri); } /* * Add the KSE to the queue specified by its priority, and set the * corresponding status bit. */ void runq_add(struct runq *rq, struct kse *ke, int flags) { struct rqhead *rqh; int pri; pri = ke->ke_thread->td_priority / RQ_PPQ; ke->ke_rqindex = pri; runq_setbit(rq, pri); rqh = &rq->rq_queues[pri]; CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p", ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh); if (flags & SRQ_PREEMPTED) { TAILQ_INSERT_HEAD(rqh, ke, ke_procq); } else { TAILQ_INSERT_TAIL(rqh, ke, ke_procq); } } /* * Return true if there are runnable processes of any priority on the run * queue, false otherwise. Has no side effects, does not modify the run * queue structure. 
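 * As an illustration of the status-word layout (assuming 32-bit status
 * words), priority queue 37 is tracked by bit RQB_BIT(37) == (1 << 5)
 * of word RQB_WORD(37) == 1, so any non-zero word proves that some
 * queue is non-empty, and runq_findbit() can locate the best (lowest
 * numbered) non-empty queue with one ffs() per word.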
*/ int runq_check(struct runq *rq) { struct rqbits *rqb; int i; rqb = &rq->rq_status; for (i = 0; i < RQB_LEN; i++) if (rqb->rqb_bits[i]) { CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d", rqb->rqb_bits[i], i); return (1); } CTR0(KTR_RUNQ, "runq_check: empty"); return (0); } #if defined(SMP) && defined(SCHED_4BSD) int runq_fuzz = 1; SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, ""); #endif /* * Find the highest priority process on the run queue. */ struct kse * runq_choose(struct runq *rq) { struct rqhead *rqh; struct kse *ke; int pri; mtx_assert(&sched_lock, MA_OWNED); while ((pri = runq_findbit(rq)) != -1) { rqh = &rq->rq_queues[pri]; #if defined(SMP) && defined(SCHED_4BSD) /* fuzz == 1 is normal.. 0 or less are ignored */ if (runq_fuzz > 1) { /* * In the first couple of entries, check if * there is one for our CPU as a preference. */ int count = runq_fuzz; int cpu = PCPU_GET(cpuid); struct kse *ke2; ke2 = ke = TAILQ_FIRST(rqh); while (count-- && ke2) { if (ke->ke_thread->td_lastcpu == cpu) { ke = ke2; break; } ke2 = TAILQ_NEXT(ke2, ke_procq); } } else #endif ke = TAILQ_FIRST(rqh); KASSERT(ke != NULL, ("runq_choose: no proc on busy queue")); CTR3(KTR_RUNQ, "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh); return (ke); } CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri); return (NULL); } /* * Remove the KSE from the queue specified by its priority, and clear the * corresponding status bit if the queue becomes empty. * Caller must set ke->ke_state afterwards. */ void runq_remove(struct runq *rq, struct kse *ke) { struct rqhead *rqh; int pri; KASSERT(ke->ke_proc->p_sflag & PS_INMEM, ("runq_remove: process swapped out")); pri = ke->ke_rqindex; rqh = &rq->rq_queues[pri]; CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p", ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh); KASSERT(ke != NULL, ("runq_remove: no proc on busy queue")); TAILQ_REMOVE(rqh, ke, ke_procq); if (TAILQ_EMPTY(rqh)) { CTR0(KTR_RUNQ, "runq_remove: empty"); runq_clrbit(rq, pri); } } /****** functions that are temporarily here ***********/ #include extern struct mtx kse_zombie_lock; /* * Allocate scheduler specific per-process resources. * The thread and ksegrp have already been linked in. * In this case just set the default concurrency value. * * Called from: * proc_init() (UMA init method) */ void sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td) { /* This can go in sched_fork */ sched_init_concurrency(kg); } /* * thread is being either created or recycled. * Fix up the per-scheduler resources associated with it. * Called from: * sched_fork_thread() * thread_dtor() (*may go away) * thread_init() (*may go away) */ void sched_newthread(struct thread *td) { struct td_sched *ke; ke = (struct td_sched *) (td + 1); bzero(ke, sizeof(*ke)); td->td_sched = ke; ke->ke_thread = td; ke->ke_state = KES_THREAD; } /* * Set up an initial concurrency of 1 * and set the given thread (if given) to be using that * concurrency slot. * May be used "offline"..before the ksegrp is attached to the world * and thus wouldn't need schedlock in that case. 
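 * For example, an unthreaded process simply keeps this initial
 * concurrency of 1 (and its single slot) for its whole life, while a
 * threaded process has the value raised later via
 * sched_set_concurrency().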
* Called from: * thr_create() * proc_init() (UMA) via sched_newproc() */ void sched_init_concurrency(struct ksegrp *kg) { CTR1(KTR_RUNQ,"kg %p init slots and concurrency to 1", kg); kg->kg_concurrency = 1; kg->kg_avail_opennings = 1; } /* * Change the concurrency of an existing ksegrp to N * Called from: * kse_create() * kse_exit() * thread_exit() * thread_single() */ void sched_set_concurrency(struct ksegrp *kg, int concurrency) { CTR4(KTR_RUNQ,"kg %p set concurrency to %d, slots %d -> %d", kg, concurrency, kg->kg_avail_opennings, kg->kg_avail_opennings + (concurrency - kg->kg_concurrency)); kg->kg_avail_opennings += (concurrency - kg->kg_concurrency); kg->kg_concurrency = concurrency; } /* * Called from thread_exit() for all exiting thread * * Not to be confused with sched_exit_thread() * that is only called from thread_exit() for threads exiting * without the rest of the process exiting because it is also called from * sched_exit() and we wouldn't want to call it twice. * XXX This can probably be fixed. */ void sched_thread_exit(struct thread *td) { SLOT_RELEASE(td->td_ksegrp); slot_fill(td->td_ksegrp); } #endif /* KERN_SWITCH_INCLUDE */ Index: head/sys/kern/sched_4bsd.c =================================================================== --- head/sys/kern/sched_4bsd.c (revision 159569) +++ head/sys/kern/sched_4bsd.c (revision 159570) @@ -1,1390 +1,1395 @@ /*- * Copyright (c) 1982, 1986, 1990, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_hwpmc_hooks.h" #define kse td_sched #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif /* * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in * the range 100-256 Hz (approximately). */ #define ESTCPULIM(e) \ min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \ RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1) #ifdef SMP #define INVERSE_ESTCPU_WEIGHT (8 * smp_cpus) #else #define INVERSE_ESTCPU_WEIGHT 8 /* 1 / (priorities per estcpu level). */ #endif #define NICE_WEIGHT 1 /* Priorities per nice level. */ /* * The schedulable entity that can be given a context to run. * A process may have several of these. Probably one per processor * but posibly a few more. In this universe they are grouped * with a KSEG that contains the priority and niceness * for the group. */ struct kse { TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */ struct thread *ke_thread; /* (*) Active associated thread. */ fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */ u_char ke_rqindex; /* (j) Run queue index. */ enum { KES_THREAD = 0x0, /* slaved to thread state */ KES_ONRUNQ } ke_state; /* (j) KSE status. */ int ke_cpticks; /* (j) Ticks of cpu time. */ struct runq *ke_runq; /* runq the kse is currently on */ }; #define ke_proc ke_thread->td_proc #define ke_ksegrp ke_thread->td_ksegrp #define td_kse td_sched /* flags kept in td_flags */ #define TDF_DIDRUN TDF_SCHED0 /* KSE actually ran. */ #define TDF_EXIT TDF_SCHED1 /* KSE is being killed. */ #define TDF_BOUND TDF_SCHED2 #define ke_flags ke_thread->td_flags #define KEF_DIDRUN TDF_DIDRUN /* KSE actually ran. */ #define KEF_EXIT TDF_EXIT /* KSE is being killed. */ #define KEF_BOUND TDF_BOUND /* stuck to one CPU */ #define SKE_RUNQ_PCPU(ke) \ ((ke)->ke_runq != 0 && (ke)->ke_runq != &runq) struct kg_sched { struct thread *skg_last_assigned; /* (j) Last thread assigned to */ /* the system scheduler. */ int skg_avail_opennings; /* (j) Num KSEs requested in group. */ int skg_concurrency; /* (j) Num KSEs requested in group. */ }; #define kg_last_assigned kg_sched->skg_last_assigned #define kg_avail_opennings kg_sched->skg_avail_opennings #define kg_concurrency kg_sched->skg_concurrency #define SLOT_RELEASE(kg) \ do { \ kg->kg_avail_opennings++; \ CTR3(KTR_RUNQ, "kg %p(%d) Slot released (->%d)", \ kg, \ kg->kg_concurrency, \ kg->kg_avail_opennings); \ /* KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency), \ ("slots out of whack"));*/ \ } while (0) #define SLOT_USE(kg) \ do { \ kg->kg_avail_opennings--; \ CTR3(KTR_RUNQ, "kg %p(%d) Slot used (->%d)", \ kg, \ kg->kg_concurrency, \ kg->kg_avail_opennings); \ /* KASSERT((kg->kg_avail_opennings >= 0), \ ("slots out of whack"));*/ \ } while (0) /* * KSE_CAN_MIGRATE macro returns true if the kse can migrate between * cpus. */ #define KSE_CAN_MIGRATE(ke) \ ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0) static struct kse kse0; static struct kg_sched kg_sched0; static int sched_tdcnt; /* Total runnable threads in the system. */ static int sched_quantum; /* Roundrobin scheduling quantum in ticks. 
*/ #define SCHED_QUANTUM (hz / 10) /* Default sched quantum */ static struct callout roundrobin_callout; static void slot_fill(struct ksegrp *kg); static struct kse *sched_choose(void); /* XXX Should be thread * */ static void setup_runqs(void); static void roundrobin(void *arg); static void schedcpu(void); static void schedcpu_thread(void); static void sched_priority(struct thread *td, u_char prio); static void sched_setup(void *dummy); static void maybe_resched(struct thread *td); static void updatepri(struct ksegrp *kg); static void resetpriority(struct ksegrp *kg); static void resetpriority_thread(struct thread *td, struct ksegrp *kg); #ifdef SMP static int forward_wakeup(int cpunum); #endif static struct kproc_desc sched_kp = { "schedcpu", schedcpu_thread, NULL }; SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp) SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL) /* * Global run queue. */ static struct runq runq; #ifdef SMP /* * Per-CPU run queues */ static struct runq runq_pcpu[MAXCPU]; #endif static void setup_runqs(void) { #ifdef SMP int i; for (i = 0; i < MAXCPU; ++i) runq_init(&runq_pcpu[i]); #endif runq_init(&runq); } static int sysctl_kern_quantum(SYSCTL_HANDLER_ARGS) { int error, new_val; new_val = sched_quantum * tick; error = sysctl_handle_int(oidp, &new_val, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (new_val < tick) return (EINVAL); sched_quantum = new_val / tick; hogticks = 2 * sched_quantum; return (0); } SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler"); SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0, "Scheduler name"); SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW, 0, sizeof sched_quantum, sysctl_kern_quantum, "I", "Roundrobin scheduling quantum in microseconds"); #ifdef SMP /* Enable forwarding of wakeups to all other cpus */ SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP"); static int forward_wakeup_enabled = 1; SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW, &forward_wakeup_enabled, 0, "Forwarding of wakeup to idle CPUs"); static int forward_wakeups_requested = 0; SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD, &forward_wakeups_requested, 0, "Requests for Forwarding of wakeup to idle CPUs"); static int forward_wakeups_delivered = 0; SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD, &forward_wakeups_delivered, 0, "Completed Forwarding of wakeup to idle CPUs"); static int forward_wakeup_use_mask = 1; SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW, &forward_wakeup_use_mask, 0, "Use the mask of idle cpus"); static int forward_wakeup_use_loop = 0; SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW, &forward_wakeup_use_loop, 0, "Use a loop to find idle cpus"); static int forward_wakeup_use_single = 0; SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW, &forward_wakeup_use_single, 0, "Only signal one idle cpu"); static int forward_wakeup_use_htt = 0; SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW, &forward_wakeup_use_htt, 0, "account for htt"); #endif static int sched_followon = 0; SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW, &sched_followon, 0, "allow threads to share a quantum"); static int sched_pfollowons = 0; SYSCTL_INT(_kern_sched, OID_AUTO, pfollowons, CTLFLAG_RD, &sched_pfollowons, 0, "number of followons done to a different ksegrp"); static int sched_kgfollowons = 0; 
SYSCTL_INT(_kern_sched, OID_AUTO, kgfollowons, CTLFLAG_RD, &sched_kgfollowons, 0, "number of followons done in a ksegrp"); static __inline void sched_load_add(void) { sched_tdcnt++; CTR1(KTR_SCHED, "global load: %d", sched_tdcnt); } static __inline void sched_load_rem(void) { sched_tdcnt--; CTR1(KTR_SCHED, "global load: %d", sched_tdcnt); } /* * Arrange to reschedule if necessary, taking the priorities and * schedulers into account. */ static void maybe_resched(struct thread *td) { mtx_assert(&sched_lock, MA_OWNED); if (td->td_priority < curthread->td_priority) curthread->td_flags |= TDF_NEEDRESCHED; } /* * Force switch among equal priority processes every 100ms. * We don't actually need to force a context switch of the current process. * The act of firing the event triggers a context switch to softclock() and * then switching back out again which is equivalent to a preemption, thus * no further work is needed on the local CPU. */ /* ARGSUSED */ static void roundrobin(void *arg) { #ifdef SMP mtx_lock_spin(&sched_lock); forward_roundrobin(); mtx_unlock_spin(&sched_lock); #endif callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL); } /* * Constants for digital decay and forget: * 90% of (kg_estcpu) usage in 5 * loadav time * 95% of (ke_pctcpu) usage in 60 seconds (load insensitive) * Note that, as ps(1) mentions, this can let percentages * total over 100% (I've seen 137.9% for 3 processes). * * Note that schedclock() updates kg_estcpu and p_cpticks asynchronously. * * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds. * That is, the system wants to compute a value of decay such * that the following for loop: * for (i = 0; i < (5 * loadavg); i++) * kg_estcpu *= decay; * will compute * kg_estcpu *= 0.1; * for all values of loadavg: * * Mathematically this loop can be expressed by saying: * decay ** (5 * loadavg) ~= .1 * * The system computes decay as: * decay = (2 * loadavg) / (2 * loadavg + 1) * * We wish to prove that the system's computation of decay * will always fulfill the equation: * decay ** (5 * loadavg) ~= .1 * * If we compute b as: * b = 2 * loadavg * then * decay = b / (b + 1) * * We now need to prove two things: * 1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1) * 2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg) * * Facts: * For x close to zero, exp(x) =~ 1 + x, since * exp(x) = 0! + x**1/1! + x**2/2! + ... . * therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b. * For x close to zero, ln(1+x) =~ x, since * ln(1+x) = x - x**2/2 + x**3/3 - ... -1 < x < 1 * therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1). * ln(.1) =~ -2.30 * * Proof of (1): * Solve (factor)**(power) =~ .1 given power (5*loadav): * solving for factor, * ln(factor) =~ (-2.30/5*loadav), or * factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) = * exp(-1/b) =~ (b-1)/b =~ b/(b+1). QED * * Proof of (2): * Solve (factor)**(power) =~ .1 given factor == (b/(b+1)): * solving for power, * power*ln(b/(b+1)) =~ -2.30, or * power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav. 
QED * * Actual power values for the implemented algorithm are as follows: * loadav: 1 2 3 4 * power: 5.68 10.32 14.94 19.55 */ /* calculations for digital decay to forget 90% of usage in 5*loadav sec */ #define loadfactor(loadav) (2 * (loadav)) #define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE)) /* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */ static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); /* * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT). * * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used: * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits). * * If you don't want to bother with the faster/more-accurate formula, you * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate * (more general) method of calculating the %age of CPU used by a process. */ #define CCPU_SHIFT 11 /* * Recompute process priorities, every hz ticks. * MP-safe, called without the Giant mutex. */ /* ARGSUSED */ static void schedcpu(void) { register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]); struct thread *td; struct proc *p; struct kse *ke; struct ksegrp *kg; int awake, realstathz; realstathz = stathz ? stathz : hz; sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { /* * Prevent state changes and protect run queue. */ mtx_lock_spin(&sched_lock); /* * Increment time in/out of memory. We ignore overflow; with * 16-bit int's (remember them?) overflow takes 45 days. */ p->p_swtime++; FOREACH_KSEGRP_IN_PROC(p, kg) { awake = 0; FOREACH_THREAD_IN_GROUP(kg, td) { ke = td->td_kse; /* * Increment sleep time (if sleeping). We * ignore overflow, as above. */ /* * The kse slptimes are not touched in wakeup * because the thread may not HAVE a KSE. */ if (ke->ke_state == KES_ONRUNQ) { awake = 1; ke->ke_flags &= ~KEF_DIDRUN; } else if ((ke->ke_state == KES_THREAD) && (TD_IS_RUNNING(td))) { awake = 1; /* Do not clear KEF_DIDRUN */ } else if (ke->ke_flags & KEF_DIDRUN) { awake = 1; ke->ke_flags &= ~KEF_DIDRUN; } /* * ke_pctcpu is only for ps and ttyinfo(). * Do it per kse, and add them up at the end? * XXXKSE */ ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >> FSHIFT; /* * If the kse has been idle the entire second, * stop recalculating its priority until * it wakes up. */ if (ke->ke_cpticks == 0) continue; #if (FSHIFT >= CCPU_SHIFT) ke->ke_pctcpu += (realstathz == 100) ? ((fixpt_t) ke->ke_cpticks) << (FSHIFT - CCPU_SHIFT) : 100 * (((fixpt_t) ke->ke_cpticks) << (FSHIFT - CCPU_SHIFT)) / realstathz; #else ke->ke_pctcpu += ((FSCALE - ccpu) * (ke->ke_cpticks * FSCALE / realstathz)) >> FSHIFT; #endif ke->ke_cpticks = 0; } /* end of kse loop */ /* * If there are ANY running threads in this KSEGRP, * then don't count it as sleeping. */ if (awake) { if (kg->kg_slptime > 1) { /* * In an ideal world, this should not * happen, because whoever woke us * up from the long sleep should have * unwound the slptime and reset our * priority before we run at the stale * priority. Should KASSERT at some * point when all the cases are fixed. 
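 * As a rough illustration of what that unwinding does: updatepri()
 * decays kg_estcpu once for each recorded tick of sleep, and a
 * sleep longer than 5 * loadfactor ticks zeroes the estimate
 * outright.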
*/ updatepri(kg); } kg->kg_slptime = 0; } else kg->kg_slptime++; if (kg->kg_slptime > 1) continue; kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu); resetpriority(kg); FOREACH_THREAD_IN_GROUP(kg, td) { resetpriority_thread(td, kg); } } /* end of ksegrp loop */ mtx_unlock_spin(&sched_lock); } /* end of process loop */ sx_sunlock(&allproc_lock); } /* * Main loop for a kthread that executes schedcpu once a second. */ static void schedcpu_thread(void) { int nowake; for (;;) { schedcpu(); tsleep(&nowake, 0, "-", hz); } } /* * Recalculate the priority of a process after it has slept for a while. * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at * least six times the loadfactor will decay kg_estcpu to zero. */ static void updatepri(struct ksegrp *kg) { register fixpt_t loadfac; register unsigned int newcpu; loadfac = loadfactor(averunnable.ldavg[0]); if (kg->kg_slptime > 5 * loadfac) kg->kg_estcpu = 0; else { newcpu = kg->kg_estcpu; kg->kg_slptime--; /* was incremented in schedcpu() */ while (newcpu && --kg->kg_slptime) newcpu = decay_cpu(loadfac, newcpu); kg->kg_estcpu = newcpu; } } /* * Compute the priority of a process when running in user mode. * Arrange to reschedule if the resulting priority is better * than that of the current process. */ static void resetpriority(struct ksegrp *kg) { register unsigned int newpriority; if (kg->kg_pri_class == PRI_TIMESHARE) { newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT + NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN); newpriority = min(max(newpriority, PRI_MIN_TIMESHARE), PRI_MAX_TIMESHARE); kg->kg_user_pri = newpriority; } } /* * Update the thread's priority when the associated ksegroup's user * priority changes. */ static void resetpriority_thread(struct thread *td, struct ksegrp *kg) { /* Only change threads with a time sharing user priority. */ if (td->td_priority < PRI_MIN_TIMESHARE || td->td_priority > PRI_MAX_TIMESHARE) return; /* XXX the whole needresched thing is broken, but not silly. */ maybe_resched(td); sched_prio(td, kg->kg_user_pri); } /* ARGSUSED */ static void sched_setup(void *dummy) { setup_runqs(); if (sched_quantum == 0) sched_quantum = SCHED_QUANTUM; hogticks = 2 * sched_quantum; callout_init(&roundrobin_callout, CALLOUT_MPSAFE); /* Kick off timeout driven events by calling first time. */ roundrobin(NULL); /* Account for thread0. */ sched_load_add(); } /* External interfaces start here */ /* * Very early in the boot some setup of scheduler-specific * parts of proc0 and of some scheduler resources needs to be done. * Called from: * proc0_init() */ void schedinit(void) { /* * Set up the scheduler specific parts of proc0. */ proc0.p_sched = NULL; /* XXX */ ksegrp0.kg_sched = &kg_sched0; thread0.td_sched = &kse0; kse0.ke_thread = &thread0; kse0.ke_state = KES_THREAD; kg_sched0.skg_concurrency = 1; kg_sched0.skg_avail_opennings = 0; /* we are already running */ } int sched_runnable(void) { #ifdef SMP return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]); #else return runq_check(&runq); #endif } int sched_rr_interval(void) { if (sched_quantum == 0) sched_quantum = SCHED_QUANTUM; return (sched_quantum); } /* * We adjust the priority of the current process. The priority of * a process gets worse as it accumulates CPU time. The cpu usage * estimator (kg_estcpu) is increased here. resetpriority() will * compute a different priority each time kg_estcpu increases by * INVERSE_ESTCPU_WEIGHT * (until MAXPRI is reached). 
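 * (As a concrete illustration, with the uniprocessor default of
 * INVERSE_ESTCPU_WEIGHT == 8, resetpriority() worsens the computed
 * user priority by one step for every 8 points of estcpu the ksegrp
 * accumulates.)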
The cpu usage estimator ramps up * quite quickly when the process is running (linearly), and decays * away exponentially, at a rate which is proportionally slower when * the system is busy. The basic principle is that the system will * 90% forget that the process used a lot of CPU time in 5 * loadav * seconds. This causes the system to favor processes which haven't * run much recently, and to round-robin among other processes. */ void sched_clock(struct thread *td) { struct ksegrp *kg; struct kse *ke; mtx_assert(&sched_lock, MA_OWNED); kg = td->td_ksegrp; ke = td->td_kse; ke->ke_cpticks++; kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1); if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) { resetpriority(kg); resetpriority_thread(td, kg); } } /* * charge childs scheduling cpu usage to parent. * * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp. * Charge it to the ksegrp that did the wait since process estcpu is sum of * all ksegrps, this is strictly as expected. Assume that the child process * aggregated all the estcpu into the 'built-in' ksegrp. */ void sched_exit(struct proc *p, struct thread *td) { sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td); sched_exit_thread(FIRST_THREAD_IN_PROC(p), td); } void sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd) { mtx_assert(&sched_lock, MA_OWNED); kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + childtd->td_ksegrp->kg_estcpu); } void sched_exit_thread(struct thread *td, struct thread *child) { CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", child, child->td_proc->p_comm, child->td_priority); if ((child->td_proc->p_flag & P_NOLOAD) == 0) sched_load_rem(); } void sched_fork(struct thread *td, struct thread *childtd) { sched_fork_ksegrp(td, childtd->td_ksegrp); sched_fork_thread(td, childtd); } void sched_fork_ksegrp(struct thread *td, struct ksegrp *child) { mtx_assert(&sched_lock, MA_OWNED); child->kg_estcpu = td->td_ksegrp->kg_estcpu; } void sched_fork_thread(struct thread *td, struct thread *childtd) { sched_newthread(childtd); } void sched_nice(struct proc *p, int nice) { struct ksegrp *kg; struct thread *td; PROC_LOCK_ASSERT(p, MA_OWNED); mtx_assert(&sched_lock, MA_OWNED); p->p_nice = nice; FOREACH_KSEGRP_IN_PROC(p, kg) { resetpriority(kg); FOREACH_THREAD_IN_GROUP(kg, td) { resetpriority_thread(td, kg); } } } void sched_class(struct ksegrp *kg, int class) { mtx_assert(&sched_lock, MA_OWNED); kg->kg_pri_class = class; } /* * Adjust the priority of a thread. * This may include moving the thread within the KSEGRP, * changing the assignment of a kse to the thread, * and moving a KSE in the system run queue. */ static void sched_priority(struct thread *td, u_char prio) { CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", td, td->td_proc->p_comm, td->td_priority, prio, curthread, curthread->td_proc->p_comm); mtx_assert(&sched_lock, MA_OWNED); if (td->td_priority == prio) return; if (TD_ON_RUNQ(td)) { adjustrunqueue(td, prio); } else { td->td_priority = prio; } } /* * Update a thread's priority when it is lent another thread's * priority. */ void sched_lend_prio(struct thread *td, u_char prio) { td->td_flags |= TDF_BORROWING; sched_priority(td, prio); } /* * Restore a thread's priority when priority propagation is * over. The prio argument is the minimum priority the thread * needs to have to satisfy other possible priority lending * requests. If the thread's regulary priority is less * important than prio the thread will keep a priority boost * of prio. 
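 * For example (purely illustrative numbers): a thread whose own base
 * priority is 160 and that was lent priority 100 will, on
 * sched_unlend_prio(td, 120), keep a borrowed boost of 120 because a
 * remaining lender still needs at least that, while on
 * sched_unlend_prio(td, 200) it stops borrowing and falls back to 160.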
*/ void sched_unlend_prio(struct thread *td, u_char prio) { u_char base_pri; if (td->td_base_pri >= PRI_MIN_TIMESHARE && td->td_base_pri <= PRI_MAX_TIMESHARE) base_pri = td->td_ksegrp->kg_user_pri; else base_pri = td->td_base_pri; if (prio >= base_pri) { td->td_flags &= ~TDF_BORROWING; sched_prio(td, base_pri); } else sched_lend_prio(td, prio); } void sched_prio(struct thread *td, u_char prio) { u_char oldprio; /* First, update the base priority. */ td->td_base_pri = prio; /* * If the thread is borrowing another thread's priority, don't ever * lower the priority. */ if (td->td_flags & TDF_BORROWING && td->td_priority < prio) return; /* Change the real priority. */ oldprio = td->td_priority; sched_priority(td, prio); /* * If the thread is on a turnstile, then let the turnstile update * its state. */ if (TD_ON_LOCK(td) && oldprio != prio) turnstile_adjust(td, oldprio); } void sched_sleep(struct thread *td) { mtx_assert(&sched_lock, MA_OWNED); td->td_ksegrp->kg_slptime = 0; } static void remrunqueue(struct thread *td); void sched_switch(struct thread *td, struct thread *newtd, int flags) { struct kse *ke; struct ksegrp *kg; struct proc *p; ke = td->td_kse; p = td->td_proc; mtx_assert(&sched_lock, MA_OWNED); if ((p->p_flag & P_NOLOAD) == 0) sched_load_rem(); /* * We are volunteering to switch out so we get to nominate * a successor for the rest of our quantum * First try another thread in our ksegrp, and then look for * other ksegrps in our process. */ if (sched_followon && (p->p_flag & P_HADTHREADS) && (flags & SW_VOL) && newtd == NULL) { /* lets schedule another thread from this process */ kg = td->td_ksegrp; if ((newtd = TAILQ_FIRST(&kg->kg_runq))) { remrunqueue(newtd); sched_kgfollowons++; } else { FOREACH_KSEGRP_IN_PROC(p, kg) { if ((newtd = TAILQ_FIRST(&kg->kg_runq))) { sched_pfollowons++; remrunqueue(newtd); break; } } } } if (newtd) newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED); td->td_lastcpu = td->td_oncpu; td->td_flags &= ~TDF_NEEDRESCHED; td->td_owepreempt = 0; td->td_oncpu = NOCPU; /* * At the last moment, if this thread is still marked RUNNING, * then put it back on the run queue as it has not been suspended * or stopped or any thing else similar. We never put the idle * threads on the run queue, however. */ if (td == PCPU_GET(idlethread)) TD_SET_CAN_RUN(td); else { SLOT_RELEASE(td->td_ksegrp); if (TD_IS_RUNNING(td)) { /* Put us back on the run queue (kse and all). */ setrunqueue(td, (flags & SW_PREEMPT) ? SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : SRQ_OURSELF|SRQ_YIELDING); } else if (p->p_flag & P_HADTHREADS) { /* * We will not be on the run queue. So we must be * sleeping or similar. As it's available, * someone else can use the KSE if they need it. * It's NOT available if we are about to need it */ if (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp) slot_fill(td->td_ksegrp); } } if (newtd) { /* * The thread we are about to run needs to be counted * as if it had been added to the run queue and selected. 
* It came from: * * A preemption * * An upcall * * A followon */ KASSERT((newtd->td_inhibitors == 0), ("trying to run inhibitted thread")); SLOT_USE(newtd->td_ksegrp); newtd->td_kse->ke_flags |= KEF_DIDRUN; TD_SET_RUNNING(newtd); if ((newtd->td_proc->p_flag & P_NOLOAD) == 0) sched_load_add(); } else { newtd = choosethread(); } if (td != newtd) { #ifdef HWPMC_HOOKS if (PMC_PROC_IS_USING_PMCS(td->td_proc)) PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); #endif cpu_switch(td, newtd); #ifdef HWPMC_HOOKS if (PMC_PROC_IS_USING_PMCS(td->td_proc)) PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); #endif } sched_lock.mtx_lock = (uintptr_t)td; td->td_oncpu = PCPU_GET(cpuid); } void sched_wakeup(struct thread *td) { struct ksegrp *kg; mtx_assert(&sched_lock, MA_OWNED); kg = td->td_ksegrp; if (kg->kg_slptime > 1) { updatepri(kg); resetpriority(kg); } kg->kg_slptime = 0; setrunqueue(td, SRQ_BORING); } #ifdef SMP /* enable HTT_2 if you have a 2-way HTT cpu.*/ static int forward_wakeup(int cpunum) { cpumask_t map, me, dontuse; cpumask_t map2; struct pcpu *pc; cpumask_t id, map3; mtx_assert(&sched_lock, MA_OWNED); CTR0(KTR_RUNQ, "forward_wakeup()"); if ((!forward_wakeup_enabled) || (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0)) return (0); if (!smp_started || cold || panicstr) return (0); forward_wakeups_requested++; /* * check the idle mask we received against what we calculated before * in the old version. */ me = PCPU_GET(cpumask); /* * don't bother if we should be doing it ourself.. */ if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum))) return (0); dontuse = me | stopped_cpus | hlt_cpus_mask; map3 = 0; if (forward_wakeup_use_loop) { SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { id = pc->pc_cpumask; if ( (id & dontuse) == 0 && pc->pc_curthread == pc->pc_idlethread) { map3 |= id; } } } if (forward_wakeup_use_mask) { map = 0; map = idle_cpus_mask & ~dontuse; /* If they are both on, compare and use loop if different */ if (forward_wakeup_use_loop) { if (map != map3) { printf("map (%02X) != map3 (%02X)\n", map, map3); map = map3; } } } else { map = map3; } /* If we only allow a specific CPU, then mask off all the others */ if (cpunum != NOCPU) { KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum.")); map &= (1 << cpunum); } else { /* Try choose an idle die. */ if (forward_wakeup_use_htt) { map2 = (map & (map >> 1)) & 0x5555; if (map2) { map = map2; } } /* set only one bit */ if (forward_wakeup_use_single) { map = map & ((~map) + 1); } } if (map) { forward_wakeups_delivered++; ipi_selected(map, IPI_AST); return (1); } if (cpunum == NOCPU) printf("forward_wakeup: Idle processor not found\n"); return (0); } #endif #ifdef SMP static void kick_other_cpu(int pri,int cpuid); static void kick_other_cpu(int pri,int cpuid) { struct pcpu * pcpu = pcpu_find(cpuid); int cpri = pcpu->pc_curthread->td_priority; if (idle_cpus_mask & pcpu->pc_cpumask) { forward_wakeups_delivered++; ipi_selected(pcpu->pc_cpumask, IPI_AST); return; } if (pri >= cpri) return; #if defined(IPI_PREEMPTION) && defined(PREEMPTION) #if !defined(FULL_PREEMPTION) if (pri <= PRI_MAX_ITHD) #endif /* ! 
FULL_PREEMPTION */ { ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT); return; } #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */ pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED; ipi_selected( pcpu->pc_cpumask , IPI_AST); return; } #endif /* SMP */ void sched_add(struct thread *td, int flags) #ifdef SMP { struct kse *ke; int forwarded = 0; int cpu; int single_cpu = 0; ke = td->td_kse; mtx_assert(&sched_lock, MA_OWNED); KASSERT(ke->ke_state != KES_ONRUNQ, ("sched_add: kse %p (%s) already in run queue", ke, ke->ke_proc->p_comm)); KASSERT(ke->ke_proc->p_sflag & PS_INMEM, ("sched_add: process swapped out")); CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)", td, td->td_proc->p_comm, td->td_priority, curthread, curthread->td_proc->p_comm); if (td->td_pinned != 0) { cpu = td->td_lastcpu; ke->ke_runq = &runq_pcpu[cpu]; single_cpu = 1; CTR3(KTR_RUNQ, "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu); } else if ((ke)->ke_flags & KEF_BOUND) { /* Find CPU from bound runq */ KASSERT(SKE_RUNQ_PCPU(ke),("sched_add: bound kse not on cpu runq")); cpu = ke->ke_runq - &runq_pcpu[0]; single_cpu = 1; CTR3(KTR_RUNQ, "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu); } else { CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td); cpu = NOCPU; ke->ke_runq = &runq; } if (single_cpu && (cpu != PCPU_GET(cpuid))) { kick_other_cpu(td->td_priority,cpu); } else { if (!single_cpu) { cpumask_t me = PCPU_GET(cpumask); int idle = idle_cpus_mask & me; if (!idle && ((flags & SRQ_INTR) == 0) && (idle_cpus_mask & ~(hlt_cpus_mask | me))) forwarded = forward_wakeup(cpu); } if (!forwarded) { if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td)) return; else maybe_resched(td); } } if ((td->td_proc->p_flag & P_NOLOAD) == 0) sched_load_add(); SLOT_USE(td->td_ksegrp); runq_add(ke->ke_runq, ke, flags); ke->ke_state = KES_ONRUNQ; } #else /* SMP */ { struct kse *ke; ke = td->td_kse; mtx_assert(&sched_lock, MA_OWNED); KASSERT(ke->ke_state != KES_ONRUNQ, ("sched_add: kse %p (%s) already in run queue", ke, ke->ke_proc->p_comm)); KASSERT(ke->ke_proc->p_sflag & PS_INMEM, ("sched_add: process swapped out")); CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)", td, td->td_proc->p_comm, td->td_priority, curthread, curthread->td_proc->p_comm); CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td); ke->ke_runq = &runq; /* * If we are yielding (on the way out anyhow) * or the thread being saved is US, * then don't try be smart about preemption * or kicking off another CPU * as it won't help and may hinder. * In the YIEDLING case, we are about to run whoever is * being put in the queue anyhow, and in the * OURSELF case, we are puting ourself on the run queue * which also only happens when we are about to yield. 
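 * As an example of how the flags are used elsewhere in this file:
 * sched_wakeup() enqueues with SRQ_BORING and so may preempt here,
 * while sched_switch() re-queueing the outgoing thread passes
 * SRQ_OURSELF | SRQ_YIELDING (plus SRQ_PREEMPTED when appropriate)
 * and therefore skips maybe_preempt().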
*/ if((flags & SRQ_YIELDING) == 0) { if (maybe_preempt(td)) return; } if ((td->td_proc->p_flag & P_NOLOAD) == 0) sched_load_add(); SLOT_USE(td->td_ksegrp); runq_add(ke->ke_runq, ke, flags); ke->ke_state = KES_ONRUNQ; maybe_resched(td); } #endif /* SMP */ void sched_rem(struct thread *td) { struct kse *ke; ke = td->td_kse; KASSERT(ke->ke_proc->p_sflag & PS_INMEM, ("sched_rem: process swapped out")); KASSERT((ke->ke_state == KES_ONRUNQ), ("sched_rem: KSE not on run queue")); mtx_assert(&sched_lock, MA_OWNED); CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)", td, td->td_proc->p_comm, td->td_priority, curthread, curthread->td_proc->p_comm); if ((td->td_proc->p_flag & P_NOLOAD) == 0) sched_load_rem(); SLOT_RELEASE(td->td_ksegrp); runq_remove(ke->ke_runq, ke); ke->ke_state = KES_THREAD; } /* * Select threads to run. * Notice that the running threads still consume a slot. */ struct kse * sched_choose(void) { struct kse *ke; struct runq *rq; #ifdef SMP struct kse *kecpu; rq = &runq; ke = runq_choose(&runq); kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]); if (ke == NULL || (kecpu != NULL && kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) { CTR2(KTR_RUNQ, "choosing kse %p from pcpu runq %d", kecpu, PCPU_GET(cpuid)); ke = kecpu; rq = &runq_pcpu[PCPU_GET(cpuid)]; } else { CTR1(KTR_RUNQ, "choosing kse %p from main runq", ke); } #else rq = &runq; ke = runq_choose(&runq); #endif if (ke != NULL) { runq_remove(rq, ke); ke->ke_state = KES_THREAD; KASSERT(ke->ke_proc->p_sflag & PS_INMEM, ("sched_choose: process swapped out")); } return (ke); } void sched_userret(struct thread *td) { struct ksegrp *kg; /* * XXX we cheat slightly on the locking here to avoid locking in * the usual case. Setting td_priority here is essentially an * incomplete workaround for not setting it properly elsewhere. * Now that some interrupt handlers are threads, not setting it * properly elsewhere can clobber it in the window between setting * it here and returning to user mode, so don't waste time setting * it perfectly here. 
*/ KASSERT((td->td_flags & TDF_BORROWING) == 0, ("thread with borrowed priority returning to userland")); kg = td->td_ksegrp; if (td->td_priority != kg->kg_user_pri) { mtx_lock_spin(&sched_lock); td->td_priority = kg->kg_user_pri; td->td_base_pri = kg->kg_user_pri; mtx_unlock_spin(&sched_lock); } } void sched_bind(struct thread *td, int cpu) { struct kse *ke; mtx_assert(&sched_lock, MA_OWNED); KASSERT(TD_IS_RUNNING(td), ("sched_bind: cannot bind non-running thread")); ke = td->td_kse; ke->ke_flags |= KEF_BOUND; #ifdef SMP ke->ke_runq = &runq_pcpu[cpu]; if (PCPU_GET(cpuid) == cpu) return; ke->ke_state = KES_THREAD; mi_switch(SW_VOL, NULL); #endif } void sched_unbind(struct thread* td) { mtx_assert(&sched_lock, MA_OWNED); td->td_kse->ke_flags &= ~KEF_BOUND; } int sched_is_bound(struct thread *td) { mtx_assert(&sched_lock, MA_OWNED); return (td->td_kse->ke_flags & KEF_BOUND); } int sched_load(void) { return (sched_tdcnt); } int sched_sizeof_ksegrp(void) { return (sizeof(struct ksegrp) + sizeof(struct kg_sched)); } int sched_sizeof_proc(void) { return (sizeof(struct proc)); } int sched_sizeof_thread(void) { return (sizeof(struct thread) + sizeof(struct kse)); } fixpt_t sched_pctcpu(struct thread *td) { struct kse *ke; ke = td->td_kse; return (ke->ke_pctcpu); return (0); } + +void +sched_tick(void) +{ +} #define KERN_SWITCH_INCLUDE 1 #include "kern/kern_switch.c" Index: head/sys/kern/sched_core.c =================================================================== --- head/sys/kern/sched_core.c (nonexistent) +++ head/sys/kern/sched_core.c (revision 159570) @@ -0,0 +1,2329 @@ +/*- + * Copyright (c) 2005-2006, David Xu + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_hwpmc_hooks.h" +#include "opt_sched.h" + +#define kse td_sched + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef KTRACE +#include +#include +#endif + +#ifdef HWPMC_HOOKS +#include +#endif + +#include +#include + +/* get process's nice value, skip value 20 which is not supported */ +#define PROC_NICE(p) MIN((p)->p_nice, 19) + +/* convert nice to kernel thread priority */ +#define NICE_TO_PRI(nice) (PUSER + 20 + (nice)) + +/* get process's static priority */ +#define PROC_PRI(p) NICE_TO_PRI(PROC_NICE(p)) + +/* convert kernel thread priority to user priority */ +#define USER_PRI(pri) MIN((pri) - PUSER, 39) + +/* convert nice value to user priority */ +#define PROC_USER_PRI(p) (PROC_NICE(p) + 20) + +/* maximum user priority, highest prio + 1 */ +#define MAX_USER_PRI 40 + +/* maximum kernel priority its nice is 19 */ +#define PUSER_MAX (PUSER + 39) + +/* ticks and nanosecond converters */ +#define NS_TO_HZ(n) ((n) / (1000000000 / hz)) +#define HZ_TO_NS(h) ((h) * (1000000000 / hz)) + +/* ticks and microsecond converters */ +#define MS_TO_HZ(m) ((m) / (1000000 / hz)) + +#define PRI_SCORE_RATIO 25 +#define MAX_SCORE (MAX_USER_PRI * PRI_SCORE_RATIO / 100) +#define MAX_SLEEP_TIME (def_timeslice * MAX_SCORE) +#define NS_MAX_SLEEP_TIME (HZ_TO_NS(MAX_SLEEP_TIME)) +#define STARVATION_TIME (MAX_SLEEP_TIME) + +#define CURRENT_SCORE(kg) \ + (MAX_SCORE * NS_TO_HZ((kg)->kg_slptime) / MAX_SLEEP_TIME) + +#define SCALE_USER_PRI(x, upri) \ + MAX(x * (upri + 1) / (MAX_USER_PRI/2), min_timeslice) + +/* + * For a thread whose nice is zero, the score is used to determine + * if it is an interactive thread. + */ +#define INTERACTIVE_BASE_SCORE (MAX_SCORE * 20)/100 + +/* + * Calculate a score which a thread must have to prove itself is + * an interactive thread. + */ +#define INTERACTIVE_SCORE(ke) \ + (PROC_NICE((ke)->ke_proc) * MAX_SCORE / 40 + INTERACTIVE_BASE_SCORE) + +/* Test if a thread is an interactive thread */ +#define THREAD_IS_INTERACTIVE(ke) \ + ((ke)->ke_ksegrp->kg_user_pri <= \ + PROC_PRI((ke)->ke_proc) - INTERACTIVE_SCORE(ke)) + +/* + * Calculate how long a thread must sleep to prove itself is an + * interactive sleep. + */ +#define INTERACTIVE_SLEEP_TIME(ke) \ + (HZ_TO_NS(MAX_SLEEP_TIME * \ + (MAX_SCORE / 2 + INTERACTIVE_SCORE((ke)) + 1) / MAX_SCORE - 1)) + +#define CHILD_WEIGHT 90 +#define PARENT_WEIGHT 90 +#define EXIT_WEIGHT 3 + +#define SCHED_LOAD_SCALE 128UL + +#define IDLE 0 +#define IDLE_IDLE 1 +#define NOT_IDLE 2 + +#define KQB_LEN (8) /* Number of priority status words. */ +#define KQB_L2BPW (5) /* Log2(sizeof(rqb_word_t) * NBBY)). */ +#define KQB_BPW (1<> KQB_L2BPW) +#define KQB_FFS(word) (ffs(word) - 1) + +#define KQ_NQS 256 + +/* + * Type of run queue status word. + */ +typedef u_int32_t kqb_word_t; + +/* + * Head of run queues. + */ +TAILQ_HEAD(krqhead, kse); + +/* + * Bit array which maintains the status of a run queue. When a queue is + * non-empty the bit corresponding to the queue number will be set. + */ +struct krqbits { + kqb_word_t rqb_bits[KQB_LEN]; +}; + +/* + * Run queue structure. Contains an array of run queues on which processes + * are placed, and a structure to maintain the status of each queue. 
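+ * Unlike the shared runq code, this scheduler keeps one queue per
+ * priority level (KQ_NQS == 256), which is why, under SCHED_CORE,
+ * ke_rqindex holds the thread's priority itself rather than
+ * priority / RQ_PPQ.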
+ */ +struct krunq { + struct krqbits rq_status; + struct krqhead rq_queues[KQ_NQS]; +}; + +/* + * The following datastructures are allocated within their parent structure + * but are scheduler specific. + */ +/* + * The schedulable entity that can be given a context to run. A process may + * have several of these. + */ +struct kse { + TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */ + int ke_flags; /* (j) KEF_* flags. */ + struct thread *ke_thread; /* (*) Active associated thread. */ + fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */ + u_char ke_rqindex; /* (j) Run queue index. */ + enum { + KES_THREAD = 0x0, /* slaved to thread state */ + KES_ONRUNQ + } ke_state; /* (j) thread sched specific status. */ + int ke_slice; + struct krunq *ke_runq; + int ke_cpu; /* CPU that we have affinity for. */ + int ke_activated; + uint64_t ke_timestamp; + uint64_t ke_lastran; +#ifdef SMP + int ke_tocpu; +#endif + /* The following variables are only used for pctcpu calculation */ + int ke_ltick; /* Last tick that we were running on */ + int ke_ftick; /* First tick that we were running on */ + int ke_ticks; /* Tick count */ +}; + +#define td_kse td_sched +#define ke_proc ke_thread->td_proc +#define ke_ksegrp ke_thread->td_ksegrp + +/* flags kept in ke_flags */ +#define KEF_ASSIGNED 0x0001 /* Thread is being migrated. */ +#define KEF_BOUND 0x0002 /* Thread can not migrate. */ +#define KEF_XFERABLE 0x0004 /* Thread was added as transferable. */ +#define KEF_HOLD 0x0008 /* Thread is temporarily bound. */ +#define KEF_REMOVED 0x0010 /* Thread was removed while ASSIGNED */ +#define KEF_INTERNAL 0x0020 /* Thread added due to migration. */ +#define KEF_PREEMPTED 0x0040 /* Thread was preempted. */ +#define KEF_MIGRATING 0x0080 /* Thread is migrating. */ +#define KEF_SLEEP 0x0100 /* Thread did sleep. */ +#define KEF_DIDRUN 0x2000 /* Thread actually ran. */ +#define KEF_EXIT 0x4000 /* Thread is being killed. */ +#define KEF_NEXTRQ 0x8000 /* Thread should be in next queue. */ +#define KEF_FIRST_SLICE 0x10000 /* Thread has first time slice left. */ + +struct kg_sched { + struct thread *skg_last_assigned; /* (j) Last thread assigned to */ + /* the system scheduler */ + u_long skg_slptime; /* (j) Number of ticks we vol. slept */ + u_long skg_runtime; /* (j) Temp total run time. */ + int skg_avail_opennings; /* (j) Num unfilled slots in group.*/ + int skg_concurrency; /* (j) Num threads requested in group.*/ +}; +#define kg_last_assigned kg_sched->skg_last_assigned +#define kg_avail_opennings kg_sched->skg_avail_opennings +#define kg_concurrency kg_sched->skg_concurrency +#define kg_slptime kg_sched->skg_slptime +#define kg_runtime kg_sched->skg_runtime + +#define SLOT_RELEASE(kg) (kg)->kg_avail_opennings++ +#define SLOT_USE(kg) (kg)->kg_avail_opennings-- + +/* + * Cpu percentage computation macros and defines. + * + * SCHED_CPU_TIME: Number of seconds to average the cpu usage across. + * SCHED_CPU_TICKS: Number of hz ticks to average the cpu usage across. + */ + +#define SCHED_CPU_TIME 10 +#define SCHED_CPU_TICKS (hz * SCHED_CPU_TIME) + +/* + * kseq - per processor runqs and statistics. + */ +struct kseq { + struct krunq ksq_idle; /* Queue of IDLE threads. */ + struct krunq ksq_timeshare[2]; /* Run queues for !IDLE. */ + struct krunq *ksq_next; /* Next timeshare queue. */ + struct krunq *ksq_curr; /* Current queue. */ + int ksq_load_timeshare; /* Load for timeshare. */ + int ksq_load_idle; + int ksq_load; /* Aggregate load. 
*/ + int ksq_sysload; /* For loadavg, !P_NOLOAD */ + uint64_t ksq_expired_timestamp; + uint64_t ksq_last_timestamp; + signed char ksq_best_expired_nice; +#ifdef SMP + int ksq_transferable; + LIST_ENTRY(kseq) ksq_siblings; /* Next in kseq group. */ + struct kseq_group *ksq_group; /* Our processor group. */ + struct thread *ksq_migrated; + TAILQ_HEAD(,kse) ksq_migrateq; + int ksq_avgload; +#endif +}; + +#ifdef SMP +/* + * kseq groups are groups of processors which can cheaply share threads. When + * one processor in the group goes idle it will check the runqs of the other + * processors in its group prior to halting and waiting for an interrupt. + * These groups are suitable for SMT (Symetric Multi-Threading) and not NUMA. + * In a NUMA environment we'd want an idle bitmap per group and a two tiered + * load balancer. + */ +struct kseq_group { + int ksg_cpus; /* Count of CPUs in this kseq group. */ + cpumask_t ksg_cpumask; /* Mask of cpus in this group. */ + cpumask_t ksg_idlemask; /* Idle cpus in this group. */ + cpumask_t ksg_mask; /* Bit mask for first cpu. */ + int ksg_transferable; /* Transferable load of this group. */ + LIST_HEAD(, kseq) ksg_members; /* Linked list of all members. */ + int ksg_balance_tick; +}; +#endif + +static struct kse kse0; +static struct kg_sched kg_sched0; + +static int min_timeslice = 5; +static int def_timeslice = 100; +static int granularity = 10; +static int realstathz; + +/* + * One kse queue per processor. + */ +#ifdef SMP +static cpumask_t kseq_idle; +static int ksg_maxid; +static struct kseq kseq_cpu[MAXCPU]; +static struct kseq_group kseq_groups[MAXCPU]; +static int balance_tick; +static int balance_interval = 1; +static int balance_interval_max = 32; +static int balance_interval_min = 8; +static int balance_busy_factor = 32; +static int imbalance_pct = 25; +static int imbalance_pct2 = 50; +static int ignore_topology = 1; + +#define KSEQ_SELF() (&kseq_cpu[PCPU_GET(cpuid)]) +#define KSEQ_CPU(x) (&kseq_cpu[(x)]) +#define KSEQ_ID(x) ((x) - kseq_cpu) +#define KSEQ_GROUP(x) (&kseq_groups[(x)]) +#else /* !SMP */ +static struct kseq kseq_cpu; + +#define KSEQ_SELF() (&kseq_cpu) +#define KSEQ_CPU(x) (&kseq_cpu) +#endif + +/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */ +static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ +SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); + +static void sched_setup(void *dummy); +SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL); + +static void sched_initticks(void *dummy); +SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL) + +static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler"); + +SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "core", 0, + "Scheduler name"); + +#ifdef SMP +SYSCTL_INT(_kern_sched, OID_AUTO, imbalance_pct, CTLFLAG_RW, + &imbalance_pct, 0, ""); + +SYSCTL_INT(_kern_sched, OID_AUTO, imbalance_pct2, CTLFLAG_RW, + &imbalance_pct2, 0, ""); + +SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval_min, CTLFLAG_RW, + &balance_interval_min, 0, ""); + +SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval_max, CTLFLAG_RW, + &balance_interval_max, 0, ""); +#endif + +static void slot_fill(struct ksegrp *); + +static void krunq_add(struct krunq *, struct kse *, int flags); +static struct kse *krunq_choose(struct krunq *); +static void krunq_clrbit(struct krunq *rq, int pri); +static int krunq_findbit(struct krunq *rq); +static void krunq_init(struct krunq *); +static void krunq_remove(struct 
krunq *, struct kse *); +#ifdef SMP +static struct kse *krunq_steal(struct krunq *rq, int my_cpu); +#endif + +static struct kse * kseq_choose(struct kseq *); +static void kseq_load_add(struct kseq *, struct kse *); +static void kseq_load_rem(struct kseq *, struct kse *); +static void kseq_runq_add(struct kseq *, struct kse *, int); +static void kseq_runq_rem(struct kseq *, struct kse *); +static void kseq_setup(struct kseq *); + +static int sched_is_timeshare(struct ksegrp *kg); +static struct kse *sched_choose(void); +static int sched_calc_pri(struct ksegrp *kg); +static int sched_starving(struct kseq *, uint64_t, struct kse *); +static void sched_pctcpu_update(struct kse *); +static void sched_thread_priority(struct thread *, u_char); +static uint64_t sched_timestamp(void); +static int sched_recalc_pri(struct kse *ke, uint64_t now); +static int sched_timeslice(struct kse *ke); +static void sched_update_runtime(struct kse *ke, uint64_t now); +static void sched_commit_runtime(struct kse *ke); + +#ifdef SMP +static void sched_balance_tick(int my_cpu, int idle); +static int sched_balance_idle(int my_cpu, int idle); +static int sched_balance(int my_cpu, int idle); +struct kseq_group *sched_find_busiest_group(int my_cpu, int idle, + int *imbalance); +static struct kseq *sched_find_busiest_queue(struct kseq_group *ksg); +static int sched_find_idlest_cpu(struct kse *ke, int cpu); +static int sched_pull_threads(struct kseq *high, struct kseq *myksq, + int max_move, int idle); +static int sched_pull_one(struct kseq *from, struct kseq *myksq, int idle); +static struct kse *sched_steal(struct kseq *, int my_cpu, int stealidle); +static int sched_idled(struct kseq *, int idle); +static int sched_find_idle_cpu(int defcpu); +static void migrated_setup(void *dummy); +static void migrated(void *dummy); +SYSINIT(migrated_setup, SI_SUB_KTHREAD_IDLE, SI_ORDER_MIDDLE, migrated_setup, + NULL); + +#endif /* SMP */ + +static inline int +kse_pinned(struct kse *ke) +{ + if (ke->ke_thread->td_pinned) + return (1); + + if (ke->ke_flags & KEF_BOUND) + return (1); + + return (0); +} + +#ifdef SMP +static inline int +kse_can_migrate(struct kse *ke) +{ + if (kse_pinned(ke)) + return (0); + return (1); +} +#endif + +/* + * Initialize a run structure. + */ +static void +krunq_init(struct krunq *rq) +{ + int i; + + bzero(rq, sizeof *rq); + for (i = 0; i < KQ_NQS; i++) + TAILQ_INIT(&rq->rq_queues[i]); +} + +/* + * Clear the status bit of the queue corresponding to priority level pri, + * indicating that it is empty. + */ +static inline void +krunq_clrbit(struct krunq *rq, int pri) +{ + struct krqbits *rqb; + + rqb = &rq->rq_status; + rqb->rqb_bits[KQB_WORD(pri)] &= ~KQB_BIT(pri); +} + +/* + * Find the index of the first non-empty run queue. This is done by + * scanning the status bits, a set bit indicates a non-empty queue. + */ +static int +krunq_findbit(struct krunq *rq) +{ + struct krqbits *rqb; + int pri; + int i; + + rqb = &rq->rq_status; + for (i = 0; i < KQB_LEN; i++) { + if (rqb->rqb_bits[i]) { + pri = KQB_FFS(rqb->rqb_bits[i]) + (i << KQB_L2BPW); + return (pri); + } + } + return (-1); +} + +/* + * Set the status bit of the queue corresponding to priority level pri, + * indicating that it is non-empty. + */ +static inline void +krunq_setbit(struct krunq *rq, int pri) +{ + struct krqbits *rqb; + + rqb = &rq->rq_status; + rqb->rqb_bits[KQB_WORD(pri)] |= KQB_BIT(pri); +} + +/* + * Add the KSE to the queue specified by its priority, and set the + * corresponding status bit. 
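krunq is the classic bitmap run queue: one FIFO per priority level plus one bit per level recording which FIFOs are non-empty, so finding the best runnable thread costs a scan of a few status words and an ffs(). A minimal user-space rendering of the same idea, with an integer counter standing in for each TAILQ; all names here are local to the sketch.

#include <stdint.h>
#include <strings.h>   /* ffs() */
#include <stdio.h>

#define NQS    256                       /* number of priority levels */
#define BPW    32                        /* bits per status word */
#define NWORDS (NQS / BPW)

struct bitrunq {
    uint32_t status[NWORDS];             /* bit i set => level i non-empty */
    int      count[NQS];                 /* stand-in for the per-level TAILQ */
};

static void brq_add(struct bitrunq *q, int pri)
{
    q->count[pri]++;
    q->status[pri / BPW] |= 1u << (pri % BPW);
}

static void brq_remove(struct bitrunq *q, int pri)
{
    if (--q->count[pri] == 0)
        q->status[pri / BPW] &= ~(1u << (pri % BPW));
}

/* Return the lowest-numbered (best) non-empty priority, or -1 if all empty. */
static int brq_findbit(const struct bitrunq *q)
{
    for (int i = 0; i < NWORDS; i++)
        if (q->status[i])
            return ffs((int)q->status[i]) - 1 + i * BPW;
    return (-1);
}

int main(void)
{
    struct bitrunq q = { { 0 }, { 0 } };

    brq_add(&q, 180);
    brq_add(&q, 40);
    printf("best level: %d\n", brq_findbit(&q));   /* 40 */
    brq_remove(&q, 40);
    printf("best level: %d\n", brq_findbit(&q));   /* 180 */
    return 0;
}

The kernel keeps two such arrays per CPU (active and expired) plus a third for the idle class, which is what kseq_choose() below cycles through.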
+ */ +static void +krunq_add(struct krunq *rq, struct kse *ke, int flags) +{ + struct krqhead *rqh; + int pri; + + pri = ke->ke_thread->td_priority; + ke->ke_rqindex = pri; + krunq_setbit(rq, pri); + rqh = &rq->rq_queues[pri]; + if (flags & SRQ_PREEMPTED) + TAILQ_INSERT_HEAD(rqh, ke, ke_procq); + else + TAILQ_INSERT_TAIL(rqh, ke, ke_procq); +} + +/* + * Find the highest priority process on the run queue. + */ +static struct kse * +krunq_choose(struct krunq *rq) +{ + struct krqhead *rqh; + struct kse *ke; + int pri; + + mtx_assert(&sched_lock, MA_OWNED); + if ((pri = krunq_findbit(rq)) != -1) { + rqh = &rq->rq_queues[pri]; + ke = TAILQ_FIRST(rqh); + KASSERT(ke != NULL, ("runq_choose: no proc on busy queue")); + return (ke); + } + + return (NULL); +} + +/* + * Remove the KSE from the queue specified by its priority, and clear the + * corresponding status bit if the queue becomes empty. + * Caller must set ke->ke_state afterwards. + */ +static void +krunq_remove(struct krunq *rq, struct kse *ke) +{ + struct krqhead *rqh; + int pri; + + KASSERT(ke->ke_proc->p_sflag & PS_INMEM, + ("runq_remove: process swapped out")); + pri = ke->ke_rqindex; + rqh = &rq->rq_queues[pri]; + KASSERT(ke != NULL, ("krunq_remove: no proc on busy queue")); + TAILQ_REMOVE(rqh, ke, ke_procq); + if (TAILQ_EMPTY(rqh)) + krunq_clrbit(rq, pri); +} + +#ifdef SMP +static struct kse * +krunq_steal(struct krunq *rq, int my_cpu) +{ + struct krqhead *rqh; + struct krqbits *rqb; + struct kse *ke; + kqb_word_t word; + int i, bit; + + (void)my_cpu; + + mtx_assert(&sched_lock, MA_OWNED); + rqb = &rq->rq_status; + for (i = 0; i < KQB_LEN; i++) { + if ((word = rqb->rqb_bits[i]) == 0) + continue; + do { + bit = KQB_FFS(word); + rqh = &rq->rq_queues[bit + (i << KQB_L2BPW)]; + TAILQ_FOREACH(ke, rqh, ke_procq) { + if (kse_can_migrate(ke)) + return (ke); + } + word &= ~((kqb_word_t)1 << bit); + } while (word != 0); + } + return (NULL); +} +#endif + +static inline void +kseq_runq_add(struct kseq *kseq, struct kse *ke, int flags) +{ +#ifdef SMP + if (kse_pinned(ke) == 0) { + kseq->ksq_transferable++; + kseq->ksq_group->ksg_transferable++; + ke->ke_flags |= KEF_XFERABLE; + } +#endif + if (ke->ke_flags & KEF_PREEMPTED) + flags |= SRQ_PREEMPTED; + krunq_add(ke->ke_runq, ke, flags); +} + +static inline void +kseq_runq_rem(struct kseq *kseq, struct kse *ke) +{ +#ifdef SMP + if (ke->ke_flags & KEF_XFERABLE) { + kseq->ksq_transferable--; + kseq->ksq_group->ksg_transferable--; + ke->ke_flags &= ~KEF_XFERABLE; + } +#endif + krunq_remove(ke->ke_runq, ke); + ke->ke_runq = NULL; +} + +static void +kseq_load_add(struct kseq *kseq, struct kse *ke) +{ + int class; + + mtx_assert(&sched_lock, MA_OWNED); +#ifdef SMP + if (__predict_false(ke->ke_thread == kseq->ksq_migrated)) + return; +#endif + class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); + if (class == PRI_TIMESHARE) + kseq->ksq_load_timeshare++; + else if (class == PRI_IDLE) + kseq->ksq_load_idle++; + kseq->ksq_load++; + if ((ke->ke_proc->p_flag & P_NOLOAD) == 0) + kseq->ksq_sysload++; +} + +static void +kseq_load_rem(struct kseq *kseq, struct kse *ke) +{ + int class; + + mtx_assert(&sched_lock, MA_OWNED); +#ifdef SMP + if (__predict_false(ke->ke_thread == kseq->ksq_migrated)) + return; +#endif + class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); + if (class == PRI_TIMESHARE) + kseq->ksq_load_timeshare--; + else if (class == PRI_IDLE) + kseq->ksq_load_idle--; + kseq->ksq_load--; + if ((ke->ke_proc->p_flag & P_NOLOAD) == 0) + kseq->ksq_sysload--; +} + +/* + * Pick the highest priority task we have and return 
it. + */ + +static struct kse * +kseq_choose(struct kseq *kseq) +{ + struct krunq *swap; + struct kse *ke; + + mtx_assert(&sched_lock, MA_OWNED); + + ke = krunq_choose(kseq->ksq_curr); + if (ke != NULL) + return (ke); + + kseq->ksq_best_expired_nice = 21; + kseq->ksq_expired_timestamp = 0; + swap = kseq->ksq_curr; + kseq->ksq_curr = kseq->ksq_next; + kseq->ksq_next = swap; + ke = krunq_choose(kseq->ksq_curr); + if (ke != NULL) + return (ke); + + return krunq_choose(&kseq->ksq_idle); +} + +static inline uint64_t +sched_timestamp(void) +{ + uint64_t now = cputick2usec(cpu_ticks()) * 1000; + return (now); +} + +static inline int +sched_timeslice(struct kse *ke) +{ + struct proc *p = ke->ke_proc; + + if (ke->ke_proc->p_nice < 0) + return SCALE_USER_PRI(def_timeslice*4, PROC_USER_PRI(p)); + else + return SCALE_USER_PRI(def_timeslice, PROC_USER_PRI(p)); +} + +static inline int +sched_is_timeshare(struct ksegrp *kg) +{ + /* + * XXX P_KTHREAD should be checked, but unfortunately, the + * readonly flag resides in a volatile member p_flag, reading + * it could cause lots of cache line sharing and invalidating. + */ + return (kg->kg_pri_class != PRI_TIMESHARE); +} + +static int +sched_calc_pri(struct ksegrp *kg) +{ + int score, pri; + + if (__predict_false(!sched_is_timeshare(kg))) + return (kg->kg_user_pri); + score = CURRENT_SCORE(kg) - MAX_SCORE / 2; + pri = PROC_PRI(kg->kg_proc) - score; + if (pri < PUSER) + pri = PUSER; + if (pri > PUSER_MAX) + pri = PUSER_MAX; + return (pri); +} + +static int +sched_recalc_pri(struct kse *ke, uint64_t now) +{ + uint64_t delta; + unsigned int sleep_time; + struct ksegrp *kg; + + kg = ke->ke_ksegrp; + delta = now - ke->ke_timestamp; + if (__predict_false(!sched_is_timeshare(kg))) + return (kg->kg_user_pri); + + if (delta > NS_MAX_SLEEP_TIME) + sleep_time = NS_MAX_SLEEP_TIME; + else + sleep_time = (unsigned int)delta; + if (__predict_false(sleep_time == 0)) + goto out; + + if (ke->ke_activated != -1 && + sleep_time > INTERACTIVE_SLEEP_TIME(ke)) { + kg->kg_slptime = HZ_TO_NS(MAX_SLEEP_TIME - def_timeslice); + } else { + sleep_time *= (MAX_SCORE - CURRENT_SCORE(kg)) ? : 1; + + /* + * If thread is waking from uninterruptible sleep, it is + * unlikely an interactive sleep, limit its sleep time to + * prevent it from being an interactive thread. + */ + if (ke->ke_activated == -1) { + if (kg->kg_slptime >= INTERACTIVE_SLEEP_TIME(ke)) + sleep_time = 0; + else if (kg->kg_slptime + sleep_time >= + INTERACTIVE_SLEEP_TIME(ke)) { + kg->kg_slptime = INTERACTIVE_SLEEP_TIME(ke); + sleep_time = 0; + } + } + + /* + * Thread gets priority boost here. + */ + kg->kg_slptime += sleep_time; + + /* Sleep time should never be larger than maximum */ + if (kg->kg_slptime > NS_MAX_SLEEP_TIME) + kg->kg_slptime = NS_MAX_SLEEP_TIME; + } + +out: + return (sched_calc_pri(kg)); +} + +static void +sched_update_runtime(struct kse *ke, uint64_t now) +{ + uint64_t runtime; + struct ksegrp *kg = ke->ke_ksegrp; + + if (sched_is_timeshare(kg)) { + if ((int64_t)(now - ke->ke_timestamp) < NS_MAX_SLEEP_TIME) { + runtime = now - ke->ke_timestamp; + if ((int64_t)(now - ke->ke_timestamp) < 0) + runtime = 0; + } else { + runtime = NS_MAX_SLEEP_TIME; + } + runtime /= (CURRENT_SCORE(kg) ? 
: 1); + kg->kg_runtime += runtime; + ke->ke_timestamp = now; + } +} + +static void +sched_commit_runtime(struct kse *ke) +{ + struct ksegrp *kg = ke->ke_ksegrp; + + if (kg->kg_runtime > kg->kg_slptime) + kg->kg_slptime = 0; + else + kg->kg_slptime -= kg->kg_runtime; + kg->kg_runtime = 0; +} + +#ifdef SMP + +/* staged balancing operations between CPUs */ +#define CPU_OFFSET(cpu) (hz * cpu / MAXCPU) + +static void +sched_balance_tick(int my_cpu, int idle) +{ + struct kseq *kseq = KSEQ_CPU(my_cpu); + unsigned t = ticks + CPU_OFFSET(my_cpu); + int old_load, cur_load; + int interval; + + old_load = kseq->ksq_avgload; + cur_load = kseq->ksq_load * SCHED_LOAD_SCALE; + if (cur_load > old_load) + old_load++; + kseq->ksq_avgload = (old_load + cur_load) / 2; + + interval = balance_interval; + if (idle == NOT_IDLE) + interval *= balance_busy_factor; + interval = MS_TO_HZ(interval); + if (interval == 0) + interval = 1; + if (t - balance_tick >= interval) { + sched_balance(my_cpu, idle); + balance_tick += interval; + } +} + +static int +sched_balance(int my_cpu, int idle) +{ + struct kseq_group *high_group; + struct kseq *high_queue; + int imbalance, pulled; + + mtx_assert(&sched_lock, MA_OWNED); + high_group = sched_find_busiest_group(my_cpu, idle, &imbalance); + if (high_group == NULL) + goto out; + high_queue = sched_find_busiest_queue(high_group); + if (high_queue == NULL) + goto out; + pulled = sched_pull_threads(high_queue, KSEQ_CPU(my_cpu), imbalance, + idle); + if (pulled == 0) { + if (balance_interval < balance_interval_max) + balance_interval++; + } else { + balance_interval = balance_interval_min; + } + return (pulled); +out: + if (balance_interval < balance_interval_max) + balance_interval *= 2; + return (0); +} + +static int +sched_balance_idle(int my_cpu, int idle) +{ + struct kseq_group *high_group; + struct kseq *high_queue; + int imbalance, pulled; + + mtx_assert(&sched_lock, MA_OWNED); + high_group = sched_find_busiest_group(my_cpu, idle, &imbalance); + if (high_group == NULL) + return (0); + high_queue = sched_find_busiest_queue(high_group); + if (high_queue == NULL) + return (0); + pulled = sched_pull_threads(high_queue, KSEQ_CPU(my_cpu), imbalance, + idle); + return (pulled); +} + +static inline int +kseq_source_load(struct kseq *ksq) +{ + int load = ksq->ksq_load * SCHED_LOAD_SCALE; + return (MIN(ksq->ksq_avgload, load)); +} + +static inline int +kseq_dest_load(struct kseq *ksq) +{ + int load = ksq->ksq_load * SCHED_LOAD_SCALE; + return (MAX(ksq->ksq_avgload, load)); +} + +struct kseq_group * +sched_find_busiest_group(int my_cpu, int idle, int *imbalance) +{ + static unsigned stage_cpu; + struct kseq_group *high; + struct kseq_group *ksg; + struct kseq *my_ksq, *ksq; + int my_load, high_load, avg_load, total_load, load; + int diff, cnt, i; + + *imbalance = 0; + if (__predict_false(smp_started == 0)) + return (NULL); + + my_ksq = KSEQ_CPU(my_cpu); + high = NULL; + high_load = total_load = my_load = 0; + i = (stage_cpu++) % (ksg_maxid + 1); + for (cnt = 0; cnt <= ksg_maxid; cnt++) { + ksg = KSEQ_GROUP(i); + /* + * Find the CPU with the highest load that has some + * threads to transfer. 
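The interactivity bookkeeping just above is a credit/debit balance: sched_recalc_pri() grows kg_slptime while a thread sleeps, sched_update_runtime() accumulates kg_runtime while it runs, and sched_commit_runtime() nets the two out. A simplified model of that cycle, using nanosecond integers as the file does but with an assumed cap and made-up magnitudes:

#include <stdio.h>
#include <stdint.h>

#define MAX_SLEEP_NS 1000000000ULL   /* assumed cap on banked sleep credit */

struct interact {
    uint64_t slptime;   /* sleep credit, ns */
    uint64_t runtime;   /* uncommitted run debit, ns */
};

/* Thread slept for 'ns': grow the credit, capped. */
static void account_sleep(struct interact *it, uint64_t ns)
{
    it->slptime += ns;
    if (it->slptime > MAX_SLEEP_NS)
        it->slptime = MAX_SLEEP_NS;
}

/* Thread ran for 'ns': remember the debit. */
static void account_run(struct interact *it, uint64_t ns)
{
    it->runtime += ns;
}

/* Settle: running time eats into the banked sleep credit. */
static void commit(struct interact *it)
{
    if (it->runtime > it->slptime)
        it->slptime = 0;
    else
        it->slptime -= it->runtime;
    it->runtime = 0;
}

int main(void)
{
    struct interact it = { 0, 0 };

    account_sleep(&it, 600000000);   /* slept 600 ms */
    account_run(&it, 250000000);     /* then ran 250 ms */
    commit(&it);
    printf("remaining sleep credit: %llu ns\n",
        (unsigned long long)it.slptime);   /* 350000000 */
    return 0;
}

In the kernel the debit is additionally divided by the current score before being committed, so threads that have banked a lot of sleep lose their credit more slowly.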
+ */ + load = 0; + LIST_FOREACH(ksq, &ksg->ksg_members, ksq_siblings) { + if (ksg == my_ksq->ksq_group) + load += kseq_dest_load(ksq); + else + load += kseq_source_load(ksq); + } + if (ksg == my_ksq->ksq_group) { + my_load = load; + } else if (load > high_load && ksg->ksg_transferable) { + high = ksg; + high_load = load; + } + total_load += load; + if (++i > ksg_maxid) + i = 0; + } + + avg_load = total_load / (ksg_maxid + 1); + + if (high == NULL) + return (NULL); + + if (my_load >= avg_load || + (high_load - my_load) * 100 < imbalance_pct * my_load) { + if (idle == IDLE_IDLE || + (idle == IDLE && high_load > SCHED_LOAD_SCALE)) { + *imbalance = 1; + return (high); + } else { + return (NULL); + } + } + + /* + * Pick a minimum imbalance value, avoid raising our load + * higher than average and pushing busiest load under average. + */ + diff = MIN(high_load - avg_load, avg_load - my_load); + if (diff < SCHED_LOAD_SCALE) { + if (high_load - my_load >= SCHED_LOAD_SCALE * 2) { + *imbalance = 1; + return (high); + } + } + + *imbalance = diff / SCHED_LOAD_SCALE; + return (high); +} + +static struct kseq * +sched_find_busiest_queue(struct kseq_group *ksg) +{ + struct kseq *kseq, *high = NULL; + int load, high_load = 0; + + LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) { + load = kseq_source_load(kseq); + if (load > high_load) { + high_load = load; + high = kseq; + } + } + + return (high); +} + +static int +sched_pull_threads(struct kseq *high, struct kseq *myksq, int max_pull, + int idle) +{ + int pulled, i; + + mtx_assert(&sched_lock, MA_OWNED); + pulled = 0; + for (i = 0; i < max_pull; i++) { + if (sched_pull_one(high, myksq, idle)) + pulled++; + else + break; + } + return (pulled); +} + +static int +sched_pull_one(struct kseq *from, struct kseq *myksq, int idle) +{ + struct kseq *kseq; + struct kse *ke; + struct krunq *destq; + int class; + + mtx_assert(&sched_lock, MA_OWNED); + kseq = from; + ke = sched_steal(kseq, KSEQ_ID(myksq), idle); + if (ke == NULL) { + /* doing balance in same group */ + if (from->ksq_group == myksq->ksq_group) + return (0); + + struct kseq_group *ksg; + + ksg = kseq->ksq_group; + LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) { + if (kseq == from || kseq == myksq || + kseq->ksq_transferable == 0) + continue; + ke = sched_steal(kseq, KSEQ_ID(myksq), idle); + break; + } + if (ke == NULL) + return (0); + } + ke->ke_timestamp = ke->ke_timestamp + myksq->ksq_last_timestamp - + kseq->ksq_last_timestamp; + ke->ke_lastran = 0; + if (ke->ke_runq == from->ksq_curr) + destq = myksq->ksq_curr; + else if (ke->ke_runq == from->ksq_next) + destq = myksq->ksq_next; + else + destq = &myksq->ksq_idle; + kseq_runq_rem(kseq, ke); + kseq_load_rem(kseq, ke); + ke->ke_cpu = KSEQ_ID(myksq); + ke->ke_runq = destq; + ke->ke_state = KES_ONRUNQ; + kseq_runq_add(myksq, ke, 0); + kseq_load_add(myksq, ke); + class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); + if (class != PRI_IDLE) { + if (kseq_idle & myksq->ksq_group->ksg_mask) + kseq_idle &= ~myksq->ksq_group->ksg_mask; + if (myksq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) + myksq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask); + } + if (ke->ke_thread->td_priority < curthread->td_priority) + curthread->td_flags |= TDF_NEEDRESCHED; + return (1); +} + +static struct kse * +sched_steal(struct kseq *kseq, int my_cpu, int idle) +{ + struct kse *ke; + + /* + * Steal from expired queue first to try to get a non-interactive + * task that may not have run for a while. 
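sched_find_busiest_group() turns load differences into a count of threads to pull: nothing moves unless the busiest group exceeds this CPU's load by imbalance_pct, and the amount moved is capped by both how far the busy group sits above the average and how far this CPU sits below it, so a pull never overshoots. A condensed sketch of that arithmetic for a single pair of queues; the idle-CPU special cases of the real function are omitted.

#include <stdio.h>

#define LOAD_SCALE    128
#define IMBALANCE_PCT 25          /* percent headroom before we bother */

/* Return how many threads to pull from 'busy' toward 'mine' (0 = leave it). */
static int imbalance(int my_load, int busy_load, int avg_load)
{
    int diff;

    if (my_load >= avg_load ||
        (busy_load - my_load) * 100 < IMBALANCE_PCT * my_load)
        return 0;                 /* not worth disturbing CPU affinity */

    diff = busy_load - avg_load;
    if (avg_load - my_load < diff)
        diff = avg_load - my_load;   /* don't push the busy queue below average */

    if (diff < LOAD_SCALE)        /* less than one whole thread of imbalance */
        return (busy_load - my_load >= 2 * LOAD_SCALE) ? 1 : 0;

    return diff / LOAD_SCALE;
}

int main(void)
{
    /* loads are thread counts scaled by LOAD_SCALE */
    printf("%d\n", imbalance(1 * LOAD_SCALE, 5 * LOAD_SCALE, 3 * LOAD_SCALE)); /* 2 */
    printf("%d\n", imbalance(3 * LOAD_SCALE, 3 * LOAD_SCALE, 3 * LOAD_SCALE)); /* 0 */
    return 0;
}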
+ */ + if ((ke = krunq_steal(kseq->ksq_next, my_cpu)) != NULL) + return (ke); + if ((ke = krunq_steal(kseq->ksq_curr, my_cpu)) != NULL) + return (ke); + if (idle == IDLE_IDLE) + return (krunq_steal(&kseq->ksq_idle, my_cpu)); + return (NULL); +} + +static int +sched_idled(struct kseq *kseq, int idle) +{ + struct kseq_group *ksg; + struct kseq *steal; + + mtx_assert(&sched_lock, MA_OWNED); + ksg = kseq->ksq_group; + /* + * If we're in a cpu group, try and steal kses from another cpu in + * the group before idling. + */ + if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) { + LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) { + if (steal == kseq || steal->ksq_transferable == 0) + continue; + if (sched_pull_one(steal, kseq, idle)) + return (0); + } + } + + if (sched_balance_idle(PCPU_GET(cpuid), idle)) + return (0); + + /* + * We only set the idled bit when all of the cpus in the group are + * idle. Otherwise we could get into a situation where a KSE bounces + * back and forth between two idle cores on seperate physical CPUs. + */ + ksg->ksg_idlemask |= PCPU_GET(cpumask); + if (ksg->ksg_idlemask != ksg->ksg_cpumask) + return (1); + kseq_idle |= ksg->ksg_mask; + return (1); +} + +static int +sched_find_idle_cpu(int defcpu) +{ + struct pcpu *pcpu; + struct kseq_group *ksg; + struct kseq *ksq; + int cpu; + + mtx_assert(&sched_lock, MA_OWNED); + ksq = KSEQ_CPU(defcpu); + ksg = ksq->ksq_group; + pcpu = pcpu_find(defcpu); + if (ksg->ksg_idlemask & pcpu->pc_cpumask) + return (defcpu); + + /* Try to find a fully idled cpu. */ + if (kseq_idle) { + cpu = ffs(kseq_idle); + if (cpu) + goto migrate; + } + + /* + * If another cpu in this group has idled, assign a thread over + * to them after checking to see if there are idled groups. + */ + if (ksg->ksg_idlemask) { + cpu = ffs(ksg->ksg_idlemask); + if (cpu) + goto migrate; + } + return (defcpu); + +migrate: + /* + * Now that we've found an idle CPU, migrate the thread. 
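sched_find_idle_cpu() keeps a woken thread on its last CPU when that CPU is idle, otherwise prefers a fully idled group (kseq_idle) and then an idle sibling in its own group; the candidate is extracted from a cpumask with ffs(), whose 1-based result explains the cpu-- after the migrate label. A toy version using plain unsigned masks, with names local to the sketch:

#include <strings.h>   /* ffs() */
#include <stdio.h>

/*
 * Pick a CPU for a wakeup: keep the default if it is already idle,
 * otherwise prefer a fully idle group, then any idle sibling.
 */
static int pick_idle_cpu(int defcpu, unsigned all_idle_mask,
    unsigned group_idle_mask)
{
    int bit;

    if (group_idle_mask & (1u << defcpu))
        return defcpu;                  /* default CPU is idle, keep affinity */

    bit = ffs((int)all_idle_mask);      /* fully idled groups first */
    if (bit != 0)
        return bit - 1;                 /* ffs() is 1-based */

    bit = ffs((int)group_idle_mask);    /* any idle CPU sharing our cache */
    if (bit != 0)
        return bit - 1;

    return defcpu;                      /* nobody idle, stay put */
}

int main(void)
{
    printf("%d\n", pick_idle_cpu(0, 0x0, 0x4));   /* 2 */
    printf("%d\n", pick_idle_cpu(1, 0x8, 0x0));   /* 3 */
    printf("%d\n", pick_idle_cpu(1, 0x0, 0x2));   /* 1 */
    return 0;
}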
+ */ + cpu--; + return (cpu); +} + +static int +sched_find_idlest_cpu(struct kse *ke, int cpu) +{ + static unsigned stage_cpu; + + struct kseq_group *ksg; + struct kseq *ksq; + int load, min_load = INT_MAX; + int first = 1; + int idlest = -1; + int i, cnt; + + (void)ke; + + if (__predict_false(smp_started == 0)) + return (cpu); + + first = 1; + i = (stage_cpu++) % (ksg_maxid + 1); + for (cnt = 0; cnt <= ksg_maxid; cnt++) { + ksg = KSEQ_GROUP(i); + LIST_FOREACH(ksq, &ksg->ksg_members, ksq_siblings) { + load = kseq_source_load(ksq); + if (first || load < min_load) { + first = 0; + load = min_load; + idlest = KSEQ_ID(ksq); + } + } + if (++i > ksg_maxid) + i = 0; + } + return (idlest); +} + +static void +migrated_setup(void *dummy) +{ + struct kseq *kseq; + struct proc *p; + struct thread *td; + int i, error; + + for (i = 0; i < MAXCPU; i++) { + if (CPU_ABSENT(i)) + continue; + kseq = &kseq_cpu[i]; + error = kthread_create(migrated, kseq, &p, RFSTOPPED, 0, + "migrated%d", i); + if (error) + panic("can not create migration thread"); + PROC_LOCK(p); + p->p_flag |= P_NOLOAD; + mtx_lock_spin(&sched_lock); + td = FIRST_THREAD_IN_PROC(p); + td->td_kse->ke_flags |= KEF_BOUND; + td->td_kse->ke_cpu = i; + kseq->ksq_migrated = td; + sched_class(td->td_ksegrp, PRI_ITHD); + td->td_kse->ke_runq = kseq->ksq_curr; + sched_prio(td, PRI_MIN); + SLOT_USE(td->td_ksegrp); + kseq_runq_add(kseq, td->td_kse, 0); + td->td_kse->ke_state = KES_ONRUNQ; + mtx_unlock_spin(&sched_lock); + PROC_UNLOCK(p); + } +} + +static void +migrated(void *dummy) +{ + struct thread *td = curthread; + struct kseq *kseq = KSEQ_SELF(); + struct kse *ke; + + mtx_lock_spin(&sched_lock); + for (;;) { + while ((ke = TAILQ_FIRST(&kseq->ksq_migrateq)) != NULL) { + TAILQ_REMOVE(&kseq->ksq_migrateq, ke, ke_procq); + kseq_load_rem(kseq, ke); + ke->ke_flags &= ~KEF_MIGRATING; + ke->ke_cpu = ke->ke_tocpu; + setrunqueue(ke->ke_thread, SRQ_BORING); + } + TD_SET_IWAIT(td); + mi_switch(SW_VOL, NULL); + } + mtx_unlock_spin(&sched_lock); +} +#else + +static inline void +sched_balance_tick(int my_cpu, int idle) +{ +} + +#endif /* SMP */ + + +static void +kseq_setup(struct kseq *kseq) +{ + krunq_init(&kseq->ksq_timeshare[0]); + krunq_init(&kseq->ksq_timeshare[1]); + krunq_init(&kseq->ksq_idle); + kseq->ksq_curr = &kseq->ksq_timeshare[0]; + kseq->ksq_next = &kseq->ksq_timeshare[1]; + kseq->ksq_best_expired_nice = 21; +#ifdef SMP + TAILQ_INIT(&kseq->ksq_migrateq); +#endif +} + +static void +sched_setup(void *dummy) +{ +#ifdef SMP + int i; + int t; +#endif + + /* + * To avoid divide-by-zero, we set realstathz a dummy value + * in case which sched_clock() called before sched_initticks(). + */ + realstathz = hz; + min_timeslice = MAX(5 * hz / 1000, 1); + def_timeslice = MAX(100 * hz / 1000, 1); + granularity = MAX(10 * hz / 1000, 1); + +#ifdef SMP + t = ticks; + balance_tick = t; + /* + * Initialize the kseqs. + */ + for (i = 0; i < MAXCPU; i++) { + struct kseq *ksq; + + ksq = &kseq_cpu[i]; + kseq_setup(&kseq_cpu[i]); + } + if (smp_topology == NULL || ignore_topology) { + struct kseq_group *ksg; + struct kseq *ksq; + int cpus; + + for (cpus = 0, i = 0; i < MAXCPU; i++) { + if (CPU_ABSENT(i)) + continue; + ksq = &kseq_cpu[i]; + ksg = &kseq_groups[cpus]; + /* + * Setup a kseq group with one member. 
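When no SMP topology is available (or ignore_topology is set), sched_setup() below gives every present CPU its own one-member group whose cpumask is simply that CPU's bit, and ksg_maxid indexes the last group built. A tiny sketch of that flat construction using plain unsigned masks; the struct and names are invented for the sketch.

#include <stdio.h>

#define MAXCPU 8

struct cpu_grp {
    int      ncpus;
    unsigned cpumask;    /* members of the group */
    unsigned idlemask;   /* which members are currently idle */
};

/*
 * Build one single-CPU group per present CPU, as the flat (no-topology)
 * setup does; 'present' is a bitmask of existing CPUs.
 */
static int build_flat_groups(unsigned present, struct cpu_grp grp[MAXCPU])
{
    int ngroups = 0;

    for (int i = 0; i < MAXCPU; i++) {
        if ((present & (1u << i)) == 0)
            continue;                     /* CPU absent */
        grp[ngroups].ncpus = 1;
        grp[ngroups].cpumask = 1u << i;
        grp[ngroups].idlemask = 0;
        ngroups++;
    }
    return ngroups;                       /* ksg_maxid would be ngroups - 1 */
}

int main(void)
{
    struct cpu_grp grp[MAXCPU];
    int n = build_flat_groups(0x0b, grp); /* CPUs 0, 1 and 3 present */

    printf("%d groups, last mask 0x%x\n", n, grp[n - 1].cpumask);  /* 3, 0x8 */
    return 0;
}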
+ */ + ksq->ksq_group = ksg; + ksg->ksg_cpus = 1; + ksg->ksg_idlemask = 0; + ksg->ksg_cpumask = ksg->ksg_mask = 1 << i; + ksg->ksg_balance_tick = t; + LIST_INIT(&ksg->ksg_members); + LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings); + cpus++; + } + ksg_maxid = cpus - 1; + } else { + struct kseq_group *ksg; + struct cpu_group *cg; + int j; + + for (i = 0; i < smp_topology->ct_count; i++) { + cg = &smp_topology->ct_group[i]; + ksg = &kseq_groups[i]; + /* + * Initialize the group. + */ + ksg->ksg_idlemask = 0; + ksg->ksg_cpus = cg->cg_count; + ksg->ksg_cpumask = cg->cg_mask; + LIST_INIT(&ksg->ksg_members); + /* + * Find all of the group members and add them. + */ + for (j = 0; j < MAXCPU; j++) { + if ((cg->cg_mask & (1 << j)) != 0) { + if (ksg->ksg_mask == 0) + ksg->ksg_mask = 1 << j; + kseq_cpu[j].ksq_group = ksg; + LIST_INSERT_HEAD(&ksg->ksg_members, + &kseq_cpu[j], ksq_siblings); + } + } + ksg->ksg_balance_tick = t; + } + ksg_maxid = smp_topology->ct_count - 1; + } +#else + kseq_setup(KSEQ_SELF()); +#endif + mtx_lock_spin(&sched_lock); + kseq_load_add(KSEQ_SELF(), &kse0); + mtx_unlock_spin(&sched_lock); +} + +/* ARGSUSED */ +static void +sched_initticks(void *dummy) +{ + mtx_lock_spin(&sched_lock); + realstathz = stathz ? stathz : hz; + mtx_unlock_spin(&sched_lock); +} + +/* + * Very early in the boot some setup of scheduler-specific + * parts of proc0 and of soem scheduler resources needs to be done. + * Called from: + * proc0_init() + */ +void +schedinit(void) +{ + /* + * Set up the scheduler specific parts of proc0. + */ + proc0.p_sched = NULL; /* XXX */ + ksegrp0.kg_sched = &kg_sched0; + thread0.td_sched = &kse0; + kse0.ke_thread = &thread0; + kse0.ke_state = KES_THREAD; + kse0.ke_slice = 100; + kg_sched0.skg_concurrency = 1; + kg_sched0.skg_avail_opennings = 0; /* we are already running */ +} + +/* + * This is only somewhat accurate since given many processes of the same + * priority they will switch when their slices run out, which will be + * at most SCHED_SLICE_MAX. + */ +int +sched_rr_interval(void) +{ + return (def_timeslice); +} + +static void +sched_pctcpu_update(struct kse *ke) +{ + /* + * Adjust counters and watermark for pctcpu calc. + */ + if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) { + /* + * Shift the tick count out so that the divide doesn't + * round away our results. + */ + ke->ke_ticks <<= 10; + ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) * + SCHED_CPU_TICKS; + ke->ke_ticks >>= 10; + } else + ke->ke_ticks = 0; + ke->ke_ltick = ticks; + ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS; +} + +void +sched_thread_priority(struct thread *td, u_char prio) +{ + struct kse *ke; + + ke = td->td_kse; + mtx_assert(&sched_lock, MA_OWNED); + if (__predict_false(td->td_priority == prio)) + return; + + if (TD_ON_RUNQ(td)) { + /* + * If the priority has been elevated due to priority + * propagation, we may have to move ourselves to a new + * queue. We still call adjustrunqueue below in case kse + * needs to fix things up. + */ + if (prio < td->td_priority && ke->ke_runq != NULL && + ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) { + krunq_remove(ke->ke_runq, ke); + ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr; + krunq_add(ke->ke_runq, ke, 0); + } + /* + * Hold this kse on this cpu so that sched_prio() doesn't + * cause excessive migration. We only want migration to + * happen as the result of a wakeup. 
+ */ + ke->ke_flags |= KEF_HOLD; + adjustrunqueue(td, prio); + ke->ke_flags &= ~KEF_HOLD; + } else + td->td_priority = prio; +} + +/* + * Update a thread's priority when it is lent another thread's + * priority. + */ +void +sched_lend_prio(struct thread *td, u_char prio) +{ + + td->td_flags |= TDF_BORROWING; + sched_thread_priority(td, prio); +} + +/* + * Restore a thread's priority when priority propagation is + * over. The prio argument is the minimum priority the thread + * needs to have to satisfy other possible priority lending + * requests. If the thread's regular priority is less + * important than prio, the thread will keep a priority boost + * of prio. + */ +void +sched_unlend_prio(struct thread *td, u_char prio) +{ + u_char base_pri; + + if (td->td_base_pri >= PRI_MIN_TIMESHARE && + td->td_base_pri <= PRI_MAX_TIMESHARE) + base_pri = td->td_ksegrp->kg_user_pri; + else + base_pri = td->td_base_pri; + if (prio >= base_pri) { + td->td_flags &= ~TDF_BORROWING; + sched_thread_priority(td, base_pri); + } else + sched_lend_prio(td, prio); +} + +void +sched_prio(struct thread *td, u_char prio) +{ + u_char oldprio; + + /* First, update the base priority. */ + td->td_base_pri = prio; + + /* + * If the thread is borrowing another thread's priority, don't + * ever lower the priority. + */ + if (td->td_flags & TDF_BORROWING && td->td_priority < prio) + return; + + /* Change the real priority. */ + oldprio = td->td_priority; + sched_thread_priority(td, prio); + + /* + * If the thread is on a turnstile, then let the turnstile update + * its state. + */ + if (TD_ON_LOCK(td) && oldprio != prio) + turnstile_adjust(td, oldprio); +} + +void +sched_switch(struct thread *td, struct thread *newtd, int flags) +{ + struct kseq *ksq; + struct kse *ke; + struct ksegrp *kg; + uint64_t now; + + mtx_assert(&sched_lock, MA_OWNED); + + now = sched_timestamp(); + ke = td->td_kse; + kg = td->td_ksegrp; + ksq = KSEQ_SELF(); + + td->td_lastcpu = td->td_oncpu; + td->td_oncpu = NOCPU; + td->td_flags &= ~TDF_NEEDRESCHED; + td->td_owepreempt = 0; + + /* + * If the KSE has been assigned it may be in the process of switching + * to the new cpu. This is the case in sched_bind(). + */ + if (__predict_false(td == PCPU_GET(idlethread))) { + TD_SET_CAN_RUN(td); + } else if (__predict_false(ke->ke_flags & KEF_MIGRATING)) { + SLOT_RELEASE(td->td_ksegrp); + } else { + /* We are ending our run so make our slot available again */ + SLOT_RELEASE(td->td_ksegrp); + kseq_load_rem(ksq, ke); + if (TD_IS_RUNNING(td)) { + /* + * Don't allow the thread to migrate + * from a preemption. + */ + ke->ke_flags |= KEF_HOLD; + setrunqueue(td, (flags & SW_PREEMPT) ? + SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : + SRQ_OURSELF|SRQ_YIELDING); + ke->ke_flags &= ~KEF_HOLD; + } else if ((td->td_proc->p_flag & P_HADTHREADS) && + (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp)) + /* + * We will not be on the run queue. + * So we must be sleeping or similar. + * Don't use the slot if we will need it + * for newtd. + */ + slot_fill(td->td_ksegrp); + } + + if (newtd != NULL) { + /* + * If we bring in a thread account for it as if it had been + * added to the run queue and then chosen. + */ + newtd->td_kse->ke_flags |= KEF_DIDRUN; + TD_SET_RUNNING(newtd); + kseq_load_add(KSEQ_SELF(), newtd->td_kse); + /* + * XXX When we preempt, we've already consumed a slot because + * we got here through sched_add(). However, newtd can come + * from thread_switchout() which can't SLOT_USE() because + * the SLOT code is scheduler dependent. 
We must use the + * slot here otherwise. + */ + if ((flags & SW_PREEMPT) == 0) + SLOT_USE(newtd->td_ksegrp); + newtd->td_kse->ke_timestamp = now; + } else + newtd = choosethread(); + if (td != newtd) { + sched_update_runtime(ke, now); + ke->ke_lastran = now; + +#ifdef HWPMC_HOOKS + if (PMC_PROC_IS_USING_PMCS(td->td_proc)) + PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); +#endif + cpu_switch(td, newtd); +#ifdef HWPMC_HOOKS + if (PMC_PROC_IS_USING_PMCS(td->td_proc)) + PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); +#endif + } + + sched_lock.mtx_lock = (uintptr_t)td; + + td->td_oncpu = PCPU_GET(cpuid); +} + +void +sched_nice(struct proc *p, int nice) +{ + struct ksegrp *kg; + struct thread *td; + + PROC_LOCK_ASSERT(p, MA_OWNED); + mtx_assert(&sched_lock, MA_OWNED); + p->p_nice = nice; + FOREACH_KSEGRP_IN_PROC(p, kg) { + if (kg->kg_pri_class == PRI_TIMESHARE) { + kg->kg_user_pri = sched_calc_pri(kg); + FOREACH_THREAD_IN_GROUP(kg, td) + td->td_flags |= TDF_NEEDRESCHED; + } + } +} + +void +sched_sleep(struct thread *td) +{ + struct kse *ke; + + mtx_assert(&sched_lock, MA_OWNED); + ke = td->td_kse; + if (td->td_flags & TDF_SINTR) + ke->ke_activated = 0; + else + ke->ke_activated = -1; + ke->ke_flags |= KEF_SLEEP; +} + +void +sched_wakeup(struct thread *td) +{ + struct kse *ke; + struct ksegrp *kg; + struct kseq *kseq, *mykseq; + uint64_t now; + + mtx_assert(&sched_lock, MA_OWNED); + ke = td->td_kse; + kg = td->td_ksegrp; + kseq = KSEQ_CPU(ke->ke_cpu); + mykseq = KSEQ_SELF(); + if (ke->ke_flags & KEF_SLEEP) { + ke->ke_flags &= ~KEF_SLEEP; + if (sched_is_timeshare(kg)) { + now = sched_timestamp(); + sched_commit_runtime(ke); +#ifdef SMP + if (kseq != mykseq) + now = now - mykseq->ksq_last_timestamp + + kseq->ksq_last_timestamp; +#endif + kg->kg_user_pri = sched_recalc_pri(ke, now); + } + } + ke->ke_flags &= ~KEF_NEXTRQ; + setrunqueue(td, SRQ_BORING); +} + +/* + * Penalize the parent for creating a new child and initialize the child's + * priority. + */ +void +sched_fork(struct thread *td, struct thread *childtd) +{ + + mtx_assert(&sched_lock, MA_OWNED); + sched_fork_ksegrp(td, childtd->td_ksegrp); + sched_fork_thread(td, childtd); +} + +void +sched_fork_ksegrp(struct thread *td, struct ksegrp *child) +{ + struct ksegrp *kg = td->td_ksegrp; + + mtx_assert(&sched_lock, MA_OWNED); + child->kg_slptime = kg->kg_slptime * CHILD_WEIGHT / 100; + if (child->kg_pri_class == PRI_TIMESHARE) + child->kg_user_pri = sched_calc_pri(child); + kg->kg_slptime = kg->kg_slptime * PARENT_WEIGHT / 100; +} + +void +sched_fork_thread(struct thread *td, struct thread *child) +{ + struct kse *ke; + struct kse *ke2; + + sched_newthread(child); + + ke = td->td_kse; + ke2 = child->td_kse; +#ifdef SMP + ke2->ke_cpu = sched_find_idlest_cpu(ke, PCPU_GET(cpuid)); +#else + ke2->ke_cpu = ke->ke_cpu; +#endif + ke2->ke_slice = (ke->ke_slice + 1) >> 1; + ke2->ke_flags |= KEF_FIRST_SLICE; + ke2->ke_activated = 0; + ke2->ke_timestamp = sched_timestamp(); + ke->ke_slice >>= 1; + if (ke->ke_slice == 0) { + ke->ke_slice = 1; + sched_tick(); + } + + /* Grab our parents cpu estimation information. 
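sched_fork_thread() above splits the parent's remaining slice with the child (the child gets the rounded-up half), and sched_exit_thread() below returns an unused first slice to the parent, so spawning children cannot mint extra CPU time. A small sketch of that hand-off with assumed slice values; where the kernel charges the parent a tick through sched_tick(), the sketch merely clamps the slice to 1.

#include <stdio.h>

struct sched_ent {
    int slice;          /* remaining timeslice, in ticks */
    int first_slice;    /* still on the slice inherited at fork? */
};

/* Split the parent's remaining slice between parent and child. */
static void fork_slice(struct sched_ent *parent, struct sched_ent *child)
{
    child->slice = (parent->slice + 1) / 2;
    child->first_slice = 1;
    parent->slice = parent->slice / 2;
    if (parent->slice == 0)
        parent->slice = 1;   /* kernel instead forces a resched via sched_tick() */
}

/* On exit, a child still on its first slice returns it to the parent. */
static void exit_slice(struct sched_ent *parent, struct sched_ent *child,
    int max_slice)
{
    if (child->first_slice) {
        parent->slice += child->slice;
        if (parent->slice > max_slice)
            parent->slice = max_slice;
    }
}

int main(void)
{
    struct sched_ent parent = { 7, 0 }, child = { 0, 0 };

    fork_slice(&parent, &child);
    printf("parent %d, child %d\n", parent.slice, child.slice);   /* 3 and 4 */
    exit_slice(&parent, &child, 100);
    printf("parent after child exit: %d\n", parent.slice);        /* 7 */
    return 0;
}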
*/ + ke2->ke_ticks = ke->ke_ticks; + ke2->ke_ltick = ke->ke_ltick; + ke2->ke_ftick = ke->ke_ftick; +} + +void +sched_class(struct ksegrp *kg, int class) +{ + struct kseq *kseq; + struct kse *ke; + struct thread *td; + int nclass; + int oclass; + + mtx_assert(&sched_lock, MA_OWNED); + if (kg->kg_pri_class == class) + return; + + nclass = PRI_BASE(class); + oclass = PRI_BASE(kg->kg_pri_class); + FOREACH_THREAD_IN_GROUP(kg, td) { + ke = td->td_kse; + + /* New thread does not have runq assigned */ + if (ke->ke_runq == NULL) + continue; + + kseq = KSEQ_CPU(ke->ke_cpu); + if (oclass == PRI_TIMESHARE) + kseq->ksq_load_timeshare--; + else if (oclass == PRI_IDLE) + kseq->ksq_load_idle--; + + if (nclass == PRI_TIMESHARE) + kseq->ksq_load_timeshare++; + else if (nclass == PRI_IDLE) + kseq->ksq_load_idle++; + } + + kg->kg_pri_class = class; +} + +/* + * Return some of the child's priority and interactivity to the parent. + */ +void +sched_exit(struct proc *p, struct thread *childtd) +{ + mtx_assert(&sched_lock, MA_OWNED); + sched_exit_thread(FIRST_THREAD_IN_PROC(p), childtd); + sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd); +} + +void +sched_exit_ksegrp(struct ksegrp *parentkg, struct thread *td) +{ + if (td->td_ksegrp->kg_slptime < parentkg->kg_slptime) { + parentkg->kg_slptime = parentkg->kg_slptime / + (EXIT_WEIGHT) * (EXIT_WEIGHT - 1) + + td->td_ksegrp->kg_slptime / EXIT_WEIGHT; + } +} + +void +sched_exit_thread(struct thread *td, struct thread *childtd) +{ + struct kse *childke = childtd->td_kse; + struct kse *parentke = td->td_kse; + + kseq_load_rem(KSEQ_CPU(childke->ke_cpu), childke); + sched_update_runtime(childke, sched_timestamp()); + sched_commit_runtime(childke); + if ((childke->ke_flags & KEF_FIRST_SLICE) && + td->td_proc == childtd->td_proc->p_pptr) { + parentke->ke_slice += childke->ke_slice; + if (parentke->ke_slice > sched_timeslice(parentke)) + parentke->ke_slice = sched_timeslice(parentke); + } +} + +static int +sched_starving(struct kseq *ksq, uint64_t now, struct kse *ke) +{ + uint64_t delta; + + if (PROC_NICE(ke->ke_proc) > ksq->ksq_best_expired_nice) + return (1); + if (ksq->ksq_expired_timestamp == 0) + return (0); + delta = now - ksq->ksq_expired_timestamp; + if (delta > STARVATION_TIME * (ksq->ksq_load - ksq->ksq_load_idle)) + return (1); + return (0); +} + +/* + * An interactive thread has smaller time slice granularity, + * a cpu hog can have larger granularity. + */ +static inline int +sched_timeslice_split(struct kse *ke) +{ + int score, g; + + score = (int)(MAX_SCORE - CURRENT_SCORE(ke->ke_ksegrp)); + if (score == 0) + score = 1; +#ifdef SMP + g = granularity * ((1 << score) - 1) * smp_cpus; +#else + g = granularity * ((1 << score) - 1); +#endif + return (ke->ke_slice >= g && ke->ke_slice % g == 0); +} + +void +sched_tick(void) +{ + struct thread *td; + struct proc *p; + struct kse *ke; + struct ksegrp *kg; + struct kseq *kseq; + uint64_t now; + int cpuid; + int class; + + mtx_assert(&sched_lock, MA_OWNED); + + td = curthread; + ke = td->td_kse; + kg = ke->ke_ksegrp; + p = ke->ke_proc; + class = PRI_BASE(kg->kg_pri_class); + now = sched_timestamp(); + cpuid = PCPU_GET(cpuid); + kseq = KSEQ_CPU(cpuid); + kseq->ksq_last_timestamp = now; + + if (class == PRI_IDLE) { + int idle_td = (curthread == PCPU_GET(idlethread)); + /* + * Processes of equal idle priority are run round-robin. + */ + if (!idle_td && --ke->ke_slice <= 0) { + ke->ke_slice = def_timeslice; + td->td_flags |= TDF_NEEDRESCHED; + } + sched_balance_tick(cpuid, idle_td ? 
IDLE_IDLE : IDLE); + return; + } + + if (ke->ke_flags & KEF_NEXTRQ) { + /* The thread was already scheduled off. */ + curthread->td_flags |= TDF_NEEDRESCHED; + goto out; + } + + if (class == PRI_REALTIME) { + /* + * Realtime scheduling, do round robin for RR class, FIFO + * is not affected. + */ + if (PRI_NEED_RR(kg->kg_pri_class) && --ke->ke_slice <= 0) { + ke->ke_slice = def_timeslice; + curthread->td_flags |= TDF_NEEDRESCHED; + } + goto out; + } + + /* + * Current, we skip kernel thread, though it may be classified as + * TIMESHARE. + */ + if (class != PRI_TIMESHARE || (p->p_flag & P_KTHREAD) != 0) + goto out; + + if (--ke->ke_slice <= 0) { + curthread->td_flags |= TDF_NEEDRESCHED; + sched_update_runtime(ke, now); + sched_commit_runtime(ke); + kg->kg_user_pri = sched_calc_pri(kg); + ke->ke_slice = sched_timeslice(ke); + ke->ke_flags &= ~KEF_FIRST_SLICE; + if (!kseq->ksq_expired_timestamp) + kseq->ksq_expired_timestamp = now; + if (!THREAD_IS_INTERACTIVE(ke) || + sched_starving(kseq, now, ke)) { + /* The thead becomes cpu hog, schedule it off. */ + ke->ke_flags |= KEF_NEXTRQ; + if (PROC_NICE(p) < kseq->ksq_best_expired_nice) + kseq->ksq_best_expired_nice = PROC_NICE(p); + } + } else { + /* + * Don't allow an interactive thread which has long timeslice + * to monopolize CPU, split the long timeslice into small + * chunks. This essentially does round-robin between + * interactive threads. + */ + if (THREAD_IS_INTERACTIVE(ke) && sched_timeslice_split(ke)) + curthread->td_flags |= TDF_NEEDRESCHED; + } + +out: + sched_balance_tick(cpuid, NOT_IDLE); +} + +void +sched_clock(struct thread *td) +{ + struct ksegrp *kg; + struct kse *ke; + + mtx_assert(&sched_lock, MA_OWNED); + ke = td->td_kse; + kg = ke->ke_ksegrp; + + /* Adjust ticks for pctcpu */ + ke->ke_ticks++; + ke->ke_ltick = ticks; + + /* Go up to one second beyond our max and then trim back down */ + if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick) + sched_pctcpu_update(ke); +} + +int +sched_runnable(void) +{ + struct kseq *kseq; + + kseq = KSEQ_SELF(); + if (krunq_findbit(kseq->ksq_curr) != -1 || + krunq_findbit(kseq->ksq_next) != -1 || + krunq_findbit(&kseq->ksq_idle) != -1) + return (1); + return (0); +} + +void +sched_userret(struct thread *td) +{ + struct ksegrp *kg; + + KASSERT((td->td_flags & TDF_BORROWING) == 0, + ("thread with borrowed priority returning to userland")); + kg = td->td_ksegrp; + if (td->td_priority != kg->kg_user_pri) { + mtx_lock_spin(&sched_lock); + td->td_priority = kg->kg_user_pri; + td->td_base_pri = kg->kg_user_pri; + mtx_unlock_spin(&sched_lock); + } +} + +struct kse * +sched_choose(void) +{ + struct kseq *kseq; + struct kse *ke; + + mtx_assert(&sched_lock, MA_OWNED); + kseq = KSEQ_SELF(); +#ifdef SMP +restart: +#endif + ke = kseq_choose(kseq); + if (ke) { +#ifdef SMP + if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) + if (sched_idled(kseq, IDLE) == 0) + goto restart; +#endif + kseq_runq_rem(kseq, ke); + ke->ke_state = KES_THREAD; + ke->ke_flags &= ~KEF_PREEMPTED; + ke->ke_timestamp = sched_timestamp(); + return (ke); + } +#ifdef SMP + if (sched_idled(kseq, IDLE_IDLE) == 0) + goto restart; +#endif + return (NULL); +} + +void +sched_add(struct thread *td, int flags) +{ + struct kseq *ksq, *my_ksq; + struct ksegrp *kg; + struct kse *ke; + int preemptive; + int canmigrate; + int class; + int my_cpu; + int nextrq; +#ifdef SMP + struct thread *td2; + struct pcpu *pcpu; + int cpu, new_cpu; + int load, my_load; +#endif + + mtx_assert(&sched_lock, MA_OWNED); + ke = td->td_kse; + kg = td->td_ksegrp; + 
KASSERT(ke->ke_state != KES_ONRUNQ, + ("sched_add: kse %p (%s) already in run queue", ke, + ke->ke_proc->p_comm)); + KASSERT(ke->ke_proc->p_sflag & PS_INMEM, + ("sched_add: process swapped out")); + KASSERT(ke->ke_runq == NULL, + ("sched_add: KSE %p is still assigned to a run queue", ke)); + + canmigrate = 1; + preemptive = !(flags & SRQ_YIELDING); + class = PRI_BASE(kg->kg_pri_class); + my_cpu = PCPU_GET(cpuid); + my_ksq = KSEQ_CPU(my_cpu); + if (flags & SRQ_PREEMPTED) + ke->ke_flags |= KEF_PREEMPTED; + if ((ke->ke_flags & KEF_INTERNAL) == 0) + SLOT_USE(td->td_ksegrp); + nextrq = (ke->ke_flags & KEF_NEXTRQ); + ke->ke_flags &= ~(KEF_NEXTRQ | KEF_INTERNAL); + +#ifdef SMP + cpu = ke->ke_cpu; + canmigrate = kse_can_migrate(ke); + /* + * Don't migrate running threads here. Force the long term balancer + * to do it. + */ + if (ke->ke_flags & KEF_HOLD) { + ke->ke_flags &= ~KEF_HOLD; + canmigrate = 0; + } + + /* + * If this thread is pinned or bound, notify the target cpu. + */ + if (!canmigrate) + goto activate_it; + + if (class == PRI_ITHD) { + ke->ke_cpu = my_cpu; + goto activate_it; + } + + if (ke->ke_cpu == my_cpu) + goto activate_it; + + if (my_ksq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) { + ke->ke_cpu = my_cpu; + goto activate_it; + } + + new_cpu = my_cpu; + + load = kseq_source_load(KSEQ_CPU(cpu)); + my_load = kseq_dest_load(my_ksq); + if ((my_load - load) * 100 < my_load * imbalance_pct2) + goto try_idle_cpu; + new_cpu = cpu; + +try_idle_cpu: + new_cpu = sched_find_idle_cpu(new_cpu); + ke->ke_cpu = new_cpu; + +activate_it: + if (ke->ke_cpu != cpu) + ke->ke_lastran = 0; +#endif + ksq = KSEQ_CPU(ke->ke_cpu); + switch (class) { + case PRI_ITHD: + case PRI_REALTIME: + ke->ke_runq = ksq->ksq_curr; + break; + case PRI_TIMESHARE: + if ((td->td_flags & TDF_BORROWING) == 0 && nextrq) + ke->ke_runq = ksq->ksq_next; + else + ke->ke_runq = ksq->ksq_curr; + break; + case PRI_IDLE: + /* + * This is for priority prop. 
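The switch in sched_add() sends a thread to one of three per-CPU queues by scheduling class: interrupt and realtime threads always land on the active array, a timesharing thread whose slice expired (KEF_NEXTRQ) goes to the expired array unless it is running on borrowed priority, and idle-class threads go to the idle queue unless priority propagation has boosted them. A compact decision-table sketch of that policy; the enum and function names are invented for the sketch.

#include <stdio.h>

enum pri_class { ITHD, REALTIME, TIMESHARE, IDLECLASS };
enum which_q   { Q_CURR, Q_NEXT, Q_IDLE };

static enum which_q pick_queue(enum pri_class cls, int slice_expired,
    int borrowing, int prio_boosted)
{
    switch (cls) {
    case ITHD:
    case REALTIME:
        return Q_CURR;                   /* always the active array */
    case TIMESHARE:
        if (slice_expired && !borrowing)
            return Q_NEXT;               /* wait for the array swap */
        return Q_CURR;
    case IDLECLASS:
        if (prio_boosted)
            return Q_CURR;               /* priority propagation */
        return Q_IDLE;
    }
    return Q_CURR;
}

int main(void)
{
    printf("%d\n", pick_queue(TIMESHARE, 1, 0, 0));   /* 1 = Q_NEXT */
    printf("%d\n", pick_queue(TIMESHARE, 1, 1, 0));   /* 0 = Q_CURR */
    printf("%d\n", pick_queue(IDLECLASS, 0, 0, 0));   /* 2 = Q_IDLE */
    return 0;
}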
+ */ + if (td->td_priority < PRI_MIN_IDLE) + ke->ke_runq = ksq->ksq_curr; + else + ke->ke_runq = &ksq->ksq_idle; + break; + default: + panic("Unknown pri class."); + break; + } + + if (ke->ke_runq == my_ksq->ksq_curr && + td->td_priority < curthread->td_priority) { + curthread->td_flags |= TDF_NEEDRESCHED; + if (preemptive && maybe_preempt(td)) + return; + if (curthread->td_ksegrp->kg_pri_class == PRI_IDLE) + td->td_owepreempt = 1; + } + ke->ke_state = KES_ONRUNQ; + kseq_runq_add(ksq, ke, flags); + kseq_load_add(ksq, ke); +#ifdef SMP + pcpu = pcpu_find(ke->ke_cpu); + if (class != PRI_IDLE) { + if (kseq_idle & ksq->ksq_group->ksg_mask) + kseq_idle &= ~ksq->ksq_group->ksg_mask; + if (ksq->ksq_group->ksg_idlemask & pcpu->pc_cpumask) + ksq->ksq_group->ksg_idlemask &= ~pcpu->pc_cpumask; + } + if (ke->ke_cpu != my_cpu) { + td2 = pcpu->pc_curthread; + if (__predict_false(td2 == pcpu->pc_idlethread)) { + td2->td_flags |= TDF_NEEDRESCHED; + ipi_selected(pcpu->pc_cpumask, IPI_AST); + } else if (td->td_priority < td2->td_priority) { + if (class == PRI_ITHD || class == PRI_REALTIME || + td2->td_ksegrp->kg_pri_class == PRI_IDLE) + ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT); + else if ((td2->td_flags & TDF_NEEDRESCHED) == 0) { + td2->td_flags |= TDF_NEEDRESCHED; + ipi_selected(pcpu->pc_cpumask, IPI_AST); + } + } + } +#endif +} + +void +sched_rem(struct thread *td) +{ + struct kseq *kseq; + struct kse *ke; + + mtx_assert(&sched_lock, MA_OWNED); + ke = td->td_kse; + SLOT_RELEASE(td->td_ksegrp); + ke->ke_flags &= ~KEF_PREEMPTED; + KASSERT((ke->ke_state == KES_ONRUNQ), + ("sched_rem: KSE not on run queue")); + + kseq = KSEQ_CPU(ke->ke_cpu); +#ifdef SMP + if (ke->ke_flags & KEF_MIGRATING) { + ke->ke_flags &= ~KEF_MIGRATING; + kseq_load_rem(kseq, ke); + TAILQ_REMOVE(&kseq->ksq_migrateq, ke, ke_procq); + ke->ke_cpu = ke->ke_tocpu; + } else +#endif + { + KASSERT((ke->ke_state == KES_ONRUNQ), + ("sched_rem: KSE not on run queue")); + kseq_runq_rem(kseq, ke); + kseq_load_rem(kseq, ke); + } + ke->ke_state = KES_THREAD; +} + +fixpt_t +sched_pctcpu(struct thread *td) +{ + fixpt_t pctcpu; + struct kse *ke; + + pctcpu = 0; + ke = td->td_kse; + if (ke == NULL) + return (0); + + mtx_lock_spin(&sched_lock); + if (ke->ke_ticks) { + int rtick; + + /* + * Don't update more frequently than twice a second. Allowing + * this causes the cpu usage to decay away too quickly due to + * rounding errors. + */ + if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick || + ke->ke_ltick < (ticks - (hz / 2))) + sched_pctcpu_update(ke); + /* How many rtick per second ? */ + rtick = MIN(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS); + pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT; + } + + ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick; + mtx_unlock_spin(&sched_lock); + + return (pctcpu); +} + +void +sched_bind(struct thread *td, int cpu) +{ + struct kseq *kseq; + struct kse *ke; + + mtx_assert(&sched_lock, MA_OWNED); + ke = td->td_kse; + ke->ke_flags |= KEF_BOUND; +#ifdef SMP + if (PCPU_GET(cpuid) == cpu) + return; + kseq = KSEQ_SELF(); + ke->ke_flags |= KEF_MIGRATING; + ke->ke_tocpu = cpu; + TAILQ_INSERT_TAIL(&kseq->ksq_migrateq, ke, ke_procq); + if (kseq->ksq_migrated) { + if (TD_AWAITING_INTR(kseq->ksq_migrated)) { + TD_CLR_IWAIT(kseq->ksq_migrated); + setrunqueue(kseq->ksq_migrated, SRQ_YIELDING); + } + } + /* When we return from mi_switch we'll be on the correct cpu. 
*/ + mi_switch(SW_VOL, NULL); +#else + (void)kseq; +#endif +} + +void +sched_unbind(struct thread *td) +{ + mtx_assert(&sched_lock, MA_OWNED); + td->td_kse->ke_flags &= ~KEF_BOUND; +} + +int +sched_is_bound(struct thread *td) +{ + mtx_assert(&sched_lock, MA_OWNED); + return (td->td_kse->ke_flags & KEF_BOUND); +} + +int +sched_load(void) +{ +#ifdef SMP + int total; + int i; + + total = 0; + for (i = 0; i < MAXCPU; i++) + total += KSEQ_CPU(i)->ksq_sysload; + return (total); +#else + return (KSEQ_SELF()->ksq_sysload); +#endif +} + +int +sched_sizeof_ksegrp(void) +{ + return (sizeof(struct ksegrp) + sizeof(struct kg_sched)); +} + +int +sched_sizeof_proc(void) +{ + return (sizeof(struct proc)); +} + +int +sched_sizeof_thread(void) +{ + return (sizeof(struct thread) + sizeof(struct td_sched)); +} +#define KERN_SWITCH_INCLUDE 1 +#include "kern/kern_switch.c" Property changes on: head/sys/kern/sched_core.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/kern/sched_ule.c =================================================================== --- head/sys/kern/sched_ule.c (revision 159569) +++ head/sys/kern/sched_ule.c (revision 159570) @@ -1,2011 +1,2016 @@ /*- * Copyright (c) 2002-2005, Jeffrey Roberson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_hwpmc_hooks.h" #include "opt_sched.h" #define kse td_sched #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #include #endif #ifdef HWPMC_HOOKS #include #endif #include #include /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */ /* XXX This is bogus compatability crap for ps */ static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); static void sched_setup(void *dummy); SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL) static void sched_initticks(void *dummy); SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL) static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler"); SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0, "Scheduler name"); static int slice_min = 1; SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, ""); static int slice_max = 10; SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, ""); int realstathz; int tickincr = 1 << 10; /* * The following datastructures are allocated within their parent structure * but are scheduler specific. */ /* * The schedulable entity that can be given a context to run. A process may * have several of these. */ struct kse { TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */ int ke_flags; /* (j) KEF_* flags. */ struct thread *ke_thread; /* (*) Active associated thread. */ fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */ u_char ke_rqindex; /* (j) Run queue index. */ enum { KES_THREAD = 0x0, /* slaved to thread state */ KES_ONRUNQ } ke_state; /* (j) thread sched specific status. */ int ke_slptime; int ke_slice; struct runq *ke_runq; u_char ke_cpu; /* CPU that we have affinity for. */ /* The following variables are only used for pctcpu calculation */ int ke_ltick; /* Last tick that we were running on */ int ke_ftick; /* First tick that we were running on */ int ke_ticks; /* Tick count */ }; #define td_kse td_sched #define td_slptime td_kse->ke_slptime #define ke_proc ke_thread->td_proc #define ke_ksegrp ke_thread->td_ksegrp #define ke_assign ke_procq.tqe_next /* flags kept in ke_flags */ #define KEF_ASSIGNED 0x0001 /* Thread is being migrated. */ #define KEF_BOUND 0x0002 /* Thread can not migrate. */ #define KEF_XFERABLE 0x0004 /* Thread was added as transferable. */ #define KEF_HOLD 0x0008 /* Thread is temporarily bound. */ #define KEF_REMOVED 0x0010 /* Thread was removed while ASSIGNED */ #define KEF_INTERNAL 0x0020 /* Thread added due to migration. */ #define KEF_PREEMPTED 0x0040 /* Thread was preempted */ #define KEF_DIDRUN 0x02000 /* Thread actually ran. */ #define KEF_EXIT 0x04000 /* Thread is being killed. */ struct kg_sched { struct thread *skg_last_assigned; /* (j) Last thread assigned to */ /* the system scheduler */ int skg_slptime; /* Number of ticks we vol. 
slept */ int skg_runtime; /* Number of ticks we were running */ int skg_avail_opennings; /* (j) Num unfilled slots in group.*/ int skg_concurrency; /* (j) Num threads requested in group.*/ }; #define kg_last_assigned kg_sched->skg_last_assigned #define kg_avail_opennings kg_sched->skg_avail_opennings #define kg_concurrency kg_sched->skg_concurrency #define kg_runtime kg_sched->skg_runtime #define kg_slptime kg_sched->skg_slptime #define SLOT_RELEASE(kg) (kg)->kg_avail_opennings++ #define SLOT_USE(kg) (kg)->kg_avail_opennings-- static struct kse kse0; static struct kg_sched kg_sched0; /* * The priority is primarily determined by the interactivity score. Thus, we * give lower(better) priorities to kse groups that use less CPU. The nice * value is then directly added to this to allow nice to have some effect * on latency. * * PRI_RANGE: Total priority range for timeshare threads. * PRI_NRESV: Number of nice values. * PRI_BASE: The start of the dynamic range. */ #define SCHED_PRI_RANGE (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1) #define SCHED_PRI_NRESV ((PRIO_MAX - PRIO_MIN) + 1) #define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2) #define SCHED_PRI_BASE (PRI_MIN_TIMESHARE) #define SCHED_PRI_INTERACT(score) \ ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX) /* * These determine the interactivity of a process. * * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate * before throttling back. * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time. * INTERACT_MAX: Maximum interactivity value. Smaller is better. * INTERACT_THRESH: Threshhold for placement on the current runq. */ #define SCHED_SLP_RUN_MAX ((hz * 5) << 10) #define SCHED_SLP_RUN_FORK ((hz / 2) << 10) #define SCHED_INTERACT_MAX (100) #define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2) #define SCHED_INTERACT_THRESH (30) /* * These parameters and macros determine the size of the time slice that is * granted to each thread. * * SLICE_MIN: Minimum time slice granted, in units of ticks. * SLICE_MAX: Maximum time slice granted. * SLICE_RANGE: Range of available time slices scaled by hz. * SLICE_SCALE: The number slices granted per val in the range of [0, max]. * SLICE_NICE: Determine the amount of slice granted to a scaled nice. * SLICE_NTHRESH: The nice cutoff point for slice assignment. */ #define SCHED_SLICE_MIN (slice_min) #define SCHED_SLICE_MAX (slice_max) #define SCHED_SLICE_INTERACTIVE (slice_max) #define SCHED_SLICE_NTHRESH (SCHED_PRI_NHALF - 1) #define SCHED_SLICE_RANGE (SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1) #define SCHED_SLICE_SCALE(val, max) (((val) * SCHED_SLICE_RANGE) / (max)) #define SCHED_SLICE_NICE(nice) \ (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH)) /* * This macro determines whether or not the thread belongs on the current or * next run queue. */ #define SCHED_INTERACTIVE(kg) \ (sched_interact_score(kg) < SCHED_INTERACT_THRESH) #define SCHED_CURR(kg, ke) \ ((ke->ke_thread->td_flags & TDF_BORROWING) || \ (ke->ke_flags & KEF_PREEMPTED) || SCHED_INTERACTIVE(kg)) /* * Cpu percentage computation macros and defines. * * SCHED_CPU_TIME: Number of seconds to average the cpu usage across. * SCHED_CPU_TICKS: Number of hz ticks to average the cpu usage across. */ #define SCHED_CPU_TIME 10 #define SCHED_CPU_TICKS (hz * SCHED_CPU_TIME) /* * kseq - per processor runqs and statistics. */ struct kseq { struct runq ksq_idle; /* Queue of IDLE threads. */ struct runq ksq_timeshare[2]; /* Run queues for !IDLE. */ struct runq *ksq_next; /* Next timeshare queue. 
*/ struct runq *ksq_curr; /* Current queue. */ int ksq_load_timeshare; /* Load for timeshare. */ int ksq_load; /* Aggregate load. */ short ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */ short ksq_nicemin; /* Least nice. */ #ifdef SMP int ksq_transferable; LIST_ENTRY(kseq) ksq_siblings; /* Next in kseq group. */ struct kseq_group *ksq_group; /* Our processor group. */ volatile struct kse *ksq_assigned; /* assigned by another CPU. */ #else int ksq_sysload; /* For loadavg, !ITHD load. */ #endif }; #ifdef SMP /* * kseq groups are groups of processors which can cheaply share threads. When * one processor in the group goes idle it will check the runqs of the other * processors in its group prior to halting and waiting for an interrupt. * These groups are suitable for SMT (Symetric Multi-Threading) and not NUMA. * In a numa environment we'd want an idle bitmap per group and a two tiered * load balancer. */ struct kseq_group { int ksg_cpus; /* Count of CPUs in this kseq group. */ cpumask_t ksg_cpumask; /* Mask of cpus in this group. */ cpumask_t ksg_idlemask; /* Idle cpus in this group. */ cpumask_t ksg_mask; /* Bit mask for first cpu. */ int ksg_load; /* Total load of this group. */ int ksg_transferable; /* Transferable load of this group. */ LIST_HEAD(, kseq) ksg_members; /* Linked list of all members. */ }; #endif /* * One kse queue per processor. */ #ifdef SMP static cpumask_t kseq_idle; static int ksg_maxid; static struct kseq kseq_cpu[MAXCPU]; static struct kseq_group kseq_groups[MAXCPU]; static int bal_tick; static int gbal_tick; static int balance_groups; #define KSEQ_SELF() (&kseq_cpu[PCPU_GET(cpuid)]) #define KSEQ_CPU(x) (&kseq_cpu[(x)]) #define KSEQ_ID(x) ((x) - kseq_cpu) #define KSEQ_GROUP(x) (&kseq_groups[(x)]) #else /* !SMP */ static struct kseq kseq_cpu; #define KSEQ_SELF() (&kseq_cpu) #define KSEQ_CPU(x) (&kseq_cpu) #endif static void slot_fill(struct ksegrp *); static struct kse *sched_choose(void); /* XXX Should be thread * */ static void sched_slice(struct kse *); static void sched_priority(struct ksegrp *); static void sched_thread_priority(struct thread *, u_char); static int sched_interact_score(struct ksegrp *); static void sched_interact_update(struct ksegrp *); static void sched_interact_fork(struct ksegrp *); static void sched_pctcpu_update(struct kse *); /* Operations on per processor queues */ static struct kse * kseq_choose(struct kseq *); static void kseq_setup(struct kseq *); static void kseq_load_add(struct kseq *, struct kse *); static void kseq_load_rem(struct kseq *, struct kse *); static __inline void kseq_runq_add(struct kseq *, struct kse *, int); static __inline void kseq_runq_rem(struct kseq *, struct kse *); static void kseq_nice_add(struct kseq *, int); static void kseq_nice_rem(struct kseq *, int); void kseq_print(int cpu); #ifdef SMP static int kseq_transfer(struct kseq *, struct kse *, int); static struct kse *runq_steal(struct runq *); static void sched_balance(void); static void sched_balance_groups(void); static void sched_balance_group(struct kseq_group *); static void sched_balance_pair(struct kseq *, struct kseq *); static void kseq_move(struct kseq *, int); static int kseq_idled(struct kseq *); static void kseq_notify(struct kse *, int); static void kseq_assign(struct kseq *); static struct kse *kseq_steal(struct kseq *, int); #define KSE_CAN_MIGRATE(ke) \ ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0) #endif void kseq_print(int cpu) { struct kseq *kseq; int i; kseq = KSEQ_CPU(cpu); printf("kseq:\n"); 
printf("\tload: %d\n", kseq->ksq_load); printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare); #ifdef SMP printf("\tload transferable: %d\n", kseq->ksq_transferable); #endif printf("\tnicemin:\t%d\n", kseq->ksq_nicemin); printf("\tnice counts:\n"); for (i = 0; i < SCHED_PRI_NRESV; i++) if (kseq->ksq_nice[i]) printf("\t\t%d = %d\n", i - SCHED_PRI_NHALF, kseq->ksq_nice[i]); } static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke, int flags) { #ifdef SMP if (KSE_CAN_MIGRATE(ke)) { kseq->ksq_transferable++; kseq->ksq_group->ksg_transferable++; ke->ke_flags |= KEF_XFERABLE; } #endif if (ke->ke_flags & KEF_PREEMPTED) flags |= SRQ_PREEMPTED; runq_add(ke->ke_runq, ke, flags); } static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke) { #ifdef SMP if (ke->ke_flags & KEF_XFERABLE) { kseq->ksq_transferable--; kseq->ksq_group->ksg_transferable--; ke->ke_flags &= ~KEF_XFERABLE; } #endif runq_remove(ke->ke_runq, ke); } static void kseq_load_add(struct kseq *kseq, struct kse *ke) { int class; mtx_assert(&sched_lock, MA_OWNED); class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); if (class == PRI_TIMESHARE) kseq->ksq_load_timeshare++; kseq->ksq_load++; CTR1(KTR_SCHED, "load: %d", kseq->ksq_load); if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0) #ifdef SMP kseq->ksq_group->ksg_load++; #else kseq->ksq_sysload++; #endif if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) kseq_nice_add(kseq, ke->ke_proc->p_nice); } static void kseq_load_rem(struct kseq *kseq, struct kse *ke) { int class; mtx_assert(&sched_lock, MA_OWNED); class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); if (class == PRI_TIMESHARE) kseq->ksq_load_timeshare--; if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0) #ifdef SMP kseq->ksq_group->ksg_load--; #else kseq->ksq_sysload--; #endif kseq->ksq_load--; CTR1(KTR_SCHED, "load: %d", kseq->ksq_load); ke->ke_runq = NULL; if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) kseq_nice_rem(kseq, ke->ke_proc->p_nice); } static void kseq_nice_add(struct kseq *kseq, int nice) { mtx_assert(&sched_lock, MA_OWNED); /* Normalize to zero. */ kseq->ksq_nice[nice + SCHED_PRI_NHALF]++; if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1) kseq->ksq_nicemin = nice; } static void kseq_nice_rem(struct kseq *kseq, int nice) { int n; mtx_assert(&sched_lock, MA_OWNED); /* Normalize to zero. */ n = nice + SCHED_PRI_NHALF; kseq->ksq_nice[n]--; KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count.")); /* * If this wasn't the smallest nice value or there are more in * this bucket we can just return. Otherwise we have to recalculate * the smallest nice. */ if (nice != kseq->ksq_nicemin || kseq->ksq_nice[n] != 0 || kseq->ksq_load_timeshare == 0) return; for (; n < SCHED_PRI_NRESV; n++) if (kseq->ksq_nice[n]) { kseq->ksq_nicemin = n - SCHED_PRI_NHALF; return; } } #ifdef SMP /* * sched_balance is a simple CPU load balancing algorithm. It operates by * finding the least loaded and most loaded cpu and equalizing their load * by migrating some processes. * * Dealing only with two CPUs at a time has two advantages. Firstly, most * installations will only have 2 cpus. Secondly, load balancing too much at * once can have an unpleasant effect on the system. The scheduler rarely has * enough information to make perfect decisions. So this algorithm chooses * algorithm simplicity and more gradual effects on load in larger systems. * * It could be improved by considering the priorities and slices assigned to * each task prior to balancing them. 
There are many pathological cases with * any approach and so the semi random algorithm below may work as well as any. * */ static void sched_balance(void) { struct kseq_group *high; struct kseq_group *low; struct kseq_group *ksg; int cnt; int i; bal_tick = ticks + (random() % (hz * 2)); if (smp_started == 0) return; low = high = NULL; i = random() % (ksg_maxid + 1); for (cnt = 0; cnt <= ksg_maxid; cnt++) { ksg = KSEQ_GROUP(i); /* * Find the CPU with the highest load that has some * threads to transfer. */ if ((high == NULL || ksg->ksg_load > high->ksg_load) && ksg->ksg_transferable) high = ksg; if (low == NULL || ksg->ksg_load < low->ksg_load) low = ksg; if (++i > ksg_maxid) i = 0; } if (low != NULL && high != NULL && high != low) sched_balance_pair(LIST_FIRST(&high->ksg_members), LIST_FIRST(&low->ksg_members)); } static void sched_balance_groups(void) { int i; gbal_tick = ticks + (random() % (hz * 2)); mtx_assert(&sched_lock, MA_OWNED); if (smp_started) for (i = 0; i <= ksg_maxid; i++) sched_balance_group(KSEQ_GROUP(i)); } static void sched_balance_group(struct kseq_group *ksg) { struct kseq *kseq; struct kseq *high; struct kseq *low; int load; if (ksg->ksg_transferable == 0) return; low = NULL; high = NULL; LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) { load = kseq->ksq_load; if (high == NULL || load > high->ksq_load) high = kseq; if (low == NULL || load < low->ksq_load) low = kseq; } if (high != NULL && low != NULL && high != low) sched_balance_pair(high, low); } static void sched_balance_pair(struct kseq *high, struct kseq *low) { int transferable; int high_load; int low_load; int move; int diff; int i; /* * If we're transfering within a group we have to use this specific * kseq's transferable count, otherwise we can steal from other members * of the group. */ if (high->ksq_group == low->ksq_group) { transferable = high->ksq_transferable; high_load = high->ksq_load; low_load = low->ksq_load; } else { transferable = high->ksq_group->ksg_transferable; high_load = high->ksq_group->ksg_load; low_load = low->ksq_group->ksg_load; } if (transferable == 0) return; /* * Determine what the imbalance is and then adjust that to how many * kses we actually have to give up (transferable). */ diff = high_load - low_load; move = diff / 2; if (diff & 0x1) move++; move = min(move, transferable); for (i = 0; i < move; i++) kseq_move(high, KSEQ_ID(low)); return; } static void kseq_move(struct kseq *from, int cpu) { struct kseq *kseq; struct kseq *to; struct kse *ke; kseq = from; to = KSEQ_CPU(cpu); ke = kseq_steal(kseq, 1); if (ke == NULL) { struct kseq_group *ksg; ksg = kseq->ksq_group; LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) { if (kseq == from || kseq->ksq_transferable == 0) continue; ke = kseq_steal(kseq, 1); break; } if (ke == NULL) panic("kseq_move: No KSEs available with a " "transferable count of %d\n", ksg->ksg_transferable); } if (kseq == to) return; ke->ke_state = KES_THREAD; kseq_runq_rem(kseq, ke); kseq_load_rem(kseq, ke); kseq_notify(ke, cpu); } static int kseq_idled(struct kseq *kseq) { struct kseq_group *ksg; struct kseq *steal; struct kse *ke; ksg = kseq->ksq_group; /* * If we're in a cpu group, try and steal kses from another cpu in * the group before idling. 
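 *
 * Walk-through (an editor's sketch, not from the original commit),
 * assuming a two-way SMT group with ksg_cpumask == 0x3: the first
 * logical CPU to idle only sets its own bit, so ksg_idlemask == 0x1
 * differs from ksg_cpumask and the global kseq_idle mask is left
 * untouched.  Only when the second CPU idles as well do the masks
 * match, and the group's ksg_mask is atomically set in kseq_idle,
 * advertising the whole package as idle to kseq_transfer().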
*/ if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) { LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) { if (steal == kseq || steal->ksq_transferable == 0) continue; ke = kseq_steal(steal, 0); if (ke == NULL) continue; ke->ke_state = KES_THREAD; kseq_runq_rem(steal, ke); kseq_load_rem(steal, ke); ke->ke_cpu = PCPU_GET(cpuid); ke->ke_flags |= KEF_INTERNAL | KEF_HOLD; sched_add(ke->ke_thread, SRQ_YIELDING); return (0); } } /* * We only set the idled bit when all of the cpus in the group are * idle. Otherwise we could get into a situation where a KSE bounces * back and forth between two idle cores on seperate physical CPUs. */ ksg->ksg_idlemask |= PCPU_GET(cpumask); if (ksg->ksg_idlemask != ksg->ksg_cpumask) return (1); atomic_set_int(&kseq_idle, ksg->ksg_mask); return (1); } static void kseq_assign(struct kseq *kseq) { struct kse *nke; struct kse *ke; do { *(volatile struct kse **)&ke = kseq->ksq_assigned; } while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned, (uintptr_t)ke, (uintptr_t)NULL)); for (; ke != NULL; ke = nke) { nke = ke->ke_assign; kseq->ksq_group->ksg_load--; kseq->ksq_load--; ke->ke_flags &= ~KEF_ASSIGNED; if (ke->ke_flags & KEF_REMOVED) { ke->ke_flags &= ~KEF_REMOVED; continue; } ke->ke_flags |= KEF_INTERNAL | KEF_HOLD; sched_add(ke->ke_thread, SRQ_YIELDING); } } static void kseq_notify(struct kse *ke, int cpu) { struct kseq *kseq; struct thread *td; struct pcpu *pcpu; int class; int prio; kseq = KSEQ_CPU(cpu); /* XXX */ class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); if ((class == PRI_TIMESHARE || class == PRI_REALTIME) && (kseq_idle & kseq->ksq_group->ksg_mask)) atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask); kseq->ksq_group->ksg_load++; kseq->ksq_load++; ke->ke_cpu = cpu; ke->ke_flags |= KEF_ASSIGNED; prio = ke->ke_thread->td_priority; /* * Place a KSE on another cpu's queue and force a resched. */ do { *(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned; } while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned, (uintptr_t)ke->ke_assign, (uintptr_t)ke)); /* * Without sched_lock we could lose a race where we set NEEDRESCHED * on a thread that is switched out before the IPI is delivered. This * would lead us to miss the resched. This will be a problem once * sched_lock is pushed down. */ pcpu = pcpu_find(cpu); td = pcpu->pc_curthread; if (ke->ke_thread->td_priority < td->td_priority || td == pcpu->pc_idlethread) { td->td_flags |= TDF_NEEDRESCHED; ipi_selected(1 << cpu, IPI_AST); } } static struct kse * runq_steal(struct runq *rq) { struct rqhead *rqh; struct rqbits *rqb; struct kse *ke; int word; int bit; mtx_assert(&sched_lock, MA_OWNED); rqb = &rq->rq_status; for (word = 0; word < RQB_LEN; word++) { if (rqb->rqb_bits[word] == 0) continue; for (bit = 0; bit < RQB_BPW; bit++) { if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) continue; rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; TAILQ_FOREACH(ke, rqh, ke_procq) { if (KSE_CAN_MIGRATE(ke)) return (ke); } } } return (NULL); } static struct kse * kseq_steal(struct kseq *kseq, int stealidle) { struct kse *ke; /* * Steal from next first to try to get a non-interactive task that * may not have run for a while. 
*/ if ((ke = runq_steal(kseq->ksq_next)) != NULL) return (ke); if ((ke = runq_steal(kseq->ksq_curr)) != NULL) return (ke); if (stealidle) return (runq_steal(&kseq->ksq_idle)); return (NULL); } int kseq_transfer(struct kseq *kseq, struct kse *ke, int class) { struct kseq_group *nksg; struct kseq_group *ksg; struct kseq *old; int cpu; int idx; if (smp_started == 0) return (0); cpu = 0; /* * If our load exceeds a certain threshold we should attempt to * reassign this thread. The first candidate is the cpu that * originally ran the thread. If it is idle, assign it there, * otherwise, pick an idle cpu. * * The threshold at which we start to reassign kses has a large impact * on the overall performance of the system. Tuned too high and * some CPUs may idle. Too low and there will be excess migration * and context switches. */ old = KSEQ_CPU(ke->ke_cpu); nksg = old->ksq_group; ksg = kseq->ksq_group; if (kseq_idle) { if (kseq_idle & nksg->ksg_mask) { cpu = ffs(nksg->ksg_idlemask); if (cpu) { CTR2(KTR_SCHED, "kseq_transfer: %p found old cpu %X " "in idlemask.", ke, cpu); goto migrate; } } /* * Multiple cpus could find this bit simultaneously * but the race shouldn't be terrible. */ cpu = ffs(kseq_idle); if (cpu) { CTR2(KTR_SCHED, "kseq_transfer: %p found %X " "in idlemask.", ke, cpu); goto migrate; } } idx = 0; #if 0 if (old->ksq_load < kseq->ksq_load) { cpu = ke->ke_cpu + 1; CTR2(KTR_SCHED, "kseq_transfer: %p old cpu %X " "load less than ours.", ke, cpu); goto migrate; } /* * No new CPU was found, look for one with less load. */ for (idx = 0; idx <= ksg_maxid; idx++) { nksg = KSEQ_GROUP(idx); if (nksg->ksg_load /*+ (nksg->ksg_cpus * 2)*/ < ksg->ksg_load) { cpu = ffs(nksg->ksg_cpumask); CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X load less " "than ours.", ke, cpu); goto migrate; } } #endif /* * If another cpu in this group has idled, assign a thread over * to them after checking to see if there are idled groups. */ if (ksg->ksg_idlemask) { cpu = ffs(ksg->ksg_idlemask); if (cpu) { CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X idle in " "group.", ke, cpu); goto migrate; } } return (0); migrate: /* * Now that we've found an idle CPU, migrate the thread. */ cpu--; ke->ke_runq = NULL; kseq_notify(ke, cpu); return (1); } #endif /* SMP */ /* * Pick the highest priority task we have and return it. */ static struct kse * kseq_choose(struct kseq *kseq) { struct runq *swap; struct kse *ke; int nice; mtx_assert(&sched_lock, MA_OWNED); swap = NULL; for (;;) { ke = runq_choose(kseq->ksq_curr); if (ke == NULL) { /* * We already swapped once and didn't get anywhere. */ if (swap) break; swap = kseq->ksq_curr; kseq->ksq_curr = kseq->ksq_next; kseq->ksq_next = swap; continue; } /* * If we encounter a slice of 0 the kse is in a * TIMESHARE kse group and its nice was too far out * of the range that receives slices. 
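	 *
	 * Illustrative arithmetic (an editor's sketch, not from the
	 * original commit), using the compile-time defaults slice_min == 1
	 * and slice_max == 10 (sched_initticks() later retunes them from
	 * stathz), so SCHED_SLICE_RANGE == 10 and SCHED_SLICE_NTHRESH == 19:
	 * a thread whose nice value sits 10 points above ksq_nicemin gets
	 *
	 *	SCHED_SLICE_NICE(10) = 10 - (10 * 10) / 19 = 5 ticks,
	 *
	 * one sitting 19 points above gets 10 - 10 = 0, and anything
	 * further outside the window is only ever granted the minimum
	 * slice by sched_slice().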
*/ nice = ke->ke_proc->p_nice + (0 - kseq->ksq_nicemin); #if 0 if (ke->ke_slice == 0 || (nice > SCHED_SLICE_NTHRESH && ke->ke_proc->p_nice != 0)) { runq_remove(ke->ke_runq, ke); sched_slice(ke); ke->ke_runq = kseq->ksq_next; runq_add(ke->ke_runq, ke, 0); continue; } #endif return (ke); } return (runq_choose(&kseq->ksq_idle)); } static void kseq_setup(struct kseq *kseq) { runq_init(&kseq->ksq_timeshare[0]); runq_init(&kseq->ksq_timeshare[1]); runq_init(&kseq->ksq_idle); kseq->ksq_curr = &kseq->ksq_timeshare[0]; kseq->ksq_next = &kseq->ksq_timeshare[1]; kseq->ksq_load = 0; kseq->ksq_load_timeshare = 0; } static void sched_setup(void *dummy) { #ifdef SMP int i; #endif /* * To avoid divide-by-zero, we set realstathz a dummy value * in case which sched_clock() called before sched_initticks(). */ realstathz = hz; slice_min = (hz/100); /* 10ms */ slice_max = (hz/7); /* ~140ms */ #ifdef SMP balance_groups = 0; /* * Initialize the kseqs. */ for (i = 0; i < MAXCPU; i++) { struct kseq *ksq; ksq = &kseq_cpu[i]; ksq->ksq_assigned = NULL; kseq_setup(&kseq_cpu[i]); } if (smp_topology == NULL) { struct kseq_group *ksg; struct kseq *ksq; int cpus; for (cpus = 0, i = 0; i < MAXCPU; i++) { if (CPU_ABSENT(i)) continue; ksq = &kseq_cpu[i]; ksg = &kseq_groups[cpus]; /* * Setup a kseq group with one member. */ ksq->ksq_transferable = 0; ksq->ksq_group = ksg; ksg->ksg_cpus = 1; ksg->ksg_idlemask = 0; ksg->ksg_cpumask = ksg->ksg_mask = 1 << i; ksg->ksg_load = 0; ksg->ksg_transferable = 0; LIST_INIT(&ksg->ksg_members); LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings); cpus++; } ksg_maxid = cpus - 1; } else { struct kseq_group *ksg; struct cpu_group *cg; int j; for (i = 0; i < smp_topology->ct_count; i++) { cg = &smp_topology->ct_group[i]; ksg = &kseq_groups[i]; /* * Initialize the group. */ ksg->ksg_idlemask = 0; ksg->ksg_load = 0; ksg->ksg_transferable = 0; ksg->ksg_cpus = cg->cg_count; ksg->ksg_cpumask = cg->cg_mask; LIST_INIT(&ksg->ksg_members); /* * Find all of the group members and add them. */ for (j = 0; j < MAXCPU; j++) { if ((cg->cg_mask & (1 << j)) != 0) { if (ksg->ksg_mask == 0) ksg->ksg_mask = 1 << j; kseq_cpu[j].ksq_transferable = 0; kseq_cpu[j].ksq_group = ksg; LIST_INSERT_HEAD(&ksg->ksg_members, &kseq_cpu[j], ksq_siblings); } } if (ksg->ksg_cpus > 1) balance_groups = 1; } ksg_maxid = smp_topology->ct_count - 1; } /* * Stagger the group and global load balancer so they do not * interfere with each other. */ bal_tick = ticks + hz; if (balance_groups) gbal_tick = ticks + (hz / 2); #else kseq_setup(KSEQ_SELF()); #endif mtx_lock_spin(&sched_lock); kseq_load_add(KSEQ_SELF(), &kse0); mtx_unlock_spin(&sched_lock); } /* ARGSUSED */ static void sched_initticks(void *dummy) { mtx_lock_spin(&sched_lock); realstathz = stathz ? stathz : hz; slice_min = (realstathz/100); /* 10ms */ slice_max = (realstathz/7); /* ~140ms */ tickincr = (hz << 10) / realstathz; /* * XXX This does not work for values of stathz that are much * larger than hz. */ if (tickincr == 0) tickincr = 1; mtx_unlock_spin(&sched_lock); } /* * Scale the scheduling priority according to the "interactivity" of this * process. 
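 *
 * Worked example (an editor's sketch, not from the original commit; the
 * priority bounds are assumed values): SCHED_PRI_INTERACT() maps the
 * 0..100 interactivity score linearly onto the timeshare range before
 * the nice value is added.  Taking PRI_MIN_TIMESHARE == 160 and
 * PRI_MAX_TIMESHARE == 223, so SCHED_PRI_RANGE == 64, a score of 16
 * with nice 0 gives
 *
 *	pri = 16 * 64 / 100 + 160 + 0 = 170
 *
 * while a CPU-bound score of 84 gives 53 + 160 = 213, near the bottom
 * of the timeshare band.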
*/ static void sched_priority(struct ksegrp *kg) { int pri; if (kg->kg_pri_class != PRI_TIMESHARE) return; pri = SCHED_PRI_INTERACT(sched_interact_score(kg)); pri += SCHED_PRI_BASE; pri += kg->kg_proc->p_nice; if (pri > PRI_MAX_TIMESHARE) pri = PRI_MAX_TIMESHARE; else if (pri < PRI_MIN_TIMESHARE) pri = PRI_MIN_TIMESHARE; kg->kg_user_pri = pri; return; } /* * Calculate a time slice based on the properties of the kseg and the runq * that we're on. This is only for PRI_TIMESHARE ksegrps. */ static void sched_slice(struct kse *ke) { struct kseq *kseq; struct ksegrp *kg; kg = ke->ke_ksegrp; kseq = KSEQ_CPU(ke->ke_cpu); if (ke->ke_thread->td_flags & TDF_BORROWING) { ke->ke_slice = SCHED_SLICE_MIN; return; } /* * Rationale: * KSEs in interactive ksegs get a minimal slice so that we * quickly notice if it abuses its advantage. * * KSEs in non-interactive ksegs are assigned a slice that is * based on the ksegs nice value relative to the least nice kseg * on the run queue for this cpu. * * If the KSE is less nice than all others it gets the maximum * slice and other KSEs will adjust their slice relative to * this when they first expire. * * There is 20 point window that starts relative to the least * nice kse on the run queue. Slice size is determined by * the kse distance from the last nice ksegrp. * * If the kse is outside of the window it will get no slice * and will be reevaluated each time it is selected on the * run queue. The exception to this is nice 0 ksegs when * a nice -20 is running. They are always granted a minimum * slice. */ if (!SCHED_INTERACTIVE(kg)) { int nice; nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin); if (kseq->ksq_load_timeshare == 0 || kg->kg_proc->p_nice < kseq->ksq_nicemin) ke->ke_slice = SCHED_SLICE_MAX; else if (nice <= SCHED_SLICE_NTHRESH) ke->ke_slice = SCHED_SLICE_NICE(nice); else if (kg->kg_proc->p_nice == 0) ke->ke_slice = SCHED_SLICE_MIN; else ke->ke_slice = SCHED_SLICE_MIN; /* 0 */ } else ke->ke_slice = SCHED_SLICE_INTERACTIVE; return; } /* * This routine enforces a maximum limit on the amount of scheduling history * kept. It is called after either the slptime or runtime is adjusted. * This routine will not operate correctly when slp or run times have been * adjusted to more than double their maximum. */ static void sched_interact_update(struct ksegrp *kg) { int sum; sum = kg->kg_runtime + kg->kg_slptime; if (sum < SCHED_SLP_RUN_MAX) return; /* * If we have exceeded by more than 1/5th then the algorithm below * will not bring us back into range. Dividing by two here forces * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] */ if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) { kg->kg_runtime /= 2; kg->kg_slptime /= 2; return; } kg->kg_runtime = (kg->kg_runtime / 5) * 4; kg->kg_slptime = (kg->kg_slptime / 5) * 4; } static void sched_interact_fork(struct ksegrp *kg) { int ratio; int sum; sum = kg->kg_runtime + kg->kg_slptime; if (sum > SCHED_SLP_RUN_FORK) { ratio = sum / SCHED_SLP_RUN_FORK; kg->kg_runtime /= ratio; kg->kg_slptime /= ratio; } } static int sched_interact_score(struct ksegrp *kg) { int div; if (kg->kg_runtime > kg->kg_slptime) { div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF); return (SCHED_INTERACT_HALF + (SCHED_INTERACT_HALF - (kg->kg_slptime / div))); } if (kg->kg_slptime > kg->kg_runtime) { div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF); return (kg->kg_runtime / div); } /* * This can happen if slptime and runtime are 0. 
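	 *
	 * Worked example (an editor's sketch, not from the original
	 * commit), with SCHED_INTERACT_HALF == 50: a ksegrp that has slept
	 * three times as long as it has run, say kg_slptime == 3 << 10 and
	 * kg_runtime == 1 << 10, takes the kg_slptime > kg_runtime branch
	 * above:
	 *
	 *	div = max(1, 3072 / 50) = 61;  score = 1024 / 61 = 16
	 *
	 * which is below SCHED_INTERACT_THRESH (30), so the group is
	 * treated as interactive.  Swapping the two values instead gives
	 * 50 + (50 - 16) = 84, i.e. firmly CPU-bound.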
*/ return (0); } /* * Very early in the boot some setup of scheduler-specific * parts of proc0 and of soem scheduler resources needs to be done. * Called from: * proc0_init() */ void schedinit(void) { /* * Set up the scheduler specific parts of proc0. */ proc0.p_sched = NULL; /* XXX */ ksegrp0.kg_sched = &kg_sched0; thread0.td_sched = &kse0; kse0.ke_thread = &thread0; kse0.ke_state = KES_THREAD; kg_sched0.skg_concurrency = 1; kg_sched0.skg_avail_opennings = 0; /* we are already running */ } /* * This is only somewhat accurate since given many processes of the same * priority they will switch when their slices run out, which will be * at most SCHED_SLICE_MAX. */ int sched_rr_interval(void) { return (SCHED_SLICE_MAX); } static void sched_pctcpu_update(struct kse *ke) { /* * Adjust counters and watermark for pctcpu calc. */ if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) { /* * Shift the tick count out so that the divide doesn't * round away our results. */ ke->ke_ticks <<= 10; ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) * SCHED_CPU_TICKS; ke->ke_ticks >>= 10; } else ke->ke_ticks = 0; ke->ke_ltick = ticks; ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS; } void sched_thread_priority(struct thread *td, u_char prio) { struct kse *ke; CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", td, td->td_proc->p_comm, td->td_priority, prio, curthread, curthread->td_proc->p_comm); ke = td->td_kse; mtx_assert(&sched_lock, MA_OWNED); if (td->td_priority == prio) return; if (TD_ON_RUNQ(td)) { /* * If the priority has been elevated due to priority * propagation, we may have to move ourselves to a new * queue. We still call adjustrunqueue below in case kse * needs to fix things up. */ if (prio < td->td_priority && ke->ke_runq != NULL && (ke->ke_flags & KEF_ASSIGNED) == 0 && ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) { runq_remove(ke->ke_runq, ke); ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr; runq_add(ke->ke_runq, ke, 0); } /* * Hold this kse on this cpu so that sched_prio() doesn't * cause excessive migration. We only want migration to * happen as the result of a wakeup. */ ke->ke_flags |= KEF_HOLD; adjustrunqueue(td, prio); ke->ke_flags &= ~KEF_HOLD; } else td->td_priority = prio; } /* * Update a thread's priority when it is lent another thread's * priority. */ void sched_lend_prio(struct thread *td, u_char prio) { td->td_flags |= TDF_BORROWING; sched_thread_priority(td, prio); } /* * Restore a thread's priority when priority propagation is * over. The prio argument is the minimum priority the thread * needs to have to satisfy other possible priority lending * requests. If the thread's regular priority is less * important than prio, the thread will keep a priority boost * of prio. */ void sched_unlend_prio(struct thread *td, u_char prio) { u_char base_pri; if (td->td_base_pri >= PRI_MIN_TIMESHARE && td->td_base_pri <= PRI_MAX_TIMESHARE) base_pri = td->td_ksegrp->kg_user_pri; else base_pri = td->td_base_pri; if (prio >= base_pri) { td->td_flags &= ~TDF_BORROWING; sched_thread_priority(td, base_pri); } else sched_lend_prio(td, prio); } void sched_prio(struct thread *td, u_char prio) { u_char oldprio; /* First, update the base priority. */ td->td_base_pri = prio; /* * If the thread is borrowing another thread's priority, don't * ever lower the priority. */ if (td->td_flags & TDF_BORROWING && td->td_priority < prio) return; /* Change the real priority. 
*/ oldprio = td->td_priority; sched_thread_priority(td, prio); /* * If the thread is on a turnstile, then let the turnstile update * its state. */ if (TD_ON_LOCK(td) && oldprio != prio) turnstile_adjust(td, oldprio); } void sched_switch(struct thread *td, struct thread *newtd, int flags) { struct kseq *ksq; struct kse *ke; mtx_assert(&sched_lock, MA_OWNED); ke = td->td_kse; ksq = KSEQ_SELF(); td->td_lastcpu = td->td_oncpu; td->td_oncpu = NOCPU; td->td_flags &= ~TDF_NEEDRESCHED; td->td_owepreempt = 0; /* * If the KSE has been assigned it may be in the process of switching * to the new cpu. This is the case in sched_bind(). */ if (td == PCPU_GET(idlethread)) { TD_SET_CAN_RUN(td); } else if ((ke->ke_flags & KEF_ASSIGNED) == 0) { /* We are ending our run so make our slot available again */ SLOT_RELEASE(td->td_ksegrp); kseq_load_rem(ksq, ke); if (TD_IS_RUNNING(td)) { /* * Don't allow the thread to migrate * from a preemption. */ ke->ke_flags |= KEF_HOLD; setrunqueue(td, (flags & SW_PREEMPT) ? SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : SRQ_OURSELF|SRQ_YIELDING); ke->ke_flags &= ~KEF_HOLD; } else if ((td->td_proc->p_flag & P_HADTHREADS) && (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp)) /* * We will not be on the run queue. * So we must be sleeping or similar. * Don't use the slot if we will need it * for newtd. */ slot_fill(td->td_ksegrp); } if (newtd != NULL) { /* * If we bring in a thread account for it as if it had been * added to the run queue and then chosen. */ newtd->td_kse->ke_flags |= KEF_DIDRUN; newtd->td_kse->ke_runq = ksq->ksq_curr; TD_SET_RUNNING(newtd); kseq_load_add(KSEQ_SELF(), newtd->td_kse); /* * XXX When we preempt, we've already consumed a slot because * we got here through sched_add(). However, newtd can come * from thread_switchout() which can't SLOT_USE() because * the SLOT code is scheduler dependent. We must use the * slot here otherwise. */ if ((flags & SW_PREEMPT) == 0) SLOT_USE(newtd->td_ksegrp); } else newtd = choosethread(); if (td != newtd) { #ifdef HWPMC_HOOKS if (PMC_PROC_IS_USING_PMCS(td->td_proc)) PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); #endif cpu_switch(td, newtd); #ifdef HWPMC_HOOKS if (PMC_PROC_IS_USING_PMCS(td->td_proc)) PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); #endif } sched_lock.mtx_lock = (uintptr_t)td; td->td_oncpu = PCPU_GET(cpuid); } void sched_nice(struct proc *p, int nice) { struct ksegrp *kg; struct kse *ke; struct thread *td; struct kseq *kseq; PROC_LOCK_ASSERT(p, MA_OWNED); mtx_assert(&sched_lock, MA_OWNED); /* * We need to adjust the nice counts for running KSEs. */ FOREACH_KSEGRP_IN_PROC(p, kg) { if (kg->kg_pri_class == PRI_TIMESHARE) { FOREACH_THREAD_IN_GROUP(kg, td) { ke = td->td_kse; if (ke->ke_runq == NULL) continue; kseq = KSEQ_CPU(ke->ke_cpu); kseq_nice_rem(kseq, p->p_nice); kseq_nice_add(kseq, nice); } } } p->p_nice = nice; FOREACH_KSEGRP_IN_PROC(p, kg) { sched_priority(kg); FOREACH_THREAD_IN_GROUP(kg, td) td->td_flags |= TDF_NEEDRESCHED; } } void sched_sleep(struct thread *td) { mtx_assert(&sched_lock, MA_OWNED); td->td_slptime = ticks; } void sched_wakeup(struct thread *td) { mtx_assert(&sched_lock, MA_OWNED); /* * Let the kseg know how long we slept for. This is because process * interactivity behavior is modeled in the kseg. 
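	 *
	 * Illustration (an editor's sketch, not from the original commit;
	 * hz == 1000 assumed): the credit below is
	 * (ticks - td_slptime) << 10, the same fixed-point scale that
	 * sched_clock() uses for run time, so a 2 second sleep adds
	 * 2000 << 10 to kg_slptime before sched_interact_update()
	 * rebalances it, while a 6 second sleep exceeds SCHED_SLP_RUN_MAX
	 * (5 seconds worth) and simply resets the group to the maximum
	 * sleep credit with kg_runtime = 1, i.e. maximally interactive
	 * on wakeup.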
*/ if (td->td_slptime) { struct ksegrp *kg; int hzticks; kg = td->td_ksegrp; hzticks = (ticks - td->td_slptime) << 10; if (hzticks >= SCHED_SLP_RUN_MAX) { kg->kg_slptime = SCHED_SLP_RUN_MAX; kg->kg_runtime = 1; } else { kg->kg_slptime += hzticks; sched_interact_update(kg); } sched_priority(kg); sched_slice(td->td_kse); td->td_slptime = 0; } setrunqueue(td, SRQ_BORING); } /* * Penalize the parent for creating a new child and initialize the child's * priority. */ void sched_fork(struct thread *td, struct thread *childtd) { mtx_assert(&sched_lock, MA_OWNED); sched_fork_ksegrp(td, childtd->td_ksegrp); sched_fork_thread(td, childtd); } void sched_fork_ksegrp(struct thread *td, struct ksegrp *child) { struct ksegrp *kg = td->td_ksegrp; mtx_assert(&sched_lock, MA_OWNED); child->kg_slptime = kg->kg_slptime; child->kg_runtime = kg->kg_runtime; child->kg_user_pri = kg->kg_user_pri; sched_interact_fork(child); kg->kg_runtime += tickincr; sched_interact_update(kg); } void sched_fork_thread(struct thread *td, struct thread *child) { struct kse *ke; struct kse *ke2; sched_newthread(child); ke = td->td_kse; ke2 = child->td_kse; ke2->ke_slice = 1; /* Attempt to quickly learn interactivity. */ ke2->ke_cpu = ke->ke_cpu; ke2->ke_runq = NULL; /* Grab our parents cpu estimation information. */ ke2->ke_ticks = ke->ke_ticks; ke2->ke_ltick = ke->ke_ltick; ke2->ke_ftick = ke->ke_ftick; } void sched_class(struct ksegrp *kg, int class) { struct kseq *kseq; struct kse *ke; struct thread *td; int nclass; int oclass; mtx_assert(&sched_lock, MA_OWNED); if (kg->kg_pri_class == class) return; nclass = PRI_BASE(class); oclass = PRI_BASE(kg->kg_pri_class); FOREACH_THREAD_IN_GROUP(kg, td) { ke = td->td_kse; if ((ke->ke_state != KES_ONRUNQ && ke->ke_state != KES_THREAD) || ke->ke_runq == NULL) continue; kseq = KSEQ_CPU(ke->ke_cpu); #ifdef SMP /* * On SMP if we're on the RUNQ we must adjust the transferable * count because could be changing to or from an interrupt * class. */ if (ke->ke_state == KES_ONRUNQ) { if (KSE_CAN_MIGRATE(ke)) { kseq->ksq_transferable--; kseq->ksq_group->ksg_transferable--; } if (KSE_CAN_MIGRATE(ke)) { kseq->ksq_transferable++; kseq->ksq_group->ksg_transferable++; } } #endif if (oclass == PRI_TIMESHARE) { kseq->ksq_load_timeshare--; kseq_nice_rem(kseq, kg->kg_proc->p_nice); } if (nclass == PRI_TIMESHARE) { kseq->ksq_load_timeshare++; kseq_nice_add(kseq, kg->kg_proc->p_nice); } } kg->kg_pri_class = class; } /* * Return some of the child's priority and interactivity to the parent. */ void sched_exit(struct proc *p, struct thread *childtd) { mtx_assert(&sched_lock, MA_OWNED); sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd); sched_exit_thread(NULL, childtd); } void sched_exit_ksegrp(struct ksegrp *kg, struct thread *td) { /* kg->kg_slptime += td->td_ksegrp->kg_slptime; */ kg->kg_runtime += td->td_ksegrp->kg_runtime; sched_interact_update(kg); } void sched_exit_thread(struct thread *td, struct thread *childtd) { CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", childtd, childtd->td_proc->p_comm, childtd->td_priority); kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse); } void sched_clock(struct thread *td) { struct kseq *kseq; struct ksegrp *kg; struct kse *ke; mtx_assert(&sched_lock, MA_OWNED); kseq = KSEQ_SELF(); #ifdef SMP if (ticks >= bal_tick) sched_balance(); if (ticks >= gbal_tick && balance_groups) sched_balance_groups(); /* * We could have been assigned a non real-time thread without an * IPI. 
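	 *
	 * Note on the tick accounting below (an editor's sketch, not from
	 * the original commit; hz == 1000 and stathz == 128 assumed):
	 * tickincr == (hz << 10) / realstathz == 1024000 / 128 == 8000, so
	 * every stathz tick charged here adds 8000 to kg_runtime and one
	 * second of CPU time accumulates roughly 1000 << 10, matching the
	 * scale of the sleep credit added in sched_wakeup().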
*/ if (kseq->ksq_assigned) kseq_assign(kseq); /* Potentially sets NEEDRESCHED */ #endif ke = td->td_kse; kg = ke->ke_ksegrp; /* Adjust ticks for pctcpu */ ke->ke_ticks++; ke->ke_ltick = ticks; /* Go up to one second beyond our max and then trim back down */ if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick) sched_pctcpu_update(ke); if (td->td_flags & TDF_IDLETD) return; /* * We only do slicing code for TIMESHARE ksegrps. */ if (kg->kg_pri_class != PRI_TIMESHARE) return; /* * We used a tick charge it to the ksegrp so that we can compute our * interactivity. */ kg->kg_runtime += tickincr; sched_interact_update(kg); /* * We used up one time slice. */ if (--ke->ke_slice > 0) return; /* * We're out of time, recompute priorities and requeue. */ kseq_load_rem(kseq, ke); sched_priority(kg); sched_slice(ke); if (SCHED_CURR(kg, ke)) ke->ke_runq = kseq->ksq_curr; else ke->ke_runq = kseq->ksq_next; kseq_load_add(kseq, ke); td->td_flags |= TDF_NEEDRESCHED; } int sched_runnable(void) { struct kseq *kseq; int load; load = 1; kseq = KSEQ_SELF(); #ifdef SMP if (kseq->ksq_assigned) { mtx_lock_spin(&sched_lock); kseq_assign(kseq); mtx_unlock_spin(&sched_lock); } #endif if ((curthread->td_flags & TDF_IDLETD) != 0) { if (kseq->ksq_load > 0) goto out; } else if (kseq->ksq_load - 1 > 0) goto out; load = 0; out: return (load); } void sched_userret(struct thread *td) { struct ksegrp *kg; KASSERT((td->td_flags & TDF_BORROWING) == 0, ("thread with borrowed priority returning to userland")); kg = td->td_ksegrp; if (td->td_priority != kg->kg_user_pri) { mtx_lock_spin(&sched_lock); td->td_priority = kg->kg_user_pri; td->td_base_pri = kg->kg_user_pri; mtx_unlock_spin(&sched_lock); } } struct kse * sched_choose(void) { struct kseq *kseq; struct kse *ke; mtx_assert(&sched_lock, MA_OWNED); kseq = KSEQ_SELF(); #ifdef SMP restart: if (kseq->ksq_assigned) kseq_assign(kseq); #endif ke = kseq_choose(kseq); if (ke) { #ifdef SMP if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) if (kseq_idled(kseq) == 0) goto restart; #endif kseq_runq_rem(kseq, ke); ke->ke_state = KES_THREAD; ke->ke_flags &= ~KEF_PREEMPTED; return (ke); } #ifdef SMP if (kseq_idled(kseq) == 0) goto restart; #endif return (NULL); } void sched_add(struct thread *td, int flags) { struct kseq *kseq; struct ksegrp *kg; struct kse *ke; int preemptive; int canmigrate; int class; CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)", td, td->td_proc->p_comm, td->td_priority, curthread, curthread->td_proc->p_comm); mtx_assert(&sched_lock, MA_OWNED); ke = td->td_kse; kg = td->td_ksegrp; canmigrate = 1; preemptive = !(flags & SRQ_YIELDING); class = PRI_BASE(kg->kg_pri_class); kseq = KSEQ_SELF(); if ((ke->ke_flags & KEF_INTERNAL) == 0) SLOT_USE(td->td_ksegrp); ke->ke_flags &= ~KEF_INTERNAL; #ifdef SMP if (ke->ke_flags & KEF_ASSIGNED) { if (ke->ke_flags & KEF_REMOVED) ke->ke_flags &= ~KEF_REMOVED; return; } canmigrate = KSE_CAN_MIGRATE(ke); /* * Don't migrate running threads here. Force the long term balancer * to do it. 
*/ if (ke->ke_flags & KEF_HOLD) { ke->ke_flags &= ~KEF_HOLD; canmigrate = 0; } #endif KASSERT(ke->ke_state != KES_ONRUNQ, ("sched_add: kse %p (%s) already in run queue", ke, ke->ke_proc->p_comm)); KASSERT(ke->ke_proc->p_sflag & PS_INMEM, ("sched_add: process swapped out")); KASSERT(ke->ke_runq == NULL, ("sched_add: KSE %p is still assigned to a run queue", ke)); if (flags & SRQ_PREEMPTED) ke->ke_flags |= KEF_PREEMPTED; switch (class) { case PRI_ITHD: case PRI_REALTIME: ke->ke_runq = kseq->ksq_curr; ke->ke_slice = SCHED_SLICE_MAX; if (canmigrate) ke->ke_cpu = PCPU_GET(cpuid); break; case PRI_TIMESHARE: if (SCHED_CURR(kg, ke)) ke->ke_runq = kseq->ksq_curr; else ke->ke_runq = kseq->ksq_next; break; case PRI_IDLE: /* * This is for priority prop. */ if (ke->ke_thread->td_priority < PRI_MIN_IDLE) ke->ke_runq = kseq->ksq_curr; else ke->ke_runq = &kseq->ksq_idle; ke->ke_slice = SCHED_SLICE_MIN; break; default: panic("Unknown pri class."); break; } #ifdef SMP /* * If this thread is pinned or bound, notify the target cpu. */ if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid) ) { ke->ke_runq = NULL; kseq_notify(ke, ke->ke_cpu); return; } /* * If we had been idle, clear our bit in the group and potentially * the global bitmap. If not, see if we should transfer this thread. */ if ((class == PRI_TIMESHARE || class == PRI_REALTIME) && (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) { /* * Check to see if our group is unidling, and if so, remove it * from the global idle mask. */ if (kseq->ksq_group->ksg_idlemask == kseq->ksq_group->ksg_cpumask) atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask); /* * Now remove ourselves from the group specific idle mask. */ kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask); } else if (canmigrate && kseq->ksq_load > 1 && class != PRI_ITHD) if (kseq_transfer(kseq, ke, class)) return; ke->ke_cpu = PCPU_GET(cpuid); #endif if (td->td_priority < curthread->td_priority && ke->ke_runq == kseq->ksq_curr) curthread->td_flags |= TDF_NEEDRESCHED; if (preemptive && maybe_preempt(td)) return; ke->ke_state = KES_ONRUNQ; kseq_runq_add(kseq, ke, flags); kseq_load_add(kseq, ke); } void sched_rem(struct thread *td) { struct kseq *kseq; struct kse *ke; CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)", td, td->td_proc->p_comm, td->td_priority, curthread, curthread->td_proc->p_comm); mtx_assert(&sched_lock, MA_OWNED); ke = td->td_kse; SLOT_RELEASE(td->td_ksegrp); ke->ke_flags &= ~KEF_PREEMPTED; if (ke->ke_flags & KEF_ASSIGNED) { ke->ke_flags |= KEF_REMOVED; return; } KASSERT((ke->ke_state == KES_ONRUNQ), ("sched_rem: KSE not on run queue")); ke->ke_state = KES_THREAD; kseq = KSEQ_CPU(ke->ke_cpu); kseq_runq_rem(kseq, ke); kseq_load_rem(kseq, ke); } fixpt_t sched_pctcpu(struct thread *td) { fixpt_t pctcpu; struct kse *ke; pctcpu = 0; ke = td->td_kse; if (ke == NULL) return (0); mtx_lock_spin(&sched_lock); if (ke->ke_ticks) { int rtick; /* * Don't update more frequently than twice a second. Allowing * this causes the cpu usage to decay away too quickly due to * rounding errors. */ if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick || ke->ke_ltick < (ticks - (hz / 2))) sched_pctcpu_update(ke); /* How many rtick per second ? 
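		 *
		 * Worked example (an editor's sketch, not from the original
		 * commit): a thread that ran on every statclock tick of the
		 * 10 second SCHED_CPU_TIME window has
		 * ke_ticks ~= 10 * realstathz, so rtick == realstathz and
		 *
		 *	pctcpu = (FSCALE * ((FSCALE * rtick) / realstathz))
		 *	    >> FSHIFT == FSCALE
		 *
		 * i.e. 100%, while a thread busy half the time ends up near
		 * FSCALE / 2.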
*/ rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS); pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT; } ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick; mtx_unlock_spin(&sched_lock); return (pctcpu); } void sched_bind(struct thread *td, int cpu) { struct kse *ke; mtx_assert(&sched_lock, MA_OWNED); ke = td->td_kse; ke->ke_flags |= KEF_BOUND; #ifdef SMP if (PCPU_GET(cpuid) == cpu) return; /* sched_rem without the runq_remove */ ke->ke_state = KES_THREAD; kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); kseq_notify(ke, cpu); /* When we return from mi_switch we'll be on the correct cpu. */ mi_switch(SW_VOL, NULL); #endif } void sched_unbind(struct thread *td) { mtx_assert(&sched_lock, MA_OWNED); td->td_kse->ke_flags &= ~KEF_BOUND; } int sched_is_bound(struct thread *td) { mtx_assert(&sched_lock, MA_OWNED); return (td->td_kse->ke_flags & KEF_BOUND); } int sched_load(void) { #ifdef SMP int total; int i; total = 0; for (i = 0; i <= ksg_maxid; i++) total += KSEQ_GROUP(i)->ksg_load; return (total); #else return (KSEQ_SELF()->ksq_sysload); #endif } int sched_sizeof_ksegrp(void) { return (sizeof(struct ksegrp) + sizeof(struct kg_sched)); } int sched_sizeof_proc(void) { return (sizeof(struct proc)); } int sched_sizeof_thread(void) { return (sizeof(struct thread) + sizeof(struct td_sched)); } + +void +sched_tick(void) +{ +} #define KERN_SWITCH_INCLUDE 1 #include "kern/kern_switch.c" Index: head/sys/sys/sched.h =================================================================== --- head/sys/sys/sched.h (revision 159569) +++ head/sys/sys/sched.h (revision 159570) @@ -1,121 +1,122 @@ /*- * Copyright (c) 2002, Jeffrey Roberson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_SCHED_H_ #define _SYS_SCHED_H_ /* * General scheduling info. * * sched_load: * Total runnable non-ithread threads in the system. * * sched_runnable: * Runnable threads for this processor. */ int sched_load(void); int sched_rr_interval(void); int sched_runnable(void); /* * Proc related scheduling hooks. */ void sched_exit(struct proc *p, struct thread *childtd); void sched_fork(struct thread *td, struct thread *childtd); /* * KSE Groups contain scheduling priority information. They record the * behavior of groups of KSEs and threads. 
*/ void sched_class(struct ksegrp *kg, int class); void sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd); void sched_fork_ksegrp(struct thread *td, struct ksegrp *child); void sched_nice(struct proc *p, int nice); /* * Threads are switched in and out, block on resources, have temporary * priorities inherited from their ksegs, and use up cpu time. */ void sched_exit_thread(struct thread *td, struct thread *child); void sched_fork_thread(struct thread *td, struct thread *child); fixpt_t sched_pctcpu(struct thread *td); void sched_prio(struct thread *td, u_char prio); void sched_lend_prio(struct thread *td, u_char prio); void sched_sleep(struct thread *td); void sched_switch(struct thread *td, struct thread *newtd, int flags); void sched_unlend_prio(struct thread *td, u_char prio); void sched_userret(struct thread *td); void sched_wakeup(struct thread *td); /* * Threads are moved on and off of run queues */ void sched_add(struct thread *td, int flags); void sched_clock(struct thread *td); void sched_rem(struct thread *td); +void sched_tick(void); /* * Binding makes cpu affinity permanent while pinning is used to temporarily * hold a thread on a particular CPU. */ void sched_bind(struct thread *td, int cpu); static __inline void sched_pin(void); void sched_unbind(struct thread *td); static __inline void sched_unpin(void); int sched_is_bound(struct thread *td); /* * These procedures tell the process data structure allocation code how * many bytes to actually allocate. */ int sched_sizeof_ksegrp(void); int sched_sizeof_proc(void); int sched_sizeof_thread(void); static __inline void sched_pin(void) { curthread->td_pinned++; } static __inline void sched_unpin(void) { curthread->td_pinned--; } /* temporarily here */ void schedinit(void); void sched_init_concurrency(struct ksegrp *kg); void sched_set_concurrency(struct ksegrp *kg, int cuncurrency); void sched_schedinit(void); void sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td); void sched_thread_exit(struct thread *td); void sched_newthread(struct thread *td); #endif /* !_SYS_SCHED_H_ */
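
Editor's usage sketch (not part of the commit): the comment above draws the
line between permanent binding and temporary pinning.  The fragment below is
an illustration only; do_percpu_work() is a hypothetical caller, it assumes
td is curthread, and it takes sched_lock around the bind/unbind pair because
the scheduler implementations assert that it is held.

/*
 * Hypothetical illustration: sched_pin()/sched_unpin() keep the current
 * thread wherever it already runs for a short window, while
 * sched_bind()/sched_unbind() move it to a specific CPU and hold it there.
 */
static void
do_percpu_work(struct thread *td, int cpu)
{

	sched_pin();			/* temporary: no migration until unpin */
	/* ... read per-CPU data consistently ... */
	sched_unpin();

	mtx_lock_spin(&sched_lock);
	sched_bind(td, cpu);		/* may switch CPUs inside sched_bind() */
	mtx_unlock_spin(&sched_lock);

	/* ... work that must execute on 'cpu' ... */

	mtx_lock_spin(&sched_lock);
	sched_unbind(td);
	mtx_unlock_spin(&sched_lock);
}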