Index: head/share/man/man4/firewire.4 =================================================================== --- head/share/man/man4/firewire.4 (revision 336756) +++ head/share/man/man4/firewire.4 (revision 336757) @@ -1,127 +1,127 @@ .\" Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. All advertising materials mentioning features or use of this software .\" must display the acknowledgement as bellow: .\" .\" This product includes software developed by K. Kobayashi and H. Shimokawa .\" .\" 4. The name of the author may not be used to endorse or promote products .\" derived from this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED .\" WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE .\" DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, .\" INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES .\" (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR .\" SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, .\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN .\" ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .\" $FreeBSD$ .\" .Dd May 11, 2012 .Dt FIREWIRE 4 .Os .Sh NAME .Nm firewire .Nd IEEE1394 High-performance Serial Bus .Sh SYNOPSIS To compile this driver into the kernel, place the following line in your kernel configuration file: .Bd -ragged -offset indent .Cd "device firewire" .Ed .Pp Alternatively, to load the driver as a module at boot time, place the following line in .Xr loader.conf 5 : .Bd -literal -offset indent firewire_load="YES" .Ed .Sh DESCRIPTION .Fx provides machine-independent bus support and raw drivers for .Nm interfaces. .Pp The .Nm driver consists of two layers: the controller and the bus layer. The controller attaches to a physical bus (like .Xr pci 4 ) . The .Nm bus attaches to the controller. Additional drivers can be attached to the bus. .Pp Up to 63 devices, including the host itself, can be attached to a .Nm bus. The root node is dynamically assigned with a PHY device function. Also, the other .Nm bus specific parameters, e.g., node ID, cycle master, isochronous resource manager and bus manager, are dynamically assigned, after bus reset is initiated. On the .Nm bus, every device is identified by an EUI 64 address. .Pp Debugging over the firewire interace is possible with the .Xr dcons 4 driver. Please see -.Pa http://wiki.freebsd.org/DebugWithDcons +.Pa https://wiki.freebsd.org/DebugWithDcons for details on how to setup debugging with firewire. 
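.Pp
For example, one possible setup, using the
.Xr dcons 4
and
.Xr dcons_crom 4
modules, is to load them at boot time by adding the following lines to
.Xr loader.conf 5 :
.Bd -literal -offset indent
dcons_load="YES"
dcons_crom_load="YES"
.Ed
.Pp
A second machine on the same
.Nm
bus can then attach to the target console with
.Xr dconschat 8 .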
.Sh FILES .Bl -tag -width "Pa /dev/fwmem0.0" -compact .It Pa /dev/fw0.0 .It Pa /dev/fwmem0.0 .El .Sh SEE ALSO .Xr dcons 4 , .Xr fwe 4 , .Xr fwip 4 , .Xr fwohci 4 , .Xr pci 4 , .Xr sbp 4 , .Xr eui64 5 , .Xr fwcontrol 8 , .Xr kldload 8 , .Xr sysctl 8 .Sh HISTORY The .Nm driver first appeared in .Fx 5.0 . .Sh AUTHORS .An -nosplit The .Nm driver was written by .An Katsushi Kobayashi and .An Hidetoshi Shimokawa for the .Fx project. .Sh BUGS See .Xr fwohci 4 for security notes. Index: head/share/man/man7/tests.7 =================================================================== --- head/share/man/man7/tests.7 (revision 336756) +++ head/share/man/man7/tests.7 (revision 336757) @@ -1,239 +1,239 @@ .\" $FreeBSD$ .\" $NetBSD: tests.kyua.7,v 1.2 2013/07/20 21:39:59 wiz Exp $ .\" .\" Copyright (c) 2010 The NetBSD Foundation, Inc. .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND .\" CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, .\" INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF .\" MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. .\" IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS BE LIABLE FOR ANY .\" DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE .\" GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER .\" IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR .\" OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN .\" IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .\" .Dd August 21, 2017 .Dt TESTS 7 .Os .Sh NAME .Nm tests .Nd introduction to the .Fx Test Suite .Sh DESCRIPTION The .Fx Test Suite provides a collection of automated tests for two major purposes. On one hand, the test suite aids .Em developers to detect bugs and regressions when they modify the source tree. On the other hand, it allows .Em end users (and, in particular, system administrators) to verify that fresh installations of the .Fx operating system behave correctly on their hardware platform and also to ensure that the system does not suffer from regressions during regular operation and maintenance. .Pp The .Fx Test Suite can be found in the .Pa /usr/tests hierarchy. .Pp This manual page describes how to run the test suite and how to configure some of its optional features. .Ss Installing the test suite The test suite is installed by default as of .Fx 11.0-RELEASE. .Pp If the .Pa /usr/tests directory is missing, then you will have to enable the build of the test suite, rebuild your system and install the results. You can do so by setting .Sq WITH_TESTS=yes in your .Pa /etc/src.conf file (see .Xr src.conf 5 for details) and rebuilding the system as described in .Xr build 7 . .Ss When to run the tests? 
Before diving into the details of how to run the test suite, here are some scenarios in which you should run it: .Bl -bullet -offset indent .It After a fresh installation of .Fx to ensure that the system works correctly on your hardware platform. .It After an upgrade of .Fx to a different version to ensure that the new code works well on your hardware platform and that the upgrade did not introduce regressions in your configuration. .It After modifying the source tree to detect any new bugs and/or regressions. .It Periodically, maybe from a .Xr cron 8 job, to ensure that any changes to the system (such as the installation of third-party packages or manual modifications to configuration files) do not introduce unexpected failures. .El .Ss Running the tests First, you will need to install the .Sq devel/kyua package from .Xr ports 7 . Then use the following command to run the whole test suite: .Bd -literal -offset indent $ kyua test -k /usr/tests/Kyuafile .Ed .Pp The above will iterate through all test programs in .Pa /usr/tests recursively, execute them, store their results and debugging data in Kyua's database (by default in .Pa ~/.kyua/store.db ) , and print a summary of the results. This summary includes a brief count of all tests run and how many of them failed. .Pp It is possible to restrict which tests to run by providing their names in the command line. For example, this would execute the tests for the .Xr cp 1 and .Xr cut 1 utilities: .Bd -literal -offset indent $ kyua test -k /usr/tests/Kyuafile bin/cp usr.bin/cut .Ed .Ss Obtaining reports of the tests execution Additional information about the test results can be retrieved by using Kyua's various reporting commands. For example, the following would print a plain-text report of the executed tests and show which ones failed: .Bd -literal -offset indent $ kyua report .Ed .Pp This example would generate an HTML report ready to be published on a web server: .Bd -literal -offset indent $ kyua report-html --output ~/public_html/tests .Ed .Pp For further details on the command-line interface of Kyua, please refer to its manual page .Xr kyua 1 . .Ss Configuring the tests Some test cases in the .Fx Test Suite require manual configuration by the administrator before they can be run. Unless certain properties are defined, the tests that require them will be skipped. .Pp Test suites are configured by defining their configuration variables in .Pa /usr/local/etc/kyua/kyua.conf . The format of this file is detailed in .Xr kyua.conf 5 . .Pp The following configuration variables are available in the .Fx Test Suite: .Bl -tag -width "allow_sysctl_side_effects" .It allow_devfs_side_effects If defined, enables tests that may destroy and recreate semipermanent device nodes, like disk devices. Without this variable, tests may still create and destroy device nodes that are normally transient, like /dev/tap* and /dev/pts*, as long as they clean them up afterwards. However, tests that require this variable have a relaxed cleanup requirement; they must recreate any devices that they destroyed, but not necessarily with the same devnames. .It allow_sysctl_side_effects Enables tests that change globally significant .Xr sysctl 8 variables. The tests will undo any changes in their cleanup phases. .It disks Must be set to a space delimited list of disk device nodes. Tests that need destructive access to disks must use these devices. Tests are not required to preserve any data present on these disks.
.It fibs Must be set to a space delimited list of FIBs (routing tables). Tests that need to modify a routing table may use any of these. Tests will cleanup any new routes that they create. .El .Ss What to do if something fails? If there is .Em any failure during the execution of the test suite, please consider reporting it to the .Fx developers so that the failure can be analyzed and fixed. To do so, either send a message to the appropriate mailing list or file a problem report. For more details please refer to: .Bl -bullet -offset indent -compact .It -.Lk http://lists.freebsd.org/ "FreeBSD Mailing Lists" +.Lk https://lists.freebsd.org/ "FreeBSD Mailing Lists" .It .Lk https://www.freebsd.org/support.html "Problem Reporting" .El .Sh FILES .Bl -tag -compact -width usrXlocalXetcXkyuaXkyuaXconfXX .It Pa /usr/local/etc/kyua/kyua.conf System-wide configuration file for .Xr kyua 1 . .It Pa ~/.kyua/kyua.conf User-specific configuration file for .Xr kyua 1 ; overrides the system file. .It Pa ~/.kyua/store.db Default result database used by Kyua. .It Pa /usr/tests/ Location of the .Fx Test Suite. .It Pa /usr/tests/Kyuafile Top-level test suite definition file. .El .Sh SEE ALSO .Xr kyua 1 , .Xr build 7 .Sh HISTORY The .Fx Test Suite first appeared in .Fx 10.1 . .Pp The .Nm manual page first appeared in .Nx 6.0 and was later ported to .Fx 10.1 . .Sh AUTHORS .An Julio Merino Aq Mt jmmv@FreeBSD.org Index: head/share/misc/bsd-family-tree =================================================================== --- head/share/misc/bsd-family-tree (revision 336756) +++ head/share/misc/bsd-family-tree (revision 336757) @@ -1,819 +1,819 @@ The UNIX system family tree: Research and BSD --------------------------------------------- First Edition (V1) | Second Edition (V2) | Third Edition (V3) | Fourth Edition (V4) | Fifth Edition (V5) | Sixth Edition (V6) -----* \ | \ | \ | Seventh Edition (V7) | \ | \ 1BSD 32V | \ 2BSD---------------* \ / | \ / | \/ | 3BSD | | | 4.0BSD 2.79BSD | | 4.1BSD --------------> 2.8BSD | | 4.1aBSD -----------\ | | \ | 4.1bBSD \ | | \ | *------ 4.1cBSD --------------> 2.9BSD / | | Eighth Edition | 2.9BSD-Seismo | | | +----<--- 4.2BSD 2.9.1BSD | | | +----<--- 4.3BSD -------------> 2.10BSD | | / | Ninth Edition | / 2.10.1BSD | 4.3BSD Tahoe-----+ | | | \ | | | \ | v | 2.11BSD Tenth Edition | | | 2.11BSD rev #430 4.3BSD NET/1 | | v 4.3BSD Reno | *---------- 4.3BSD NET/2 -------------------+-------------* | | | | 386BSD 0.0 | | BSD/386 ALPHA | | | | 386BSD 0.1 ------------>+ | BSD/386 0.3.[13] | \ | 4.4BSD Alpha | | 386BSD 1.0 | | BSD/386 0.9.[34] | | 4.4BSD | | | / | | | | 4.4BSD-Encumbered | | | -NetBSD 0.8 | BSD/386 1.0 | / | | | FreeBSD 1.0 <-----' NetBSD 0.9 | BSD/386 1.1 | | .----- 4.4BSD Lite | FreeBSD 1.1 | / / | \ | | | / / | \ | FreeBSD 1.1.5 .---|--------' / | \ | | / | / | \ | FreeBSD 1.1.5.1 / | / | \ | | / NetBSD 1.0 <-' | \ | | / | | \ | FreeBSD 2.0 <--' | | BSD/OS 2.0 | \ | | FreeBSD 2.0.5 \ | BSD/OS 2.0.1 | .-----\------------- 4.4BSD Lite2 | | | \ | | | | | | | .-----|------Rhapsody | | | | | | | | NetBSD 1.3 | | | | | | | OpenBSD 2.3 | | | | | | BSD/OS 3.0 | FreeBSD 2.1 | | | | | | | | NetBSD 1.1 ------. 
BSD/OS 2.1 | FreeBSD 2.1.5 | | | \ | | | | | NetBSD 1.2 \ BSD/OS 3.0 | FreeBSD 2.1.6 | | | \ OpenBSD 2.0 | | | | | | \ | | | FreeBSD 2.1.6.1 | | | \ | | | | | | | \ | | | FreeBSD 2.1.7 | | | | | | | | | | | NetBSD 1.2.1 | | | FreeBSD 2.1.7.1 | | | | | | | | | | | | | | | | | *-FreeBSD 2.2 | | | | | | \ | | | | | | FreeBSD 2.2.1 | | | | | | | | | | | | | FreeBSD 2.2.2 | | | OpenBSD 2.1 | | | | | | | | | FreeBSD 2.2.5 | | | | | | | | | | OpenBSD 2.2 | | | | | NetBSD 1.3 | | | FreeBSD 2.2.6 | | | | | | | | | | | NetBSD 1.3.1 | BSD/OS 3.1 | | | | | | OpenBSD 2.3 | | | | | | NetBSD 1.3.2 | | | FreeBSD 2.2.7 | | | | | | | | | | | | | BSD/OS 4.0 | FreeBSD 2.2.8 | | | | | | | | | | | | | | | v | | | | OpenBSD 2.4 | | FreeBSD 2.2.9 | | | | | | | | | | | | | FreeBSD 3.0 <--------* | | v | | | | | NetBSD 1.3.3 | | *---FreeBSD 3.1 | | | | | | | | | BSD/OS 4.0.1 | FreeBSD 3.2----* | NetBSD 1.4 OpenBSD 2.5 | | | | | | | | | | | | | | | | | | | | | | | | | | | | | FreeBSD 3.3 | | | | NetBSD 1.4.1 | | | | | | | | | OpenBSD 2.6 | | FreeBSD 3.4 | | | | | | | | | | | | | | | BSD/OS 4.1 FreeBSD 4.0 | | | | | NetBSD 1.4.2 | | | | | | | | | | | | | | | | | | | | | FreeBSD 3.5 | | | | | OpenBSD 2.7 | | | | | | | | | | | FreeBSD 3.5.1 | | | | | | | | | | | | | | | *---FreeBSD 4.1 | | | | | | | | | | | (?) | | | | | FreeBSD 4.1.1 | | / | | | | | | | | / | | | | | FreeBSD 4.2 Darwin/ | NetBSD 1.4.3 | | | | Mac OS X | OpenBSD 2.8 BSD/OS 4.2 | | | | | | | | | | | | | | 10.0 NetBSD 1.5 | | | FreeBSD 4.3 | | | | | | | | | | OpenBSD 2.9 | | | | | NetBSD 1.5.1 | | | | | | | | | | FreeBSD 4.4-. | | NetBSD 1.5.2 | | | | | Mac OS X | | | | | | | 10.1 | | OpenBSD 3.0 | | FreeBSD 4.5 | | | | | | | | \ | | | | BSD/OS 4.3 | FreeBSD 4.6 \ | | | OpenBSD 3.1 | | | \ | | NetBSD 1.5.3 | | | FreeBSD 4.6.2 Mac OS X | | | | | 10.2 | | | | FreeBSD 4.7 | | | | | | | NetBSD 1.6 OpenBSD 3.2 | | FreeBSD 4.8 | | | | | | | | | NetBSD 1.6.1 | | | |--------. | | | OpenBSD 3.3 BSD/OS 5.0 | | \ | | | | | | FreeBSD 4.9 | | | | OpenBSD 3.4 BSD/OS 5.1 ISE | | | | | | | | | | | | NetBSD 1.6.2 | | | | | | | | | | | | | | OpenBSD 3.5 | | | | | v | | FreeBSD 4.10 | | | | | | | | | | | FreeBSD 4.11 | | | | | | | | | | `-|------|-----------------|---------------------. 
| | | | \ FreeBSD 5.0 | | | | | | | | | FreeBSD 5.1 | | | DragonFly 1.0 | \ | | | | | ----- Mac OS X | | | | 10.3 | | | FreeBSD 5.2 | | | | | | | | | | | FreeBSD 5.2.1 | | | | | | | | | *-------FreeBSD 5.3 | | | | | | | | OpenBSD 3.6 | | | | NetBSD 2.0 | | | | | | | | | DragonFly 1.2.0 | | Mac OS X | | NetBSD 2.0.2 | | | | 10.4 | | | | | | FreeBSD 5.4 | | | | | | | | | | | | OpenBSD 3.7 | | | | | | NetBSD 2.0.3 | | | | | | | | | | *--FreeBSD | | | | v OpenBSD 3.8 | | 6.0 | | | | | | | | | | | \ | | | | | | | NetBSD 2.1 | | | | | | | | | | | | | NetBSD 3.0 | | | | | | | | | | DragonFly 1.4.0 | | | | | | | OpenBSD 3.9 | | FreeBSD | | | | | | | | 6.1 | | | | | | | | | FreeBSD 5.5 | | | | | | | | | | | NetBSD 3.0.1 | DragonFly 1.6.0 | | | | | | | | | | | | | | OpenBSD 4.0 | | | | | | NetBSD 3.0.2 | | | | | | NetBSD 3.1 | | | FreeBSD 6.2 | | | | | | | | | DragonFly 1.8.0 | | | | OpenBSD 4.1 | | | | | | DragonFly 1.10.0 | | Mac OS X | | | | | 10.5 | | | | | | | OpenBSD 4.2 | | | | NetBSD 4.0 | | | FreeBSD 6.3 | | | | | | \ | | | | | *--FreeBSD | | | | | DragonFly 1.12.0 | 7.0 | | | | | | | | | | | | OpenBSD 4.3 | | | | | | NetBSD | DragonFly 2.0.0 | | FreeBSD | | 4.0.1 OpenBSD 4.4 | | | 6.4 | | | | | | | | | | | FreeBSD 7.1 | | | | | | | | | DragonFly 2.2.0 | FreeBSD 7.2 | NetBSD 5.0 OpenBSD 4.5 | | \ | | | \ | | | | Mac OS X | | \ | | | | 10.6 | | \ | | | | | | | NetBSD | DragonFly 2.4.0 | | | | | 5.0.1 OpenBSD 4.6 | | | | | | | | | *--FreeBSD | | | | | | | | 8.0 | | | | | | | | | FreeBSD | | | NetBSD | | | | 7.3 | | | 5.0.2 | DragonFly 2.6.0 | | | | | | OpenBSD 4.7 | | FreeBSD | | | | | | | 8.1 | | | | | | | | | | | | | DragonFly 2.8.2 | | | | | | OpenBSD 4.8 | | | | | | *--NetBSD | | | FreeBSD FreeBSD | | | 5.1 | | | 8.2 7.4 | | | | | DragonFly 2.10.1 | | | | | | OpenBSD 4.9 | | `-----. Mac OS X | | | | | | \ 10.7 | | | | | | | | | | | OpenBSD 5.0 | *--FreeBSD | | | | | | | | 9.0 | | | | NetBSD | DragonFly 3.0.1 | | FreeBSD | | | 5.1.2 | | | | 8.3 | | | | | | | | | | | | NetBSD | | | | | | | | 5.1.3 | | | | | | | | | | | | | | | | | NetBSD | | | | | | | | 5.1.4 | | | | | | | | OpenBSD 5.1 | | | | Mac OS X | `----. | | | | | 10.8 | \ | | | | | | NetBSD 6.0 | | | | | | | | | | | OpenBSD 5.2 DragonFly 3.2.1 | FreeBSD | | | | | NetBSD | | | 9.1 | | | | | 5.2 | | | | | | | | | | | | | | | | | | | NetBSD | | | | | | | | | 5.2.1 | | | | | | | | | | | | | | | | | | | NetBSD | | | | | | | | | 5.2.2 | | | | | | | | | | | | | | | | | \ | | | | | | | | NetBSD | | | | | | | | 6.0.1 | | | | | | | | | OpenBSD 5.3 DragonFly 3.4.1 | | | | | | NetBSD | | | | | | | | 6.0.2 | | | | | | | | | | | | | | | | | NetBSD | | | | | | | | 6.0.3 | | | | | | | | | | | | | | | | | NetBSD | | | | | | | | 6.0.4 | | | | | | | | | | | | | | | | | NetBSD | | | | | | | | 6.0.5 | | | | | | | | | | | | | | | | | NetBSD | | | | | | | | 6.0.6 | | | | | | | | | | | | | | | |`-NetBSD 6.1 | | | | FreeBSD | | | | | | | 8.4 | | NetBSD 6.1.1 | | | | | | | | | | FreeBSD | | NetBSD 6.1.2 | | | 9.2 Mac OS X | | | | | | 10.9 | | OpenBSD 5.4 | | `-----. 
| | | | DragonFly 3.6.0 | \ | | | | | *--FreeBSD | | | NetBSD 6.1.3 | | | 10.0 | | | | | | | | | | | | | DragonFly 3.6.1 | | | | | | | | | | | | | | | | | | | | | | | DragonFly 3.6.2 | | | | | NetBSD 6.1.4 | | | | | | | | | | | | | | | | OpenBSD 5.5 | | | | | | | | | | | | | | | | DragonFly 3.8.0 | | | | | | | | | | | | | | | | | | | | | | | DragonFly 3.8.1 | | | | | | | | | | | | | | | | | | | | | | | DragonFly 3.6.3 | | | | | | | | | | FreeBSD | | | | | | | 9.3 | | | | | | | | | NetBSD 6.1.5 | DragonFly 3.8.2 | | Mac OS X | | | | | 10.10 | | | | | | | OpenBSD 5.6 | | FreeBSD | | | | | 10.1 | | | DragonFly 4.0.1 | | | | | | | | | | | DragonFly 4.0.2 | | | | | | | | | | | DragonFly 4.0.3 | | | | | | | | | | | DragonFly 4.0.4 | | | | | | | | | | | DragonFly 4.0.5 | | | | | | | | | | OpenBSD 5.7 | | | | | | DragonFly 4.2.0 | FreeBSD | | | | | 10.2 | | | | | | macOS NetBSD 7.0 | | | | 10.11 | | | OpenBSD 5.8 | | | | | | `--. | DragonFly 4.4.1 | FreeBSD | | | | OpenBSD 5.9 | | 10.3 | | | | | | | | | | | NetBSD 7.0.1 | | | `------. | | | | | DragonFly 4.6.0 | | | | | | | | | | | | | | | | *--FreeBSD | macOS | | | OpenBSD 6.0 | | 11.0 | 10.12 | | NetBSD 7.0.2 | | | | | | | | | | | | | | | *- NetBSD 7.1 | | | | | | | | | | | | | | | | | | | | | macOS | | | DragonFly 4.8.0 | | | 10.13 | | OpenBSD 6.1 | | FreeBSD | | | | | DragonFly 5.0.0 | 11.1 FreeBSD | | | | | | | 10.4 | | | OpenBSD 6.2 DragonFly 5.0.1 | | | | | | | | | | | NetBSD 7.1.1 | DragonFly 5.0.2 | | | | | | | | | | | NetBSD 7.1.2 | | | | | | | | | | | | | | OpenBSD 6.3 | | | | | | | DragonFly 5.2.0 | | | | | | | | | | | | | DragonFly 5.2.1 | | | | | | | | | | | | | DragonFly 5.2.2 | FreeBSD | | NetBSD 8.0 | | | 11.2 | | | | | | v | | | | | | | | v | | FreeBSD 12 -current | NetBSD -current OpenBSD -current DragonFly -current | | | | | v v v v v Time ---------------- Time tolerance +/- 6 months, depending on which book/article you read; if it was the announcement in Usenet or if it was available as tape. [44B] McKusick, Marshall Kirk, Keith Bostic, Michael J Karels, and John Quarterman. The Design and Implementation of the 4.4BSD Operating System. [APL] Apple website [https://www.apple.com/macosx/] [BSDI] Berkeley Software Design, Inc. [DFB] DragonFlyBSD Project, The. [DOC] README, COPYRIGHT on tape. [FBD] FreeBSD Project, The. [KB] Keith Bostic. BSD2.10 available from Usenix. comp.unix.sources, Volume 11, Info 4, April, 1987. [KKK] Mike Karels, Kirk McKusick, and Keith Bostic. tahoe announcement. comp.bugs.4bsd.ucb-fixes, June 15, 1988. [KSJ] Michael J. Karels, Carl F. Smith, and William F. Jolitz. Changes in the Kernel in 2.9BSD. Second Berkeley Software Distribution UNIX Version 2.9, July, 1983. [NBD] NetBSD Project, The. [OBD] OpenBSD Project, The. [QCU] Salus, Peter H. A quarter century of UNIX. [SMS] Steven M. Schultz. 2.11BSD, UNIX for the PDP-11. [TUHS] The Unix Historical Society. http://minnie.tuhs.org/Unix_History/. [USE] Usenet announcement. [WRS] Wind River Systems, Inc. 
[dmr] Dennis Ritchie, via E-Mail Multics 1965 UNIX Summer 1969 DEC PDP-7 First Edition 1971-11-03 [QCU] DEC PDP-11/20, Assembler Second Edition 1972-06-12 [QCU] 10 UNIX installations Third Edition 1973-02-xx [QCU] Pipes, 16 installations Fourth Edition 1973-11-xx [QCU] rewriting in C effected, above 30 installations Fifth Edition 1974-06-xx [QCU] above 50 installations Sixth Edition 1975-05-xx [QCU] port to DEC Vax Seventh Edition 1979-01-xx [QCU] 1979-01-10 [TUHS] first portable UNIX Eighth Edition 1985-02-xx [QCU] VAX 11/750, VAX 11/780 [dmr] descended from 4.1c BSD [dmr] descended from 4.1 BSD [44B] scooping-out and replacement of the character-device and networking part by the streams mechanism Ninth Edition 1986-09-xx [QCU] Tenth Edition 1989-10-xx [QCU] 1BSD late 1977 1978-03-09 [QCU] PDP-11, Pascal, ex(1) 30 free copies of 1BSD sent out 35 tapes sold for 50 USD [QCU] 2BSD mid 1978 [QCU] 1979-05-10 [TUHS] 75 2BSD tapes shipped 2.79BSD 1980-04-xx [TUHS] 2.8BSD 1981-07-xx [KSJ] 2.8.1BSD 1982-01-xx [QCU] set of performance improvements 2.9BSD 1983-07-xx [KSJ] 2.9.1BSD 1983-11-xx [TUHS] 2.9BSD-Seismo 1985-08-xx [SMS] 2.10BSD 1987-04-xx [KKK] 2.10.1BSD 1989-01-xx [SMS] 2.11BSD 1992-02-xx [SMS] 2.11BSD rev #430 1999-12-13 [SMS] 32V 1978-1[01]-xx [QCU] 1979-03-26 [TUHS] 3BSD late 1979 [QCU] March 1980 [TUHS] virtual memory, page replacement, demand paging 4.0BSD 1980-10-xx 4.1BSD 1981-07-08 [DOC] 4.1aBSD 1982-04-xx alpha release, 100 sites, networking [44B] 4.1bBSD internal release, fast filesystem [44B] 4.1cBSD late 1982 beta release, IPC [44B] 4.2BSD 1983-09-xx [QCU] 1983-08-03 [DOC] 4.3BSD 1986-06-xx [QCU] 1986-04-05 [KB], [DOC] 4.3BSD Tahoe 1988-06-15 [QCU], [DOC] 4.3BSD NET/1 1988-11-xx [QCU] 1989-01-01 [DOC] 4.3BSD Reno 1990-06-29 [QCU], [DOC] 4.3BSD NET/2 1991-06-28 [QCU], [DOC] BSD/386 ALPHA 1991-12-xx [BSDI] first code released to people outside BSDI 386BSD 0.0 1992-02-xx [DOC] BSD/386 0.3.1 1992-04-xx [BSDI] first ext. beta; B customers BSD/386 0.3.3 1992-06-xx [BSDI] first CDROM version 386BSD 0.1 1992-07-28 [DOC] 4.4BSD Alpha 1992-07-07 BSD/386 0.9.3 1992-10-xx [BSDI] first external gamma; G customers BSD/386 0.9.4 1992-12-xx [BSDI] would have been 1.0 except for request for preliminary injunction BSD/386 1.0 1993-03-xx [BSDI] injunction denied; first official release NetBSD 0.8 1993-04-20 [NBD] 4.4BSD 1993-06-01 [USE] NetBSD 0.9 1993-08-23 [NBD] FreeBSD 1.0 1993-11-01 [FBD] FreeBSD 1.0.2 1993-11-14 [FBD] supersedes 1.0 13 days after release. BSD/386 1.1 1994-02-xx [BSDI] 4.4BSD Lite 1994-03-01 [USE] FreeBSD 1.1 1994-05-07 [FBD] FreeBSD 1.1.5 1994-06-30 [FBD] FreeBSD 1.1.5.1 1994-07-05 [FBD] supersedes 1.1.5 5 days after release. 
NetBSD 1.0 1994-10-26 [NBD] 386BSD 1.0 1994-11-12 [USE] FreeBSD 2.0 1994-11-23 [FBD] BSD/OS 2.0 1995-01-xx [BSDI] 4.4 lite based FreeBSD 2.0.5 1995-06-10 [FBD] BSD/OS 2.0.1 1995-06-xx [BSDI] 4.4BSD Lite Release 2 1995-06-xx [44B] the true final distribution from the CSRG FreeBSD 2.1.0 1995-11-19 [FBD] NetBSD 1.1 1995-11-26 [NBD] BSD/OS 2.1 1996-01-xx [BSDI] FreeBSD 2.1.5 1996-07-14 [FBD] NetBSD 1.2 1996-10-04 [NBD] OpenBSD 2.0 1996-10-18 [OBD] FreeBSD 2.1.6 1996-11-16 [FBD] FreeBSD 2.1.6.1 1996-11-25 [FBD] (sendmail security release) Rhapsody 1997-xx-xx FreeBSD 2.1.7 1997-02-20 [FBD] BSD/OS 3.0 1997-02-xx [BSDI] 4.4 lite2 based FreeBSD 2.2.0 1997-03-16 [FBD] FreeBSD 2.2.1 1997-03-25 [FBD] FreeBSD 2.2.2 1997-05-16 [FBD] NetBSD 1.2.1 1997-05-20 [NBD] (patch release) OpenBSD 2.1 1997-06-01 [OBD] FreeBSD 2.2.5 1997-10-22 [FBD] OpenBSD 2.2 1997-12-01 [OBD] NetBSD 1.3 1998-01-04 [NBD] FreeBSD 2.2.6 1998-03-25 [FBD] NetBSD 1.3.1 1998-03-09 [NBD] (patch release) BSD/OS 3.1 1998-03-xx [BSDI] OpenBSD 2.3 1998-05-19 [OBD] NetBSD 1.3.2 1998-05-29 [NBD] (patch release) FreeBSD 2.2.7 1998-07-22 [FBD] BSD/OS 4.0 1998-08-xx [BSDI] 2-lock MP support, ELF executables FreeBSD 3.0 1998-10-16 [FBD] FreeBSD-3.0 is a snapshot from -current, while 3.1 and 3.2 are from 3.x-stable which was branched quite some time after 3.0-release FreeBSD 2.2.8 1998-11-29 [FBD] OpenBSD 2.4 1998-12-01 [OBD] NetBSD 1.3.3 1998-12-23 [NBD] (patch release) FreeBSD 3.1 1999-02-15 [FBD] BSD/OS 4.0.1 1999-03-xx [BSDI] NetBSD 1.4 1999-05-12 [NBD] FreeBSD 3.2 1999-05-17 [FBD] OpenBSD 2.5 1999-05-19 [OBD] NetBSD 1.4.1 1999-08-26 [NBD] (patch release) FreeBSD 3.3 1999-09-17 [FBD] OpenBSD 2.6 1999-12-01 [OBD] FreeBSD 3.4 1999-12-20 [FBD] BSD/OS 4.1 1999-12-xx [BSDI] FreeBSD 4.0 2000-03-13 [FBD] NetBSD 1.4.2 2000-03-19 [NBD] (patch release) OpenBSD 2.7 2000-06-15 [OBD] FreeBSD 3.5 2000-06-24 [FBD] FreeBSD 4.1 2000-07-27 [FBD] FreeBSD 3.5.1 2000-07-28 [FBD] FreeBSD 4.1.1 2000-09-25 [FBD] (a network-only patch release) FreeBSD 4.2 2000-11-21 [FBD] NetBSD 1.4.3 2000-11-25 [NBD] (patch release) BSD/OS 4.2 2000-11-29 [BSDI] OpenBSD 2.8 2000-12-01 [OBD] NetBSD 1.5 2000-12-06 [NBD] Mac OS X 10.0 2001-03-24 [APL] FreeBSD 4.3 2001-04-20 [FBD] OpenBSD 2.9 2001-06-01 [OBD] NetBSD 1.5.1 2001-07-11 [NBD] (patch release) NetBSD 1.5.2 2001-09-13 [NBD] (patch release) FreeBSD 4.4 2001-09-18 [FBD] Mac OS X 10.1 2001-09-29 [APL] OpenBSD 3.0 2001-12-01 [OBD] FreeBSD 4.5 2002-01-29 [FBD] BSD/OS 4.3 2002-03-14 [WRS] OpenBSD 3.1 2002-05-19 [OBD] FreeBSD 4.6 2002-06-15 [FBD] NetBSD 1.5.3 2002-07-22 [NBD] (patch release) FreeBSD 4.6.2 2002-08-15 [FBD] (patch release) Mac OS X 10.2 2002-08-23 [APL] NetBSD 1.6 2002-09-14 [NBD] FreeBSD 4.7 2002-10-08 [FBD] OpenBSD 3.2 2002-11-01 [OBD] FreeBSD 5.0 2003-01-17 [FBD] FreeBSD 5.0 is a separate branch off of -current, similar to 3.0. FreeBSD 4.8 2003-04-03 [FBD] NetBSD 1.6.1 2003-04-21 [NBD] (patch release) OpenBSD 3.3 2003-05-01 [OBD] BSD/OS 5.0 2003-05-?? [WRS] FreeBSD 5.1 2003-06-09 [FBD] Mac OS X 10.3 2003-10-24 [APL] FreeBSD 4.9 2003-10-28 [FBD] BSD/OS 5.1 ISE 2003-10-?? 
[WRS] (final version) OpenBSD 3.4 2003-11-01 [OBD] FreeBSD 5.2 2004-01-12 [FBD] FreeBSD 5.2.1 2004-02-22 [FBD] (patch release) NetBSD 1.6.2 2004-03-01 [NBD] (patch release) OpenBSD 3.5 2004-04-01 [OBD] FreeBSD 4.10 2004-05-27 [FBD] DragonFly 1.0 2004-07-12 [DFB] OpenBSD 3.6 2004-10-29 [OBD] FreeBSD 5.3 2004-11-06 [FBD] NetBSD 2.0 2004-12-09 [NBD] FreeBSD 4.11 2005-01-25 [FBD] DragonFly 1.2.0 2005-04-08 [DFB] NetBSD 2.0.2 2005-04-14 [NBD] (security/critical release) Mac OS X 10.4 2005-04-29 [APL] FreeBSD 5.4 2005-05-09 [FBD] OpenBSD 3.7 2005-05-19 [OBD] NetBSD 2.0.3 2005-10-31 [NBD] (security/critical release) OpenBSD 3.8 2005-11-01 [OBD] FreeBSD 6.0 2005-11-01 [FBD] NetBSD 2.1 2005-11-02 [NBD] NetBSD 3.0 2005-12-23 [NBD] DragonFly 1.4.0 2006-01-08 [DFB] FreeBSD 2.2.9 2006-04-01 [FBD] OpenBSD 3.9 2006-05-01 [OBD] FreeBSD 6.1 2006-05-08 [FBD] FreeBSD 5.5 2006-05-25 [FBD] NetBSD 3.0.1 2006-07-24 [NBD] (security/critical release) DragonFly 1.6.0 2006-07-24 [DFB] OpenBSD 4.0 2006-11-01 [OBD] NetBSD 3.0.2 2006-11-04 [NBD] (security/critical release) NetBSD 3.1 2006-11-04 [NBD] FreeBSD 6.2 2007-01-15 [FBD] DragonFly 1.8.0 2007-01-30 [DFB] OpenBSD 4.1 2007-05-01 [OBD] DragonFly 1.10.0 2007-08-06 [DFB] Mac OS X 10.5 2007-10-26 [APL] OpenBSD 4.2 2007-11-01 [OBD] NetBSD 4.0 2007-12-19 [NBD] FreeBSD 6.3 2008-01-18 [FBD] DragonFly 1.12.0 2008-02-26 [DFB] FreeBSD 7.0 2008-02-27 [FBD] OpenBSD 4.3 2008-05-01 [OBD] DragonFly 2.0.0 2008-07-21 [DFB] OpenBSD 4.4 2008-11-01 [OBD] FreeBSD 6.4 2008-11-28 [FBD] FreeBSD 7.1 2009-01-04 [FBD] DragonFly 2.2.0 2009-02-17 [DFB] NetBSD 5.0 2009-04-29 [NBD] OpenBSD 4.5 2009-05-01 [OBD] FreeBSD 7.2 2009-05-04 [FBD] Mac OS X 10.6 2009-06-08 [APL] NetBSD 5.0.1 2009-08-02 [NBD] (security/critical release) DragonFly 2.4.0 2009-09-16 [DFB] OpenBSD 4.6 2009-10-18 [OBD] FreeBSD 8.0 2009-11-26 [FBD] NetBSD 5.0.2 2010-02-12 [NBD] (security/critical release) FreeBSD 7.3 2010-03-23 [FBD] DragonFly 2.6.0 2010-03-28 [DFB] OpenBSD 4.7 2010-05-19 [OBD] FreeBSD 8.1 2010-07-24 [FBD] DragonFly 2.8.2 2010-10-30 [DFB] OpenBSD 4.8 2010-11-01 [OBD] NetBSD 5.1 2010-11-19 [NBD] FreeBSD 7.4 2011-02-24 [FBD] FreeBSD 8.2 2011-02-24 [FBD] DragonFly 2.10.1 2011-04-26 [DFB] OpenBSD 4.9 2011-05-01 [OBD] Mac OS X 10.7 2011-07-20 [APL] OpenBSD 5.0 2011-11-01 [OBD] FreeBSD 9.0 2012-01-12 [FBD] NetBSD 5.1.2 2012-02-02 [NBD] (security/critical release) DragonFly 3.0.1 2012-02-21 [DFB] FreeBSD 8.3 2012-04-18 [FBD] OpenBSD 5.1 2012-05-01 [OBD] Mac OS X 10.8 2012-07-25 [APL] NetBSD 6.0 2012-10-17 [NBD] OpenBSD 5.2 2012-11-01 [OBD] DragonFly 3.2.1 2012-11-02 [DFB] NetBSD 5.2 2012-12-03 [NBD] NetBSD 6.0.1 2012-12-26 [NBD] (security/critical release) FreeBSD 9.1 2012-12-30 [FBD] DragonFly 3.4.1 2013-04-29 [DFB] OpenBSD 5.3 2013-05-01 [OBD] NetBSD 6.0.2 2013-05-18 [NBD] (security/critical release) NetBSD 6.1 2013-05-18 [NBD] FreeBSD 8.4 2013-06-07 [FBD] NetBSD 6.1.1 2013-08-22 [NBD] NetBSD 5.1.3 2013-09-29 [NBD] NetBSD 5.2.1 2013-09-29 [NBD] FreeBSD 9.2 2013-09-30 [FBD] NetBSD 6.0.3 2013-09-30 [NBD] NetBSD 6.1.2 2013-09-30 [NBD] Mac OS X 10.9 2013-10-22 [APL] OpenBSD 5.4 2013-11-01 [OBD] DragonFly 3.6.0 2013-11-25 [DFB] FreeBSD 10.0 2014-01-20 [FBD] NetBSD 5.1.4 2014-01-25 [NBD] NetBSD 5.2.2 2014-01-25 [NBD] NetBSD 6.0.4 2014-01-25 [NBD] NetBSD 6.1.3 2014-01-25 [NBD] DragonFly 3.6.1 2014-02-22 [DFB] DragonFly 3.6.2 2014-04-10 [DFB] NetBSD 6.0.5 2014-04-12 [NBD] NetBSD 6.1.4 2014-04-12 [NBD] OpenBSD 5.5 2014-05-01 [OBD] DragonFly 3.8.0 2014-06-04 [DFB] DragonFly 3.8.1 2014-06-16 [DFB] DragonFly 3.6.3 2014-06-17 
[DFB] FreeBSD 9.3 2014-07-05 [FBD] DragonFly 3.8.2 2014-08-08 [DFB] NetBSD 6.0.6 2014-09-22 [NBD] NetBSD 6.1.5 2014-09-22 [NBD] Mac OS X 10.10 2014-10-16 [APL] OpenBSD 5.6 2014-11-01 [OBD] FreeBSD 10.1 2014-11-14 [FBD] DragonFly 4.0.1 2014-11-25 [DFB] DragonFly 4.0.2 2015-01-07 [DFB] DragonFly 4.0.3 2015-01-21 [DFB] DragonFly 4.0.4 2015-03-09 [DFB] DragonFly 4.0.5 2015-03-23 [DFB] OpenBSD 5.7 2015-05-01 [OBD] DragonFly 4.2.0 2015-06-29 [DFB] FreeBSD 10.2 2015-08-13 [FBD] NetBSD 7.0 2015-09-25 [NBD] OS X 10.11 2015-09-30 [APL] OpenBSD 5.8 2015-10-18 [OBD] DragonFly 4.4.1 2015-12-07 [DFB] OpenBSD 5.9 2016-03-29 [OBD] FreeBSD 10.3 2016-04-04 [FBD] NetBSD 7.0.1 2016-05-22 [NBD] DragonFly 4.6.0 2016-08-02 [DFB] OpenBSD 6.0 2016-09-01 [OBD] macOS 10.12 2016-09-20 [APL] FreeBSD 11.0 2016-10-10 [FBD] NetBSD 7.0.2 2016-10-21 [NBD] NetBSD 7.1 2017-03-11 [NBD] DragonFly 4.8.0 2017-03-27 [DFB] OpenBSD 6.1 2017-04-11 [OBD] FreeBSD 11.1 2017-07-26 [FBD] macOS 10.13 2017-09-25 [APL] FreeBSD 10.4 2017-10-03 [FBD] OpenBSD 6.2 2017-10-09 [OBD] DragonFly 5.0.0 2017-10-16 [DFB] DragonFly 5.0.1 2017-11-06 [DFB] DragonFly 5.0.2 2017-12-04 [DFB] NetBSD 7.1.1 2017-12-22 [NBD] NetBSD 7.1.2 2018-03-15 [NBD] OpenBSD 6.3 2018-04-02 [OBD] DragonFly 5.2.0 2018-04-10 [DFB] DragonFly 5.2.1 2018-05-20 [DFB] DragonFly 5.2.2 2018-06-18 [DFB] FreeBSD 11.2 2018-06-27 [FBD] NetBSD 8.0 2018-07-17 [NBD] Bibliography ------------------------ Leffler, Samuel J., Marshall Kirk McKusick, Michael J Karels and John Quarterman. The Design and Implementation of the 4.3BSD UNIX Operating System. Reading, Mass. Addison-Wesley, 1989. ISBN 0-201-06196-1 Salus, Peter H. A quarter century of UNIX. Addison-Wesley Publishing Company, Inc., 1994. ISBN 0-201-54777-5 McKusick, Marshall Kirk, Keith Bostic, Michael J Karels, and John Quarterman. The Design and Implementation of the 4.4BSD Operating System. Reading, Mass. Addison-Wesley, 1996. ISBN 0-201-54979-4 McKusick, Marshall Kirk, George Neville-Neil. The Design and Implementation of the FreeBSD Operating System. Addison-Wesley Professional, Published: Aug 2, 2004. ISBN 0-201-70245-2 McKusick, Marshall Kirk, George Neville-Neil, Robert Watson. The Design and Implementation of the FreeBSD Operating System, 2nd Edition. Pearson Education, Inc., 2014. ISBN 0-321-96897-2 Doug McIlroy. Research Unix Reader. Michael G. Brown. The Role of BSD in the Development of Unix. Presented to the Tasmanian Unix Special Interest Group of the Australian Computer Society, Hobart, August 1993. Peter H. Salus. Unix at 25. Byte Magazine, October 1994. URL: http://www.byte.com/art/9410/sec8/art3.htm Andreas Klemm, Lars Köller. If you're going to San Francisco ... Die freien BSD-Varianten von Unix. c't April 1997, page 368ff. BSD Release Announcements collection. URL: https://www.FreeBSD.org/releases/ BSD Hypertext Man Pages URL: https://www.FreeBSD.org/cgi/man.cgi UNIX history graphing project URL: http://minnie.tuhs.org/Unix_History/index.html UNIX history URL: http://www.levenez.com/unix/ James Howard: The BSD Family Tree URL: http://ezine.daemonnews.org/200104/bsd_family.html ("what are the differences between FreeBSD, NetBSD, and OpenBSD?") Acknowledgments --------------- Josh Gilliam for suggestions, bug fixes, and finding very old original BSD announcements from Usenet or tapes. Steven M. Schultz for providing 2.8BSD, 2.10BSD, 2.11BSD manual pages. 
-- Copyright (c) 1997-2012 Wolfram Schneider -URL: http://svnweb.freebsd.org/base/head/share/misc/bsd-family-tree +URL: https://svnweb.freebsd.org/base/head/share/misc/bsd-family-tree $FreeBSD$ Index: head/sys/dev/firewire/00README =================================================================== --- head/sys/dev/firewire/00README (revision 336756) +++ head/sys/dev/firewire/00README (revision 336757) @@ -1,148 +1,148 @@ $FreeBSD$ IEEE 1394 support for FreeBSD-5.X and 4.X. 1. Introduction This tarball contains IEEE1394(FireWire) driver which is first written by Katsushi Kobayashi[1] and modified by Hidetoshi Shimokawa . Please note this driver is still under development. You can find latest snapshots under: - http://people.freebsd.org/~simokawa/ + https://people.freebsd.org/~simokawa/ named firewire-2002XXXX.tar.gz The driver consists of 6 parts: - fwohci.c/fwohci_pci.c OHCI[2] driver - IEEE1394 link/phy chip control - firewire.c Chip independent driver - CSR - Transaction - Character devices for userland - fwmem.c /dev/fwmem0: physical memory of a remote node. - sbp.c SBP-II[3] (a.k.a. SCSI over FireWire) driver - if_fwe.c NON-Standard implementation of Ethernet over FireWire. - bus_mgm.c (userland) Bus management function for user. show topology map, change gap count, bus reset, etc. 2. Installation Suppose you have kernel source at /sys. - Extract tarball at root directory. - cd /sys/dev/firewire - make - make install - make load 3. SBP-II support (sbp) - You need CAM(SCSI) support in your kernel. If you are using FreeBSD-5 before 2002/03/23 or FreeBSD-4 before 2002/4/8, you need to apply CAM-patch in this archive to handle HDD's(T_RBC or T_DIRECT which doesn't support READ_6). - If you connect a few firewire devices only, try the following to reduce gap overhead. - ./bus_mgm -g 8 4. Ethernet over FireWire (if_fwe) This is a sample driver for ethernet emulation. Please note this does NOT conform to any standards like IP over FireWire(RFC2734[4]). It just sends ethernet frames encapsulated in asynchronous stream packets. It doesn't scale because it does something like unicast over multicast, but it's easy to be implemented and you can use any facilities what ethernet can do. (ipv6, bridging, vlan etc.) It also has DEVICE_POLLING[5] support. To enable it, edit your kernel config file and Makefile.fwe then rebuild kernel and if_fwe.ko. 5. FireWire for Kernel Hackers As you know, IEEE1394 is a bus and OHCI supports physical access to the host memory. This means that you can access the remote host over firewire without software support at the remote host. In other words, you can investigate remote host's physical memory whether its OS is alive or crashed or hangs up. You need to apply KVMLIB-patch and rebuild libkvm then rebuild ps, dmesg and gdb those are statically linked. You may want to apply GDB-patch in this archive to get same behavior as gdb with /dev/mem or want to insert savectx(&dumppcb) into panic(), breakpoint() and so on to emulation crash dump. You have to determine target node_id manually at this point. (guess using bus_mgm -t or dmesg) (Targets should be specified by EUI64 in the future) # sysctl kern.firewire.fwmem_node=[node_id] # ps -agx -M /dev/fwmem0 -N /sys/i386/compile/GENERIC/kernel # dmesg -M /dev/fwmem0 -N /sys/i386/compile/GENERIC/kernel # gdb -k -c /dev/fwmem0 /sys/i386/compile/GENERIC/kernel.debug # dd if=/dev/fwmem0 of=vmcore bs=1m count=[phys. memory in MB] remote gdb at 400,000,000 bps :-) 6. DV I have not tested yet. 7. 
Tested HW OS - FreeBSD-4/i386 - FreeBSD-4/alpha - FreeBSD-5/i386 * Not tested on SMP. * Not tested on big-endian machine... OHCI - Texas Instruments TSB12LV26 (PCI) - Texas Instruments TSB43AA22 (PCI/Cardbus) * There might be phy probing problem but most of the OHCI chips should work. * Tested with multiple firewire buses. SBP-II - HDD: Logitec USB/FireWire LHD-P30FU - HDD: Yano A-dish 120GB - HDD: Yano B-Max 320GB The repository of cvsup2.jp.freebsd.org is on this device. - HDD: Personal Storage 3000XT 160GB The last sector of this drive cannot be accessed.. - DVD-RAM: Panasonic LF-D340JD - SCSI-FireWire converter: Yano FWSCSI-01 We can recognize only 1 device/lun at this point - HDD: iPod, PowerBook G4 (target mode) Reported by ikob - Scanner: Epson GT-9700F Now works!! Sane-backend needs a patch(SANE-patch in this archive). if_fwe - IPv4, IPv6, bridging, vlan. - You need at least two FreeBSD machines with this driver to use. References: [1] ftp://ftp.uec.ac.jp/pub/firewire/beta/ [2] http://developer.intel.com/technology/1394/download/ohci_11.htm [3] http://www.t10.org/scsi-3.htm [4] http://www.faqs.org/rfcs/rfc2734.html [5] http://info.iet.unipi.it/~luigi/polling/ Hidetoshi Shimokawa simokawa@freebsd.org Index: head/sys/dev/hwpmc/pmc_events.h =================================================================== --- head/sys/dev/hwpmc/pmc_events.h (revision 336756) +++ head/sys/dev/hwpmc/pmc_events.h (revision 336757) @@ -1,1817 +1,1817 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005 Joseph Koshy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _DEV_HWPMC_PMC_EVENTS_H_ #define _DEV_HWPMC_PMC_EVENTS_H_ /* * Note: Documentation on adding events can be found both in * the source tree at src/share/doc/papers/hwpmc/hwpmc.ms * as well as on-line at: * - * http://wiki.freebsd.org/PmcTools/PmcHardwareHowTo + * https://wiki.freebsd.org/PmcTools/PmcHardwareHowTo * * Please refer to those resources before you attempt to modify * this file or the hwpmc driver/subsystem. */ /* * PMC event codes. * * __PMC_EV(CLASS, SYMBOLIC-NAME) * */ /* timestamp counters. */ #define __PMC_EV_TSC() \ __PMC_EV(TSC, TSC) #define PMC_EV_TSC_FIRST PMC_EV_TSC_TSC #define PMC_EV_TSC_LAST PMC_EV_TSC_TSC /* * Software events are dynamically defined. 
*/ #define PMC_EV_DYN_COUNT 0x1000 #define PMC_EV_SOFT_FIRST 0x20000 #define PMC_EV_SOFT_LAST (PMC_EV_SOFT_FIRST + PMC_EV_DYN_COUNT - 1) /* * AMD K7 Events, from "The AMD Athlon(tm) Processor x86 Code * Optimization Guide" [Doc#22007K, Feb 2002] */ #define __PMC_EV_K7() \ __PMC_EV(K7, DC_ACCESSES) \ __PMC_EV(K7, DC_MISSES) \ __PMC_EV(K7, DC_REFILLS_FROM_L2) \ __PMC_EV(K7, DC_REFILLS_FROM_SYSTEM) \ __PMC_EV(K7, DC_WRITEBACKS) \ __PMC_EV(K7, L1_DTLB_MISS_AND_L2_DTLB_HITS) \ __PMC_EV(K7, L1_AND_L2_DTLB_MISSES) \ __PMC_EV(K7, MISALIGNED_REFERENCES) \ __PMC_EV(K7, IC_FETCHES) \ __PMC_EV(K7, IC_MISSES) \ __PMC_EV(K7, L1_ITLB_MISSES) \ __PMC_EV(K7, L1_L2_ITLB_MISSES) \ __PMC_EV(K7, RETIRED_INSTRUCTIONS) \ __PMC_EV(K7, RETIRED_OPS) \ __PMC_EV(K7, RETIRED_BRANCHES) \ __PMC_EV(K7, RETIRED_BRANCHES_MISPREDICTED) \ __PMC_EV(K7, RETIRED_TAKEN_BRANCHES) \ __PMC_EV(K7, RETIRED_TAKEN_BRANCHES_MISPREDICTED) \ __PMC_EV(K7, RETIRED_FAR_CONTROL_TRANSFERS) \ __PMC_EV(K7, RETIRED_RESYNC_BRANCHES) \ __PMC_EV(K7, INTERRUPTS_MASKED_CYCLES) \ __PMC_EV(K7, INTERRUPTS_MASKED_WHILE_PENDING_CYCLES) \ __PMC_EV(K7, HARDWARE_INTERRUPTS) #define PMC_EV_K7_FIRST PMC_EV_K7_DC_ACCESSES #define PMC_EV_K7_LAST PMC_EV_K7_HARDWARE_INTERRUPTS /* AMD K8 PMCs */ #define __PMC_EV_K8() \ __PMC_EV(K8, FP_DISPATCHED_FPU_OPS) \ __PMC_EV(K8, FP_CYCLES_WITH_NO_FPU_OPS_RETIRED) \ __PMC_EV(K8, FP_DISPATCHED_FPU_FAST_FLAG_OPS) \ __PMC_EV(K8, LS_SEGMENT_REGISTER_LOAD) \ __PMC_EV(K8, LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE) \ __PMC_EV(K8, LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP) \ __PMC_EV(K8, LS_BUFFER2_FULL) \ __PMC_EV(K8, LS_LOCKED_OPERATION) \ __PMC_EV(K8, LS_MICROARCHITECTURAL_LATE_CANCEL) \ __PMC_EV(K8, LS_RETIRED_CFLUSH_INSTRUCTIONS) \ __PMC_EV(K8, LS_RETIRED_CPUID_INSTRUCTIONS) \ __PMC_EV(K8, DC_ACCESS) \ __PMC_EV(K8, DC_MISS) \ __PMC_EV(K8, DC_REFILL_FROM_L2) \ __PMC_EV(K8, DC_REFILL_FROM_SYSTEM) \ __PMC_EV(K8, DC_COPYBACK) \ __PMC_EV(K8, DC_L1_DTLB_MISS_AND_L2_DTLB_HIT) \ __PMC_EV(K8, DC_L1_DTLB_MISS_AND_L2_DTLB_MISS) \ __PMC_EV(K8, DC_MISALIGNED_DATA_REFERENCE) \ __PMC_EV(K8, DC_MICROARCHITECTURAL_LATE_CANCEL) \ __PMC_EV(K8, DC_MICROARCHITECTURAL_EARLY_CANCEL) \ __PMC_EV(K8, DC_ONE_BIT_ECC_ERROR) \ __PMC_EV(K8, DC_DISPATCHED_PREFETCH_INSTRUCTIONS) \ __PMC_EV(K8, DC_DCACHE_ACCESSES_BY_LOCKS) \ __PMC_EV(K8, BU_CPU_CLK_UNHALTED) \ __PMC_EV(K8, BU_INTERNAL_L2_REQUEST) \ __PMC_EV(K8, BU_FILL_REQUEST_L2_MISS) \ __PMC_EV(K8, BU_FILL_INTO_L2) \ __PMC_EV(K8, IC_FETCH) \ __PMC_EV(K8, IC_MISS) \ __PMC_EV(K8, IC_REFILL_FROM_L2) \ __PMC_EV(K8, IC_REFILL_FROM_SYSTEM) \ __PMC_EV(K8, IC_L1_ITLB_MISS_AND_L2_ITLB_HIT) \ __PMC_EV(K8, IC_L1_ITLB_MISS_AND_L2_ITLB_MISS) \ __PMC_EV(K8, IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP) \ __PMC_EV(K8, IC_INSTRUCTION_FETCH_STALL) \ __PMC_EV(K8, IC_RETURN_STACK_HIT) \ __PMC_EV(K8, IC_RETURN_STACK_OVERFLOW) \ __PMC_EV(K8, FR_RETIRED_X86_INSTRUCTIONS) \ __PMC_EV(K8, FR_RETIRED_UOPS) \ __PMC_EV(K8, FR_RETIRED_BRANCHES) \ __PMC_EV(K8, FR_RETIRED_BRANCHES_MISPREDICTED) \ __PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES) \ __PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED) \ __PMC_EV(K8, FR_RETIRED_FAR_CONTROL_TRANSFERS) \ __PMC_EV(K8, FR_RETIRED_RESYNCS) \ __PMC_EV(K8, FR_RETIRED_NEAR_RETURNS) \ __PMC_EV(K8, FR_RETIRED_NEAR_RETURNS_MISPREDICTED) \ __PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE) \ __PMC_EV(K8, FR_RETIRED_FPU_INSTRUCTIONS) \ __PMC_EV(K8, FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS) \ __PMC_EV(K8, FR_INTERRUPTS_MASKED_CYCLES) \ __PMC_EV(K8, FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES) \ 
__PMC_EV(K8, FR_TAKEN_HARDWARE_INTERRUPTS) \ __PMC_EV(K8, FR_DECODER_EMPTY) \ __PMC_EV(K8, FR_DISPATCH_STALLS) \ __PMC_EV(K8, FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE) \ __PMC_EV(K8, FR_DISPATCH_STALL_FOR_SERIALIZATION) \ __PMC_EV(K8, FR_DISPATCH_STALL_FOR_SEGMENT_LOAD) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_FPU_IS_FULL) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_LS_IS_FULL) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING) \ __PMC_EV(K8, FR_FPU_EXCEPTIONS) \ __PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR0) \ __PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR1) \ __PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR2) \ __PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR3) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_TURNAROUND) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_BYPASS_SATURATION) \ __PMC_EV(K8, NB_SIZED_COMMANDS) \ __PMC_EV(K8, NB_PROBE_RESULT) \ __PMC_EV(K8, NB_HT_BUS0_BANDWIDTH) \ __PMC_EV(K8, NB_HT_BUS1_BANDWIDTH) \ __PMC_EV(K8, NB_HT_BUS2_BANDWIDTH) #define PMC_EV_K8_FIRST PMC_EV_K8_FP_DISPATCHED_FPU_OPS #define PMC_EV_K8_LAST PMC_EV_K8_NB_HT_BUS2_BANDWIDTH /* * Events supported by Intel architectural fixed function counters, * from the "Intel 64 and IA-32 Architectures Software Developer's * Manual Volume 3B: System Programming Guide, Part 2", July 2008. */ #define __PMC_EV_IAF() \ __PMC_EV(IAF, INSTR_RETIRED_ANY) \ __PMC_EV(IAF, CPU_CLK_UNHALTED_CORE) \ __PMC_EV(IAF, CPU_CLK_UNHALTED_REF) #define PMC_EV_IAF_FIRST PMC_EV_IAF_INSTR_RETIRED_ANY #define PMC_EV_IAF_LAST PMC_EV_IAF_CPU_CLK_UNHALTED_REF #define __PMC_EV_ALIAS_IAF() \ __PMC_EV_ALIAS("instruction-retired", IAF_INSTR_RETIRED_ANY) \ __PMC_EV_ALIAS("unhalted-core-cycles", IAF_CPU_CLK_UNHALTED_CORE) \ __PMC_EV_ALIAS("unhalted-reference-cycles", IAF_CPU_CLK_UNHALTED_REF) #define PMC_EV_IAP_FIRST PMC_EV_IAP_ARCH_BR_INS_RET #define PMC_EV_IAP_LAST PMC_EV_IAP_EVENT_FDH_40H /* * Map "architectural" event names to event ids. */ #define __PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \ __PMC_EV_ALIAS("branch-instruction-retired", IAP_ARCH_BR_INS_RET) \ __PMC_EV_ALIAS("branch-misses-retired", IAP_ARCH_BR_MIS_RET) \ __PMC_EV_ALIAS("instruction-retired", IAP_ARCH_INS_RET) \ __PMC_EV_ALIAS("llc-misses", IAP_ARCH_LLC_MIS) \ __PMC_EV_ALIAS("llc-reference", IAP_ARCH_LLC_REF) \ __PMC_EV_ALIAS("unhalted-reference-cycles", IAP_ARCH_UNH_REF_CYC) \ __PMC_EV_ALIAS("unhalted-core-cycles", IAP_ARCH_UNH_COR_CYC) #define __PMC_EV_UCP() \ __PMC_EV(UCP, EVENT_0CH_04H_E) \ __PMC_EV(UCP, EVENT_0CH_04H_F) \ __PMC_EV(UCP, EVENT_0CH_04H_M) \ __PMC_EV(UCP, EVENT_0CH_04H_S) \ __PMC_EV(UCP, EVENT_0CH_08H_E) \ __PMC_EV(UCP, EVENT_0CH_08H_F) \ __PMC_EV(UCP, EVENT_0CH_08H_M) \ __PMC_EV(UCP, EVENT_0CH_08H_S) \ /* * Intel XScale events from: * * Intel XScale Core Developer's Manual * January, 2004, #27347302 * * 3rd Generation Intel XScale Microarchitecture * Developer's Manual * May 2007, #31628302 * * First 14 events are for 1st and 2nd Generation Intel XScale cores. The * remaining are available only on 3rd Generation Intel XScale cores. 
*/ #define __PMC_EV_XSCALE() \ __PMC_EV(XSCALE, IC_FETCH) \ __PMC_EV(XSCALE, IC_MISS) \ __PMC_EV(XSCALE, DATA_DEPENDENCY_STALLED) \ __PMC_EV(XSCALE, ITLB_MISS) \ __PMC_EV(XSCALE, DTLB_MISS) \ __PMC_EV(XSCALE, BRANCH_RETIRED) \ __PMC_EV(XSCALE, BRANCH_MISPRED) \ __PMC_EV(XSCALE, INSTR_RETIRED) \ __PMC_EV(XSCALE, DC_FULL_CYCLE) \ __PMC_EV(XSCALE, DC_FULL_CONTIG) \ __PMC_EV(XSCALE, DC_ACCESS) \ __PMC_EV(XSCALE, DC_MISS) \ __PMC_EV(XSCALE, DC_WRITEBACK) \ __PMC_EV(XSCALE, PC_CHANGE) \ __PMC_EV(XSCALE, BRANCH_RETIRED_ALL) \ __PMC_EV(XSCALE, INSTR_CYCLE) \ __PMC_EV(XSCALE, CP_STALL) \ __PMC_EV(XSCALE, PC_CHANGE_ALL) \ __PMC_EV(XSCALE, PIPELINE_FLUSH) \ __PMC_EV(XSCALE, BACKEND_STALL) \ __PMC_EV(XSCALE, MULTIPLIER_USE) \ __PMC_EV(XSCALE, MULTIPLIER_STALLED) \ __PMC_EV(XSCALE, DATA_CACHE_STALLED) \ __PMC_EV(XSCALE, L2_CACHE_REQ) \ __PMC_EV(XSCALE, L2_CACHE_MISS) \ __PMC_EV(XSCALE, ADDRESS_BUS_TRANS) \ __PMC_EV(XSCALE, SELF_ADDRESS_BUS_TRANS) \ __PMC_EV(XSCALE, DATA_BUS_TRANS) #define PMC_EV_XSCALE_FIRST PMC_EV_XSCALE_IC_FETCH #define PMC_EV_XSCALE_LAST PMC_EV_XSCALE_DATA_BUS_TRANS /* * ARMv7 Events */ #define __PMC_EV_ARMV7() \ __PMC_EV(ARMV7, EVENT_00H) \ __PMC_EV(ARMV7, EVENT_01H) \ __PMC_EV(ARMV7, EVENT_02H) \ __PMC_EV(ARMV7, EVENT_03H) \ __PMC_EV(ARMV7, EVENT_04H) \ __PMC_EV(ARMV7, EVENT_05H) \ __PMC_EV(ARMV7, EVENT_06H) \ __PMC_EV(ARMV7, EVENT_07H) \ __PMC_EV(ARMV7, EVENT_08H) \ __PMC_EV(ARMV7, EVENT_09H) \ __PMC_EV(ARMV7, EVENT_0AH) \ __PMC_EV(ARMV7, EVENT_0BH) \ __PMC_EV(ARMV7, EVENT_0CH) \ __PMC_EV(ARMV7, EVENT_0DH) \ __PMC_EV(ARMV7, EVENT_0EH) \ __PMC_EV(ARMV7, EVENT_0FH) \ __PMC_EV(ARMV7, EVENT_10H) \ __PMC_EV(ARMV7, EVENT_11H) \ __PMC_EV(ARMV7, EVENT_12H) \ __PMC_EV(ARMV7, EVENT_13H) \ __PMC_EV(ARMV7, EVENT_14H) \ __PMC_EV(ARMV7, EVENT_15H) \ __PMC_EV(ARMV7, EVENT_16H) \ __PMC_EV(ARMV7, EVENT_17H) \ __PMC_EV(ARMV7, EVENT_18H) \ __PMC_EV(ARMV7, EVENT_19H) \ __PMC_EV(ARMV7, EVENT_1AH) \ __PMC_EV(ARMV7, EVENT_1BH) \ __PMC_EV(ARMV7, EVENT_1CH) \ __PMC_EV(ARMV7, EVENT_1DH) \ __PMC_EV(ARMV7, EVENT_1EH) \ __PMC_EV(ARMV7, EVENT_1FH) \ __PMC_EV(ARMV7, EVENT_20H) \ __PMC_EV(ARMV7, EVENT_21H) \ __PMC_EV(ARMV7, EVENT_22H) \ __PMC_EV(ARMV7, EVENT_23H) \ __PMC_EV(ARMV7, EVENT_24H) \ __PMC_EV(ARMV7, EVENT_25H) \ __PMC_EV(ARMV7, EVENT_26H) \ __PMC_EV(ARMV7, EVENT_27H) \ __PMC_EV(ARMV7, EVENT_28H) \ __PMC_EV(ARMV7, EVENT_29H) \ __PMC_EV(ARMV7, EVENT_2AH) \ __PMC_EV(ARMV7, EVENT_2BH) \ __PMC_EV(ARMV7, EVENT_2CH) \ __PMC_EV(ARMV7, EVENT_2DH) \ __PMC_EV(ARMV7, EVENT_2EH) \ __PMC_EV(ARMV7, EVENT_2FH) \ __PMC_EV(ARMV7, EVENT_30H) \ __PMC_EV(ARMV7, EVENT_31H) \ __PMC_EV(ARMV7, EVENT_32H) \ __PMC_EV(ARMV7, EVENT_33H) \ __PMC_EV(ARMV7, EVENT_34H) \ __PMC_EV(ARMV7, EVENT_35H) \ __PMC_EV(ARMV7, EVENT_36H) \ __PMC_EV(ARMV7, EVENT_37H) \ __PMC_EV(ARMV7, EVENT_38H) \ __PMC_EV(ARMV7, EVENT_39H) \ __PMC_EV(ARMV7, EVENT_3AH) \ __PMC_EV(ARMV7, EVENT_3BH) \ __PMC_EV(ARMV7, EVENT_3CH) \ __PMC_EV(ARMV7, EVENT_3DH) \ __PMC_EV(ARMV7, EVENT_3EH) \ __PMC_EV(ARMV7, EVENT_3FH) \ __PMC_EV(ARMV7, EVENT_40H) \ __PMC_EV(ARMV7, EVENT_41H) \ __PMC_EV(ARMV7, EVENT_42H) \ __PMC_EV(ARMV7, EVENT_43H) \ __PMC_EV(ARMV7, EVENT_44H) \ __PMC_EV(ARMV7, EVENT_45H) \ __PMC_EV(ARMV7, EVENT_46H) \ __PMC_EV(ARMV7, EVENT_47H) \ __PMC_EV(ARMV7, EVENT_48H) \ __PMC_EV(ARMV7, EVENT_49H) \ __PMC_EV(ARMV7, EVENT_4AH) \ __PMC_EV(ARMV7, EVENT_4BH) \ __PMC_EV(ARMV7, EVENT_4CH) \ __PMC_EV(ARMV7, EVENT_4DH) \ __PMC_EV(ARMV7, EVENT_4EH) \ __PMC_EV(ARMV7, EVENT_4FH) \ __PMC_EV(ARMV7, EVENT_50H) \ __PMC_EV(ARMV7, EVENT_51H) \ __PMC_EV(ARMV7, EVENT_52H) \ 
__PMC_EV(ARMV7, EVENT_53H) \ __PMC_EV(ARMV7, EVENT_54H) \ __PMC_EV(ARMV7, EVENT_55H) \ __PMC_EV(ARMV7, EVENT_56H) \ __PMC_EV(ARMV7, EVENT_57H) \ __PMC_EV(ARMV7, EVENT_58H) \ __PMC_EV(ARMV7, EVENT_59H) \ __PMC_EV(ARMV7, EVENT_5AH) \ __PMC_EV(ARMV7, EVENT_5BH) \ __PMC_EV(ARMV7, EVENT_5CH) \ __PMC_EV(ARMV7, EVENT_5DH) \ __PMC_EV(ARMV7, EVENT_5EH) \ __PMC_EV(ARMV7, EVENT_5FH) \ __PMC_EV(ARMV7, EVENT_60H) \ __PMC_EV(ARMV7, EVENT_61H) \ __PMC_EV(ARMV7, EVENT_62H) \ __PMC_EV(ARMV7, EVENT_63H) \ __PMC_EV(ARMV7, EVENT_64H) \ __PMC_EV(ARMV7, EVENT_65H) \ __PMC_EV(ARMV7, EVENT_66H) \ __PMC_EV(ARMV7, EVENT_67H) \ __PMC_EV(ARMV7, EVENT_68H) \ __PMC_EV(ARMV7, EVENT_69H) \ __PMC_EV(ARMV7, EVENT_6AH) \ __PMC_EV(ARMV7, EVENT_6BH) \ __PMC_EV(ARMV7, EVENT_6CH) \ __PMC_EV(ARMV7, EVENT_6DH) \ __PMC_EV(ARMV7, EVENT_6EH) \ __PMC_EV(ARMV7, EVENT_6FH) \ __PMC_EV(ARMV7, EVENT_70H) \ __PMC_EV(ARMV7, EVENT_71H) \ __PMC_EV(ARMV7, EVENT_72H) \ __PMC_EV(ARMV7, EVENT_73H) \ __PMC_EV(ARMV7, EVENT_74H) \ __PMC_EV(ARMV7, EVENT_75H) \ __PMC_EV(ARMV7, EVENT_76H) \ __PMC_EV(ARMV7, EVENT_77H) \ __PMC_EV(ARMV7, EVENT_78H) \ __PMC_EV(ARMV7, EVENT_79H) \ __PMC_EV(ARMV7, EVENT_7AH) \ __PMC_EV(ARMV7, EVENT_7BH) \ __PMC_EV(ARMV7, EVENT_7CH) \ __PMC_EV(ARMV7, EVENT_7DH) \ __PMC_EV(ARMV7, EVENT_7EH) \ __PMC_EV(ARMV7, EVENT_7FH) \ __PMC_EV(ARMV7, EVENT_80H) \ __PMC_EV(ARMV7, EVENT_81H) \ __PMC_EV(ARMV7, EVENT_82H) \ __PMC_EV(ARMV7, EVENT_83H) \ __PMC_EV(ARMV7, EVENT_84H) \ __PMC_EV(ARMV7, EVENT_85H) \ __PMC_EV(ARMV7, EVENT_86H) \ __PMC_EV(ARMV7, EVENT_87H) \ __PMC_EV(ARMV7, EVENT_88H) \ __PMC_EV(ARMV7, EVENT_89H) \ __PMC_EV(ARMV7, EVENT_8AH) \ __PMC_EV(ARMV7, EVENT_8BH) \ __PMC_EV(ARMV7, EVENT_8CH) \ __PMC_EV(ARMV7, EVENT_8DH) \ __PMC_EV(ARMV7, EVENT_8EH) \ __PMC_EV(ARMV7, EVENT_8FH) \ __PMC_EV(ARMV7, EVENT_90H) \ __PMC_EV(ARMV7, EVENT_91H) \ __PMC_EV(ARMV7, EVENT_92H) \ __PMC_EV(ARMV7, EVENT_93H) \ __PMC_EV(ARMV7, EVENT_94H) \ __PMC_EV(ARMV7, EVENT_95H) \ __PMC_EV(ARMV7, EVENT_96H) \ __PMC_EV(ARMV7, EVENT_97H) \ __PMC_EV(ARMV7, EVENT_98H) \ __PMC_EV(ARMV7, EVENT_99H) \ __PMC_EV(ARMV7, EVENT_9AH) \ __PMC_EV(ARMV7, EVENT_9BH) \ __PMC_EV(ARMV7, EVENT_9CH) \ __PMC_EV(ARMV7, EVENT_9DH) \ __PMC_EV(ARMV7, EVENT_9EH) \ __PMC_EV(ARMV7, EVENT_9FH) \ __PMC_EV(ARMV7, EVENT_A0H) \ __PMC_EV(ARMV7, EVENT_A1H) \ __PMC_EV(ARMV7, EVENT_A2H) \ __PMC_EV(ARMV7, EVENT_A3H) \ __PMC_EV(ARMV7, EVENT_A4H) \ __PMC_EV(ARMV7, EVENT_A5H) \ __PMC_EV(ARMV7, EVENT_A6H) \ __PMC_EV(ARMV7, EVENT_A7H) \ __PMC_EV(ARMV7, EVENT_A8H) \ __PMC_EV(ARMV7, EVENT_A9H) \ __PMC_EV(ARMV7, EVENT_AAH) \ __PMC_EV(ARMV7, EVENT_ABH) \ __PMC_EV(ARMV7, EVENT_ACH) \ __PMC_EV(ARMV7, EVENT_ADH) \ __PMC_EV(ARMV7, EVENT_AEH) \ __PMC_EV(ARMV7, EVENT_AFH) \ __PMC_EV(ARMV7, EVENT_B0H) \ __PMC_EV(ARMV7, EVENT_B1H) \ __PMC_EV(ARMV7, EVENT_B2H) \ __PMC_EV(ARMV7, EVENT_B3H) \ __PMC_EV(ARMV7, EVENT_B4H) \ __PMC_EV(ARMV7, EVENT_B5H) \ __PMC_EV(ARMV7, EVENT_B6H) \ __PMC_EV(ARMV7, EVENT_B7H) \ __PMC_EV(ARMV7, EVENT_B8H) \ __PMC_EV(ARMV7, EVENT_B9H) \ __PMC_EV(ARMV7, EVENT_BAH) \ __PMC_EV(ARMV7, EVENT_BBH) \ __PMC_EV(ARMV7, EVENT_BCH) \ __PMC_EV(ARMV7, EVENT_BDH) \ __PMC_EV(ARMV7, EVENT_BEH) \ __PMC_EV(ARMV7, EVENT_BFH) \ __PMC_EV(ARMV7, EVENT_C0H) \ __PMC_EV(ARMV7, EVENT_C1H) \ __PMC_EV(ARMV7, EVENT_C2H) \ __PMC_EV(ARMV7, EVENT_C3H) \ __PMC_EV(ARMV7, EVENT_C4H) \ __PMC_EV(ARMV7, EVENT_C5H) \ __PMC_EV(ARMV7, EVENT_C6H) \ __PMC_EV(ARMV7, EVENT_C7H) \ __PMC_EV(ARMV7, EVENT_C8H) \ __PMC_EV(ARMV7, EVENT_C9H) \ __PMC_EV(ARMV7, EVENT_CAH) \ __PMC_EV(ARMV7, EVENT_CBH) \ __PMC_EV(ARMV7, EVENT_CCH) \ __PMC_EV(ARMV7, 
EVENT_CDH) \ __PMC_EV(ARMV7, EVENT_CEH) \ __PMC_EV(ARMV7, EVENT_CFH) \ __PMC_EV(ARMV7, EVENT_D0H) \ __PMC_EV(ARMV7, EVENT_D1H) \ __PMC_EV(ARMV7, EVENT_D2H) \ __PMC_EV(ARMV7, EVENT_D3H) \ __PMC_EV(ARMV7, EVENT_D4H) \ __PMC_EV(ARMV7, EVENT_D5H) \ __PMC_EV(ARMV7, EVENT_D6H) \ __PMC_EV(ARMV7, EVENT_D7H) \ __PMC_EV(ARMV7, EVENT_D8H) \ __PMC_EV(ARMV7, EVENT_D9H) \ __PMC_EV(ARMV7, EVENT_DAH) \ __PMC_EV(ARMV7, EVENT_DBH) \ __PMC_EV(ARMV7, EVENT_DCH) \ __PMC_EV(ARMV7, EVENT_DDH) \ __PMC_EV(ARMV7, EVENT_DEH) \ __PMC_EV(ARMV7, EVENT_DFH) \ __PMC_EV(ARMV7, EVENT_E0H) \ __PMC_EV(ARMV7, EVENT_E1H) \ __PMC_EV(ARMV7, EVENT_E2H) \ __PMC_EV(ARMV7, EVENT_E3H) \ __PMC_EV(ARMV7, EVENT_E4H) \ __PMC_EV(ARMV7, EVENT_E5H) \ __PMC_EV(ARMV7, EVENT_E6H) \ __PMC_EV(ARMV7, EVENT_E7H) \ __PMC_EV(ARMV7, EVENT_E8H) \ __PMC_EV(ARMV7, EVENT_E9H) \ __PMC_EV(ARMV7, EVENT_EAH) \ __PMC_EV(ARMV7, EVENT_EBH) \ __PMC_EV(ARMV7, EVENT_ECH) \ __PMC_EV(ARMV7, EVENT_EDH) \ __PMC_EV(ARMV7, EVENT_EEH) \ __PMC_EV(ARMV7, EVENT_EFH) \ __PMC_EV(ARMV7, EVENT_F0H) \ __PMC_EV(ARMV7, EVENT_F1H) \ __PMC_EV(ARMV7, EVENT_F2H) \ __PMC_EV(ARMV7, EVENT_F3H) \ __PMC_EV(ARMV7, EVENT_F4H) \ __PMC_EV(ARMV7, EVENT_F5H) \ __PMC_EV(ARMV7, EVENT_F6H) \ __PMC_EV(ARMV7, EVENT_F7H) \ __PMC_EV(ARMV7, EVENT_F8H) \ __PMC_EV(ARMV7, EVENT_F9H) \ __PMC_EV(ARMV7, EVENT_FAH) \ __PMC_EV(ARMV7, EVENT_FBH) \ __PMC_EV(ARMV7, EVENT_FCH) \ __PMC_EV(ARMV7, EVENT_FDH) \ __PMC_EV(ARMV7, EVENT_FEH) \ __PMC_EV(ARMV7, EVENT_FFH) #define PMC_EV_ARMV7_FIRST PMC_EV_ARMV7_EVENT_00H #define PMC_EV_ARMV7_LAST PMC_EV_ARMV7_EVENT_FFH #define __PMC_EV_ALIAS_ARMV7_COMMON() \ __PMC_EV_ALIAS("PMNC_SW_INCR", ARMV7_EVENT_00H) \ __PMC_EV_ALIAS("L1_ICACHE_REFILL", ARMV7_EVENT_01H) \ __PMC_EV_ALIAS("ITLB_REFILL", ARMV7_EVENT_02H) \ __PMC_EV_ALIAS("L1_DCACHE_REFILL", ARMV7_EVENT_03H) \ __PMC_EV_ALIAS("L1_DCACHE_ACCESS", ARMV7_EVENT_04H) \ __PMC_EV_ALIAS("DTLB_REFILL", ARMV7_EVENT_05H) \ __PMC_EV_ALIAS("MEM_READ", ARMV7_EVENT_06H) \ __PMC_EV_ALIAS("MEM_WRITE", ARMV7_EVENT_07H) \ __PMC_EV_ALIAS("EXC_TAKEN", ARMV7_EVENT_09H) \ __PMC_EV_ALIAS("EXC_EXECUTED", ARMV7_EVENT_0AH) \ __PMC_EV_ALIAS("CID_WRITE", ARMV7_EVENT_0BH) \ __PMC_EV_ALIAS("PC_WRITE", ARMV7_EVENT_0CH) \ __PMC_EV_ALIAS("PC_IMM_BRANCH", ARMV7_EVENT_0DH) \ __PMC_EV_ALIAS("MEM_UNALIGNED_ACCESS", ARMV7_EVENT_0FH) \ __PMC_EV_ALIAS("PC_BRANCH_MIS_PRED", ARMV7_EVENT_10H) \ __PMC_EV_ALIAS("CLOCK_CYCLES", ARMV7_EVENT_11H) \ __PMC_EV_ALIAS("PC_BRANCH_PRED", ARMV7_EVENT_12H) #define __PMC_EV_ALIAS_ARMV7_COMMON_A8() \ __PMC_EV_ALIAS_ARMV7_COMMON() \ __PMC_EV_ALIAS("INSTR_EXECUTED", ARMV7_EVENT_08H) \ __PMC_EV_ALIAS("PC_PROC_RETURN", ARMV7_EVENT_0EH) \ __PMC_EV_ALIAS("MEM_ACCESS", ARMV7_EVENT_13H) \ __PMC_EV_ALIAS("L1_ICACHE_ACCESS", ARMV7_EVENT_14H) \ __PMC_EV_ALIAS("L1_DCACHE_WB", ARMV7_EVENT_15H) \ __PMC_EV_ALIAS("L2_CACHE_ACCESS", ARMV7_EVENT_16H) \ __PMC_EV_ALIAS("L2_CACHE_REFILL", ARMV7_EVENT_17H) \ __PMC_EV_ALIAS("L2_CACHE_WB", ARMV7_EVENT_18H) \ __PMC_EV_ALIAS("BUS_ACCESS", ARMV7_EVENT_19H) \ __PMC_EV_ALIAS("MEM_ERROR", ARMV7_EVENT_1AH) \ __PMC_EV_ALIAS("INSTR_SPEC", ARMV7_EVENT_1BH) \ __PMC_EV_ALIAS("TTBR_WRITE", ARMV7_EVENT_1CH) \ __PMC_EV_ALIAS("BUS_CYCLES", ARMV7_EVENT_1DH) \ __PMC_EV_ALIAS("CPU_CYCLES", ARMV7_EVENT_FFH) #define __PMC_EV_ALIAS_ARMV7_CORTEX_A8() \ __PMC_EV_ALIAS_ARMV7_COMMON_A8() \ __PMC_EV_ALIAS("WRITE_BUF_FULL", ARMV7_EVENT_40H) \ __PMC_EV_ALIAS("L2_STORE_MERGED", ARMV7_EVENT_41H) \ __PMC_EV_ALIAS("L2_STORE_BUFFERABLE", ARMV7_EVENT_42H) \ __PMC_EV_ALIAS("L2_ACCESS", ARMV7_EVENT_43H) \ __PMC_EV_ALIAS("L2_CACHE_MISS", 
ARMV7_EVENT_44H) \ __PMC_EV_ALIAS("AXI_READ", ARMV7_EVENT_45H) \ __PMC_EV_ALIAS("AXI_WRITE", ARMV7_EVENT_46H) \ __PMC_EV_ALIAS("MEM_REPLAY_EVT", ARMV7_EVENT_47H) \ __PMC_EV_ALIAS("MEM_UNALIGNED_ACCESS_REPLAY", ARMV7_EVENT_48H) \ __PMC_EV_ALIAS("L1_DCACHE_HASH_MISS", ARMV7_EVENT_49H) \ __PMC_EV_ALIAS("L1_ICACHE_HASH_MISS", ARMV7_EVENT_4AH) \ __PMC_EV_ALIAS("L1_CACHE_PAGECOL_ALIAS", ARMV7_EVENT_4BH) \ __PMC_EV_ALIAS("L1_DCACHE_NEON_ACCESS", ARMV7_EVENT_4CH) \ __PMC_EV_ALIAS("L1_DCACHE_NEON_CACHEABLE", ARMV7_EVENT_4DH) \ __PMC_EV_ALIAS("L2_CACHE_NEON_MEM_ACCESS", ARMV7_EVENT_4EH) \ __PMC_EV_ALIAS("L2_CACHE_NEON_HIT", ARMV7_EVENT_4FH) \ __PMC_EV_ALIAS("L1_CACHE_ACCESS_NOCP15", ARMV7_EVENT_50H) \ __PMC_EV_ALIAS("RET_STACK_MISPREDICT", ARMV7_EVENT_51H) \ __PMC_EV_ALIAS("BRANCH_DIR_MISPREDICT", ARMV7_EVENT_52H) \ __PMC_EV_ALIAS("PRED_BRANCH_PRED_TAKEN", ARMV7_EVENT_53H) \ __PMC_EV_ALIAS("PRED_BRANCH_EXEC_TAKEN", ARMV7_EVENT_54H) \ __PMC_EV_ALIAS("OPS_ISSUED", ARMV7_EVENT_55H) \ __PMC_EV_ALIAS("CYCLES_NO_INSTRUCTION", ARMV7_EVENT_56H) \ __PMC_EV_ALIAS("INSTRUCTIONS_ISSUED_CYCLE", ARMV7_EVENT_57H) \ __PMC_EV_ALIAS("CYCLES_STALLED_NEON_MRC", ARMV7_EVENT_58H) \ __PMC_EV_ALIAS("CYCLES_STALLED_NEON_FULLQ", ARMV7_EVENT_59H) \ __PMC_EV_ALIAS("CYCLES_NONIDLE_NEON_INT", ARMV7_EVENT_5AH) \ __PMC_EV_ALIAS("PMUEXTIN0_EVT", ARMV7_EVENT_70H) \ __PMC_EV_ALIAS("PMUEXTIN1_EVT", ARMV7_EVENT_71H) \ __PMC_EV_ALIAS("PMUEXTIN_EVT", ARMV7_EVENT_72H) #define PMC_EV_ARMV7_CORTEX_A8_FIRST PMC_EV_ARMV7_PMNC_SW_INCR #define PMC_EV_ARMV7_CORTEX_A8_LAST PMC_EV_ARMV7_PMUEXTIN_EVT #define __PMC_EV_ALIAS_ARMV7_CORTEX_A9() \ __PMC_EV_ALIAS_ARMV7_COMMON() \ __PMC_EV_ALIAS("JAVA_BYTECODE", ARMV7_EVENT_40H) \ __PMC_EV_ALIAS("SOFTWARE_JAVA_BYTECODE", ARMV7_EVENT_41H) \ __PMC_EV_ALIAS("JAZELLE_BACKWARD_BRANCH", ARMV7_EVENT_42H) \ __PMC_EV_ALIAS("COHERENT_LINEFILL_MISSC", ARMV7_EVENT_50H) \ __PMC_EV_ALIAS("COHERENT_LINEFILL_HITC", ARMV7_EVENT_51H) \ __PMC_EV_ALIAS("INSTR_CACHE_DEPENDENT_STALL", ARMV7_EVENT_60H) \ __PMC_EV_ALIAS("DATA_CACHE_DEPENDENT_STALL", ARMV7_EVENT_61H) \ __PMC_EV_ALIAS("MAIN_TLB_MISS_STALL", ARMV7_EVENT_62H) \ __PMC_EV_ALIAS("STREX_PASSED", ARMV7_EVENT_63H) \ __PMC_EV_ALIAS("STREX_FAILED", ARMV7_EVENT_64H) \ __PMC_EV_ALIAS("DATA_EVICTION", ARMV7_EVENT_65H) \ __PMC_EV_ALIAS("ISSUE_DNOT_DISPATCH_ANY_INSTR", ARMV7_EVENT_66H) \ __PMC_EV_ALIAS("ISSUE_IS_EMPTY", ARMV7_EVENT_67H) \ __PMC_EV_ALIAS("INSTR_RENAMED", ARMV7_EVENT_68H) \ __PMC_EV_ALIAS("PREDICTABLE_FUNCTION_RETURN", ARMV7_EVENT_6EH) \ __PMC_EV_ALIAS("MAIN_EXECUTION_UNIT_PIPE", ARMV7_EVENT_70H) \ __PMC_EV_ALIAS("SECOND_EXECUTION_UNIT_PIPE", ARMV7_EVENT_71H) \ __PMC_EV_ALIAS("LOAD_STORE_PIPE", ARMV7_EVENT_72H) \ __PMC_EV_ALIAS("FLOATING_POINT_INSTR_RENAMED", ARMV7_EVENT_73H) \ __PMC_EV_ALIAS("NEON_INSTRS_RENAMED", ARMV7_EVENT_74H) \ __PMC_EV_ALIAS("PLD_STALL", ARMV7_EVENT_80H) \ __PMC_EV_ALIAS("WRITE_STALL", ARMV7_EVENT_81H) \ __PMC_EV_ALIAS("INSTR_MAIN_TLB_MISS_STALL", ARMV7_EVENT_82H) \ __PMC_EV_ALIAS("DATA_MAIN_TLB_MISS_STALL", ARMV7_EVENT_83H) \ __PMC_EV_ALIAS("INSTR_MICRO_TLB_MISS_STALL", ARMV7_EVENT_84H) \ __PMC_EV_ALIAS("DATA_MICRO_TLB_MISS_STALL", ARMV7_EVENT_85H) \ __PMC_EV_ALIAS("DMB_STALL", ARMV7_EVENT_86H) \ __PMC_EV_ALIAS("INTEGER_CORE_CLOCK_ENABLED", ARMV7_EVENT_8AH) \ __PMC_EV_ALIAS("DATA_ENGINE_CLOCK_ENABLED", ARMV7_EVENT_8BH) \ __PMC_EV_ALIAS("ISB", ARMV7_EVENT_90H) \ __PMC_EV_ALIAS("DSB", ARMV7_EVENT_91H) \ __PMC_EV_ALIAS("DMB", ARMV7_EVENT_92H) \ __PMC_EV_ALIAS("EXTERNAL_INTERRUPT", ARMV7_EVENT_93H) \ 
__PMC_EV_ALIAS("PLE_CACHE_LINE_REQ_COMPLETED", ARMV7_EVENT_A0H) \ __PMC_EV_ALIAS("PLE_CACHE_LINE_REQ_SKIPPED", ARMV7_EVENT_A1H) \ __PMC_EV_ALIAS("PLE_FIFO_FLUSH", ARMV7_EVENT_A2H) \ __PMC_EV_ALIAS("PLE_REQUEST_COMPLETED", ARMV7_EVENT_A3H) \ __PMC_EV_ALIAS("PLE_FIFO_OVERFLOW", ARMV7_EVENT_A4H) \ __PMC_EV_ALIAS("PLE_REQUEST_PROGRAMMED", ARMV7_EVENT_A5H) /* * ARMv8 Events */ #define __PMC_EV_ARMV8() \ __PMC_EV(ARMV8, EVENT_00H) \ __PMC_EV(ARMV8, EVENT_01H) \ __PMC_EV(ARMV8, EVENT_02H) \ __PMC_EV(ARMV8, EVENT_03H) \ __PMC_EV(ARMV8, EVENT_04H) \ __PMC_EV(ARMV8, EVENT_05H) \ __PMC_EV(ARMV8, EVENT_06H) \ __PMC_EV(ARMV8, EVENT_07H) \ __PMC_EV(ARMV8, EVENT_08H) \ __PMC_EV(ARMV8, EVENT_09H) \ __PMC_EV(ARMV8, EVENT_0AH) \ __PMC_EV(ARMV8, EVENT_0BH) \ __PMC_EV(ARMV8, EVENT_0CH) \ __PMC_EV(ARMV8, EVENT_0DH) \ __PMC_EV(ARMV8, EVENT_0EH) \ __PMC_EV(ARMV8, EVENT_0FH) \ __PMC_EV(ARMV8, EVENT_10H) \ __PMC_EV(ARMV8, EVENT_11H) \ __PMC_EV(ARMV8, EVENT_12H) \ __PMC_EV(ARMV8, EVENT_13H) \ __PMC_EV(ARMV8, EVENT_14H) \ __PMC_EV(ARMV8, EVENT_15H) \ __PMC_EV(ARMV8, EVENT_16H) \ __PMC_EV(ARMV8, EVENT_17H) \ __PMC_EV(ARMV8, EVENT_18H) \ __PMC_EV(ARMV8, EVENT_19H) \ __PMC_EV(ARMV8, EVENT_1AH) \ __PMC_EV(ARMV8, EVENT_1BH) \ __PMC_EV(ARMV8, EVENT_1CH) \ __PMC_EV(ARMV8, EVENT_1DH) \ __PMC_EV(ARMV8, EVENT_1EH) \ __PMC_EV(ARMV8, EVENT_1FH) \ __PMC_EV(ARMV8, EVENT_20H) \ __PMC_EV(ARMV8, EVENT_21H) \ __PMC_EV(ARMV8, EVENT_22H) \ __PMC_EV(ARMV8, EVENT_23H) \ __PMC_EV(ARMV8, EVENT_24H) \ __PMC_EV(ARMV8, EVENT_25H) \ __PMC_EV(ARMV8, EVENT_26H) \ __PMC_EV(ARMV8, EVENT_27H) \ __PMC_EV(ARMV8, EVENT_28H) \ __PMC_EV(ARMV8, EVENT_29H) \ __PMC_EV(ARMV8, EVENT_2AH) \ __PMC_EV(ARMV8, EVENT_2BH) \ __PMC_EV(ARMV8, EVENT_2CH) \ __PMC_EV(ARMV8, EVENT_2DH) \ __PMC_EV(ARMV8, EVENT_2EH) \ __PMC_EV(ARMV8, EVENT_2FH) \ __PMC_EV(ARMV8, EVENT_30H) \ __PMC_EV(ARMV8, EVENT_31H) \ __PMC_EV(ARMV8, EVENT_32H) \ __PMC_EV(ARMV8, EVENT_33H) \ __PMC_EV(ARMV8, EVENT_34H) \ __PMC_EV(ARMV8, EVENT_35H) \ __PMC_EV(ARMV8, EVENT_36H) \ __PMC_EV(ARMV8, EVENT_37H) \ __PMC_EV(ARMV8, EVENT_38H) \ __PMC_EV(ARMV8, EVENT_39H) \ __PMC_EV(ARMV8, EVENT_3AH) \ __PMC_EV(ARMV8, EVENT_3BH) \ __PMC_EV(ARMV8, EVENT_3CH) \ __PMC_EV(ARMV8, EVENT_3DH) \ __PMC_EV(ARMV8, EVENT_3EH) \ __PMC_EV(ARMV8, EVENT_3FH) \ __PMC_EV(ARMV8, EVENT_40H) \ __PMC_EV(ARMV8, EVENT_41H) \ __PMC_EV(ARMV8, EVENT_42H) \ __PMC_EV(ARMV8, EVENT_43H) \ __PMC_EV(ARMV8, EVENT_44H) \ __PMC_EV(ARMV8, EVENT_45H) \ __PMC_EV(ARMV8, EVENT_46H) \ __PMC_EV(ARMV8, EVENT_47H) \ __PMC_EV(ARMV8, EVENT_48H) \ __PMC_EV(ARMV8, EVENT_49H) \ __PMC_EV(ARMV8, EVENT_4AH) \ __PMC_EV(ARMV8, EVENT_4BH) \ __PMC_EV(ARMV8, EVENT_4CH) \ __PMC_EV(ARMV8, EVENT_4DH) \ __PMC_EV(ARMV8, EVENT_4EH) \ __PMC_EV(ARMV8, EVENT_4FH) \ __PMC_EV(ARMV8, EVENT_50H) \ __PMC_EV(ARMV8, EVENT_51H) \ __PMC_EV(ARMV8, EVENT_52H) \ __PMC_EV(ARMV8, EVENT_53H) \ __PMC_EV(ARMV8, EVENT_54H) \ __PMC_EV(ARMV8, EVENT_55H) \ __PMC_EV(ARMV8, EVENT_56H) \ __PMC_EV(ARMV8, EVENT_57H) \ __PMC_EV(ARMV8, EVENT_58H) \ __PMC_EV(ARMV8, EVENT_59H) \ __PMC_EV(ARMV8, EVENT_5AH) \ __PMC_EV(ARMV8, EVENT_5BH) \ __PMC_EV(ARMV8, EVENT_5CH) \ __PMC_EV(ARMV8, EVENT_5DH) \ __PMC_EV(ARMV8, EVENT_5EH) \ __PMC_EV(ARMV8, EVENT_5FH) \ __PMC_EV(ARMV8, EVENT_60H) \ __PMC_EV(ARMV8, EVENT_61H) \ __PMC_EV(ARMV8, EVENT_62H) \ __PMC_EV(ARMV8, EVENT_63H) \ __PMC_EV(ARMV8, EVENT_64H) \ __PMC_EV(ARMV8, EVENT_65H) \ __PMC_EV(ARMV8, EVENT_66H) \ __PMC_EV(ARMV8, EVENT_67H) \ __PMC_EV(ARMV8, EVENT_68H) \ __PMC_EV(ARMV8, EVENT_69H) \ __PMC_EV(ARMV8, EVENT_6AH) \ __PMC_EV(ARMV8, EVENT_6BH) \ __PMC_EV(ARMV8, 
EVENT_6CH) \ __PMC_EV(ARMV8, EVENT_6DH) \ __PMC_EV(ARMV8, EVENT_6EH) \ __PMC_EV(ARMV8, EVENT_6FH) \ __PMC_EV(ARMV8, EVENT_70H) \ __PMC_EV(ARMV8, EVENT_71H) \ __PMC_EV(ARMV8, EVENT_72H) \ __PMC_EV(ARMV8, EVENT_73H) \ __PMC_EV(ARMV8, EVENT_74H) \ __PMC_EV(ARMV8, EVENT_75H) \ __PMC_EV(ARMV8, EVENT_76H) \ __PMC_EV(ARMV8, EVENT_77H) \ __PMC_EV(ARMV8, EVENT_78H) \ __PMC_EV(ARMV8, EVENT_79H) \ __PMC_EV(ARMV8, EVENT_7AH) \ __PMC_EV(ARMV8, EVENT_7BH) \ __PMC_EV(ARMV8, EVENT_7CH) \ __PMC_EV(ARMV8, EVENT_7DH) \ __PMC_EV(ARMV8, EVENT_7EH) \ __PMC_EV(ARMV8, EVENT_7FH) \ __PMC_EV(ARMV8, EVENT_80H) \ __PMC_EV(ARMV8, EVENT_81H) \ __PMC_EV(ARMV8, EVENT_82H) \ __PMC_EV(ARMV8, EVENT_83H) \ __PMC_EV(ARMV8, EVENT_84H) \ __PMC_EV(ARMV8, EVENT_85H) \ __PMC_EV(ARMV8, EVENT_86H) \ __PMC_EV(ARMV8, EVENT_87H) \ __PMC_EV(ARMV8, EVENT_88H) \ __PMC_EV(ARMV8, EVENT_89H) \ __PMC_EV(ARMV8, EVENT_8AH) \ __PMC_EV(ARMV8, EVENT_8BH) \ __PMC_EV(ARMV8, EVENT_8CH) \ __PMC_EV(ARMV8, EVENT_8DH) \ __PMC_EV(ARMV8, EVENT_8EH) \ __PMC_EV(ARMV8, EVENT_8FH) \ __PMC_EV(ARMV8, EVENT_90H) \ __PMC_EV(ARMV8, EVENT_91H) \ __PMC_EV(ARMV8, EVENT_92H) \ __PMC_EV(ARMV8, EVENT_93H) \ __PMC_EV(ARMV8, EVENT_94H) \ __PMC_EV(ARMV8, EVENT_95H) \ __PMC_EV(ARMV8, EVENT_96H) \ __PMC_EV(ARMV8, EVENT_97H) \ __PMC_EV(ARMV8, EVENT_98H) \ __PMC_EV(ARMV8, EVENT_99H) \ __PMC_EV(ARMV8, EVENT_9AH) \ __PMC_EV(ARMV8, EVENT_9BH) \ __PMC_EV(ARMV8, EVENT_9CH) \ __PMC_EV(ARMV8, EVENT_9DH) \ __PMC_EV(ARMV8, EVENT_9EH) \ __PMC_EV(ARMV8, EVENT_9FH) \ __PMC_EV(ARMV8, EVENT_A0H) \ __PMC_EV(ARMV8, EVENT_A1H) \ __PMC_EV(ARMV8, EVENT_A2H) \ __PMC_EV(ARMV8, EVENT_A3H) \ __PMC_EV(ARMV8, EVENT_A4H) \ __PMC_EV(ARMV8, EVENT_A5H) \ __PMC_EV(ARMV8, EVENT_A6H) \ __PMC_EV(ARMV8, EVENT_A7H) \ __PMC_EV(ARMV8, EVENT_A8H) \ __PMC_EV(ARMV8, EVENT_A9H) \ __PMC_EV(ARMV8, EVENT_AAH) \ __PMC_EV(ARMV8, EVENT_ABH) \ __PMC_EV(ARMV8, EVENT_ACH) \ __PMC_EV(ARMV8, EVENT_ADH) \ __PMC_EV(ARMV8, EVENT_AEH) \ __PMC_EV(ARMV8, EVENT_AFH) \ __PMC_EV(ARMV8, EVENT_B0H) \ __PMC_EV(ARMV8, EVENT_B1H) \ __PMC_EV(ARMV8, EVENT_B2H) \ __PMC_EV(ARMV8, EVENT_B3H) \ __PMC_EV(ARMV8, EVENT_B4H) \ __PMC_EV(ARMV8, EVENT_B5H) \ __PMC_EV(ARMV8, EVENT_B6H) \ __PMC_EV(ARMV8, EVENT_B7H) \ __PMC_EV(ARMV8, EVENT_B8H) \ __PMC_EV(ARMV8, EVENT_B9H) \ __PMC_EV(ARMV8, EVENT_BAH) \ __PMC_EV(ARMV8, EVENT_BBH) \ __PMC_EV(ARMV8, EVENT_BCH) \ __PMC_EV(ARMV8, EVENT_BDH) \ __PMC_EV(ARMV8, EVENT_BEH) \ __PMC_EV(ARMV8, EVENT_BFH) \ __PMC_EV(ARMV8, EVENT_C0H) \ __PMC_EV(ARMV8, EVENT_C1H) \ __PMC_EV(ARMV8, EVENT_C2H) \ __PMC_EV(ARMV8, EVENT_C3H) \ __PMC_EV(ARMV8, EVENT_C4H) \ __PMC_EV(ARMV8, EVENT_C5H) \ __PMC_EV(ARMV8, EVENT_C6H) \ __PMC_EV(ARMV8, EVENT_C7H) \ __PMC_EV(ARMV8, EVENT_C8H) \ __PMC_EV(ARMV8, EVENT_C9H) \ __PMC_EV(ARMV8, EVENT_CAH) \ __PMC_EV(ARMV8, EVENT_CBH) \ __PMC_EV(ARMV8, EVENT_CCH) \ __PMC_EV(ARMV8, EVENT_CDH) \ __PMC_EV(ARMV8, EVENT_CEH) \ __PMC_EV(ARMV8, EVENT_CFH) \ __PMC_EV(ARMV8, EVENT_D0H) \ __PMC_EV(ARMV8, EVENT_D1H) \ __PMC_EV(ARMV8, EVENT_D2H) \ __PMC_EV(ARMV8, EVENT_D3H) \ __PMC_EV(ARMV8, EVENT_D4H) \ __PMC_EV(ARMV8, EVENT_D5H) \ __PMC_EV(ARMV8, EVENT_D6H) \ __PMC_EV(ARMV8, EVENT_D7H) \ __PMC_EV(ARMV8, EVENT_D8H) \ __PMC_EV(ARMV8, EVENT_D9H) \ __PMC_EV(ARMV8, EVENT_DAH) \ __PMC_EV(ARMV8, EVENT_DBH) \ __PMC_EV(ARMV8, EVENT_DCH) \ __PMC_EV(ARMV8, EVENT_DDH) \ __PMC_EV(ARMV8, EVENT_DEH) \ __PMC_EV(ARMV8, EVENT_DFH) \ __PMC_EV(ARMV8, EVENT_E0H) \ __PMC_EV(ARMV8, EVENT_E1H) \ __PMC_EV(ARMV8, EVENT_E2H) \ __PMC_EV(ARMV8, EVENT_E3H) \ __PMC_EV(ARMV8, EVENT_E4H) \ __PMC_EV(ARMV8, EVENT_E5H) \ __PMC_EV(ARMV8, EVENT_E6H) \ 
__PMC_EV(ARMV8, EVENT_E7H) \ __PMC_EV(ARMV8, EVENT_E8H) \ __PMC_EV(ARMV8, EVENT_E9H) \ __PMC_EV(ARMV8, EVENT_EAH) \ __PMC_EV(ARMV8, EVENT_EBH) \ __PMC_EV(ARMV8, EVENT_ECH) \ __PMC_EV(ARMV8, EVENT_EDH) \ __PMC_EV(ARMV8, EVENT_EEH) \ __PMC_EV(ARMV8, EVENT_EFH) \ __PMC_EV(ARMV8, EVENT_F0H) \ __PMC_EV(ARMV8, EVENT_F1H) \ __PMC_EV(ARMV8, EVENT_F2H) \ __PMC_EV(ARMV8, EVENT_F3H) \ __PMC_EV(ARMV8, EVENT_F4H) \ __PMC_EV(ARMV8, EVENT_F5H) \ __PMC_EV(ARMV8, EVENT_F6H) \ __PMC_EV(ARMV8, EVENT_F7H) \ __PMC_EV(ARMV8, EVENT_F8H) \ __PMC_EV(ARMV8, EVENT_F9H) \ __PMC_EV(ARMV8, EVENT_FAH) \ __PMC_EV(ARMV8, EVENT_FBH) \ __PMC_EV(ARMV8, EVENT_FCH) \ __PMC_EV(ARMV8, EVENT_FDH) \ __PMC_EV(ARMV8, EVENT_FEH) \ __PMC_EV(ARMV8, EVENT_FFH) #define PMC_EV_ARMV8_FIRST PMC_EV_ARMV8_EVENT_00H #define PMC_EV_ARMV8_LAST PMC_EV_ARMV8_EVENT_FFH #define __PMC_EV_ALIAS_ARMV8_COMMON() \ __PMC_EV_ALIAS("SW_INCR", ARMV8_EVENT_00H) \ __PMC_EV_ALIAS("L1I_CACHE_REFILL", ARMV8_EVENT_01H) \ __PMC_EV_ALIAS("L1I_TLB_REFILL", ARMV8_EVENT_02H) \ __PMC_EV_ALIAS("L1D_CACHE_REFILL", ARMV8_EVENT_03H) \ __PMC_EV_ALIAS("L1D_CACHE", ARMV8_EVENT_04H) \ __PMC_EV_ALIAS("L1D_TLB_REFILL", ARMV8_EVENT_05H) \ __PMC_EV_ALIAS("INST_RETIRED", ARMV8_EVENT_08H) \ __PMC_EV_ALIAS("EXC_TAKEN", ARMV8_EVENT_09H) \ __PMC_EV_ALIAS("EXC_RETURN", ARMV8_EVENT_0AH) \ __PMC_EV_ALIAS("CID_WRITE_RETIRED", ARMV8_EVENT_0BH) \ __PMC_EV_ALIAS("BR_MIS_PRED", ARMV8_EVENT_10H) \ __PMC_EV_ALIAS("CPU_CYCLES", ARMV8_EVENT_11H) \ __PMC_EV_ALIAS("BR_PRED", ARMV8_EVENT_12H) \ __PMC_EV_ALIAS("MEM_ACCESS", ARMV8_EVENT_13H) \ __PMC_EV_ALIAS("L1I_CACHE", ARMV8_EVENT_14H) \ __PMC_EV_ALIAS("L1D_CACHE_WB", ARMV8_EVENT_15H) \ __PMC_EV_ALIAS("L2D_CACHE", ARMV8_EVENT_16H) \ __PMC_EV_ALIAS("L2D_CACHE_REFILL", ARMV8_EVENT_17H) \ __PMC_EV_ALIAS("L2D_CACHE_WB", ARMV8_EVENT_18H) \ __PMC_EV_ALIAS("BUS_ACCESS", ARMV8_EVENT_19H) \ __PMC_EV_ALIAS("MEMORY_ERROR", ARMV8_EVENT_1AH) \ __PMC_EV_ALIAS("BUS_CYCLES", ARMV8_EVENT_1DH) \ __PMC_EV_ALIAS("CHAIN", ARMV8_EVENT_1EH) \ __PMC_EV_ALIAS("BUS_ACCESS_LD", ARMV8_EVENT_60H) \ __PMC_EV_ALIAS("BUS_ACCESS_ST", ARMV8_EVENT_61H) \ __PMC_EV_ALIAS("BR_INDIRECT_SPEC", ARMV8_EVENT_7AH) \ __PMC_EV_ALIAS("EXC_IRQ", ARMV8_EVENT_86H) \ __PMC_EV_ALIAS("EXC_FIQ", ARMV8_EVENT_87H) #define __PMC_EV_ALIAS_ARMV8_CORTEX_A53() \ __PMC_EV_ALIAS_ARMV8_COMMON() \ __PMC_EV_ALIAS("LD_RETIRED", ARMV8_EVENT_06H) \ __PMC_EV_ALIAS("ST_RETIRED", ARMV8_EVENT_07H) \ __PMC_EV_ALIAS("PC_WRITE_RETIRED", ARMV8_EVENT_0CH) \ __PMC_EV_ALIAS("BR_IMMED_RETIRED", ARMV8_EVENT_0DH) \ __PMC_EV_ALIAS("BR_RETURN_RETIRED", ARMV8_EVENT_0EH) \ __PMC_EV_ALIAS("UNALIGNED_LDST_RETIRED",ARMV8_EVENT_0FH) #define __PMC_EV_ALIAS_ARMV8_CORTEX_A57() \ __PMC_EV_ALIAS_ARMV8_COMMON() \ __PMC_EV_ALIAS("INST_SPEC", ARMV8_EVENT_1BH) \ __PMC_EV_ALIAS("TTBR_WRITE_RETIRED", ARMV8_EVENT_1CH) \ __PMC_EV_ALIAS("L1D_CACHE_LD", ARMV8_EVENT_40H) \ __PMC_EV_ALIAS("L1D_CACHE_ST", ARMV8_EVENT_41H) \ __PMC_EV_ALIAS("L1D_CACHE_REFILL_LD", ARMV8_EVENT_42H) \ __PMC_EV_ALIAS("L1D_CACHE_REFILL_ST", ARMV8_EVENT_43H) \ __PMC_EV_ALIAS("L1D_CACHE_WB_VICTIM", ARMV8_EVENT_46H) \ __PMC_EV_ALIAS("L1D_CACHE_WB_CLEAN", ARMV8_EVENT_47H) \ __PMC_EV_ALIAS("L1D_CACHE_INVAL", ARMV8_EVENT_48H) \ __PMC_EV_ALIAS("L1D_TLB_REFILL_LD", ARMV8_EVENT_4CH) \ __PMC_EV_ALIAS("L1D_TLB_REFILL_ST", ARMV8_EVENT_4DH) \ __PMC_EV_ALIAS("L2D_CACHE_LD", ARMV8_EVENT_50H) \ __PMC_EV_ALIAS("L2D_CACHE_ST", ARMV8_EVENT_51H) \ __PMC_EV_ALIAS("L2D_CACHE_REFILL_LD", ARMV8_EVENT_52H) \ __PMC_EV_ALIAS("L2D_CACHE_REFILL_ST", ARMV8_EVENT_53H) \ __PMC_EV_ALIAS("L2D_CACHE_WB_VICTIM", 
ARMV8_EVENT_56H) \ __PMC_EV_ALIAS("L2D_CACHE_WB_CLEAN", ARMV8_EVENT_57H) \ __PMC_EV_ALIAS("L2D_CACHE_INVAL", ARMV8_EVENT_58H) \ __PMC_EV_ALIAS("BUS_ACCESS_SHARED", ARMV8_EVENT_62H) \ __PMC_EV_ALIAS("BUS_ACCESS_NOT_SHARED", ARMV8_EVENT_63H) \ __PMC_EV_ALIAS("BUS_ACCESS_NORMAL", ARMV8_EVENT_64H) \ __PMC_EV_ALIAS("BUS_ACCESS_PERIPH", ARMV8_EVENT_65H) \ __PMC_EV_ALIAS("MEM_ACCESS_LD", ARMV8_EVENT_66H) \ __PMC_EV_ALIAS("MEM_ACCESS_ST", ARMV8_EVENT_67H) \ __PMC_EV_ALIAS("UNALIGNED_LD_SPEC", ARMV8_EVENT_68H) \ __PMC_EV_ALIAS("UNALIGNED_ST_SPEC", ARMV8_EVENT_69H) \ __PMC_EV_ALIAS("UNALIGNED_LDST_SPEC", ARMV8_EVENT_6AH) \ __PMC_EV_ALIAS("LDREX_SPEC", ARMV8_EVENT_6CH) \ __PMC_EV_ALIAS("STREX_PASS_SPEC", ARMV8_EVENT_6DH) \ __PMC_EV_ALIAS("STREX_FAIL_SPEC", ARMV8_EVENT_6EH) \ __PMC_EV_ALIAS("LD_SPEC", ARMV8_EVENT_70H) \ __PMC_EV_ALIAS("ST_SPEC", ARMV8_EVENT_71H) \ __PMC_EV_ALIAS("LDST_SPEC", ARMV8_EVENT_72H) \ __PMC_EV_ALIAS("DP_SPEC", ARMV8_EVENT_73H) \ __PMC_EV_ALIAS("ASE_SPEC", ARMV8_EVENT_74H) \ __PMC_EV_ALIAS("VFP_SPEC", ARMV8_EVENT_75H) \ __PMC_EV_ALIAS("PC_WRITE_SPEC", ARMV8_EVENT_76H) \ __PMC_EV_ALIAS("CRYPTO_SPEC", ARMV8_EVENT_77H) \ __PMC_EV_ALIAS("BR_IMMED_SPEC", ARMV8_EVENT_78H) \ __PMC_EV_ALIAS("BR_RETURN_SPEC", ARMV8_EVENT_79H) \ __PMC_EV_ALIAS("ISB_SPEC", ARMV8_EVENT_7CH) \ __PMC_EV_ALIAS("DSB_SPEC", ARMV8_EVENT_7DH) \ __PMC_EV_ALIAS("DMB_SPEC", ARMV8_EVENT_7EH) \ __PMC_EV_ALIAS("EXC_UNDEF", ARMV8_EVENT_81H) \ __PMC_EV_ALIAS("EXC_SVC", ARMV8_EVENT_82H) \ __PMC_EV_ALIAS("EXC_PABORT", ARMV8_EVENT_83H) \ __PMC_EV_ALIAS("EXC_DABORT", ARMV8_EVENT_84H) \ __PMC_EV_ALIAS("EXC_SMC", ARMV8_EVENT_88H) \ __PMC_EV_ALIAS("EXC_HVC", ARMV8_EVENT_8AH) \ __PMC_EV_ALIAS("EXC_TRAP_PABORT", ARMV8_EVENT_8BH) \ __PMC_EV_ALIAS("EXC_TRAP_DABORT", ARMV8_EVENT_8CH) \ __PMC_EV_ALIAS("EXC_TRAP_OTHER", ARMV8_EVENT_8DH) \ __PMC_EV_ALIAS("EXC_TRAP_IRQ", ARMV8_EVENT_8EH) \ __PMC_EV_ALIAS("EXC_TRAP_FIQ", ARMV8_EVENT_8FH) \ __PMC_EV_ALIAS("RC_LD_SPEC", ARMV8_EVENT_90H) \ __PMC_EV_ALIAS("RC_ST_SPEC", ARMV8_EVENT_91H) /* * MIPS Events from "Programming the MIPS32 24K Core Family", * Document Number: MD00355 Revision 04.63 December 19, 2008 * These events are kept in the order found in Table 7.4. * For counters which are different between the left hand * column (0/2) and the right hand column (1/3) the left * hand is given first, e.g. BRANCH_COMPLETED and BRANCH_MISPRED * in the definition below. 
*/ #define __PMC_EV_MIPS24K() \ __PMC_EV(MIPS24K, CYCLE) \ __PMC_EV(MIPS24K, INSTR_EXECUTED) \ __PMC_EV(MIPS24K, BRANCH_COMPLETED) \ __PMC_EV(MIPS24K, BRANCH_MISPRED) \ __PMC_EV(MIPS24K, RETURN) \ __PMC_EV(MIPS24K, RETURN_MISPRED) \ __PMC_EV(MIPS24K, RETURN_NOT_31) \ __PMC_EV(MIPS24K, RETURN_NOTPRED) \ __PMC_EV(MIPS24K, ITLB_ACCESS) \ __PMC_EV(MIPS24K, ITLB_MISS) \ __PMC_EV(MIPS24K, DTLB_ACCESS) \ __PMC_EV(MIPS24K, DTLB_MISS) \ __PMC_EV(MIPS24K, JTLB_IACCESS) \ __PMC_EV(MIPS24K, JTLB_IMISS) \ __PMC_EV(MIPS24K, JTLB_DACCESS) \ __PMC_EV(MIPS24K, JTLB_DMISS) \ __PMC_EV(MIPS24K, IC_FETCH) \ __PMC_EV(MIPS24K, IC_MISS) \ __PMC_EV(MIPS24K, DC_LOADSTORE) \ __PMC_EV(MIPS24K, DC_WRITEBACK) \ __PMC_EV(MIPS24K, DC_MISS) \ __PMC_EV(MIPS24K, STORE_MISS) \ __PMC_EV(MIPS24K, LOAD_MISS) \ __PMC_EV(MIPS24K, INTEGER_COMPLETED) \ __PMC_EV(MIPS24K, FP_COMPLETED) \ __PMC_EV(MIPS24K, LOAD_COMPLETED) \ __PMC_EV(MIPS24K, STORE_COMPLETED) \ __PMC_EV(MIPS24K, BARRIER_COMPLETED) \ __PMC_EV(MIPS24K, MIPS16_COMPLETED) \ __PMC_EV(MIPS24K, NOP_COMPLETED) \ __PMC_EV(MIPS24K, INTEGER_MULDIV_COMPLETED)\ __PMC_EV(MIPS24K, RF_STALL) \ __PMC_EV(MIPS24K, INSTR_REFETCH) \ __PMC_EV(MIPS24K, STORE_COND_COMPLETED) \ __PMC_EV(MIPS24K, STORE_COND_FAILED) \ __PMC_EV(MIPS24K, ICACHE_REQUESTS) \ __PMC_EV(MIPS24K, ICACHE_HIT) \ __PMC_EV(MIPS24K, L2_WRITEBACK) \ __PMC_EV(MIPS24K, L2_ACCESS) \ __PMC_EV(MIPS24K, L2_MISS) \ __PMC_EV(MIPS24K, L2_ERR_CORRECTED) \ __PMC_EV(MIPS24K, EXCEPTIONS) \ __PMC_EV(MIPS24K, RF_CYCLES_STALLED) \ __PMC_EV(MIPS24K, IFU_CYCLES_STALLED) \ __PMC_EV(MIPS24K, ALU_CYCLES_STALLED) \ __PMC_EV(MIPS24K, UNCACHED_LOAD) \ __PMC_EV(MIPS24K, UNCACHED_STORE) \ __PMC_EV(MIPS24K, CP2_REG_TO_REG_COMPLETED)\ __PMC_EV(MIPS24K, MFTC_COMPLETED) \ __PMC_EV(MIPS24K, IC_BLOCKED_CYCLES) \ __PMC_EV(MIPS24K, DC_BLOCKED_CYCLES) \ __PMC_EV(MIPS24K, L2_IMISS_STALL_CYCLES) \ __PMC_EV(MIPS24K, L2_DMISS_STALL_CYCLES) \ __PMC_EV(MIPS24K, DMISS_CYCLES) \ __PMC_EV(MIPS24K, L2_MISS_CYCLES) \ __PMC_EV(MIPS24K, UNCACHED_BLOCK_CYCLES) \ __PMC_EV(MIPS24K, MDU_STALL_CYCLES) \ __PMC_EV(MIPS24K, FPU_STALL_CYCLES) \ __PMC_EV(MIPS24K, CP2_STALL_CYCLES) \ __PMC_EV(MIPS24K, COREXTEND_STALL_CYCLES) \ __PMC_EV(MIPS24K, ISPRAM_STALL_CYCLES) \ __PMC_EV(MIPS24K, DSPRAM_STALL_CYCLES) \ __PMC_EV(MIPS24K, CACHE_STALL_CYCLES) \ __PMC_EV(MIPS24K, LOAD_TO_USE_STALLS) \ __PMC_EV(MIPS24K, BASE_MISPRED_STALLS) \ __PMC_EV(MIPS24K, CPO_READ_STALLS) \ __PMC_EV(MIPS24K, BRANCH_MISPRED_CYCLES) \ __PMC_EV(MIPS24K, IFETCH_BUFFER_FULL) \ __PMC_EV(MIPS24K, FETCH_BUFFER_ALLOCATED) \ __PMC_EV(MIPS24K, EJTAG_ITRIGGER) \ __PMC_EV(MIPS24K, EJTAG_DTRIGGER) \ __PMC_EV(MIPS24K, FSB_LT_QUARTER) \ __PMC_EV(MIPS24K, FSB_QUARTER_TO_HALF) \ __PMC_EV(MIPS24K, FSB_GT_HALF) \ __PMC_EV(MIPS24K, FSB_FULL_PIPELINE_STALLS)\ __PMC_EV(MIPS24K, LDQ_LT_QUARTER) \ __PMC_EV(MIPS24K, LDQ_QUARTER_TO_HALF) \ __PMC_EV(MIPS24K, LDQ_GT_HALF) \ __PMC_EV(MIPS24K, LDQ_FULL_PIPELINE_STALLS)\ __PMC_EV(MIPS24K, WBB_LT_QUARTER) \ __PMC_EV(MIPS24K, WBB_QUARTER_TO_HALF) \ __PMC_EV(MIPS24K, WBB_GT_HALF) \ __PMC_EV(MIPS24K, WBB_FULL_PIPELINE_STALLS) \ __PMC_EV(MIPS24K, REQUEST_LATENCY) \ __PMC_EV(MIPS24K, REQUEST_COUNT) #define PMC_EV_MIPS24K_FIRST PMC_EV_MIPS24K_CYCLE #define PMC_EV_MIPS24K_LAST PMC_EV_MIPS24K_WBB_FULL_PIPELINE_STALLS /* * MIPS74k events. Similar to MIPS24k, the arrangement * is (0,2) then (1,3) events. 
*/ #define __PMC_EV_MIPS74K() \ __PMC_EV(MIPS74K, CYCLES) \ __PMC_EV(MIPS74K, INSTR_EXECUTED) \ __PMC_EV(MIPS74K, PREDICTED_JR_31) \ __PMC_EV(MIPS74K, JR_31_MISPREDICTIONS) \ __PMC_EV(MIPS74K, REDIRECT_STALLS) \ __PMC_EV(MIPS74K, JR_31_NO_PREDICTIONS) \ __PMC_EV(MIPS74K, ITLB_ACCESSES) \ __PMC_EV(MIPS74K, ITLB_MISSES) \ __PMC_EV(MIPS74K, JTLB_INSN_MISSES) \ __PMC_EV(MIPS74K, ICACHE_ACCESSES) \ __PMC_EV(MIPS74K, ICACHE_MISSES) \ __PMC_EV(MIPS74K, ICACHE_MISS_STALLS) \ __PMC_EV(MIPS74K, UNCACHED_IFETCH_STALLS) \ __PMC_EV(MIPS74K, PDTRACE_BACK_STALLS) \ __PMC_EV(MIPS74K, IFU_REPLAYS) \ __PMC_EV(MIPS74K, KILLED_FETCH_SLOTS) \ __PMC_EV(MIPS74K, IFU_IDU_MISS_PRED_UPSTREAM_CYCLES) \ __PMC_EV(MIPS74K, IFU_IDU_NO_FETCH_CYCLES) \ __PMC_EV(MIPS74K, IFU_IDU_CLOGED_DOWNSTREAM_CYCLES) \ __PMC_EV(MIPS74K, DDQ0_FULL_DR_STALLS) \ __PMC_EV(MIPS74K, DDQ1_FULL_DR_STALLS) \ __PMC_EV(MIPS74K, ALCB_FULL_DR_STALLS) \ __PMC_EV(MIPS74K, AGCB_FULL_DR_STALLS) \ __PMC_EV(MIPS74K, CLDQ_FULL_DR_STALLS) \ __PMC_EV(MIPS74K, IODQ_FULL_DR_STALLS) \ __PMC_EV(MIPS74K, ALU_EMPTY_CYCLES) \ __PMC_EV(MIPS74K, AGEN_EMPTY_CYCLES) \ __PMC_EV(MIPS74K, ALU_OPERANDS_NOT_READY_CYCLES) \ __PMC_EV(MIPS74K, AGEN_OPERANDS_NOT_READY_CYCLES) \ __PMC_EV(MIPS74K, ALU_NO_ISSUES_CYCLES) \ __PMC_EV(MIPS74K, AGEN_NO_ISSUES_CYCLES) \ __PMC_EV(MIPS74K, ALU_BUBBLE_CYCLES) \ __PMC_EV(MIPS74K, AGEN_BUBBLE_CYCLES) \ __PMC_EV(MIPS74K, SINGLE_ISSUE_CYCLES) \ __PMC_EV(MIPS74K, DUAL_ISSUE_CYCLES) \ __PMC_EV(MIPS74K, OOO_ALU_ISSUE_CYCLES) \ __PMC_EV(MIPS74K, OOO_AGEN_ISSUE_CYCLES) \ __PMC_EV(MIPS74K, JALR_JALR_HB_INSNS) \ __PMC_EV(MIPS74K, DCACHE_LINE_REFILL_REQUESTS) \ __PMC_EV(MIPS74K, DCACHE_LOAD_ACCESSES) \ __PMC_EV(MIPS74K, DCACHE_ACCESSES) \ __PMC_EV(MIPS74K, DCACHE_WRITEBACKS) \ __PMC_EV(MIPS74K, DCACHE_MISSES) \ __PMC_EV(MIPS74K, JTLB_DATA_ACCESSES) \ __PMC_EV(MIPS74K, JTLB_DATA_MISSES) \ __PMC_EV(MIPS74K, LOAD_STORE_REPLAYS) \ __PMC_EV(MIPS74K, VA_TRANSALTION_CORNER_CASES) \ __PMC_EV(MIPS74K, LOAD_STORE_BLOCKED_CYCLES) \ __PMC_EV(MIPS74K, LOAD_STORE_NO_FILL_REQUESTS) \ __PMC_EV(MIPS74K, L2_CACHE_WRITEBACKS) \ __PMC_EV(MIPS74K, L2_CACHE_ACCESSES) \ __PMC_EV(MIPS74K, L2_CACHE_MISSES) \ __PMC_EV(MIPS74K, L2_CACHE_MISS_CYCLES) \ __PMC_EV(MIPS74K, FSB_FULL_STALLS) \ __PMC_EV(MIPS74K, FSB_OVER_50_FULL) \ __PMC_EV(MIPS74K, LDQ_FULL_STALLS) \ __PMC_EV(MIPS74K, LDQ_OVER_50_FULL) \ __PMC_EV(MIPS74K, WBB_FULL_STALLS) \ __PMC_EV(MIPS74K, WBB_OVER_50_FULL) \ __PMC_EV(MIPS74K, LOAD_MISS_CONSUMER_REPLAYS) \ __PMC_EV(MIPS74K, CP1_CP2_LOAD_INSNS) \ __PMC_EV(MIPS74K, JR_NON_31_INSNS) \ __PMC_EV(MIPS74K, MISPREDICTED_JR_31_INSNS) \ __PMC_EV(MIPS74K, BRANCH_INSNS) \ __PMC_EV(MIPS74K, CP1_CP2_COND_BRANCH_INSNS) \ __PMC_EV(MIPS74K, BRANCH_LIKELY_INSNS) \ __PMC_EV(MIPS74K, MISPREDICTED_BRANCH_LIKELY_INSNS) \ __PMC_EV(MIPS74K, COND_BRANCH_INSNS) \ __PMC_EV(MIPS74K, MISPREDICTED_BRANCH_INSNS) \ __PMC_EV(MIPS74K, INTEGER_INSNS) \ __PMC_EV(MIPS74K, FPU_INSNS) \ __PMC_EV(MIPS74K, LOAD_INSNS) \ __PMC_EV(MIPS74K, STORE_INSNS) \ __PMC_EV(MIPS74K, J_JAL_INSNS) \ __PMC_EV(MIPS74K, MIPS16_INSNS) \ __PMC_EV(MIPS74K, NOP_INSNS) \ __PMC_EV(MIPS74K, NT_MUL_DIV_INSNS) \ __PMC_EV(MIPS74K, DSP_INSNS) \ __PMC_EV(MIPS74K, ALU_DSP_SATURATION_INSNS) \ __PMC_EV(MIPS74K, DSP_BRANCH_INSNS) \ __PMC_EV(MIPS74K, MDU_DSP_SATURATION_INSNS) \ __PMC_EV(MIPS74K, UNCACHED_LOAD_INSNS) \ __PMC_EV(MIPS74K, UNCACHED_STORE_INSNS) \ __PMC_EV(MIPS74K, EJTAG_INSN_TRIGGERS) \ __PMC_EV(MIPS74K, CP1_BRANCH_MISPREDICTIONS) \ __PMC_EV(MIPS74K, SC_INSNS) \ __PMC_EV(MIPS74K, FAILED_SC_INSNS) \ __PMC_EV(MIPS74K, 
PREFETCH_INSNS) \ __PMC_EV(MIPS74K, CACHE_HIT_PREFETCH_INSNS) \ __PMC_EV(MIPS74K, NO_INSN_CYCLES) \ __PMC_EV(MIPS74K, LOAD_MISS_INSNS) \ __PMC_EV(MIPS74K, ONE_INSN_CYCLES) \ __PMC_EV(MIPS74K, TWO_INSNS_CYCLES) \ __PMC_EV(MIPS74K, GFIFO_BLOCKED_CYCLES) \ __PMC_EV(MIPS74K, CP1_CP2_STORE_INSNS) \ __PMC_EV(MIPS74K, MISPREDICTION_STALLS) \ __PMC_EV(MIPS74K, MISPREDICTED_BRANCH_INSNS_CYCLES) \ __PMC_EV(MIPS74K, EXCEPTIONS_TAKEN) \ __PMC_EV(MIPS74K, GRADUATION_REPLAYS) \ __PMC_EV(MIPS74K, COREEXTEND_EVENTS) \ __PMC_EV(MIPS74K, ISPRAM_EVENTS) \ __PMC_EV(MIPS74K, DSPRAM_EVENTS) \ __PMC_EV(MIPS74K, L2_CACHE_SINGLE_BIT_ERRORS) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_0) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_1) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_2) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_3) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_4) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_5) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_6) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_7) \ __PMC_EV(MIPS74K, OCP_ALL_REQUESTS) \ __PMC_EV(MIPS74K, OCP_ALL_CACHEABLE_REQUESTS) \ __PMC_EV(MIPS74K, OCP_READ_REQUESTS) \ __PMC_EV(MIPS74K, OCP_READ_CACHEABLE_REQUESTS) \ __PMC_EV(MIPS74K, OCP_WRITE_REQUESTS) \ __PMC_EV(MIPS74K, OCP_WRITE_CACHEABLE_REQUESTS) \ __PMC_EV(MIPS74K, FSB_LESS_25_FULL) \ __PMC_EV(MIPS74K, FSB_25_50_FULL) \ __PMC_EV(MIPS74K, LDQ_LESS_25_FULL) \ __PMC_EV(MIPS74K, LDQ_25_50_FULL) \ __PMC_EV(MIPS74K, WBB_LESS_25_FULL) \ __PMC_EV(MIPS74K, WBB_25_50_FULL) #define PMC_EV_MIPS74K_FIRST PMC_EV_MIPS74K_CYCLES #define PMC_EV_MIPS74K_LAST PMC_EV_MIPS74K_WBB_25_50_FULL /* * Cavium Octeon counters. Obtained from cvmx-core.h */ #define __PMC_EV_OCTEON() \ __PMC_EV(OCTEON, CLK) \ __PMC_EV(OCTEON, ISSUE) \ __PMC_EV(OCTEON, RET) \ __PMC_EV(OCTEON, NISSUE) \ __PMC_EV(OCTEON, SISSUE) \ __PMC_EV(OCTEON, DISSUE) \ __PMC_EV(OCTEON, IFI) \ __PMC_EV(OCTEON, BR) \ __PMC_EV(OCTEON, BRMIS) \ __PMC_EV(OCTEON, J) \ __PMC_EV(OCTEON, JMIS) \ __PMC_EV(OCTEON, REPLAY) \ __PMC_EV(OCTEON, IUNA) \ __PMC_EV(OCTEON, TRAP) \ __PMC_EV(OCTEON, UULOAD) \ __PMC_EV(OCTEON, UUSTORE) \ __PMC_EV(OCTEON, ULOAD) \ __PMC_EV(OCTEON, USTORE) \ __PMC_EV(OCTEON, EC) \ __PMC_EV(OCTEON, MC) \ __PMC_EV(OCTEON, CC) \ __PMC_EV(OCTEON, CSRC) \ __PMC_EV(OCTEON, CFETCH) \ __PMC_EV(OCTEON, CPREF) \ __PMC_EV(OCTEON, ICA) \ __PMC_EV(OCTEON, II) \ __PMC_EV(OCTEON, IP) \ __PMC_EV(OCTEON, CIMISS) \ __PMC_EV(OCTEON, WBUF) \ __PMC_EV(OCTEON, WDAT) \ __PMC_EV(OCTEON, WBUFLD) \ __PMC_EV(OCTEON, WBUFFL) \ __PMC_EV(OCTEON, WBUFTR) \ __PMC_EV(OCTEON, BADD) \ __PMC_EV(OCTEON, BADDL2) \ __PMC_EV(OCTEON, BFILL) \ __PMC_EV(OCTEON, DDIDS) \ __PMC_EV(OCTEON, IDIDS) \ __PMC_EV(OCTEON, DIDNA) \ __PMC_EV(OCTEON, LDS) \ __PMC_EV(OCTEON, LMLDS) \ __PMC_EV(OCTEON, IOLDS) \ __PMC_EV(OCTEON, DMLDS) \ __PMC_EV(OCTEON, STS) \ __PMC_EV(OCTEON, LMSTS) \ __PMC_EV(OCTEON, IOSTS) \ __PMC_EV(OCTEON, IOBDMA) \ __PMC_EV(OCTEON, DTLB) \ __PMC_EV(OCTEON, DTLBAD) \ __PMC_EV(OCTEON, ITLB) \ __PMC_EV(OCTEON, SYNC) \ __PMC_EV(OCTEON, SYNCIOB) \ __PMC_EV(OCTEON, SYNCW) #define PMC_EV_OCTEON_FIRST PMC_EV_OCTEON_CLK #define PMC_EV_OCTEON_LAST PMC_EV_OCTEON_SYNCW #define __PMC_EV_PPC7450() \ __PMC_EV(PPC7450, CYCLE) \ __PMC_EV(PPC7450, INSTR_COMPLETED) \ __PMC_EV(PPC7450, TLB_BIT_TRANSITIONS) \ __PMC_EV(PPC7450, INSTR_DISPATCHED) \ __PMC_EV(PPC7450, PMON_EXCEPT) \ __PMC_EV(PPC7450, PMON_SIG) \ __PMC_EV(PPC7450, VPU_INSTR_COMPLETED) \ __PMC_EV(PPC7450, VFPU_INSTR_COMPLETED) \ __PMC_EV(PPC7450, VIU1_INSTR_COMPLETED) \ __PMC_EV(PPC7450, VIU2_INSTR_COMPLETED) \ __PMC_EV(PPC7450, MTVSCR_INSTR_COMPLETED) \ __PMC_EV(PPC7450, MTVRSAVE_INSTR_COMPLETED) \ __PMC_EV(PPC7450, 
VPU_INSTR_WAIT_CYCLES) \ __PMC_EV(PPC7450, VFPU_INSTR_WAIT_CYCLES) \ __PMC_EV(PPC7450, VIU1_INSTR_WAIT_CYCLES) \ __PMC_EV(PPC7450, VIU2_INSTR_WAIT_CYCLES) \ __PMC_EV(PPC7450, MFVSCR_SYNC_CYCLES) \ __PMC_EV(PPC7450, VSCR_SAT_SET) \ __PMC_EV(PPC7450, STORE_INSTR_COMPLETED) \ __PMC_EV(PPC7450, L1_INSTR_CACHE_MISSES) \ __PMC_EV(PPC7450, L1_DATA_SNOOPS) \ __PMC_EV(PPC7450, UNRESOLVED_BRANCHES) \ __PMC_EV(PPC7450, SPEC_BUFFER_CYCLES) \ __PMC_EV(PPC7450, BRANCH_UNIT_STALL_CYCLES) \ __PMC_EV(PPC7450, TRUE_BRANCH_TARGET_HITS) \ __PMC_EV(PPC7450, BRANCH_LINK_STAC_PREDICTED) \ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_DISPATCHES) \ __PMC_EV(PPC7450, CYCLES_THREE_INSTR_DISPATCHED) \ __PMC_EV(PPC7450, THRESHOLD_INSTR_QUEUE_ENTRIES_CYCLES) \ __PMC_EV(PPC7450, THRESHOLD_VEC_INSTR_QUEUE_ENTRIES_CYCLES) \ __PMC_EV(PPC7450, CYCLES_NO_COMPLETED_INSTRS) \ __PMC_EV(PPC7450, IU2_INSTR_COMPLETED) \ __PMC_EV(PPC7450, BRANCHES_COMPLETED) \ __PMC_EV(PPC7450, EIEIO_INSTR_COMPLETED) \ __PMC_EV(PPC7450, MTSPR_INSTR_COMPLETED) \ __PMC_EV(PPC7450, SC_INSTR_COMPLETED) \ __PMC_EV(PPC7450, LS_LM_COMPLETED) \ __PMC_EV(PPC7450, ITLB_HW_TABLE_SEARCH_CYCLES) \ __PMC_EV(PPC7450, DTLB_HW_SEARCH_CYCLES_OVER_THRESHOLD) \ __PMC_EV(PPC7450, L1_INSTR_CACHE_ACCESSES) \ __PMC_EV(PPC7450, INSTR_BKPT_MATCHES) \ __PMC_EV(PPC7450, L1_DATA_CACHE_LOAD_MISS_CYCLES_OVER_THRESHOLD)\ __PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_ON_MODIFIED) \ __PMC_EV(PPC7450, LOAD_MISS_ALIAS) \ __PMC_EV(PPC7450, LOAD_MISS_ALIAS_ON_TOUCH) \ __PMC_EV(PPC7450, TOUCH_ALIAS) \ __PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_CASTOUT_QUEUE) \ __PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_CASTOUT) \ __PMC_EV(PPC7450, L1_DATA_SNOOP_HITS) \ __PMC_EV(PPC7450, WRITE_THROUGH_STORES) \ __PMC_EV(PPC7450, CACHE_INHIBITED_STORES) \ __PMC_EV(PPC7450, L1_DATA_LOAD_HIT) \ __PMC_EV(PPC7450, L1_DATA_TOUCH_HIT) \ __PMC_EV(PPC7450, L1_DATA_STORE_HIT) \ __PMC_EV(PPC7450, L1_DATA_TOTAL_HITS) \ __PMC_EV(PPC7450, DST_INSTR_DISPATCHED) \ __PMC_EV(PPC7450, REFRESHED_DSTS) \ __PMC_EV(PPC7450, SUCCESSFUL_DST_TABLE_SEARCHES) \ __PMC_EV(PPC7450, DSS_INSTR_COMPLETED) \ __PMC_EV(PPC7450, DST_STREAM_0_CACHE_LINE_FETCHES) \ __PMC_EV(PPC7450, VTQ_SUSPENDS_DUE_TO_CTX_CHANGE) \ __PMC_EV(PPC7450, VTQ_LINE_FETCH_HIT) \ __PMC_EV(PPC7450, VEC_LOAD_INSTR_COMPLETED) \ __PMC_EV(PPC7450, FP_STORE_INSTR_COMPLETED_IN_LSU) \ __PMC_EV(PPC7450, FPU_RENORMALIZATION) \ __PMC_EV(PPC7450, FPU_DENORMALIZATION) \ __PMC_EV(PPC7450, FP_STORE_CAUSES_STALL_IN_LSU) \ __PMC_EV(PPC7450, LD_ST_TRUE_ALIAS_STALL) \ __PMC_EV(PPC7450, LSU_INDEXED_ALIAS_STALL) \ __PMC_EV(PPC7450, LSU_ALIAS_VS_FSQ_WB0_WB1) \ __PMC_EV(PPC7450, LSU_ALIAS_VS_CSQ) \ __PMC_EV(PPC7450, LSU_LOAD_HIT_LINE_ALIAS_VS_CSQ0) \ __PMC_EV(PPC7450, LSU_LOAD_MISS_LINE_ALIAS_VS_CSQ0) \ __PMC_EV(PPC7450, LSU_TOUCH_LINE_ALIAS_VS_FSQ_WB0_WB1) \ __PMC_EV(PPC7450, LSU_TOUCH_ALIAS_VS_CSQ) \ __PMC_EV(PPC7450, LSU_LMQ_FULL_STALL) \ __PMC_EV(PPC7450, FP_LOAD_INSTR_COMPLETED_IN_LSU) \ __PMC_EV(PPC7450, FP_LOAD_SINGLE_INSTR_COMPLETED_IN_LSU) \ __PMC_EV(PPC7450, FP_LOAD_DOUBLE_COMPLETED_IN_LSU) \ __PMC_EV(PPC7450, LSU_RA_LATCH_STALL) \ __PMC_EV(PPC7450, LSU_LOAD_VS_STORE_QUEUE_ALIAS_STALL) \ __PMC_EV(PPC7450, LSU_LMQ_INDEX_ALIAS) \ __PMC_EV(PPC7450, LSU_STORE_QUEUE_INDEX_ALIAS) \ __PMC_EV(PPC7450, LSU_CSQ_FORWARDING) \ __PMC_EV(PPC7450, LSU_MISALIGNED_LOAD_FINISH) \ __PMC_EV(PPC7450, LSU_MISALIGN_STORE_COMPLETED) \ __PMC_EV(PPC7450, LSU_MISALIGN_STALL) \ __PMC_EV(PPC7450, FP_ONE_QUARTER_FPSCR_RENAMES_BUSY) \ __PMC_EV(PPC7450, FP_ONE_HALF_FPSCR_RENAMES_BUSY) \ __PMC_EV(PPC7450, FP_THREE_QUARTERS_FPSCR_RENAMES_BUSY) \ 
__PMC_EV(PPC7450, FP_ALL_FPSCR_RENAMES_BUSY) \ __PMC_EV(PPC7450, FP_DENORMALIZED_RESULT) \ __PMC_EV(PPC7450, L1_DATA_TOTAL_MISSES) \ __PMC_EV(PPC7450, DISPATCHES_TO_FPR_ISSUE_QUEUE) \ __PMC_EV(PPC7450, LSU_INSTR_COMPLETED) \ __PMC_EV(PPC7450, LOAD_INSTR_COMPLETED) \ __PMC_EV(PPC7450, SS_SM_INSTR_COMPLETED) \ __PMC_EV(PPC7450, TLBIE_INSTR_COMPLETED) \ __PMC_EV(PPC7450, LWARX_INSTR_COMPLETED) \ __PMC_EV(PPC7450, MFSPR_INSTR_COMPLETED) \ __PMC_EV(PPC7450, REFETCH_SERIALIZATION) \ __PMC_EV(PPC7450, COMPLETION_QUEUE_ENTRIES_OVER_THRESHOLD) \ __PMC_EV(PPC7450, CYCLES_ONE_INSTR_DISPATCHED) \ __PMC_EV(PPC7450, CYCLES_TWO_INSTR_COMPLETED) \ __PMC_EV(PPC7450, ITLB_NON_SPECULATIVE_MISSES) \ __PMC_EV(PPC7450, CYCLES_WAITING_FROM_L1_INSTR_CACHE_MISS) \ __PMC_EV(PPC7450, L1_DATA_LOAD_ACCESS_MISS) \ __PMC_EV(PPC7450, L1_DATA_TOUCH_MISS) \ __PMC_EV(PPC7450, L1_DATA_STORE_MISS) \ __PMC_EV(PPC7450, L1_DATA_TOUCH_MISS_CYCLES) \ __PMC_EV(PPC7450, L1_DATA_CYCLES_USED) \ __PMC_EV(PPC7450, DST_STREAM_1_CACHE_LINE_FETCHES) \ __PMC_EV(PPC7450, VTQ_STREAM_CANCELED_PREMATURELY) \ __PMC_EV(PPC7450, VTQ_RESUMES_DUE_TO_CTX_CHANGE) \ __PMC_EV(PPC7450, VTQ_LINE_FETCH_MISS) \ __PMC_EV(PPC7450, VTQ_LINE_FETCH) \ __PMC_EV(PPC7450, TLBIE_SNOOPS) \ __PMC_EV(PPC7450, L1_INSTR_CACHE_RELOADS) \ __PMC_EV(PPC7450, L1_DATA_CACHE_RELOADS) \ __PMC_EV(PPC7450, L1_DATA_CACHE_CASTOUTS_TO_L2) \ __PMC_EV(PPC7450, STORE_MERGE_GATHER) \ __PMC_EV(PPC7450, CACHEABLE_STORE_MERGE_TO_32_BYTES) \ __PMC_EV(PPC7450, DATA_BKPT_MATCHES) \ __PMC_EV(PPC7450, FALL_THROUGH_BRANCHES_PROCESSED) \ __PMC_EV(PPC7450, \ FIRST_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \ __PMC_EV(PPC7450, SECOND_SPECULATION_BUFFER_ACTIVE) \ __PMC_EV(PPC7450, BPU_STALL_ON_LR_DEPENDENCY) \ __PMC_EV(PPC7450, BTIC_MISS) \ __PMC_EV(PPC7450, BRANCH_LINK_STACK_CORRECTLY_RESOLVED) \ __PMC_EV(PPC7450, FPR_ISSUE_STALLED) \ __PMC_EV(PPC7450, SWITCHES_BETWEEN_PRIV_USER) \ __PMC_EV(PPC7450, LSU_COMPLETES_FP_STORE_SINGLE) \ __PMC_EV(PPC7450, VR_ISSUE_QUEUE_DISPATCHES) \ __PMC_EV(PPC7450, VR_STALLS) \ __PMC_EV(PPC7450, GPR_RENAME_BUFFER_ENTRIES_OVER_THRESHOLD) \ __PMC_EV(PPC7450, FPR_ISSUE_QUEUE_ENTRIES) \ __PMC_EV(PPC7450, FPU_INSTR_COMPLETED) \ __PMC_EV(PPC7450, STWCX_INSTR_COMPLETED) \ __PMC_EV(PPC7450, LS_LM_INSTR_PIECES) \ __PMC_EV(PPC7450, ITLB_HW_SEARCH_CYCLES_OVER_THRESHOLD) \ __PMC_EV(PPC7450, DTLB_MISSES) \ __PMC_EV(PPC7450, CANCELLED_L1_INSTR_CACHE_MISSES) \ __PMC_EV(PPC7450, L1_DATA_CACHE_OP_HIT) \ __PMC_EV(PPC7450, L1_DATA_LOAD_MISS_CYCLES) \ __PMC_EV(PPC7450, L1_DATA_PUSHES) \ __PMC_EV(PPC7450, L1_DATA_TOTAL_MISS) \ __PMC_EV(PPC7450, VT2_FETCHES) \ __PMC_EV(PPC7450, TAKEN_BRANCHES_PROCESSED) \ __PMC_EV(PPC7450, BRANCH_FLUSHES) \ __PMC_EV(PPC7450, \ SECOND_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \ __PMC_EV(PPC7450, THIRD_SPECULATION_BUFFER_ACTIVE) \ __PMC_EV(PPC7450, BRANCH_UNIT_STALL_ON_CTR_DEPENDENCY) \ __PMC_EV(PPC7450, FAST_BTIC_HIT) \ __PMC_EV(PPC7450, BRANCH_LINK_STACK_MISPREDICTED) \ __PMC_EV(PPC7450, CYCLES_THREE_INSTR_COMPLETED) \ __PMC_EV(PPC7450, CYCLES_NO_INSTR_DISPATCHED) \ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_ENTRIES_OVER_THRESHOLD) \ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_STALLED) \ __PMC_EV(PPC7450, IU1_INSTR_COMPLETED) \ __PMC_EV(PPC7450, DSSALL_INSTR_COMPLETED) \ __PMC_EV(PPC7450, TLBSYNC_INSTR_COMPLETED) \ __PMC_EV(PPC7450, SYNC_INSTR_COMPLETED) \ __PMC_EV(PPC7450, SS_SM_INSTR_PIECES) \ __PMC_EV(PPC7450, DTLB_HW_SEARCH_CYCLES) \ __PMC_EV(PPC7450, SNOOP_RETRIES) \ __PMC_EV(PPC7450, SUCCESSFUL_STWCX) \ __PMC_EV(PPC7450, DST_STREAM_3_CACHE_LINE_FETCHES) \ 
__PMC_EV(PPC7450, \ THIRD_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \ __PMC_EV(PPC7450, MISPREDICTED_BRANCHES) \ __PMC_EV(PPC7450, FOLDED_BRANCHES) \ __PMC_EV(PPC7450, FP_STORE_DOUBLE_COMPLETES_IN_LSU) \ __PMC_EV(PPC7450, L2_CACHE_HITS) \ __PMC_EV(PPC7450, L3_CACHE_HITS) \ __PMC_EV(PPC7450, L2_INSTR_CACHE_MISSES) \ __PMC_EV(PPC7450, L3_INSTR_CACHE_MISSES) \ __PMC_EV(PPC7450, L2_DATA_CACHE_MISSES) \ __PMC_EV(PPC7450, L3_DATA_CACHE_MISSES) \ __PMC_EV(PPC7450, L2_LOAD_HITS) \ __PMC_EV(PPC7450, L2_STORE_HITS) \ __PMC_EV(PPC7450, L3_LOAD_HITS) \ __PMC_EV(PPC7450, L3_STORE_HITS) \ __PMC_EV(PPC7450, L2_TOUCH_HITS) \ __PMC_EV(PPC7450, L3_TOUCH_HITS) \ __PMC_EV(PPC7450, SNOOP_MODIFIED) \ __PMC_EV(PPC7450, SNOOP_VALID) \ __PMC_EV(PPC7450, INTERVENTION) \ __PMC_EV(PPC7450, L2_CACHE_MISSES) \ __PMC_EV(PPC7450, L3_CACHE_MISSES) \ __PMC_EV(PPC7450, L2_CACHE_CASTOUTS) \ __PMC_EV(PPC7450, L3_CACHE_CASTOUTS) \ __PMC_EV(PPC7450, L2SQ_FULL_CYCLES) \ __PMC_EV(PPC7450, L3SQ_FULL_CYCLES) \ __PMC_EV(PPC7450, RAQ_FULL_CYCLES) \ __PMC_EV(PPC7450, WAQ_FULL_CYCLES) \ __PMC_EV(PPC7450, L1_EXTERNAL_INTERVENTIONS) \ __PMC_EV(PPC7450, L2_EXTERNAL_INTERVENTIONS) \ __PMC_EV(PPC7450, L3_EXTERNAL_INTERVENTIONS) \ __PMC_EV(PPC7450, EXTERNAL_INTERVENTIONS) \ __PMC_EV(PPC7450, EXTERNAL_PUSHES) \ __PMC_EV(PPC7450, EXTERNAL_SNOOP_RETRY) \ __PMC_EV(PPC7450, DTQ_FULL_CYCLES) \ __PMC_EV(PPC7450, BUS_RETRY) \ __PMC_EV(PPC7450, L2_VALID_REQUEST) \ __PMC_EV(PPC7450, BORDQ_FULL) \ __PMC_EV(PPC7450, BUS_TAS_FOR_READS) \ __PMC_EV(PPC7450, BUS_TAS_FOR_WRITES) \ __PMC_EV(PPC7450, BUS_READS_NOT_RETRIED) \ __PMC_EV(PPC7450, BUS_WRITES_NOT_RETRIED) \ __PMC_EV(PPC7450, BUS_READS_WRITES_NOT_RETRIED) \ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_L1_RETRY) \ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_PREVIOUS_ADJACENT) \ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_COLLISION) \ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_INTERVENTION_ORDERING) \ __PMC_EV(PPC7450, SNOOP_REQUESTS) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_REQUEST) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_LOAD) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_STORE) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_INSTR_FETCH) \ __PMC_EV(PPC7450, \ PREFETCH_ENGINE_COLLISION_VS_LOAD_STORE_INSTR_FETCH) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_FULL) #define PMC_EV_PPC7450_FIRST PMC_EV_PPC7450_CYCLE #define PMC_EV_PPC7450_LAST PMC_EV_PPC7450_PREFETCH_ENGINE_FULL #define __PMC_EV_PPC970() \ __PMC_EV(PPC970, INSTR_COMPLETED) \ __PMC_EV(PPC970, MARKED_GROUP_DISPATCH) \ __PMC_EV(PPC970, MARKED_STORE_COMPLETED) \ __PMC_EV(PPC970, GCT_EMPTY) \ __PMC_EV(PPC970, RUN_CYCLES) \ __PMC_EV(PPC970, OVERFLOW) \ __PMC_EV(PPC970, CYCLES) \ __PMC_EV(PPC970, THRESHOLD_TIMEOUT) \ __PMC_EV(PPC970, GROUP_DISPATCH) \ __PMC_EV(PPC970, BR_MARKED_INSTR_FINISH) \ __PMC_EV(PPC970, GCT_EMPTY_BY_SRQ_FULL) \ __PMC_EV(PPC970, STOP_COMPLETION) \ __PMC_EV(PPC970, LSU_EMPTY) \ __PMC_EV(PPC970, MARKED_STORE_WITH_INTR) \ __PMC_EV(PPC970, CYCLES_IN_SUPER) \ __PMC_EV(PPC970, VPU_MARKED_INSTR_COMPLETED) \ __PMC_EV(PPC970, FXU0_IDLE_FXU1_BUSY) \ __PMC_EV(PPC970, SRQ_EMPTY) \ __PMC_EV(PPC970, MARKED_GROUP_COMPLETED) \ __PMC_EV(PPC970, CR_MARKED_INSTR_FINISH) \ __PMC_EV(PPC970, DISPATCH_SUCCESS) \ __PMC_EV(PPC970, FXU0_IDLE_FXU1_IDLE) \ __PMC_EV(PPC970, ONE_PLUS_INSTR_COMPLETED) \ __PMC_EV(PPC970, GROUP_MARKED_IDU) \ __PMC_EV(PPC970, MARKED_GROUP_COMPLETE_TIMEOUT) \ __PMC_EV(PPC970, FXU0_BUSY_FXU1_BUSY) \ __PMC_EV(PPC970, MARKED_STORE_SENT_TO_STS) \ __PMC_EV(PPC970, FXU_MARKED_INSTR_FINISHED) \ __PMC_EV(PPC970, MARKED_GROUP_ISSUED) \ __PMC_EV(PPC970, 
FXU0_BUSY_FXU1_IDLE) \ __PMC_EV(PPC970, GROUP_COMPLETED) \ __PMC_EV(PPC970, FPU_MARKED_INSTR_COMPLETED) \ __PMC_EV(PPC970, MARKED_INSTR_FINISH_ANY_UNIT) \ __PMC_EV(PPC970, EXTERNAL_INTERRUPT) \ __PMC_EV(PPC970, GROUP_DISPATCH_REJECT) \ __PMC_EV(PPC970, LSU_MARKED_INSTR_FINISH) \ __PMC_EV(PPC970, TIMEBASE_EVENT) \ __PMC_EV(PPC970, LSU_COMPLETION_STALL) \ __PMC_EV(PPC970, FXU_COMPLETION_STALL) \ __PMC_EV(PPC970, DCACHE_MISS_COMPLETION_STALL) \ __PMC_EV(PPC970, FPU_COMPLETION_STALL) \ __PMC_EV(PPC970, FXU_LONG_INSTR_COMPLETION_STALL) \ __PMC_EV(PPC970, REJECT_COMPLETION_STALL) \ __PMC_EV(PPC970, FPU_LONG_INSTR_COMPLETION_STALL) \ __PMC_EV(PPC970, GCT_EMPTY_BY_ICACHE_MISS) \ __PMC_EV(PPC970, REJECT_COMPLETION_STALL_ERAT_MISS) \ __PMC_EV(PPC970, GCT_EMPTY_BY_BRANCH_MISS_PREDICT) \ __PMC_EV(PPC970, BUS_HIGH) \ __PMC_EV(PPC970, BUS_LOW) \ __PMC_EV(PPC970, ADDER) #define PMC_EV_PPC970_FIRST PMC_EV_PPC970_INSTR_COMPLETED #define PMC_EV_PPC970_LAST PMC_EV_PPC970_ADDER #define __PMC_EV_E500() \ __PMC_EV(E500, CYCLES) \ __PMC_EV(E500, INSTR_COMPLETED) \ __PMC_EV(E500, UOPS_COMPLETED) \ __PMC_EV(E500, INSTR_FETCHED) \ __PMC_EV(E500, UOPS_DECODED) \ __PMC_EV(E500, PM_EVENT_TRANSITIONS) \ __PMC_EV(E500, PM_EVENT_CYCLES) \ __PMC_EV(E500, BRANCH_INSTRS_COMPLETED) \ __PMC_EV(E500, LOAD_UOPS_COMPLETED) \ __PMC_EV(E500, STORE_UOPS_COMPLETED) \ __PMC_EV(E500, CQ_REDIRECTS) \ __PMC_EV(E500, BRANCHES_FINISHED) \ __PMC_EV(E500, TAKEN_BRANCHES_FINISHED) \ __PMC_EV(E500, FINISHED_UNCOND_BRANCHES_MISS_BTB) \ __PMC_EV(E500, BRANCH_MISPRED) \ __PMC_EV(E500, BTB_BRANCH_MISPRED_FROM_DIRECTION) \ __PMC_EV(E500, BTB_HITS_PSEUDO_HITS) \ __PMC_EV(E500, CYCLES_DECODE_STALLED) \ __PMC_EV(E500, CYCLES_ISSUE_STALLED) \ __PMC_EV(E500, CYCLES_BRANCH_ISSUE_STALLED) \ __PMC_EV(E500, CYCLES_SU1_SCHED_STALLED) \ __PMC_EV(E500, CYCLES_SU2_SCHED_STALLED) \ __PMC_EV(E500, CYCLES_MU_SCHED_STALLED) \ __PMC_EV(E500, CYCLES_LRU_SCHED_STALLED) \ __PMC_EV(E500, CYCLES_BU_SCHED_STALLED) \ __PMC_EV(E500, TOTAL_TRANSLATED) \ __PMC_EV(E500, LOADS_TRANSLATED) \ __PMC_EV(E500, STORES_TRANSLATED) \ __PMC_EV(E500, TOUCHES_TRANSLATED) \ __PMC_EV(E500, CACHEOPS_TRANSLATED) \ __PMC_EV(E500, CACHE_INHIBITED_ACCESS_TRANSLATED) \ __PMC_EV(E500, GUARDED_LOADS_TRANSLATED) \ __PMC_EV(E500, WRITE_THROUGH_STORES_TRANSLATED) \ __PMC_EV(E500, MISALIGNED_LOAD_STORE_ACCESS_TRANSLATED) \ __PMC_EV(E500, TOTAL_ALLOCATED_TO_DLFB) \ __PMC_EV(E500, LOADS_TRANSLATED_ALLOCATED_TO_DLFB) \ __PMC_EV(E500, STORES_COMPLETED_ALLOCATED_TO_DLFB) \ __PMC_EV(E500, TOUCHES_TRANSLATED_ALLOCATED_TO_DLFB) \ __PMC_EV(E500, STORES_COMPLETED) \ __PMC_EV(E500, DATA_L1_CACHE_LOCKS) \ __PMC_EV(E500, DATA_L1_CACHE_RELOADS) \ __PMC_EV(E500, DATA_L1_CACHE_CASTOUTS) \ __PMC_EV(E500, LOAD_MISS_DLFB_FULL) \ __PMC_EV(E500, LOAD_MISS_LDQ_FULL) \ __PMC_EV(E500, LOAD_GUARDED_MISS) \ __PMC_EV(E500, STORE_TRANSLATE_WHEN_QUEUE_FULL) \ __PMC_EV(E500, ADDRESS_COLLISION) \ __PMC_EV(E500, DATA_MMU_MISS) \ __PMC_EV(E500, DATA_MMU_BUSY) \ __PMC_EV(E500, PART2_MISALIGNED_CACHE_ACCESS) \ __PMC_EV(E500, LOAD_MISS_DLFB_FULL_CYCLES) \ __PMC_EV(E500, LOAD_MISS_LDQ_FULL_CYCLES) \ __PMC_EV(E500, LOAD_GUARDED_MISS_CYCLES) \ __PMC_EV(E500, STORE_TRANSLATE_WHEN_QUEUE_FULL_CYCLES) \ __PMC_EV(E500, ADDRESS_COLLISION_CYCLES) \ __PMC_EV(E500, DATA_MMU_MISS_CYCLES) \ __PMC_EV(E500, DATA_MMU_BUSY_CYCLES) \ __PMC_EV(E500, PART2_MISALIGNED_CACHE_ACCESS_CYCLES) \ __PMC_EV(E500, INSTR_L1_CACHE_LOCKS) \ __PMC_EV(E500, INSTR_L1_CACHE_RELOADS) \ __PMC_EV(E500, INSTR_L1_CACHE_FETCHES) \ __PMC_EV(E500, INSTR_MMU_TLB4K_RELOADS) \ 
__PMC_EV(E500, INSTR_MMU_VSP_RELOADS) \ __PMC_EV(E500, DATA_MMU_TLB4K_RELOADS) \ __PMC_EV(E500, DATA_MMU_VSP_RELOADS) \ __PMC_EV(E500, L2MMU_MISSES) \ __PMC_EV(E500, BIU_MASTER_REQUESTS) \ __PMC_EV(E500, BIU_MASTER_INSTR_SIDE_REQUESTS) \ __PMC_EV(E500, BIU_MASTER_DATA_SIDE_REQUESTS) \ __PMC_EV(E500, BIU_MASTER_DATA_SIDE_CASTOUT_REQUESTS) \ __PMC_EV(E500, BIU_MASTER_RETRIES) \ __PMC_EV(E500, SNOOP_REQUESTS) \ __PMC_EV(E500, SNOOP_HITS) \ __PMC_EV(E500, SNOOP_PUSHES) \ __PMC_EV(E500, SNOOP_RETRIES) \ __PMC_EV(E500, DLFB_LOAD_MISS_CYCLES) \ __PMC_EV(E500, ILFB_FETCH_MISS_CYCLES) \ __PMC_EV(E500, EXT_INPU_INTR_LATENCY_CYCLES) \ __PMC_EV(E500, CRIT_INPUT_INTR_LATENCY_CYCLES) \ __PMC_EV(E500, EXT_INPUT_INTR_PENDING_LATENCY_CYCLES) \ __PMC_EV(E500, CRIT_INPUT_INTR_PENDING_LATENCY_CYCLES) \ __PMC_EV(E500, PMC0_OVERFLOW) \ __PMC_EV(E500, PMC1_OVERFLOW) \ __PMC_EV(E500, PMC2_OVERFLOW) \ __PMC_EV(E500, PMC3_OVERFLOW) \ __PMC_EV(E500, INTERRUPTS_TAKEN) \ __PMC_EV(E500, EXT_INPUT_INTR_TAKEN) \ __PMC_EV(E500, CRIT_INPUT_INTR_TAKEN) \ __PMC_EV(E500, SYSCALL_TRAP_INTR) \ __PMC_EV(E500, TLB_BIT_TRANSITIONS) \ __PMC_EV(E500, L2_LINEFILL_BUFFER) \ __PMC_EV(E500, LV2_VS) \ __PMC_EV(E500, CASTOUTS_RELEASED) \ __PMC_EV(E500, INTV_ALLOCATIONS) \ __PMC_EV(E500, DLFB_RETRIES_TO_MBAR) \ __PMC_EV(E500, STORE_RETRIES) \ __PMC_EV(E500, STASH_L1_HITS) \ __PMC_EV(E500, STASH_L2_HITS) \ __PMC_EV(E500, STASH_BUSY_1) \ __PMC_EV(E500, STASH_BUSY_2) \ __PMC_EV(E500, STASH_BUSY_3) \ __PMC_EV(E500, STASH_HITS) \ __PMC_EV(E500, STASH_HIT_DLFB) \ __PMC_EV(E500, STASH_REQUESTS) \ __PMC_EV(E500, STASH_REQUESTS_L1) \ __PMC_EV(E500, STASH_REQUESTS_L2) \ __PMC_EV(E500, STALLS_NO_CAQ_OR_COB) \ __PMC_EV(E500, L2_CACHE_ACCESSES) \ __PMC_EV(E500, L2_HIT_CACHE_ACCESSES) \ __PMC_EV(E500, L2_CACHE_DATA_ACCESSES) \ __PMC_EV(E500, L2_CACHE_DATA_HITS) \ __PMC_EV(E500, L2_CACHE_INSTR_ACCESSES) \ __PMC_EV(E500, L2_CACHE_INSTR_HITS) \ __PMC_EV(E500, L2_CACHE_ALLOCATIONS) \ __PMC_EV(E500, L2_CACHE_DATA_ALLOCATIONS) \ __PMC_EV(E500, L2_CACHE_DIRTY_DATA_ALLOCATIONS) \ __PMC_EV(E500, L2_CACHE_INSTR_ALLOCATIONS) \ __PMC_EV(E500, L2_CACHE_UPDATES) \ __PMC_EV(E500, L2_CACHE_CLEAN_UPDATES) \ __PMC_EV(E500, L2_CACHE_DIRTY_UPDATES) \ __PMC_EV(E500, L2_CACHE_CLEAN_REDUNDANT_UPDATES) \ __PMC_EV(E500, L2_CACHE_DIRTY_REDUNDANT_UPDATES) \ __PMC_EV(E500, L2_CACHE_LOCKS) \ __PMC_EV(E500, L2_CACHE_CASTOUTS) \ __PMC_EV(E500, L2_CACHE_DATA_DIRTY_HITS) \ __PMC_EV(E500, INSTR_LFB_WENT_HIGH_PRIORITY) \ __PMC_EV(E500, SNOOP_THROTTLING_TURNED_ON) \ __PMC_EV(E500, L2_CLEAN_LINE_INVALIDATIONS) \ __PMC_EV(E500, L2_INCOHERENT_LINE_INVALIDATIONS) \ __PMC_EV(E500, L2_COHERENT_LINE_INVALIDATIONS) \ __PMC_EV(E500, COHERENT_LOOKUP_MISS_DUE_TO_VALID_BUT_INCOHERENT_MATCHES) \ __PMC_EV(E500, IAC1S_DETECTED) \ __PMC_EV(E500, IAC2S_DETECTED) \ __PMC_EV(E500, DAC1S_DTECTED) \ __PMC_EV(E500, DAC2S_DTECTED) \ __PMC_EV(E500, DVT0_DETECTED) \ __PMC_EV(E500, DVT1_DETECTED) \ __PMC_EV(E500, DVT2_DETECTED) \ __PMC_EV(E500, DVT3_DETECTED) \ __PMC_EV(E500, DVT4_DETECTED) \ __PMC_EV(E500, DVT5_DETECTED) \ __PMC_EV(E500, DVT6_DETECTED) \ __PMC_EV(E500, DVT7_DETECTED) \ __PMC_EV(E500, CYCLES_COMPLETION_STALLED_NEXUS_FIFO_FULL) \ __PMC_EV(E500, FPU_DOUBLE_PUMP) \ __PMC_EV(E500, FPU_FINISH) \ __PMC_EV(E500, FPU_DIVIDE_CYCLES) \ __PMC_EV(E500, FPU_DENORM_INPUT_CYCLES) \ __PMC_EV(E500, FPU_RESULT_STALL_CYCLES) \ __PMC_EV(E500, FPU_FPSCR_FULL_STALL) \ __PMC_EV(E500, FPU_PIPE_SYNC_STALLS) \ __PMC_EV(E500, FPU_INPUT_DATA_STALLS) \ __PMC_EV(E500, DECORATED_LOADS) \ __PMC_EV(E500, DECORATED_STORES) \ 
__PMC_EV(E500, LOAD_RETRIES) \
__PMC_EV(E500, STWCX_SUCCESSES) \
__PMC_EV(E500, STWCX_FAILURES) \

#define PMC_EV_E500_FIRST PMC_EV_E500_CYCLES
#define PMC_EV_E500_LAST PMC_EV_E500_STWCX_FAILURES

/*
 * All known PMC events.
 *
 * PMC event numbers are allocated sparsely to allow new PMC events to
 * be added to a PMC class without breaking ABI compatibility. The
 * current allocation scheme is:
 *
 * START    #EVENTS  DESCRIPTION
 * 0        0x1000   Reserved
 * 0x1000   0x0001   TSC
 * 0x2000   0x0080   AMD K7 events
 * 0x2080   0x0100   AMD K8 events
 * 0x10000  0x0080   INTEL architectural fixed-function events
 * 0x10080  0x0F80   INTEL architectural programmable events
 * 0x11000  0x0080   INTEL Pentium 4 events
 * 0x11080  0x0080   INTEL Pentium MMX events
 * 0x11100  0x0100   INTEL Pentium Pro/P-II/P-III/Pentium-M events
 * 0x11200  0x00FF   INTEL XScale events
 * 0x11300  0x00FF   MIPS 24K events
 * 0x11400  0x00FF   Octeon events
 * 0x11500  0x00FF   MIPS 74K events
 * 0x13000  0x00FF   MPC7450 events
 * 0x13100  0x00FF   IBM PPC970 events
 * 0x13300  0x00FF   Freescale e500 events
 * 0x14000  0x0100   ARMv7 events
 * 0x14100  0x0100   ARMv8 events
 * 0x20000  0x1000   Software events
 */
#define __PMC_EVENTS() \
    __PMC_EV_BLOCK(TSC, 0x01000) \
    __PMC_EV_TSC() \
    __PMC_EV_BLOCK(IAF, 0x10000) \
    __PMC_EV_IAF() \
    __PMC_EV_BLOCK(K7, 0x2000) \
    __PMC_EV_K7() \
    __PMC_EV_BLOCK(K8, 0x2080) \
    __PMC_EV_K8() \
    __PMC_EV_BLOCK(XSCALE, 0x11200) \
    __PMC_EV_XSCALE() \
    __PMC_EV_BLOCK(MIPS24K, 0x11300) \
    __PMC_EV_MIPS24K() \
    __PMC_EV_BLOCK(OCTEON, 0x11400) \
    __PMC_EV_OCTEON() \
    __PMC_EV_BLOCK(MIPS74K, 0x11500) \
    __PMC_EV_MIPS74K() \
    __PMC_EV_BLOCK(UCP, 0x12080) \
    __PMC_EV_UCP() \
    __PMC_EV_BLOCK(PPC7450, 0x13000) \
    __PMC_EV_PPC7450() \
    __PMC_EV_BLOCK(PPC970, 0x13100) \
    __PMC_EV_PPC970() \
    __PMC_EV_BLOCK(E500, 0x13300) \
    __PMC_EV_E500() \
    __PMC_EV_BLOCK(ARMV7, 0x14000) \
    __PMC_EV_ARMV7() \
    __PMC_EV_BLOCK(ARMV8, 0x14100) \
    __PMC_EV_ARMV8()

#define PMC_EVENT_FIRST PMC_EV_TSC_TSC
#define PMC_EVENT_LAST PMC_EV_SOFT_LAST

#endif /* _DEV_HWPMC_PMC_EVENTS_H_ */
Index: head/sys/dev/sk/if_sk.c
===================================================================
--- head/sys/dev/sk/if_sk.c (revision 336756)
+++ head/sys/dev/sk/if_sk.c (revision 336757)
@@ -1,3842 +1,3842 @@
/* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */
/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 2003 Nathan L. Binkert * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports * the SK-984x series adapters, both single port and dual port. * References: * The XaQti XMAC II datasheet, * https://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf * The SysKonnect GEnesis manual, http://www.syskonnect.com * * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the * XMAC II datasheet online. I have put my copy at people.freebsd.org as a * convenience to others until Vitesse corrects this problem: * - * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf + * https://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf * * Written by Bill Paul * Department of Electrical Engineering * Columbia University, New York City */ /* * The SysKonnect gigabit ethernet adapters consist of two main * components: the SysKonnect GEnesis controller chip and the XaQti Corp. * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC * components and a PHY while the GEnesis controller provides a PCI * interface with DMA support. Each card may have between 512K and * 2MB of SRAM on board depending on the configuration. * * The SysKonnect GEnesis controller can have either one or two XMAC * chips connected to it, allowing single or dual port NIC configurations. * SysKonnect has the distinction of being the only vendor on the market * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, * dual DMA queues, packet/MAC/transmit arbiters and direct access to the * XMAC registers. This driver takes advantage of these features to allow * both XMACs to operate as independent interfaces. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if 0 #define SK_USEIOSPACE #endif #include #include #include MODULE_DEPEND(sk, pci, 1, 1, 1); MODULE_DEPEND(sk, ether, 1, 1, 1); MODULE_DEPEND(sk, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. 
*/ #include "miibus_if.h" static const struct sk_type sk_devs[] = { { VENDORID_SK, DEVICEID_SK_V1, "SysKonnect Gigabit Ethernet (V1.0)" }, { VENDORID_SK, DEVICEID_SK_V2, "SysKonnect Gigabit Ethernet (V2.0)" }, { VENDORID_MARVELL, DEVICEID_SK_V2, "Marvell Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_BELKIN_5005, "Belkin F5D5005 Gigabit Ethernet" }, { VENDORID_3COM, DEVICEID_3COM_3C940, "3Com 3C940 Gigabit Ethernet" }, { VENDORID_LINKSYS, DEVICEID_LINKSYS_EG1032, "Linksys EG1032 Gigabit Ethernet" }, { VENDORID_DLINK, DEVICEID_DLINK_DGE530T_A1, "D-Link DGE-530T Gigabit Ethernet" }, { VENDORID_DLINK, DEVICEID_DLINK_DGE530T_B1, "D-Link DGE-530T Gigabit Ethernet" }, { 0, 0, NULL } }; static int skc_probe(device_t); static int skc_attach(device_t); static int skc_detach(device_t); static int skc_shutdown(device_t); static int skc_suspend(device_t); static int skc_resume(device_t); static bus_dma_tag_t skc_get_dma_tag(device_t, device_t); static int sk_detach(device_t); static int sk_probe(device_t); static int sk_attach(device_t); static void sk_tick(void *); static void sk_yukon_tick(void *); static void sk_intr(void *); static void sk_intr_xmac(struct sk_if_softc *); static void sk_intr_bcom(struct sk_if_softc *); static void sk_intr_yukon(struct sk_if_softc *); static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t); static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t); static void sk_rxeof(struct sk_if_softc *); static void sk_jumbo_rxeof(struct sk_if_softc *); static void sk_txeof(struct sk_if_softc *); static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *); static int sk_encap(struct sk_if_softc *, struct mbuf **); static void sk_start(struct ifnet *); static void sk_start_locked(struct ifnet *); static int sk_ioctl(struct ifnet *, u_long, caddr_t); static void sk_init(void *); static void sk_init_locked(struct sk_if_softc *); static void sk_init_xmac(struct sk_if_softc *); static void sk_init_yukon(struct sk_if_softc *); static void sk_stop(struct sk_if_softc *); static void sk_watchdog(void *); static int sk_ifmedia_upd(struct ifnet *); static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void sk_reset(struct sk_softc *); static __inline void sk_discard_rxbuf(struct sk_if_softc *, int); static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int); static int sk_newbuf(struct sk_if_softc *, int); static int sk_jumbo_newbuf(struct sk_if_softc *, int); static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int sk_dma_alloc(struct sk_if_softc *); static int sk_dma_jumbo_alloc(struct sk_if_softc *); static void sk_dma_free(struct sk_if_softc *); static void sk_dma_jumbo_free(struct sk_if_softc *); static int sk_init_rx_ring(struct sk_if_softc *); static int sk_init_jumbo_rx_ring(struct sk_if_softc *); static void sk_init_tx_ring(struct sk_if_softc *); static u_int32_t sk_win_read_4(struct sk_softc *, int); static u_int16_t sk_win_read_2(struct sk_softc *, int); static u_int8_t sk_win_read_1(struct sk_softc *, int); static void sk_win_write_4(struct sk_softc *, int, u_int32_t); static void sk_win_write_2(struct sk_softc *, int, u_int32_t); static void sk_win_write_1(struct sk_softc *, int, u_int32_t); static int sk_miibus_readreg(device_t, int, int); static int sk_miibus_writereg(device_t, int, int, int); static void sk_miibus_statchg(device_t); static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int); static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int, int); 
static void sk_xmac_miibus_statchg(struct sk_if_softc *);
static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int, int);
static void sk_marv_miibus_statchg(struct sk_if_softc *);
static uint32_t sk_xmchash(const uint8_t *);
static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
static void sk_rxfilter(struct sk_if_softc *);
static void sk_rxfilter_genesis(struct sk_if_softc *);
static void sk_rxfilter_yukon(struct sk_if_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);

/* Tunables. */
static int jumbo_disable = 0;
TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);

/*
 * It seems that SK-NET GENESIS supports a very simple checksum offload
 * capability for Tx, and I believe it can generate a 0 checksum value for
 * UDP packets in Tx as the hardware can't differentiate UDP packets from
 * TCP packets. A 0 checksum value for a UDP packet is an invalid one as it
 * means the sender didn't perform checksum computation. For safety I
 * disabled UDP checksum offload capability at the moment. Alternatively
 * we can introduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum
 * offload routine.
 */
#define SK_CSUM_FEATURES (CSUM_TCP)

/*
 * Note that we have newbus methods for both the GEnesis controller
 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
 * the miibus code is a child of the XMACs. We need to do it this way
 * so that the miibus drivers can access the PHY registers on the
 * right PHY. It's not quite what I had in mind, but it's the only
 * design that achieves the desired effect.
 */
static device_method_t skc_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, skc_probe),
    DEVMETHOD(device_attach, skc_attach),
    DEVMETHOD(device_detach, skc_detach),
    DEVMETHOD(device_suspend, skc_suspend),
    DEVMETHOD(device_resume, skc_resume),
    DEVMETHOD(device_shutdown, skc_shutdown),
    DEVMETHOD(bus_get_dma_tag, skc_get_dma_tag),
    DEVMETHOD_END
};
static driver_t skc_driver = {
    "skc",
    skc_methods,
    sizeof(struct sk_softc)
};
static devclass_t skc_devclass;
static device_method_t sk_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, sk_probe),
    DEVMETHOD(device_attach, sk_attach),
    DEVMETHOD(device_detach, sk_detach),
    DEVMETHOD(device_shutdown, bus_generic_shutdown),
    /* MII interface */
    DEVMETHOD(miibus_readreg, sk_miibus_readreg),
    DEVMETHOD(miibus_writereg, sk_miibus_writereg),
    DEVMETHOD(miibus_statchg, sk_miibus_statchg),
    DEVMETHOD_END
};
static driver_t sk_driver = {
    "sk",
    sk_methods,
    sizeof(struct sk_if_softc)
};
static devclass_t sk_devclass;
DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, NULL, NULL);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, NULL, NULL);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, NULL, NULL);
static struct resource_spec sk_res_spec_io[] = {
    { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
    { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
    { -1, 0, 0 }
};
static struct resource_spec sk_res_spec_mem[] = {
    { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
    { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
    { -1, 0, 0 }
};
#define SK_SETBIT(sc, reg, x) \
    CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
#define SK_CLRBIT(sc, reg, x) \
    CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
#define SK_WIN_SETBIT_4(sc, reg, x) \
    sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
#define SK_WIN_CLRBIT_4(sc, reg, x) \
    sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
#define SK_WIN_SETBIT_2(sc, reg, x) \
sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x) #define SK_WIN_CLRBIT_2(sc, reg, x) \ sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x) static u_int32_t sk_win_read_4(sc, reg) struct sk_softc *sc; int reg; { #ifdef SK_USEIOSPACE CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg))); #else return(CSR_READ_4(sc, reg)); #endif } static u_int16_t sk_win_read_2(sc, reg) struct sk_softc *sc; int reg; { #ifdef SK_USEIOSPACE CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg))); #else return(CSR_READ_2(sc, reg)); #endif } static u_int8_t sk_win_read_1(sc, reg) struct sk_softc *sc; int reg; { #ifdef SK_USEIOSPACE CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg))); #else return(CSR_READ_1(sc, reg)); #endif } static void sk_win_write_4(sc, reg, val) struct sk_softc *sc; int reg; u_int32_t val; { #ifdef SK_USEIOSPACE CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val); #else CSR_WRITE_4(sc, reg, val); #endif return; } static void sk_win_write_2(sc, reg, val) struct sk_softc *sc; int reg; u_int32_t val; { #ifdef SK_USEIOSPACE CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val); #else CSR_WRITE_2(sc, reg, val); #endif return; } static void sk_win_write_1(sc, reg, val) struct sk_softc *sc; int reg; u_int32_t val; { #ifdef SK_USEIOSPACE CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val); #else CSR_WRITE_1(sc, reg, val); #endif return; } static int sk_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct sk_if_softc *sc_if; int v; sc_if = device_get_softc(dev); SK_IF_MII_LOCK(sc_if); switch(sc_if->sk_softc->sk_type) { case SK_GENESIS: v = sk_xmac_miibus_readreg(sc_if, phy, reg); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: v = sk_marv_miibus_readreg(sc_if, phy, reg); break; default: v = 0; break; } SK_IF_MII_UNLOCK(sc_if); return (v); } static int sk_miibus_writereg(dev, phy, reg, val) device_t dev; int phy, reg, val; { struct sk_if_softc *sc_if; int v; sc_if = device_get_softc(dev); SK_IF_MII_LOCK(sc_if); switch(sc_if->sk_softc->sk_type) { case SK_GENESIS: v = sk_xmac_miibus_writereg(sc_if, phy, reg, val); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: v = sk_marv_miibus_writereg(sc_if, phy, reg, val); break; default: v = 0; break; } SK_IF_MII_UNLOCK(sc_if); return (v); } static void sk_miibus_statchg(dev) device_t dev; { struct sk_if_softc *sc_if; sc_if = device_get_softc(dev); SK_IF_MII_LOCK(sc_if); switch(sc_if->sk_softc->sk_type) { case SK_GENESIS: sk_xmac_miibus_statchg(sc_if); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: sk_marv_miibus_statchg(sc_if); break; } SK_IF_MII_UNLOCK(sc_if); return; } static int sk_xmac_miibus_readreg(sc_if, phy, reg) struct sk_if_softc *sc_if; int phy, reg; { int i; SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); SK_XM_READ_2(sc_if, XM_PHY_DATA); if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { for (i = 0; i < SK_TIMEOUT; i++) { DELAY(1); if (SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYDATARDY) break; } if (i == SK_TIMEOUT) { if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); return(0); } } DELAY(1); i = SK_XM_READ_2(sc_if, XM_PHY_DATA); return(i); } static int sk_xmac_miibus_writereg(sc_if, phy, reg, val) struct sk_if_softc *sc_if; int phy, reg, val; { int i; SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); for (i = 0; i < SK_TIMEOUT; i++) { if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & 
XM_MMUCMD_PHYBUSY)) break; } if (i == SK_TIMEOUT) { if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); return (ETIMEDOUT); } SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); for (i = 0; i < SK_TIMEOUT; i++) { DELAY(1); if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) break; } if (i == SK_TIMEOUT) if_printf(sc_if->sk_ifp, "phy write timed out\n"); return(0); } static void sk_xmac_miibus_statchg(sc_if) struct sk_if_softc *sc_if; { struct mii_data *mii; mii = device_get_softc(sc_if->sk_miibus); /* * If this is a GMII PHY, manually set the XMAC's * duplex mode accordingly. */ if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); } else { SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); } } } static int sk_marv_miibus_readreg(sc_if, phy, reg) struct sk_if_softc *sc_if; int phy, reg; { u_int16_t val; int i; if (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER && sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER) { return(0); } SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ); for (i = 0; i < SK_TIMEOUT; i++) { DELAY(1); val = SK_YU_READ_2(sc_if, YUKON_SMICR); if (val & YU_SMICR_READ_VALID) break; } if (i == SK_TIMEOUT) { if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); return(0); } val = SK_YU_READ_2(sc_if, YUKON_SMIDR); return(val); } static int sk_marv_miibus_writereg(sc_if, phy, reg, val) struct sk_if_softc *sc_if; int phy, reg, val; { int i; SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); for (i = 0; i < SK_TIMEOUT; i++) { DELAY(1); if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0) break; } if (i == SK_TIMEOUT) if_printf(sc_if->sk_ifp, "phy write timeout\n"); return(0); } static void sk_marv_miibus_statchg(sc_if) struct sk_if_softc *sc_if; { return; } #define HASH_BITS 6 static u_int32_t sk_xmchash(addr) const uint8_t *addr; { uint32_t crc; /* Compute CRC for the address value. */ crc = ether_crc32_le(addr, ETHER_ADDR_LEN); return (~crc & ((1 << HASH_BITS) - 1)); } static void sk_setfilt(sc_if, addr, slot) struct sk_if_softc *sc_if; u_int16_t *addr; int slot; { int base; base = XM_RXFILT_ENTRY(slot); SK_XM_WRITE_2(sc_if, base, addr[0]); SK_XM_WRITE_2(sc_if, base + 2, addr[1]); SK_XM_WRITE_2(sc_if, base + 4, addr[2]); return; } static void sk_rxfilter(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; SK_IF_LOCK_ASSERT(sc_if); sc = sc_if->sk_softc; if (sc->sk_type == SK_GENESIS) sk_rxfilter_genesis(sc_if); else sk_rxfilter_yukon(sc_if); } static void sk_rxfilter_genesis(sc_if) struct sk_if_softc *sc_if; { struct ifnet *ifp = sc_if->sk_ifp; u_int32_t hashes[2] = { 0, 0 }, mode; int h = 0, i; struct ifmultiaddr *ifma; u_int16_t dummy[] = { 0, 0, 0 }; u_int16_t maddr[(ETHER_ADDR_LEN+1)/2]; SK_IF_LOCK_ASSERT(sc_if); mode = SK_XM_READ_4(sc_if, XM_MODE); mode &= ~(XM_MODE_RX_PROMISC | XM_MODE_RX_USE_HASH | XM_MODE_RX_USE_PERFECT); /* First, zot all the existing perfect filters. */ for (i = 1; i < XM_RXFILT_MAX; i++) sk_setfilt(sc_if, dummy, i); /* Now program new ones. 
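 * Multicast groups are written into perfect-filter entries 1 through
 * XM_RXFILT_MAX - 1; once those slots are exhausted, the remaining
 * groups fall back to the 64-bit multicast hash below.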
*/ if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { if (ifp->if_flags & IFF_ALLMULTI) mode |= XM_MODE_RX_USE_HASH; if (ifp->if_flags & IFF_PROMISC) mode |= XM_MODE_RX_PROMISC; hashes[0] = 0xFFFFFFFF; hashes[1] = 0xFFFFFFFF; } else { i = 1; if_maddr_rlock(ifp); /* XXX want to maintain reverse semantics */ CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Program the first XM_RXFILT_MAX multicast groups * into the perfect filter. */ bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), maddr, ETHER_ADDR_LEN); if (i < XM_RXFILT_MAX) { sk_setfilt(sc_if, maddr, i); mode |= XM_MODE_RX_USE_PERFECT; i++; continue; } h = sk_xmchash((const uint8_t *)maddr); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mode |= XM_MODE_RX_USE_HASH; } if_maddr_runlock(ifp); } SK_XM_WRITE_4(sc_if, XM_MODE, mode); SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]); SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]); } static void sk_rxfilter_yukon(sc_if) struct sk_if_softc *sc_if; { struct ifnet *ifp; u_int32_t crc, hashes[2] = { 0, 0 }, mode; struct ifmultiaddr *ifma; SK_IF_LOCK_ASSERT(sc_if); ifp = sc_if->sk_ifp; mode = SK_YU_READ_2(sc_if, YUKON_RCR); if (ifp->if_flags & IFF_PROMISC) mode &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN); else if (ifp->if_flags & IFF_ALLMULTI) { mode |= YU_RCR_UFLEN | YU_RCR_MUFLEN; hashes[0] = 0xFFFFFFFF; hashes[1] = 0xFFFFFFFF; } else { mode |= YU_RCR_UFLEN; if_maddr_rlock(ifp); CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN); /* Just want the 6 least significant bits. */ crc &= 0x3f; /* Set the corresponding bit in the hash table. 
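 * The 6-bit CRC value indexes a 64-bit table kept as two 32-bit
 * words: bit 5 selects the word, bits 0-4 select the bit within it.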
*/ hashes[crc >> 5] |= 1 << (crc & 0x1f); } if_maddr_runlock(ifp); if (hashes[0] != 0 || hashes[1] != 0) mode |= YU_RCR_MUFLEN; } SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff); SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff); SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff); SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff); SK_YU_WRITE_2(sc_if, YUKON_RCR, mode); } static int sk_init_rx_ring(sc_if) struct sk_if_softc *sc_if; { struct sk_ring_data *rd; bus_addr_t addr; u_int32_t csum_start; int i; sc_if->sk_cdata.sk_rx_cons = 0; csum_start = (ETHER_HDR_LEN + sizeof(struct ip)) << 16 | ETHER_HDR_LEN; rd = &sc_if->sk_rdata; bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); for (i = 0; i < SK_RX_RING_CNT; i++) { if (sk_newbuf(sc_if, i) != 0) return (ENOBUFS); if (i == (SK_RX_RING_CNT - 1)) addr = SK_RX_RING_ADDR(sc_if, 0); else addr = SK_RX_RING_ADDR(sc_if, i + 1); rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start); } bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return(0); } static int sk_init_jumbo_rx_ring(sc_if) struct sk_if_softc *sc_if; { struct sk_ring_data *rd; bus_addr_t addr; u_int32_t csum_start; int i; sc_if->sk_cdata.sk_jumbo_rx_cons = 0; csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) | ETHER_HDR_LEN; rd = &sc_if->sk_rdata; bzero(rd->sk_jumbo_rx_ring, sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT); for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { if (sk_jumbo_newbuf(sc_if, i) != 0) return (ENOBUFS); if (i == (SK_JUMBO_RX_RING_CNT - 1)) addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0); else addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1); rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start); } bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void sk_init_tx_ring(sc_if) struct sk_if_softc *sc_if; { struct sk_ring_data *rd; struct sk_txdesc *txd; bus_addr_t addr; int i; STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq); STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq); sc_if->sk_cdata.sk_tx_prod = 0; sc_if->sk_cdata.sk_tx_cons = 0; sc_if->sk_cdata.sk_tx_cnt = 0; rd = &sc_if->sk_rdata; bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); for (i = 0; i < SK_TX_RING_CNT; i++) { if (i == (SK_TX_RING_CNT - 1)) addr = SK_TX_RING_ADDR(sc_if, 0); else addr = SK_TX_RING_ADDR(sc_if, i + 1); rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); txd = &sc_if->sk_cdata.sk_txdesc[i]; STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q); } bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static __inline void sk_discard_rxbuf(sc_if, idx) struct sk_if_softc *sc_if; int idx; { struct sk_rx_desc *r; struct sk_rxdesc *rxd; struct mbuf *m; r = &sc_if->sk_rdata.sk_rx_ring[idx]; rxd = &sc_if->sk_cdata.sk_rxdesc[idx]; m = rxd->rx_m; r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM); } static __inline void sk_discard_jumbo_rxbuf(sc_if, idx) struct sk_if_softc *sc_if; int idx; { struct sk_rx_desc *r; struct sk_rxdesc *rxd; struct mbuf *m; r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx]; rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx]; m = rxd->rx_m; r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM); } static int sk_newbuf(sc_if, idx) struct sk_if_softc *sc_if; int 
idx; { struct sk_rx_desc *r; struct sk_rxdesc *rxd; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; m_adj(m, ETHER_ALIGN); if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag, sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); rxd = &sc_if->sk_cdata.sk_rxdesc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap); } map = rxd->rx_dmamap; rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap; sc_if->sk_cdata.sk_rx_sparemap = map; bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); rxd->rx_m = m; r = &sc_if->sk_rdata.sk_rx_ring[idx]; r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr)); r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr)); r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM); return (0); } static int sk_jumbo_newbuf(sc_if, idx) struct sk_if_softc *sc_if; int idx; { struct sk_rx_desc *r; struct sk_rxdesc *rxd; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); m->m_pkthdr.len = m->m_len = MJUM9BYTES; /* * Adjust alignment so packet payload begins on a * longword boundary. Mandatory for Alpha, useful on * x86 too. */ m_adj(m, ETHER_ALIGN); if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag, sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap); } map = rxd->rx_dmamap; rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap; sc_if->sk_cdata.sk_jumbo_rx_sparemap = map; bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); rxd->rx_m = m; r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx]; r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr)); r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr)); r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM); return (0); } /* * Set media options. */ static int sk_ifmedia_upd(ifp) struct ifnet *ifp; { struct sk_if_softc *sc_if = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc_if->sk_miibus); sk_init(sc_if); mii_mediachg(mii); return(0); } /* * Report current media status. 
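 * This just polls the PHY via mii_pollstat() and copies the active
 * media and link status words back to the caller.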
*/ static void sk_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct sk_if_softc *sc_if; struct mii_data *mii; sc_if = ifp->if_softc; mii = device_get_softc(sc_if->sk_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static int sk_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct sk_if_softc *sc_if = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int error, mask; struct mii_data *mii; error = 0; switch(command) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU) error = EINVAL; else if (ifp->if_mtu != ifr->ifr_mtu) { if (sc_if->sk_jumbo_disable != 0 && ifr->ifr_mtu > SK_MAX_FRAMELEN) error = EINVAL; else { SK_IF_LOCK(sc_if); ifp->if_mtu = ifr->ifr_mtu; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; sk_init_locked(sc_if); } SK_IF_UNLOCK(sc_if); } } break; case SIOCSIFFLAGS: SK_IF_LOCK(sc_if); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc_if->sk_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) sk_rxfilter(sc_if); } else sk_init_locked(sc_if); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) sk_stop(sc_if); } sc_if->sk_if_flags = ifp->if_flags; SK_IF_UNLOCK(sc_if); break; case SIOCADDMULTI: case SIOCDELMULTI: SK_IF_LOCK(sc_if); if (ifp->if_drv_flags & IFF_DRV_RUNNING) sk_rxfilter(sc_if); SK_IF_UNLOCK(sc_if); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc_if->sk_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: SK_IF_LOCK(sc_if); if (sc_if->sk_softc->sk_type == SK_GENESIS) { SK_IF_UNLOCK(sc_if); break; } mask = ifr->ifr_reqcap ^ ifp->if_capenable; if ((mask & IFCAP_TXCSUM) != 0 && (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= SK_CSUM_FEATURES; else ifp->if_hwassist &= ~SK_CSUM_FEATURES; } if ((mask & IFCAP_RXCSUM) != 0 && (IFCAP_RXCSUM & ifp->if_capabilities) != 0) ifp->if_capenable ^= IFCAP_RXCSUM; SK_IF_UNLOCK(sc_if); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } /* * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int skc_probe(dev) device_t dev; { const struct sk_type *t = sk_devs; while(t->sk_name != NULL) { if ((pci_get_vendor(dev) == t->sk_vid) && (pci_get_device(dev) == t->sk_did)) { /* * Only attach to rev. 2 of the Linksys EG1032 adapter. * Rev. 3 is supported by re(4). */ if ((t->sk_vid == VENDORID_LINKSYS) && (t->sk_did == DEVICEID_LINKSYS_EG1032) && (pci_get_subdevice(dev) != SUBDEVICEID_LINKSYS_EG1032_REV2)) { t++; continue; } device_set_desc(dev, t->sk_name); return (BUS_PROBE_DEFAULT); } t++; } return(ENXIO); } /* * Force the GEnesis into reset, then bring it out of reset. 
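 * Besides toggling the software and master reset bits, this unresets the
 * packet arbiter (on GEnesis) and the RAM interface, and programs the
 * interrupt moderation timer from the int_mod setting.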
*/ static void sk_reset(sc) struct sk_softc *sc; { CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET); CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET); if (SK_YUKON_FAMILY(sc->sk_type)) CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET); DELAY(1000); CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET); DELAY(2); CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET); if (SK_YUKON_FAMILY(sc->sk_type)) CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR); if (sc->sk_type == SK_GENESIS) { /* Configure packet arbiter */ sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET); sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT); sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT); sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT); sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT); } /* Enable RAM interface */ sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET); /* * Configure interrupt moderation. The moderation timer * defers interrupts specified in the interrupt moderation * timer mask based on the timeout specified in the interrupt * moderation timer init register. Each bit in the timer * register represents one tick, so to specify a timeout in * microseconds, we have to multiply by the correct number of * ticks-per-microsecond. */ switch (sc->sk_type) { case SK_GENESIS: sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS; break; default: sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON; break; } if (bootverbose) device_printf(sc->sk_dev, "interrupt moderation is %d us\n", sc->sk_int_mod); sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)); sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF| SK_ISR_RX1_EOF|SK_ISR_RX2_EOF); sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START); return; } static int sk_probe(dev) device_t dev; { struct sk_softc *sc; sc = device_get_softc(device_get_parent(dev)); /* * Not much to do here. We always know there will be * at least one XMAC present, and if there are two, * skc_attach() will create a second device instance * for us. */ switch (sc->sk_type) { case SK_GENESIS: device_set_desc(dev, "XaQti Corp. XMAC II"); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon"); break; } return (BUS_PROBE_DEFAULT); } /* * Each XMAC chip is attached as a separate logical IP interface. * Single port cards will have only one logical interface of course. */ static int sk_attach(dev) device_t dev; { struct sk_softc *sc; struct sk_if_softc *sc_if; struct ifnet *ifp; u_int32_t r; int error, i, phy, port; u_char eaddr[6]; u_char inv_mac[] = {0, 0, 0, 0, 0, 0}; if (dev == NULL) return(EINVAL); error = 0; sc_if = device_get_softc(dev); sc = device_get_softc(device_get_parent(dev)); port = *(int *)device_get_ivars(dev); sc_if->sk_if_dev = dev; sc_if->sk_port = port; sc_if->sk_softc = sc; sc->sk_if[port] = sc_if; if (port == SK_PORT_A) sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0; if (port == SK_PORT_B) sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1; callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0); callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0); if (sk_dma_alloc(sc_if) != 0) { error = ENOMEM; goto fail; } sk_dma_jumbo_alloc(sc_if); ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc_if->sk_if_dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc_if; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; /* * SK_GENESIS has a bug in checksum offload - From linux. 
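 * Consequently, hardware checksum capabilities are only advertised for
 * the Yukon family below; GEnesis runs with offloading disabled.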
*/ if (sc_if->sk_softc->sk_type != SK_GENESIS) { ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM; ifp->if_hwassist = 0; } else { ifp->if_capabilities = 0; ifp->if_hwassist = 0; } ifp->if_capenable = ifp->if_capabilities; /* * Some revision of Yukon controller generates corrupted * frame when TX checksum offloading is enabled. The * frame has a valid checksum value so payload might be * modified during TX checksum calculation. Disable TX * checksum offloading but give users chance to enable it * when they know their controller works without problems * with TX checksum offloading. */ ifp->if_capenable &= ~IFCAP_TXCSUM; ifp->if_ioctl = sk_ioctl; ifp->if_start = sk_start; ifp->if_init = sk_init; IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1); ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1; IFQ_SET_READY(&ifp->if_snd); /* * Get station address for this interface. Note that * dual port cards actually come with three station * addresses: one for each port, plus an extra. The * extra one is used by the SysKonnect driver software * as a 'virtual' station address for when both ports * are operating in failover mode. Currently we don't * use this extra address. */ SK_IF_LOCK(sc_if); for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i); /* Verify whether the station address is invalid or not. */ if (bcmp(eaddr, inv_mac, sizeof(inv_mac)) == 0) { device_printf(sc_if->sk_if_dev, "Generating random ethernet address\n"); r = arc4random(); /* * Set OUI to convenient locally assigned address. 'b' * is 0x62, which has the locally assigned bit set, and * the broadcast/multicast bit clear. */ eaddr[0] = 'b'; eaddr[1] = 's'; eaddr[2] = 'd'; eaddr[3] = (r >> 16) & 0xff; eaddr[4] = (r >> 8) & 0xff; eaddr[5] = (r >> 0) & 0xff; } /* * Set up RAM buffer addresses. The NIC will have a certain * amount of SRAM on it, somewhere between 512K and 2MB. We * need to divide this up a) between the transmitter and * receiver and b) between the two XMACs, if this is a * dual port NIC. Our algotithm is to divide up the memory * evenly so that everyone gets a fair share. * * Just to be contrary, Yukon2 appears to have separate memory * for each MAC. 
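 * The split below carves the SRAM into per-port Rx and Tx regions,
 * with the start/end offsets expressed in 8-byte (u_int64_t) units.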
*/ if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) { u_int32_t chunk, val; chunk = sc->sk_ramsize / 2; val = sc->sk_rboff / sizeof(u_int64_t); sc_if->sk_rx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_rx_ramend = val - 1; sc_if->sk_tx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_tx_ramend = val - 1; } else { u_int32_t chunk, val; chunk = sc->sk_ramsize / 4; val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) / sizeof(u_int64_t); sc_if->sk_rx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_rx_ramend = val - 1; sc_if->sk_tx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_tx_ramend = val - 1; } /* Read and save PHY type and set PHY address */ sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF; if (!SK_YUKON_FAMILY(sc->sk_type)) { switch(sc_if->sk_phytype) { case SK_PHYTYPE_XMAC: sc_if->sk_phyaddr = SK_PHYADDR_XMAC; break; case SK_PHYTYPE_BCOM: sc_if->sk_phyaddr = SK_PHYADDR_BCOM; break; default: device_printf(sc->sk_dev, "unsupported PHY type: %d\n", sc_if->sk_phytype); error = ENODEV; SK_IF_UNLOCK(sc_if); goto fail; } } else { if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER && sc->sk_pmd != 'S') { /* not initialized, punt */ sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER; sc->sk_coppertype = 1; } sc_if->sk_phyaddr = SK_PHYADDR_MARV; if (!(sc->sk_coppertype)) sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER; } /* * Call MI attach routine. Can't hold locks when calling into ether_*. */ SK_IF_UNLOCK(sc_if); ether_ifattach(ifp, eaddr); SK_IF_LOCK(sc_if); /* * The hardware should be ready for VLAN_MTU by default: * XMAC II has 0x8100 in VLAN Tag Level 1 register initially; * YU_SMR_MFL_VLAN is set by this driver in Yukon. * */ ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_MTU; /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* * Do miibus setup. */ phy = MII_PHY_ANY; switch (sc->sk_type) { case SK_GENESIS: sk_init_xmac(sc_if); if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) phy = 0; break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: sk_init_yukon(sc_if); phy = 0; break; } SK_IF_UNLOCK(sc_if); error = mii_attach(dev, &sc_if->sk_miibus, ifp, sk_ifmedia_upd, sk_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(sc_if->sk_if_dev, "attaching PHYs failed\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) { /* Access should be ok even though lock has been dropped */ sc->sk_if[port] = NULL; sk_detach(dev); } return(error); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int skc_attach(dev) device_t dev; { struct sk_softc *sc; int error = 0, *port; uint8_t skrs; const char *pname = NULL; char *revstr; sc = device_get_softc(dev); sc->sk_dev = dev; mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF); /* * Map control/status registers. 
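 * The preferred resource type (I/O or memory, depending on
 * SK_USEIOSPACE) is tried first; if that allocation fails, the driver
 * falls back to the other type.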
*/ pci_enable_busmaster(dev); /* Allocate resources */ #ifdef SK_USEIOSPACE sc->sk_res_spec = sk_res_spec_io; #else sc->sk_res_spec = sk_res_spec_mem; #endif error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res); if (error) { if (sc->sk_res_spec == sk_res_spec_mem) sc->sk_res_spec = sk_res_spec_io; else sc->sk_res_spec = sk_res_spec_mem; error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res); if (error) { device_printf(dev, "couldn't allocate %s resources\n", sc->sk_res_spec == sk_res_spec_mem ? "memory" : "I/O"); goto fail; } } sc->sk_type = sk_win_read_1(sc, SK_CHIPVER); sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf; /* Bail out if chip is not recognized. */ if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) { device_printf(dev, "unknown device: chipver=%02x, rev=%x\n", sc->sk_type, sc->sk_rev); error = ENXIO; goto fail; } SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW, &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I", "SK interrupt moderation"); /* Pull in device tunables. */ sc->sk_int_mod = SK_IM_DEFAULT; error = resource_int_value(device_get_name(dev), device_get_unit(dev), "int_mod", &sc->sk_int_mod); if (error == 0) { if (sc->sk_int_mod < SK_IM_MIN || sc->sk_int_mod > SK_IM_MAX) { device_printf(dev, "int_mod value out of range; " "using default: %d\n", SK_IM_DEFAULT); sc->sk_int_mod = SK_IM_DEFAULT; } } /* Reset the adapter. */ sk_reset(sc); skrs = sk_win_read_1(sc, SK_EPROM0); if (sc->sk_type == SK_GENESIS) { /* Read and save RAM size and RAMbuffer offset */ switch(skrs) { case SK_RAMSIZE_512K_64: sc->sk_ramsize = 0x80000; sc->sk_rboff = SK_RBOFF_0; break; case SK_RAMSIZE_1024K_64: sc->sk_ramsize = 0x100000; sc->sk_rboff = SK_RBOFF_80000; break; case SK_RAMSIZE_1024K_128: sc->sk_ramsize = 0x100000; sc->sk_rboff = SK_RBOFF_0; break; case SK_RAMSIZE_2048K_128: sc->sk_ramsize = 0x200000; sc->sk_rboff = SK_RBOFF_0; break; default: device_printf(dev, "unknown ram size: %d\n", skrs); error = ENXIO; goto fail; } } else { /* SK_YUKON_FAMILY */ if (skrs == 0x00) sc->sk_ramsize = 0x20000; else sc->sk_ramsize = skrs * (1<<12); sc->sk_rboff = SK_RBOFF_0; } /* Read and save physical media type */ sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE); if (sc->sk_pmd == 'T' || sc->sk_pmd == '1') sc->sk_coppertype = 1; else sc->sk_coppertype = 0; /* Determine whether to name it with VPD PN or just make it up. * Marvell Yukon VPD PN seems to freqently be bogus. */ switch (pci_get_device(dev)) { case DEVICEID_SK_V1: case DEVICEID_BELKIN_5005: case DEVICEID_3COM_3C940: case DEVICEID_LINKSYS_EG1032: case DEVICEID_DLINK_DGE530T_A1: case DEVICEID_DLINK_DGE530T_B1: /* Stay with VPD PN. */ (void) pci_get_vpd_ident(dev, &pname); break; case DEVICEID_SK_V2: /* YUKON VPD PN might bear no resemblance to reality. */ switch (sc->sk_type) { case SK_GENESIS: /* Stay with VPD PN. */ (void) pci_get_vpd_ident(dev, &pname); break; case SK_YUKON: pname = "Marvell Yukon Gigabit Ethernet"; break; case SK_YUKON_LITE: pname = "Marvell Yukon Lite Gigabit Ethernet"; break; case SK_YUKON_LP: pname = "Marvell Yukon LP Gigabit Ethernet"; break; default: pname = "Marvell Yukon (Unknown) Gigabit Ethernet"; break; } /* Yukon Lite Rev. A0 needs special test. */ if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) { u_int32_t far; u_int8_t testbyte; /* Save flash address register before testing. 
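 * The test writes 0xff to the upper byte of the flash address register
 * and reads it back; a non-zero result identifies Yukon Lite rev. A0,
 * after which the saved register value is restored.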
*/ far = sk_win_read_4(sc, SK_EP_ADDR); sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff); testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03); if (testbyte != 0x00) { /* Yukon Lite Rev. A0 detected. */ sc->sk_type = SK_YUKON_LITE; sc->sk_rev = SK_YUKON_LITE_REV_A0; /* Restore flash address register. */ sk_win_write_4(sc, SK_EP_ADDR, far); } } break; default: device_printf(dev, "unknown device: vendor=%04x, device=%04x, " "chipver=%02x, rev=%x\n", pci_get_vendor(dev), pci_get_device(dev), sc->sk_type, sc->sk_rev); error = ENXIO; goto fail; } if (sc->sk_type == SK_YUKON_LITE) { switch (sc->sk_rev) { case SK_YUKON_LITE_REV_A0: revstr = "A0"; break; case SK_YUKON_LITE_REV_A1: revstr = "A1"; break; case SK_YUKON_LITE_REV_A3: revstr = "A3"; break; default: revstr = ""; break; } } else { revstr = ""; } /* Announce the product name and more VPD data if there. */ if (pname != NULL) device_printf(dev, "%s rev. %s(0x%x)\n", pname, revstr, sc->sk_rev); if (bootverbose) { device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type); device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev); device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs); device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize); } sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1); if (sc->sk_devs[SK_PORT_A] == NULL) { device_printf(dev, "failed to add child for PORT_A\n"); error = ENXIO; goto fail; } port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); if (port == NULL) { device_printf(dev, "failed to allocate memory for " "ivars of PORT_A\n"); error = ENXIO; goto fail; } *port = SK_PORT_A; device_set_ivars(sc->sk_devs[SK_PORT_A], port); if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) { sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1); if (sc->sk_devs[SK_PORT_B] == NULL) { device_printf(dev, "failed to add child for PORT_B\n"); error = ENXIO; goto fail; } port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); if (port == NULL) { device_printf(dev, "failed to allocate memory for " "ivars of PORT_B\n"); error = ENXIO; goto fail; } *port = SK_PORT_B; device_set_ivars(sc->sk_devs[SK_PORT_B], port); } /* Turn on the 'driver is loaded' LED. */ CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); error = bus_generic_attach(dev); if (error) { device_printf(dev, "failed to attach port(s)\n"); goto fail; } /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE, NULL, sk_intr, sc, &sc->sk_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); goto fail; } fail: if (error) skc_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int sk_detach(dev) device_t dev; { struct sk_if_softc *sc_if; struct ifnet *ifp; sc_if = device_get_softc(dev); KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx), ("sk mutex not initialized in sk_detach")); SK_IF_LOCK(sc_if); ifp = sc_if->sk_ifp; /* These should only be active if attach_xmac succeeded */ if (device_is_attached(dev)) { sk_stop(sc_if); /* Can't hold locks while calling detach */ SK_IF_UNLOCK(sc_if); callout_drain(&sc_if->sk_tick_ch); callout_drain(&sc_if->sk_watchdog_ch); ether_ifdetach(ifp); SK_IF_LOCK(sc_if); } /* * We're generally called from skc_detach() which is using * device_delete_child() to get to here. 
It's already trashed * miibus for us, so don't do it here or we'll panic. */ /* if (sc_if->sk_miibus != NULL) device_delete_child(dev, sc_if->sk_miibus); */ bus_generic_detach(dev); sk_dma_jumbo_free(sc_if); sk_dma_free(sc_if); SK_IF_UNLOCK(sc_if); if (ifp) if_free(ifp); return(0); } static int skc_detach(dev) device_t dev; { struct sk_softc *sc; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized")); if (device_is_alive(dev)) { if (sc->sk_devs[SK_PORT_A] != NULL) { free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF); device_delete_child(dev, sc->sk_devs[SK_PORT_A]); } if (sc->sk_devs[SK_PORT_B] != NULL) { free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF); device_delete_child(dev, sc->sk_devs[SK_PORT_B]); } bus_generic_detach(dev); } if (sc->sk_intrhand) bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand); bus_release_resources(dev, sc->sk_res_spec, sc->sk_res); mtx_destroy(&sc->sk_mii_mtx); mtx_destroy(&sc->sk_mtx); return(0); } static bus_dma_tag_t skc_get_dma_tag(device_t bus, device_t child __unused) { return (bus_get_dma_tag(bus)); } struct sk_dmamap_arg { bus_addr_t sk_busaddr; }; static void sk_dmamap_cb(arg, segs, nseg, error) void *arg; bus_dma_segment_t *segs; int nseg; int error; { struct sk_dmamap_arg *ctx; if (error != 0) return; ctx = arg; ctx->sk_busaddr = segs[0].ds_addr; } /* * Allocate jumbo buffer storage. The SysKonnect adapters support * "jumbograms" (9K frames), although SysKonnect doesn't currently * use them in their drivers. In order for us to use them, we need * large 9K receive buffers, however standard mbuf clusters are only * 2048 bytes in size. Consequently, we need to allocate and manage * our own jumbo buffer pool. Fortunately, this does not require an * excessive amount of additional code. */ static int sk_dma_alloc(sc_if) struct sk_if_softc *sc_if; { struct sk_dmamap_arg ctx; struct sk_txdesc *txd; struct sk_rxdesc *rxd; int error, i; /* create parent tag */ /* * XXX * This driver should use BUS_SPACE_MAXADDR for lowaddr argument * in bus_dma_tag_create(9) as the NIC would support DAC mode. * However bz@ reported that it does not work on amd64 with > 4GB * RAM. Until we have more clues of the breakage, disable DAC mode * by limiting DMA address to be in 32bit address space. 
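 * Hence BUS_SPACE_MAXADDR_32BIT is used as the lowaddr for the parent
 * and descriptor ring tags created below.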
*/ error = bus_dma_tag_create( bus_get_dma_tag(sc_if->sk_if_dev),/* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->sk_cdata.sk_parent_tag); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to create parent DMA tag\n"); goto fail; } /* create tag for Tx ring */ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ SK_RING_ALIGN, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SK_TX_RING_SZ, /* maxsize */ 1, /* nsegments */ SK_TX_RING_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->sk_cdata.sk_tx_ring_tag); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to allocate Tx ring DMA tag\n"); goto fail; } /* create tag for Rx ring */ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ SK_RING_ALIGN, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SK_RX_RING_SZ, /* maxsize */ 1, /* nsegments */ SK_RX_RING_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->sk_cdata.sk_rx_ring_tag); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to allocate Rx ring DMA tag\n"); goto fail; } /* create tag for Tx buffers */ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * SK_MAXTXSEGS, /* maxsize */ SK_MAXTXSEGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->sk_cdata.sk_tx_tag); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to allocate Tx DMA tag\n"); goto fail; } /* create tag for Rx buffers */ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->sk_cdata.sk_rx_tag); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to allocate Rx DMA tag\n"); goto fail; } /* allocate DMA'able memory and load the DMA map for Tx ring */ error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag, (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_tx_ring_map); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to allocate DMA'able memory for Tx ring\n"); goto fail; } ctx.sk_busaddr = 0; error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag, sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring, SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to load DMA'able memory for Tx ring\n"); goto fail; } sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr; /* allocate DMA'able memory and load the DMA map for Rx ring */ error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag, (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_rx_ring_map); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to 
allocate DMA'able memory for Rx ring\n"); goto fail; } ctx.sk_busaddr = 0; error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag, sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring, SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to load DMA'able memory for Rx ring\n"); goto fail; } sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr; /* create DMA maps for Tx buffers */ for (i = 0; i < SK_TX_RING_CNT; i++) { txd = &sc_if->sk_cdata.sk_txdesc[i]; txd->tx_m = NULL; txd->tx_dmamap = NULL; error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0, &txd->tx_dmamap); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to create Tx dmamap\n"); goto fail; } } /* create DMA maps for Rx buffers */ if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0, &sc_if->sk_cdata.sk_rx_sparemap)) != 0) { device_printf(sc_if->sk_if_dev, "failed to create spare Rx dmamap\n"); goto fail; } for (i = 0; i < SK_RX_RING_CNT; i++) { rxd = &sc_if->sk_cdata.sk_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_dmamap = NULL; error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0, &rxd->rx_dmamap); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to create Rx dmamap\n"); goto fail; } } fail: return (error); } static int sk_dma_jumbo_alloc(sc_if) struct sk_if_softc *sc_if; { struct sk_dmamap_arg ctx; struct sk_rxdesc *jrxd; int error, i; if (jumbo_disable != 0) { device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n"); sc_if->sk_jumbo_disable = 1; return (0); } /* create tag for jumbo Rx ring */ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ SK_RING_ALIGN, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SK_JUMBO_RX_RING_SZ, /* maxsize */ 1, /* nsegments */ SK_JUMBO_RX_RING_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->sk_cdata.sk_jumbo_rx_ring_tag); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to allocate jumbo Rx ring DMA tag\n"); goto jumbo_fail; } /* create tag for jumbo Rx buffers */ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->sk_cdata.sk_jumbo_rx_tag); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to allocate jumbo Rx DMA tag\n"); goto jumbo_fail; } /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */ error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_rx_ring_map); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to allocate DMA'able memory for jumbo Rx ring\n"); goto jumbo_fail; } ctx.sk_busaddr = 0; error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, sc_if->sk_cdata.sk_jumbo_rx_ring_map, sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to load DMA'able memory for jumbo Rx ring\n"); goto jumbo_fail; } sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr; /* create DMA maps for jumbo Rx buffers */ if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0, &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) { 
device_printf(sc_if->sk_if_dev, "failed to create spare jumbo Rx dmamap\n"); goto jumbo_fail; } for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; jrxd->rx_m = NULL; jrxd->rx_dmamap = NULL; error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0, &jrxd->rx_dmamap); if (error != 0) { device_printf(sc_if->sk_if_dev, "failed to create jumbo Rx dmamap\n"); goto jumbo_fail; } } return (0); jumbo_fail: sk_dma_jumbo_free(sc_if); device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to " "resource shortage\n"); sc_if->sk_jumbo_disable = 1; return (0); } static void sk_dma_free(sc_if) struct sk_if_softc *sc_if; { struct sk_txdesc *txd; struct sk_rxdesc *rxd; int i; /* Tx ring */ if (sc_if->sk_cdata.sk_tx_ring_tag) { if (sc_if->sk_rdata.sk_tx_ring_paddr) bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag, sc_if->sk_cdata.sk_tx_ring_map); if (sc_if->sk_rdata.sk_tx_ring) bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag, sc_if->sk_rdata.sk_tx_ring, sc_if->sk_cdata.sk_tx_ring_map); sc_if->sk_rdata.sk_tx_ring = NULL; sc_if->sk_rdata.sk_tx_ring_paddr = 0; bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag); sc_if->sk_cdata.sk_tx_ring_tag = NULL; } /* Rx ring */ if (sc_if->sk_cdata.sk_rx_ring_tag) { if (sc_if->sk_rdata.sk_rx_ring_paddr) bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag, sc_if->sk_cdata.sk_rx_ring_map); if (sc_if->sk_rdata.sk_rx_ring) bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag, sc_if->sk_rdata.sk_rx_ring, sc_if->sk_cdata.sk_rx_ring_map); sc_if->sk_rdata.sk_rx_ring = NULL; sc_if->sk_rdata.sk_rx_ring_paddr = 0; bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag); sc_if->sk_cdata.sk_rx_ring_tag = NULL; } /* Tx buffers */ if (sc_if->sk_cdata.sk_tx_tag) { for (i = 0; i < SK_TX_RING_CNT; i++) { txd = &sc_if->sk_cdata.sk_txdesc[i]; if (txd->tx_dmamap) { bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap); txd->tx_dmamap = NULL; } } bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag); sc_if->sk_cdata.sk_tx_tag = NULL; } /* Rx buffers */ if (sc_if->sk_cdata.sk_rx_tag) { for (i = 0; i < SK_RX_RING_CNT; i++) { rxd = &sc_if->sk_cdata.sk_rxdesc[i]; if (rxd->rx_dmamap) { bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap); rxd->rx_dmamap = NULL; } } if (sc_if->sk_cdata.sk_rx_sparemap) { bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag, sc_if->sk_cdata.sk_rx_sparemap); sc_if->sk_cdata.sk_rx_sparemap = NULL; } bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag); sc_if->sk_cdata.sk_rx_tag = NULL; } if (sc_if->sk_cdata.sk_parent_tag) { bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag); sc_if->sk_cdata.sk_parent_tag = NULL; } } static void sk_dma_jumbo_free(sc_if) struct sk_if_softc *sc_if; { struct sk_rxdesc *jrxd; int i; /* jumbo Rx ring */ if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) { if (sc_if->sk_rdata.sk_jumbo_rx_ring_paddr) bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, sc_if->sk_cdata.sk_jumbo_rx_ring_map); if (sc_if->sk_rdata.sk_jumbo_rx_ring) bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, sc_if->sk_rdata.sk_jumbo_rx_ring, sc_if->sk_cdata.sk_jumbo_rx_ring_map); sc_if->sk_rdata.sk_jumbo_rx_ring = NULL; sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = 0; bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag); sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL; } /* jumbo Rx buffers */ if (sc_if->sk_cdata.sk_jumbo_rx_tag) { for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; if (jrxd->rx_dmamap) { bus_dmamap_destroy( sc_if->sk_cdata.sk_jumbo_rx_tag, jrxd->rx_dmamap); jrxd->rx_dmamap = NULL; } } if 
(sc_if->sk_cdata.sk_jumbo_rx_sparemap) { bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag, sc_if->sk_cdata.sk_jumbo_rx_sparemap); sc_if->sk_cdata.sk_jumbo_rx_sparemap = NULL; } bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag); sc_if->sk_cdata.sk_jumbo_rx_tag = NULL; } } static void sk_txcksum(ifp, m, f) struct ifnet *ifp; struct mbuf *m; struct sk_tx_desc *f; { struct ip *ip; u_int16_t offset; u_int8_t *p; offset = sizeof(struct ip) + ETHER_HDR_LEN; for(; m && m->m_len == 0; m = m->m_next) ; if (m == NULL || m->m_len < ETHER_HDR_LEN) { if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__); /* checksum may be corrupted */ goto sendit; } if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) { if (m->m_len != ETHER_HDR_LEN) { if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n", __func__); /* checksum may be corrupted */ goto sendit; } for(m = m->m_next; m && m->m_len == 0; m = m->m_next) ; if (m == NULL) { offset = sizeof(struct ip) + ETHER_HDR_LEN; /* checksum may be corrupted */ goto sendit; } ip = mtod(m, struct ip *); } else { p = mtod(m, u_int8_t *); p += ETHER_HDR_LEN; ip = (struct ip *)p; } offset = (ip->ip_hl << 2) + ETHER_HDR_LEN; sendit: f->sk_csum_startval = 0; f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) | (offset << 16)); } static int sk_encap(sc_if, m_head) struct sk_if_softc *sc_if; struct mbuf **m_head; { struct sk_txdesc *txd; struct sk_tx_desc *f = NULL; struct mbuf *m; bus_dma_segment_t txsegs[SK_MAXTXSEGS]; u_int32_t cflags, frag, si, sk_ctl; int error, i, nseg; SK_IF_LOCK_ASSERT(sc_if); if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL) return (ENOBUFS); error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nseg, 0); if (error == EFBIG) { m = m_defrag(*m_head, M_NOWAIT); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOMEM); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nseg, 0); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nseg == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) { bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap); return (ENOBUFS); } m = *m_head; if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0) cflags = SK_OPCODE_CSUM; else cflags = SK_OPCODE_DEFAULT; si = frag = sc_if->sk_cdata.sk_tx_prod; for (i = 0; i < nseg; i++) { f = &sc_if->sk_rdata.sk_tx_ring[frag]; f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr)); f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr)); sk_ctl = txsegs[i].ds_len | cflags; if (i == 0) { if (cflags == SK_OPCODE_CSUM) sk_txcksum(sc_if->sk_ifp, m, f); sk_ctl |= SK_TXCTL_FIRSTFRAG; } else sk_ctl |= SK_TXCTL_OWN; f->sk_ctl = htole32(sk_ctl); sc_if->sk_cdata.sk_tx_cnt++; SK_INC(frag, SK_TX_RING_CNT); } sc_if->sk_cdata.sk_tx_prod = frag; /* set EOF on the last desciptor */ frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT; f = &sc_if->sk_rdata.sk_tx_ring[frag]; f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR); /* turn the first descriptor ownership to NIC */ f = &sc_if->sk_rdata.sk_tx_ring[si]; f->sk_ctl |= htole32(SK_TXCTL_OWN); STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q); STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q); txd->tx_m = m; /* sync descriptors */ bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 
sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void sk_start(ifp) struct ifnet *ifp; { struct sk_if_softc *sc_if; sc_if = ifp->if_softc; SK_IF_LOCK(sc_if); sk_start_locked(ifp); SK_IF_UNLOCK(sc_if); return; } static void sk_start_locked(ifp) struct ifnet *ifp; { struct sk_softc *sc; struct sk_if_softc *sc_if; struct mbuf *m_head; int enq; sc_if = ifp->if_softc; sc = sc_if->sk_softc; SK_IF_LOCK_ASSERT(sc_if); for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (sk_encap(sc_if, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); } if (enq > 0) { /* Transmit */ CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); /* Set a timeout in case the chip goes out to lunch. */ sc_if->sk_watchdog_timer = 5; } } static void sk_watchdog(arg) void *arg; { struct sk_if_softc *sc_if; struct ifnet *ifp; ifp = arg; sc_if = ifp->if_softc; SK_IF_LOCK_ASSERT(sc_if); if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer) goto done; /* * Reclaim first as there is a possibility of losing Tx completion * interrupts. */ sk_txeof(sc_if); if (sc_if->sk_cdata.sk_tx_cnt != 0) { if_printf(sc_if->sk_ifp, "watchdog timeout\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; sk_init_locked(sc_if); } done: callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp); return; } static int skc_shutdown(dev) device_t dev; { struct sk_softc *sc; sc = device_get_softc(dev); SK_LOCK(sc); /* Turn off the 'driver is loaded' LED. */ CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF); /* * Reset the GEnesis controller. Doing this should also * assert the resets on the attached XMAC(s). */ sk_reset(sc); SK_UNLOCK(sc); return (0); } static int skc_suspend(dev) device_t dev; { struct sk_softc *sc; struct sk_if_softc *sc_if0, *sc_if1; struct ifnet *ifp0 = NULL, *ifp1 = NULL; sc = device_get_softc(dev); SK_LOCK(sc); sc_if0 = sc->sk_if[SK_PORT_A]; sc_if1 = sc->sk_if[SK_PORT_B]; if (sc_if0 != NULL) ifp0 = sc_if0->sk_ifp; if (sc_if1 != NULL) ifp1 = sc_if1->sk_ifp; if (ifp0 != NULL) sk_stop(sc_if0); if (ifp1 != NULL) sk_stop(sc_if1); sc->sk_suspended = 1; SK_UNLOCK(sc); return (0); } static int skc_resume(dev) device_t dev; { struct sk_softc *sc; struct sk_if_softc *sc_if0, *sc_if1; struct ifnet *ifp0 = NULL, *ifp1 = NULL; sc = device_get_softc(dev); SK_LOCK(sc); sc_if0 = sc->sk_if[SK_PORT_A]; sc_if1 = sc->sk_if[SK_PORT_B]; if (sc_if0 != NULL) ifp0 = sc_if0->sk_ifp; if (sc_if1 != NULL) ifp1 = sc_if1->sk_ifp; if (ifp0 != NULL && ifp0->if_flags & IFF_UP) sk_init_locked(sc_if0); if (ifp1 != NULL && ifp1->if_flags & IFF_UP) sk_init_locked(sc_if1); sc->sk_suspended = 0; SK_UNLOCK(sc); return (0); } /* * According to the data sheet from SK-NET GENESIS the hardware can compute * two Rx checksums at the same time(Each checksum start position is * programmed in Rx descriptors). However it seems that TCP/UDP checksum * does not work at least on my Yukon hardware. I tried every possible ways * to get correct checksum value but couldn't get correct one. 
So TCP/UDP * checksum offload was disabled at the moment and only IP checksum offload * was enabled. * As nomral IP header size is 20 bytes I can't expect it would give an * increase in throughput. However it seems it doesn't hurt performance in * my testing. If there is a more detailed information for checksum secret * of the hardware in question please contact yongari@FreeBSD.org to add * TCP/UDP checksum offload support. */ static __inline void sk_rxcksum(ifp, m, csum) struct ifnet *ifp; struct mbuf *m; u_int32_t csum; { struct ether_header *eh; struct ip *ip; int32_t hlen, len, pktlen; u_int16_t csum1, csum2, ipcsum; pktlen = m->m_pkthdr.len; if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) return; eh = mtod(m, struct ether_header *); if (eh->ether_type != htons(ETHERTYPE_IP)) return; ip = (struct ip *)(eh + 1); if (ip->ip_v != IPVERSION) return; hlen = ip->ip_hl << 2; pktlen -= sizeof(struct ether_header); if (hlen < sizeof(struct ip)) return; if (ntohs(ip->ip_len) < hlen) return; if (ntohs(ip->ip_len) != pktlen) return; csum1 = htons(csum & 0xffff); csum2 = htons((csum >> 16) & 0xffff); ipcsum = in_addword(csum1, ~csum2 & 0xffff); /* checksum fixup for IP options */ len = hlen - sizeof(struct ip); if (len > 0) { /* * If the second checksum value is correct we can compute IP * checksum with simple math. Unfortunately the second checksum * value is wrong so we can't verify the checksum from the * value(It seems there is some magic here to get correct * value). If the second checksum value is correct it also * means we can get TCP/UDP checksum) here. However, it still * needs pseudo header checksum calculation due to hardware * limitations. */ return; } m->m_pkthdr.csum_flags = CSUM_IP_CHECKED; if (ipcsum == 0xffff) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } static __inline int sk_rxvalid(sc, stat, len) struct sk_softc *sc; u_int32_t stat, len; { if (sc->sk_type == SK_GENESIS) { if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME || XM_RXSTAT_BYTES(stat) != len) return (0); } else { if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR | YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_JABBER)) != 0 || (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK || YU_RXSTAT_BYTES(stat) != len) return (0); } return (1); } static void sk_rxeof(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; struct mbuf *m; struct ifnet *ifp; struct sk_rx_desc *cur_rx; struct sk_rxdesc *rxd; int cons, prog; u_int32_t csum, rxstat, sk_ctl; sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; SK_IF_LOCK_ASSERT(sc_if); bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD); prog = 0; for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT; prog++, SK_INC(cons, SK_RX_RING_CNT)) { cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons]; sk_ctl = le32toh(cur_rx->sk_ctl); if ((sk_ctl & SK_RXCTL_OWN) != 0) break; rxd = &sc_if->sk_cdata.sk_rxdesc[cons]; rxstat = le32toh(cur_rx->sk_xmac_rxstat); if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) || SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN || SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN || sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); sk_discard_rxbuf(sc_if, cons); continue; } m = rxd->rx_m; csum = le32toh(cur_rx->sk_csum); if (sk_newbuf(sc_if, cons) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); /* reuse old buffer */ sk_discard_rxbuf(sc_if, cons); continue; } m->m_pkthdr.rcvif = 
ifp; m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) sk_rxcksum(ifp, m, csum); SK_IF_UNLOCK(sc_if); (*ifp->if_input)(ifp, m); SK_IF_LOCK(sc_if); } if (prog > 0) { sc_if->sk_cdata.sk_rx_cons = cons; bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } } static void sk_jumbo_rxeof(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; struct mbuf *m; struct ifnet *ifp; struct sk_rx_desc *cur_rx; struct sk_rxdesc *jrxd; int cons, prog; u_int32_t csum, rxstat, sk_ctl; sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; SK_IF_LOCK_ASSERT(sc_if); bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD); prog = 0; for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons; prog < SK_JUMBO_RX_RING_CNT; prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) { cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons]; sk_ctl = le32toh(cur_rx->sk_ctl); if ((sk_ctl & SK_RXCTL_OWN) != 0) break; jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons]; rxstat = le32toh(cur_rx->sk_xmac_rxstat); if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) || SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN || SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN || sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); sk_discard_jumbo_rxbuf(sc_if, cons); continue; } m = jrxd->rx_m; csum = le32toh(cur_rx->sk_csum); if (sk_jumbo_newbuf(sc_if, cons) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); /* reuse old buffer */ sk_discard_jumbo_rxbuf(sc_if, cons); continue; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) sk_rxcksum(ifp, m, csum); SK_IF_UNLOCK(sc_if); (*ifp->if_input)(ifp, m); SK_IF_LOCK(sc_if); } if (prog > 0) { sc_if->sk_cdata.sk_jumbo_rx_cons = cons; bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } } static void sk_txeof(sc_if) struct sk_if_softc *sc_if; { struct sk_txdesc *txd; struct sk_tx_desc *cur_tx; struct ifnet *ifp; u_int32_t idx, sk_ctl; ifp = sc_if->sk_ifp; txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq); if (txd == NULL) return; bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD); /* * Go through our tx ring and free mbufs for those * frames that have been sent. */ for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) { if (sc_if->sk_cdata.sk_tx_cnt <= 0) break; cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx]; sk_ctl = le32toh(cur_tx->sk_ctl); if (sk_ctl & SK_TXCTL_OWN) break; sc_if->sk_cdata.sk_tx_cnt--; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0) continue; bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); m_freem(txd->tx_m); txd->tx_m = NULL; STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q); STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q); txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq); } sc_if->sk_cdata.sk_tx_cons = idx; sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 
5 : 0; bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void sk_tick(xsc_if) void *xsc_if; { struct sk_if_softc *sc_if; struct mii_data *mii; struct ifnet *ifp; int i; sc_if = xsc_if; ifp = sc_if->sk_ifp; mii = device_get_softc(sc_if->sk_miibus); if (!(ifp->if_flags & IFF_UP)) return; if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { sk_intr_bcom(sc_if); return; } /* * According to SysKonnect, the correct way to verify that * the link has come back up is to poll bit 0 of the GPIO * register three times. This pin has the signal from the * link_sync pin connected to it; if we read the same link * state 3 times in a row, we know the link is up. */ for (i = 0; i < 3; i++) { if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET) break; } if (i != 3) { callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); return; } /* Turn the GP0 interrupt back on. */ SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); SK_XM_READ_2(sc_if, XM_ISR); mii_tick(mii); callout_stop(&sc_if->sk_tick_ch); } static void sk_yukon_tick(xsc_if) void *xsc_if; { struct sk_if_softc *sc_if; struct mii_data *mii; sc_if = xsc_if; mii = device_get_softc(sc_if->sk_miibus); mii_tick(mii); callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if); } static void sk_intr_bcom(sc_if) struct sk_if_softc *sc_if; { struct mii_data *mii; struct ifnet *ifp; int status; mii = device_get_softc(sc_if->sk_miibus); ifp = sc_if->sk_ifp; SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); /* * Read the PHY interrupt register to make sure * we clear any pending interrupts. */ status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { sk_init_xmac(sc_if); return; } if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { int lstat; lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS); if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { mii_mediachg(mii); /* Turn off the link LED. */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); sc_if->sk_link = 0; } else if (status & BRGPHY_ISR_LNK_CHG) { sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00); mii_tick(mii); sc_if->sk_link = 1; /* Turn on the link LED. */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| SK_LINKLED_BLINK_OFF); } else { mii_tick(mii); callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); } } SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); return; } static void sk_intr_xmac(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; u_int16_t status; sc = sc_if->sk_softc; status = SK_XM_READ_2(sc_if, XM_ISR); /* * Link has gone down. Start MII tick timeout to * watch for link resync. 
*/ if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { if (status & XM_ISR_GP0_SET) { SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); } if (status & XM_ISR_AUTONEG_DONE) { callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); } } if (status & XM_IMR_TX_UNDERRUN) SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); if (status & XM_IMR_RX_OVERRUN) SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); status = SK_XM_READ_2(sc_if, XM_ISR); return; } static void sk_intr_yukon(sc_if) struct sk_if_softc *sc_if; { u_int8_t status; status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR); /* RX overrun */ if ((status & SK_GMAC_INT_RX_OVER) != 0) { SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RX_FIFO_OVER); } /* TX underrun */ if ((status & SK_GMAC_INT_TX_UNDER) != 0) { SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_TFCTL_TX_FIFO_UNDER); } } static void sk_intr(xsc) void *xsc; { struct sk_softc *sc = xsc; struct sk_if_softc *sc_if0, *sc_if1; struct ifnet *ifp0 = NULL, *ifp1 = NULL; u_int32_t status; SK_LOCK(sc); status = CSR_READ_4(sc, SK_ISSR); if (status == 0 || status == 0xffffffff || sc->sk_suspended) goto done_locked; sc_if0 = sc->sk_if[SK_PORT_A]; sc_if1 = sc->sk_if[SK_PORT_B]; if (sc_if0 != NULL) ifp0 = sc_if0->sk_ifp; if (sc_if1 != NULL) ifp1 = sc_if1->sk_ifp; for (; (status &= sc->sk_intrmask) != 0;) { /* Handle receive interrupts first. */ if (status & SK_ISR_RX1_EOF) { if (ifp0->if_mtu > SK_MAX_FRAMELEN) sk_jumbo_rxeof(sc_if0); else sk_rxeof(sc_if0); CSR_WRITE_4(sc, SK_BMU_RX_CSR0, SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); } if (status & SK_ISR_RX2_EOF) { if (ifp1->if_mtu > SK_MAX_FRAMELEN) sk_jumbo_rxeof(sc_if1); else sk_rxeof(sc_if1); CSR_WRITE_4(sc, SK_BMU_RX_CSR1, SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); } /* Then transmit interrupts. */ if (status & SK_ISR_TX1_S_EOF) { sk_txeof(sc_if0); CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF); } if (status & SK_ISR_TX2_S_EOF) { sk_txeof(sc_if1); CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF); } /* Then MAC interrupts. */ if (status & SK_ISR_MAC1 && ifp0->if_drv_flags & IFF_DRV_RUNNING) { if (sc->sk_type == SK_GENESIS) sk_intr_xmac(sc_if0); else sk_intr_yukon(sc_if0); } if (status & SK_ISR_MAC2 && ifp1->if_drv_flags & IFF_DRV_RUNNING) { if (sc->sk_type == SK_GENESIS) sk_intr_xmac(sc_if1); else sk_intr_yukon(sc_if1); } if (status & SK_ISR_EXTERNAL_REG) { if (ifp0 != NULL && sc_if0->sk_phytype == SK_PHYTYPE_BCOM) sk_intr_bcom(sc_if0); if (ifp1 != NULL && sc_if1->sk_phytype == SK_PHYTYPE_BCOM) sk_intr_bcom(sc_if1); } status = CSR_READ_4(sc, SK_ISSR); } CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) sk_start_locked(ifp0); if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) sk_start_locked(ifp1); done_locked: SK_UNLOCK(sc); } static void sk_init_xmac(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; struct ifnet *ifp; u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2]; static const struct sk_bcom_hack bhack[] = { { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, { 0, 0 } }; SK_IF_LOCK_ASSERT(sc_if); sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; /* Unreset the XMAC. */ SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET); DELAY(1000); /* Reset the XMAC's internal state. 
*/ SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); /* Save the XMAC II revision */ sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID)); /* * Perform additional initialization for external PHYs, * namely for the 1000baseTX cards that use the XMAC's * GMII mode. */ if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { int i = 0; u_int32_t val; /* Take PHY out of reset. */ val = sk_win_read_4(sc, SK_GPIO); if (sc_if->sk_port == SK_PORT_A) val |= SK_GPIO_DIR0|SK_GPIO_DAT0; else val |= SK_GPIO_DIR2|SK_GPIO_DAT2; sk_win_write_4(sc, SK_GPIO, val); /* Enable GMII mode on the XMAC. */ SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE); sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET); DELAY(10000); sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFFF0); /* * Early versions of the BCM5400 apparently have * a bug that requires them to have their reserved * registers initialized to some magic values. I don't * know what the numbers do, I'm just the messenger. */ if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03) == 0x6041) { while(bhack[i].reg) { sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, bhack[i].reg, bhack[i].val); i++; } } } /* Set station address */ bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN); SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]); SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]); SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]); SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION); if (ifp->if_flags & IFF_BROADCAST) { SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); } else { SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); } /* We don't need the FCS appended to the packet. */ SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS); /* We want short frames padded to 60 bytes. */ SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD); /* * Enable the reception of all error frames. This is is * a necessary evil due to the design of the XMAC. The * XMAC's receive FIFO is only 8K in size, however jumbo * frames can be up to 9000 bytes in length. When bad * frame filtering is enabled, the XMAC's RX FIFO operates * in 'store and forward' mode. For this to work, the * entire frame has to fit into the FIFO, but that means * that jumbo frames larger than 8192 bytes will be * truncated. Disabling all bad frame filtering causes * the RX FIFO to operate in streaming mode, in which * case the XMAC will start transferring frames out of the * RX FIFO as soon as the FIFO threshold is reached. */ if (ifp->if_mtu > SK_MAX_FRAMELEN) { SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES| XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS| XM_MODE_RX_INRANGELEN); SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); } else SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); /* * Bump up the transmit threshold. This helps hold off transmit * underruns when we're blasting traffic from both ports at once. 
*/ SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH); /* Set Rx filter */ sk_rxfilter_genesis(sc_if); /* Clear and enable interrupts */ SK_XM_READ_2(sc_if, XM_ISR); if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS); else SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); /* Configure MAC arbiter */ switch(sc_if->sk_xmac_rev) { case XM_XMAC_REV_B2: sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); break; case XM_XMAC_REV_C1: sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); break; default: break; } sk_win_write_2(sc, SK_MACARB_CTL, SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF); sc_if->sk_link = 1; return; } static void sk_init_yukon(sc_if) struct sk_if_softc *sc_if; { u_int32_t phy, v; u_int16_t reg; struct sk_softc *sc; struct ifnet *ifp; u_int8_t *eaddr; int i; SK_IF_LOCK_ASSERT(sc_if); sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev >= SK_YUKON_LITE_REV_A3) { /* * Workaround code for COMA mode, set PHY reset. 
* Otherwise it will not correctly take chip out of * powerdown (coma) */ v = sk_win_read_4(sc, SK_GPIO); v |= SK_GPIO_DIR9 | SK_GPIO_DAT9; sk_win_write_4(sc, SK_GPIO, v); } /* GMAC and GPHY Reset */ SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET); SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); DELAY(1000); if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev >= SK_YUKON_LITE_REV_A3) { /* * Workaround code for COMA mode, clear PHY reset */ v = sk_win_read_4(sc, SK_GPIO); v |= SK_GPIO_DIR9; v &= ~SK_GPIO_DAT9; sk_win_write_4(sc, SK_GPIO, v); } phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP | SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE; if (sc->sk_coppertype) phy |= SK_GPHY_COPPER; else phy |= SK_GPHY_FIBER; SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET); DELAY(1000); SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR); SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF | SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR); /* unused read of the interrupt source register */ SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); reg = SK_YU_READ_2(sc_if, YUKON_PAR); /* MIB Counter Clear Mode set */ reg |= YU_PAR_MIB_CLR; SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); /* MIB Counter Clear Mode clear */ reg &= ~YU_PAR_MIB_CLR; SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); /* receive control reg */ SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR); /* transmit parameter register */ SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) | YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) ); /* serial mode register */ reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e); if (ifp->if_mtu > SK_MAX_FRAMELEN) reg |= YU_SMR_MFL_JUMBO; SK_YU_WRITE_2(sc_if, YUKON_SMR, reg); /* Setup Yukon's station address */ eaddr = IF_LLADDR(sc_if->sk_ifp); for (i = 0; i < 3; i++) SK_YU_WRITE_2(sc_if, SK_MAC0_0 + i * 4, eaddr[i * 2] | eaddr[i * 2 + 1] << 8); /* Set GMAC source address of flow control. */ for (i = 0; i < 3; i++) SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4, eaddr[i * 2] | eaddr[i * 2 + 1] << 8); /* Set GMAC virtual address. */ for (i = 0; i < 3; i++) SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, eaddr[i * 2] | eaddr[i * 2 + 1] << 8); /* Set Rx filter */ sk_rxfilter_yukon(sc_if); /* enable interrupt mask for counter overflows */ SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0); SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0); SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0); /* Configure RX MAC FIFO Flush Mask */ v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT | YU_RXSTAT_JABBER; SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v); /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */ if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0) v = SK_TFCTL_OPERATION_ON; else v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON; /* Configure RX MAC FIFO */ SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR); SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v); /* Increase flush threshould to 64 bytes */ SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD, SK_RFCTL_FIFO_THRESHOLD + 1); /* Configure TX MAC FIFO */ SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR); SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON); } /* * Note that to properly initialize any part of the GEnesis chip, * you first have to take it out of reset mode. 
*/ static void sk_init(xsc) void *xsc; { struct sk_if_softc *sc_if = xsc; SK_IF_LOCK(sc_if); sk_init_locked(sc_if); SK_IF_UNLOCK(sc_if); return; } static void sk_init_locked(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; struct ifnet *ifp; struct mii_data *mii; u_int16_t reg; u_int32_t imr; int error; SK_IF_LOCK_ASSERT(sc_if); ifp = sc_if->sk_ifp; sc = sc_if->sk_softc; mii = device_get_softc(sc_if->sk_miibus); if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; /* Cancel pending I/O and free all RX/TX buffers. */ sk_stop(sc_if); if (sc->sk_type == SK_GENESIS) { /* Configure LINK_SYNC LED */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON); /* Configure RX LED */ SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START); /* Configure TX LED */ SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START); } /* * Configure descriptor poll timer * * SK-NET GENESIS data sheet says that possibility of losing Start * transmit command due to CPU/cache related interim storage problems * under certain conditions. The document recommends a polling * mechanism to send a Start transmit command to initiate transfer * of ready descriptors regulary. To cope with this issue sk(4) now * enables descriptor poll timer to initiate descriptor processing * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx * command instead of waiting for next descriptor polling time. * The same rule may apply to Rx side too but it seems that is not * needed at the moment. * Since sk(4) uses descriptor polling as a last resort there is no * need to set smaller polling time than maximum allowable one. */ SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX); /* Configure I2C registers */ /* Configure XMAC(s) */ switch (sc->sk_type) { case SK_GENESIS: sk_init_xmac(sc_if); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: sk_init_yukon(sc_if); break; } mii_mediachg(mii); if (sc->sk_type == SK_GENESIS) { /* Configure MAC FIFOs */ SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET); SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END); SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON); SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET); SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END); SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON); } /* Configure transmit arbiter(s) */ SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON); /* Configure RAMbuffers */ SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON); /* Configure BMUs */ SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE); if (ifp->if_mtu > SK_MAX_FRAMELEN) { SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, 
SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0))); SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0))); } else { SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0))); SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0))); } SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO, SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0))); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0))); /* Init descriptors */ if (ifp->if_mtu > SK_MAX_FRAMELEN) error = sk_init_jumbo_rx_ring(sc_if); else error = sk_init_rx_ring(sc_if); if (error != 0) { device_printf(sc_if->sk_if_dev, "initialization failed: no memory for rx buffers\n"); sk_stop(sc_if); return; } sk_init_tx_ring(sc_if); /* Set interrupt moderation if changed via sysctl. */ imr = sk_win_read_4(sc, SK_IMTIMERINIT); if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) { sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)); if (bootverbose) device_printf(sc_if->sk_if_dev, "interrupt moderation is %d us.\n", sc->sk_int_mod); } /* Configure interrupt handling */ CSR_READ_4(sc, SK_ISSR); if (sc_if->sk_port == SK_PORT_A) sc->sk_intrmask |= SK_INTRS1; else sc->sk_intrmask |= SK_INTRS2; sc->sk_intrmask |= SK_ISR_EXTERNAL_REG; CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); /* Start BMUs. */ SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START); switch(sc->sk_type) { case SK_GENESIS: /* Enable XMACs TX and RX state machines */ SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE); SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: reg = SK_YU_READ_2(sc_if, YUKON_GPCR); reg |= YU_GPCR_TXEN | YU_GPCR_RXEN; #if 0 /* XXX disable 100Mbps and full duplex mode? 
*/ reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS); #endif SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg); } /* Activate descriptor polling timer */ SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START); /* start transfer of Tx descriptors */ CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; switch (sc->sk_type) { case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if); break; } callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp); return; } static void sk_stop(sc_if) struct sk_if_softc *sc_if; { int i; struct sk_softc *sc; struct sk_txdesc *txd; struct sk_rxdesc *rxd; struct sk_rxdesc *jrxd; struct ifnet *ifp; u_int32_t val; SK_IF_LOCK_ASSERT(sc_if); sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; callout_stop(&sc_if->sk_tick_ch); callout_stop(&sc_if->sk_watchdog_ch); /* stop Tx descriptor polling timer */ SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP); /* stop transfer of Tx descriptors */ CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP); for (i = 0; i < SK_TIMEOUT; i++) { val = CSR_READ_4(sc, sc_if->sk_tx_bmu); if ((val & SK_TXBMU_TX_STOP) == 0) break; DELAY(1); } if (i == SK_TIMEOUT) device_printf(sc_if->sk_if_dev, "can not stop transfer of Tx descriptor\n"); /* stop transfer of Rx descriptors */ SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP); for (i = 0; i < SK_TIMEOUT; i++) { val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR); if ((val & SK_RXBMU_RX_STOP) == 0) break; DELAY(1); } if (i == SK_TIMEOUT) device_printf(sc_if->sk_if_dev, "can not stop transfer of Rx descriptor\n"); if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { /* Put PHY back into reset. */ val = sk_win_read_4(sc, SK_GPIO); if (sc_if->sk_port == SK_PORT_A) { val |= SK_GPIO_DIR0; val &= ~SK_GPIO_DAT0; } else { val |= SK_GPIO_DIR2; val &= ~SK_GPIO_DAT2; } sk_win_write_4(sc, SK_GPIO, val); } /* Turn off various components of this interface. */ SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); switch (sc->sk_type) { case SK_GENESIS: SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET); SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET); SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET); break; } SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); /* Disable interrupts */ if (sc_if->sk_port == SK_PORT_A) sc->sk_intrmask &= ~SK_INTRS1; else sc->sk_intrmask &= ~SK_INTRS2; CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); SK_XM_READ_2(sc_if, XM_ISR); SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); /* Free RX and TX mbufs still in the queues. 
*/ for (i = 0; i < SK_RX_RING_CNT; i++) { rxd = &sc_if->sk_cdata.sk_rxdesc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; if (jrxd->rx_m != NULL) { bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag, jrxd->rx_dmamap); m_freem(jrxd->rx_m); jrxd->rx_m = NULL; } } for (i = 0; i < SK_TX_RING_CNT; i++) { txd = &sc_if->sk_cdata.sk_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE); return; } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (!arg1) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX)); } Index: head/sys/dev/sound/pci/maestro3.c =================================================================== --- head/sys/dev/sound/pci/maestro3.c (revision 336756) +++ head/sys/dev/sound/pci/maestro3.c (revision 336757) @@ -1,1800 +1,1800 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2001 Scott Long * Copyright (c) 2001 Darrell Anderson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Maestro-3/Allegro FreeBSD pcm sound driver * * executive status summary: * (+) /dev/dsp multiple concurrent play channels. * (+) /dev/dsp config (speed, mono/stereo, 8/16 bit). * (+) /dev/mixer sets left/right volumes. * (+) /dev/dsp recording works. Tested successfully with the cdrom channel * (+) apm suspend/resume works, and works properly!. * (-) hardware volme controls don't work =-( * (-) setblocksize() does nothing. 
* * The real credit goes to: * * Zach Brown for his Linux driver core and helpful technical comments. * , http://www.zabbo.net/maestro3 * * Cameron Grant created the pcm framework used here nearly verbatim. - * , http://people.freebsd.org/~cg/template.c + * , https://people.freebsd.org/~cg/template.c * * Taku YAMAMOTO for his Maestro-1/2 FreeBSD driver and sanity reference. * * * ESS docs explained a few magic registers and numbers. * http://virgo.caltech.edu/~dmoore/maestro3.pdf.gz */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #define M3_MODEL 1 #include #include SND_DECLARE_FILE("$FreeBSD$"); /* -------------------------------------------------------------------- */ enum {CHANGE=0, CALL=1, INTR=2, BORING=3, NONE=-1}; #ifndef M3_DEBUG_LEVEL #define M3_DEBUG_LEVEL NONE #endif #define M3_DEBUG(level, _msg) {if ((level) <= M3_DEBUG_LEVEL) {printf _msg;}} /* -------------------------------------------------------------------- */ enum { ESS_ALLEGRO_1, ESS_MAESTRO3 }; static struct m3_card_type { u_int32_t pci_id; int which; int delay1; int delay2; char *name; } m3_card_types[] = { { 0x1988125d, ESS_ALLEGRO_1, 50, 800, "ESS Technology Allegro-1" }, { 0x1998125d, ESS_MAESTRO3, 20, 500, "ESS Technology Maestro3" }, { 0x199a125d, ESS_MAESTRO3, 20, 500, "ESS Technology Maestro3" }, { 0, 0, 0, 0, NULL } }; #define M3_BUFSIZE_MIN 4096 #define M3_BUFSIZE_MAX 65536 #define M3_BUFSIZE_DEFAULT 4096 #define M3_PCHANS 4 /* create /dev/dsp0.[0-N] to use more than one */ #define M3_RCHANS 1 #define M3_MAXADDR ((1 << 27) - 1) #define M3_DEFAULT_VOL 0x6800 struct sc_info; struct sc_pchinfo { u_int32_t spd; u_int32_t fmt; struct snd_dbuf *buffer; struct pcm_channel *channel; struct sc_info *parent; u_int32_t bufsize; u_int32_t dac_data; u_int32_t dac_idx; u_int32_t active; u_int32_t ptr; u_int32_t prevptr; }; struct sc_rchinfo { u_int32_t spd; u_int32_t fmt; struct snd_dbuf *buffer; struct pcm_channel *channel; struct sc_info *parent; u_int32_t bufsize; u_int32_t adc_data; u_int32_t adc_idx; u_int32_t active; u_int32_t ptr; u_int32_t prevptr; }; struct sc_info { device_t dev; u_int32_t type; int which; int delay1; int delay2; bus_space_tag_t st; bus_space_handle_t sh; bus_dma_tag_t parent_dmat; struct resource *reg; struct resource *irq; int regtype; int regid; int irqid; void *ih; struct sc_pchinfo pch[M3_PCHANS]; struct sc_rchinfo rch[M3_RCHANS]; int pch_cnt; int rch_cnt; int pch_active_cnt; unsigned int bufsz; u_int16_t *savemem; struct mtx *sc_lock; }; #define M3_LOCK(_sc) snd_mtxlock((_sc)->sc_lock) #define M3_UNLOCK(_sc) snd_mtxunlock((_sc)->sc_lock) #define M3_LOCK_ASSERT(_sc) snd_mtxassert((_sc)->sc_lock) /* -------------------------------------------------------------------- */ /* play channel interface */ static void *m3_pchan_init(kobj_t, void *, struct snd_dbuf *, struct pcm_channel *, int); static int m3_pchan_free(kobj_t, void *); static int m3_pchan_setformat(kobj_t, void *, u_int32_t); static u_int32_t m3_pchan_setspeed(kobj_t, void *, u_int32_t); static u_int32_t m3_pchan_setblocksize(kobj_t, void *, u_int32_t); static int m3_pchan_trigger(kobj_t, void *, int); static int m3_pchan_trigger_locked(kobj_t, void *, int); static u_int32_t m3_pchan_getptr_internal(struct sc_pchinfo *); static u_int32_t m3_pchan_getptr(kobj_t, void *); static struct pcmchan_caps *m3_pchan_getcaps(kobj_t, void *); /* record channel interface */ static void *m3_rchan_init(kobj_t, void *, struct snd_dbuf *, struct pcm_channel *, int); static int m3_rchan_free(kobj_t, 
void *); static int m3_rchan_setformat(kobj_t, void *, u_int32_t); static u_int32_t m3_rchan_setspeed(kobj_t, void *, u_int32_t); static u_int32_t m3_rchan_setblocksize(kobj_t, void *, u_int32_t); static int m3_rchan_trigger(kobj_t, void *, int); static int m3_rchan_trigger_locked(kobj_t, void *, int); static u_int32_t m3_rchan_getptr_internal(struct sc_rchinfo *); static u_int32_t m3_rchan_getptr(kobj_t, void *); static struct pcmchan_caps *m3_rchan_getcaps(kobj_t, void *); static int m3_chan_active(struct sc_info *); /* talk to the codec - called from ac97.c */ static u_int32_t m3_initcd(kobj_t, void *); static int m3_rdcd(kobj_t, void *, int); static int m3_wrcd(kobj_t, void *, int, u_int32_t); /* stuff */ static void m3_intr(void *); static int m3_power(struct sc_info *, int); static int m3_init(struct sc_info *); static int m3_uninit(struct sc_info *); static u_int8_t m3_assp_halt(struct sc_info *); static void m3_config(struct sc_info *); static void m3_amp_enable(struct sc_info *); static void m3_enable_ints(struct sc_info *); static void m3_codec_reset(struct sc_info *); /* -------------------------------------------------------------------- */ /* Codec descriptor */ static kobj_method_t m3_codec_methods[] = { KOBJMETHOD(ac97_init, m3_initcd), KOBJMETHOD(ac97_read, m3_rdcd), KOBJMETHOD(ac97_write, m3_wrcd), KOBJMETHOD_END }; AC97_DECLARE(m3_codec); /* -------------------------------------------------------------------- */ /* channel descriptors */ static u_int32_t m3_playfmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps m3_playcaps = {8000, 48000, m3_playfmt, 0}; static kobj_method_t m3_pch_methods[] = { KOBJMETHOD(channel_init, m3_pchan_init), KOBJMETHOD(channel_setformat, m3_pchan_setformat), KOBJMETHOD(channel_setspeed, m3_pchan_setspeed), KOBJMETHOD(channel_setblocksize, m3_pchan_setblocksize), KOBJMETHOD(channel_trigger, m3_pchan_trigger), KOBJMETHOD(channel_getptr, m3_pchan_getptr), KOBJMETHOD(channel_getcaps, m3_pchan_getcaps), KOBJMETHOD(channel_free, m3_pchan_free), KOBJMETHOD_END }; CHANNEL_DECLARE(m3_pch); static u_int32_t m3_recfmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps m3_reccaps = {8000, 48000, m3_recfmt, 0}; static kobj_method_t m3_rch_methods[] = { KOBJMETHOD(channel_init, m3_rchan_init), KOBJMETHOD(channel_setformat, m3_rchan_setformat), KOBJMETHOD(channel_setspeed, m3_rchan_setspeed), KOBJMETHOD(channel_setblocksize, m3_rchan_setblocksize), KOBJMETHOD(channel_trigger, m3_rchan_trigger), KOBJMETHOD(channel_getptr, m3_rchan_getptr), KOBJMETHOD(channel_getcaps, m3_rchan_getcaps), KOBJMETHOD(channel_free, m3_rchan_free), KOBJMETHOD_END }; CHANNEL_DECLARE(m3_rch); /* -------------------------------------------------------------------- */ /* some i/o convenience functions */ #define m3_rd_1(sc, regno) bus_space_read_1(sc->st, sc->sh, regno) #define m3_rd_2(sc, regno) bus_space_read_2(sc->st, sc->sh, regno) #define m3_rd_4(sc, regno) bus_space_read_4(sc->st, sc->sh, regno) #define m3_wr_1(sc, regno, data) bus_space_write_1(sc->st, sc->sh, regno, data) #define m3_wr_2(sc, regno, data) bus_space_write_2(sc->st, sc->sh, regno, data) #define m3_wr_4(sc, regno, data) bus_space_write_4(sc->st, sc->sh, regno, data) #define m3_rd_assp_code(sc, index) \ m3_rd_assp(sc, MEMTYPE_INTERNAL_CODE, index) #define m3_wr_assp_code(sc, index, data) \ m3_wr_assp(sc, 
MEMTYPE_INTERNAL_CODE, index, data) #define m3_rd_assp_data(sc, index) \ m3_rd_assp(sc, MEMTYPE_INTERNAL_DATA, index) #define m3_wr_assp_data(sc, index, data) \ m3_wr_assp(sc, MEMTYPE_INTERNAL_DATA, index, data) static __inline u_int16_t m3_rd_assp(struct sc_info *sc, u_int16_t region, u_int16_t index) { m3_wr_2(sc, DSP_PORT_MEMORY_TYPE, region & MEMTYPE_MASK); m3_wr_2(sc, DSP_PORT_MEMORY_INDEX, index); return m3_rd_2(sc, DSP_PORT_MEMORY_DATA); } static __inline void m3_wr_assp(struct sc_info *sc, u_int16_t region, u_int16_t index, u_int16_t data) { m3_wr_2(sc, DSP_PORT_MEMORY_TYPE, region & MEMTYPE_MASK); m3_wr_2(sc, DSP_PORT_MEMORY_INDEX, index); m3_wr_2(sc, DSP_PORT_MEMORY_DATA, data); } static __inline int m3_wait(struct sc_info *sc) { int i; for (i=0 ; i<20 ; i++) { if ((m3_rd_1(sc, CODEC_STATUS) & 1) == 0) { return 0; } DELAY(2); } return -1; } /* -------------------------------------------------------------------- */ /* ac97 codec */ static u_int32_t m3_initcd(kobj_t kobj, void *devinfo) { struct sc_info *sc = (struct sc_info *)devinfo; u_int32_t data; M3_DEBUG(CALL, ("m3_initcd\n")); /* init ac-link */ data = m3_rd_1(sc, CODEC_COMMAND); return ((data & 0x1) ? 0 : 1); } static int m3_rdcd(kobj_t kobj, void *devinfo, int regno) { struct sc_info *sc = (struct sc_info *)devinfo; u_int32_t data; if (m3_wait(sc)) { device_printf(sc->dev, "m3_rdcd timed out.\n"); return -1; } m3_wr_1(sc, CODEC_COMMAND, (regno & 0x7f) | 0x80); DELAY(50); /* ac97 cycle = 20.8 usec */ if (m3_wait(sc)) { device_printf(sc->dev, "m3_rdcd timed out.\n"); return -1; } data = m3_rd_2(sc, CODEC_DATA); return data; } static int m3_wrcd(kobj_t kobj, void *devinfo, int regno, u_int32_t data) { struct sc_info *sc = (struct sc_info *)devinfo; if (m3_wait(sc)) { device_printf(sc->dev, "m3_wrcd timed out.\n"); return -1; } m3_wr_2(sc, CODEC_DATA, data); m3_wr_1(sc, CODEC_COMMAND, regno & 0x7f); DELAY(50); /* ac97 cycle = 20.8 usec */ return 0; } /* -------------------------------------------------------------------- */ /* play channel interface */ #define LO(x) (((x) & 0x0000ffff) ) #define HI(x) (((x) & 0xffff0000) >> 16) static void * m3_pchan_init(kobj_t kobj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sc_info *sc = devinfo; struct sc_pchinfo *ch; u_int32_t bus_addr, i; int idx, data_bytes, dac_data; int dsp_in_size, dsp_out_size, dsp_in_buf, dsp_out_buf; struct data_word { u_int16_t addr, val; } pv[] = { {CDATA_LEFT_VOLUME, M3_DEFAULT_VOL}, {CDATA_RIGHT_VOLUME, M3_DEFAULT_VOL}, {SRC3_DIRECTION_OFFSET, 0} , {SRC3_DIRECTION_OFFSET + 3, 0x0000}, {SRC3_DIRECTION_OFFSET + 4, 0}, {SRC3_DIRECTION_OFFSET + 5, 0}, {SRC3_DIRECTION_OFFSET + 6, 0}, {SRC3_DIRECTION_OFFSET + 7, 0}, {SRC3_DIRECTION_OFFSET + 8, 0}, {SRC3_DIRECTION_OFFSET + 9, 0}, {SRC3_DIRECTION_OFFSET + 10, 0x8000}, {SRC3_DIRECTION_OFFSET + 11, 0xFF00}, {SRC3_DIRECTION_OFFSET + 13, 0}, {SRC3_DIRECTION_OFFSET + 14, 0}, {SRC3_DIRECTION_OFFSET + 15, 0}, {SRC3_DIRECTION_OFFSET + 16, 8}, {SRC3_DIRECTION_OFFSET + 17, 50*2}, {SRC3_DIRECTION_OFFSET + 18, MINISRC_BIQUAD_STAGE - 1}, {SRC3_DIRECTION_OFFSET + 20, 0}, {SRC3_DIRECTION_OFFSET + 21, 0} }; M3_LOCK(sc); idx = sc->pch_cnt; /* dac instance number, no active reuse! 
*/ M3_DEBUG(CHANGE, ("m3_pchan_init(dac=%d)\n", idx)); if (dir != PCMDIR_PLAY) { M3_UNLOCK(sc); device_printf(sc->dev, "m3_pchan_init not PCMDIR_PLAY\n"); return (NULL); } data_bytes = (((MINISRC_TMP_BUFFER_SIZE & ~1) + (MINISRC_IN_BUFFER_SIZE & ~1) + (MINISRC_OUT_BUFFER_SIZE & ~1) + 4) + 255) &~ 255; dac_data = 0x1100 + (data_bytes * idx); dsp_in_size = MINISRC_IN_BUFFER_SIZE - (0x20 * 2); dsp_out_size = MINISRC_OUT_BUFFER_SIZE - (0x20 * 2); dsp_in_buf = dac_data + (MINISRC_TMP_BUFFER_SIZE/2); dsp_out_buf = dsp_in_buf + (dsp_in_size/2) + 1; ch = &sc->pch[idx]; ch->dac_idx = idx; ch->dac_data = dac_data; if (ch->dac_data + data_bytes/2 >= 0x1c00) { M3_UNLOCK(sc); device_printf(sc->dev, "m3_pchan_init: revb mem exhausted\n"); return (NULL); } ch->buffer = b; ch->parent = sc; ch->channel = c; ch->fmt = SND_FORMAT(AFMT_U8, 1, 0); ch->spd = DSP_DEFAULT_SPEED; M3_UNLOCK(sc); /* XXX */ if (sndbuf_alloc(ch->buffer, sc->parent_dmat, 0, sc->bufsz) != 0) { device_printf(sc->dev, "m3_pchan_init chn_allocbuf failed\n"); return (NULL); } M3_LOCK(sc); ch->bufsize = sndbuf_getsize(ch->buffer); /* host dma buffer pointers */ bus_addr = sndbuf_getbufaddr(ch->buffer); if (bus_addr & 3) { device_printf(sc->dev, "m3_pchan_init unaligned bus_addr\n"); bus_addr = (bus_addr + 4) & ~3; } m3_wr_assp_data(sc, ch->dac_data + CDATA_HOST_SRC_ADDRL, LO(bus_addr)); m3_wr_assp_data(sc, ch->dac_data + CDATA_HOST_SRC_ADDRH, HI(bus_addr)); m3_wr_assp_data(sc, ch->dac_data + CDATA_HOST_SRC_END_PLUS_1L, LO(bus_addr + ch->bufsize)); m3_wr_assp_data(sc, ch->dac_data + CDATA_HOST_SRC_END_PLUS_1H, HI(bus_addr + ch->bufsize)); m3_wr_assp_data(sc, ch->dac_data + CDATA_HOST_SRC_CURRENTL, LO(bus_addr)); m3_wr_assp_data(sc, ch->dac_data + CDATA_HOST_SRC_CURRENTH, HI(bus_addr)); /* dsp buffers */ m3_wr_assp_data(sc, ch->dac_data + CDATA_IN_BUF_BEGIN, dsp_in_buf); m3_wr_assp_data(sc, ch->dac_data + CDATA_IN_BUF_END_PLUS_1, dsp_in_buf + dsp_in_size/2); m3_wr_assp_data(sc, ch->dac_data + CDATA_IN_BUF_HEAD, dsp_in_buf); m3_wr_assp_data(sc, ch->dac_data + CDATA_IN_BUF_TAIL, dsp_in_buf); m3_wr_assp_data(sc, ch->dac_data + CDATA_OUT_BUF_BEGIN, dsp_out_buf); m3_wr_assp_data(sc, ch->dac_data + CDATA_OUT_BUF_END_PLUS_1, dsp_out_buf + dsp_out_size/2); m3_wr_assp_data(sc, ch->dac_data + CDATA_OUT_BUF_HEAD, dsp_out_buf); m3_wr_assp_data(sc, ch->dac_data + CDATA_OUT_BUF_TAIL, dsp_out_buf); /* some per client initializers */ m3_wr_assp_data(sc, ch->dac_data + SRC3_DIRECTION_OFFSET + 12, ch->dac_data + 40 + 8); m3_wr_assp_data(sc, ch->dac_data + SRC3_DIRECTION_OFFSET + 19, 0x400 + MINISRC_COEF_LOC); /* enable or disable low pass filter? (0xff if rate> 45000) */ m3_wr_assp_data(sc, ch->dac_data + SRC3_DIRECTION_OFFSET + 22, 0); /* tell it which way dma is going? 
*/ m3_wr_assp_data(sc, ch->dac_data + CDATA_DMA_CONTROL, DMACONTROL_AUTOREPEAT + DMAC_PAGE3_SELECTOR + DMAC_BLOCKF_SELECTOR); /* set an armload of static initializers */ for(i = 0 ; i < (sizeof(pv) / sizeof(pv[0])) ; i++) { m3_wr_assp_data(sc, ch->dac_data + pv[i].addr, pv[i].val); } /* put us in the packed task lists */ m3_wr_assp_data(sc, KDATA_INSTANCE0_MINISRC + (sc->pch_cnt + sc->rch_cnt), ch->dac_data >> DP_SHIFT_COUNT); m3_wr_assp_data(sc, KDATA_DMA_XFER0 + (sc->pch_cnt + sc->rch_cnt), ch->dac_data >> DP_SHIFT_COUNT); m3_wr_assp_data(sc, KDATA_MIXER_XFER0 + sc->pch_cnt, ch->dac_data >> DP_SHIFT_COUNT); /* gotta start before stop */ m3_pchan_trigger_locked(NULL, ch, PCMTRIG_START); /* silence noise on load */ m3_pchan_trigger_locked(NULL, ch, PCMTRIG_STOP); sc->pch_cnt++; M3_UNLOCK(sc); return (ch); } static int m3_pchan_free(kobj_t kobj, void *chdata) { struct sc_pchinfo *ch = chdata; struct sc_info *sc = ch->parent; M3_LOCK(sc); M3_DEBUG(CHANGE, ("m3_pchan_free(dac=%d)\n", ch->dac_idx)); /* * should remove this exact instance from the packed lists, but all * are released at once (and in a stopped state) so this is ok. */ m3_wr_assp_data(sc, KDATA_INSTANCE0_MINISRC + (sc->pch_cnt - 1) + sc->rch_cnt, 0); m3_wr_assp_data(sc, KDATA_DMA_XFER0 + (sc->pch_cnt - 1) + sc->rch_cnt, 0); m3_wr_assp_data(sc, KDATA_MIXER_XFER0 + (sc->pch_cnt-1), 0); sc->pch_cnt--; M3_UNLOCK(sc); return (0); } static int m3_pchan_setformat(kobj_t kobj, void *chdata, u_int32_t format) { struct sc_pchinfo *ch = chdata; struct sc_info *sc = ch->parent; u_int32_t data; M3_LOCK(sc); M3_DEBUG(CHANGE, ("m3_pchan_setformat(dac=%d, format=0x%x{%s-%s})\n", ch->dac_idx, format, format & (AFMT_U8|AFMT_S8) ? "8bit":"16bit", (AFMT_CHANNEL(format) > 1) ? "STEREO":"MONO")); /* mono word */ data = (AFMT_CHANNEL(format) > 1)? 0 : 1; m3_wr_assp_data(sc, ch->dac_data + SRC3_MODE_OFFSET, data); /* 8bit word */ data = ((format & AFMT_U8) || (format & AFMT_S8)) ? 1 : 0; m3_wr_assp_data(sc, ch->dac_data + SRC3_WORD_LENGTH_OFFSET, data); ch->fmt = format; M3_UNLOCK(sc); return (0); } static u_int32_t m3_pchan_setspeed(kobj_t kobj, void *chdata, u_int32_t speed) { struct sc_pchinfo *ch = chdata; struct sc_info *sc = ch->parent; u_int32_t freq; M3_LOCK(sc); M3_DEBUG(CHANGE, ("m3_pchan_setspeed(dac=%d, speed=%d)\n", ch->dac_idx, speed)); if ((freq = ((speed << 15) + 24000) / 48000) != 0) { freq--; } m3_wr_assp_data(sc, ch->dac_data + CDATA_FREQUENCY, freq); ch->spd = speed; M3_UNLOCK(sc); /* return closest possible speed */ return (speed); } static u_int32_t m3_pchan_setblocksize(kobj_t kobj, void *chdata, u_int32_t blocksize) { struct sc_pchinfo *ch = chdata; M3_DEBUG(CHANGE, ("m3_pchan_setblocksize(dac=%d, blocksize=%d)\n", ch->dac_idx, blocksize)); return (sndbuf_getblksz(ch->buffer)); } static int m3_pchan_trigger(kobj_t kobj, void *chdata, int go) { struct sc_pchinfo *ch = chdata; struct sc_info *sc = ch->parent; int ret; if (!PCMTRIG_COMMON(go)) return (0); M3_LOCK(sc); ret = m3_pchan_trigger_locked(kobj, chdata, go); M3_UNLOCK(sc); return (ret); } static int m3_chan_active(struct sc_info *sc) { int i, ret; ret = 0; for (i = 0; i < sc->pch_cnt; i++) ret += sc->pch[i].active; for (i = 0; i < sc->rch_cnt; i++) ret += sc->rch[i].active; return (ret); } static int m3_pchan_trigger_locked(kobj_t kobj, void *chdata, int go) { struct sc_pchinfo *ch = chdata; struct sc_info *sc = ch->parent; u_int32_t data; M3_LOCK_ASSERT(sc); M3_DEBUG(go == PCMTRIG_START ? CHANGE : go == PCMTRIG_STOP ? CHANGE : go == PCMTRIG_ABORT ? 
CHANGE : CALL, ("m3_pchan_trigger(dac=%d, go=0x%x{%s})\n", ch->dac_idx, go, go == PCMTRIG_START ? "PCMTRIG_START" : go == PCMTRIG_STOP ? "PCMTRIG_STOP" : go == PCMTRIG_ABORT ? "PCMTRIG_ABORT" : "ignore")); switch(go) { case PCMTRIG_START: if (ch->active) { return 0; } ch->active = 1; ch->ptr = 0; ch->prevptr = 0; sc->pch_active_cnt++; /*[[inc_timer_users]]*/ if (m3_chan_active(sc) == 1) { m3_wr_assp_data(sc, KDATA_TIMER_COUNT_RELOAD, 240); m3_wr_assp_data(sc, KDATA_TIMER_COUNT_CURRENT, 240); data = m3_rd_2(sc, HOST_INT_CTRL); m3_wr_2(sc, HOST_INT_CTRL, data | CLKRUN_GEN_ENABLE); } m3_wr_assp_data(sc, ch->dac_data + CDATA_INSTANCE_READY, 1); m3_wr_assp_data(sc, KDATA_MIXER_TASK_NUMBER, sc->pch_active_cnt); break; case PCMTRIG_STOP: case PCMTRIG_ABORT: if (ch->active == 0) { return 0; } ch->active = 0; sc->pch_active_cnt--; /* XXX should the channel be drained? */ /*[[dec_timer_users]]*/ if (m3_chan_active(sc) == 0) { m3_wr_assp_data(sc, KDATA_TIMER_COUNT_RELOAD, 0); m3_wr_assp_data(sc, KDATA_TIMER_COUNT_CURRENT, 0); data = m3_rd_2(sc, HOST_INT_CTRL); m3_wr_2(sc, HOST_INT_CTRL, data & ~CLKRUN_GEN_ENABLE); } m3_wr_assp_data(sc, ch->dac_data + CDATA_INSTANCE_READY, 0); m3_wr_assp_data(sc, KDATA_MIXER_TASK_NUMBER, sc->pch_active_cnt); break; case PCMTRIG_EMLDMAWR: /* got play irq, transfer next buffer - ignore if using dma */ case PCMTRIG_EMLDMARD: /* got rec irq, transfer next buffer - ignore if using dma */ default: break; } return 0; } static u_int32_t m3_pchan_getptr_internal(struct sc_pchinfo *ch) { struct sc_info *sc = ch->parent; u_int32_t hi, lo, bus_base, bus_crnt; bus_base = sndbuf_getbufaddr(ch->buffer); hi = m3_rd_assp_data(sc, ch->dac_data + CDATA_HOST_SRC_CURRENTH); lo = m3_rd_assp_data(sc, ch->dac_data + CDATA_HOST_SRC_CURRENTL); bus_crnt = lo | (hi << 16); M3_DEBUG(CALL, ("m3_pchan_getptr(dac=%d) result=%d\n", ch->dac_idx, bus_crnt - bus_base)); return (bus_crnt - bus_base); /* current byte offset of channel */ } static u_int32_t m3_pchan_getptr(kobj_t kobj, void *chdata) { struct sc_pchinfo *ch = chdata; struct sc_info *sc = ch->parent; u_int32_t ptr; M3_LOCK(sc); ptr = ch->ptr; M3_UNLOCK(sc); return (ptr); } static struct pcmchan_caps * m3_pchan_getcaps(kobj_t kobj, void *chdata) { struct sc_pchinfo *ch = chdata; M3_DEBUG(CALL, ("m3_pchan_getcaps(dac=%d)\n", ch->dac_idx)); return &m3_playcaps; } /* -------------------------------------------------------------------- */ /* rec channel interface */ static void * m3_rchan_init(kobj_t kobj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sc_info *sc = devinfo; struct sc_rchinfo *ch; u_int32_t bus_addr, i; int idx, data_bytes, adc_data; int dsp_in_size, dsp_out_size, dsp_in_buf, dsp_out_buf; struct data_word { u_int16_t addr, val; } rv[] = { {CDATA_LEFT_VOLUME, M3_DEFAULT_VOL}, {CDATA_RIGHT_VOLUME, M3_DEFAULT_VOL}, {SRC3_DIRECTION_OFFSET, 1}, {SRC3_DIRECTION_OFFSET + 3, 0x0000}, {SRC3_DIRECTION_OFFSET + 4, 0}, {SRC3_DIRECTION_OFFSET + 5, 0}, {SRC3_DIRECTION_OFFSET + 6, 0}, {SRC3_DIRECTION_OFFSET + 7, 0}, {SRC3_DIRECTION_OFFSET + 8, 0}, {SRC3_DIRECTION_OFFSET + 9, 0}, {SRC3_DIRECTION_OFFSET + 10, 0x8000}, {SRC3_DIRECTION_OFFSET + 11, 0xFF00}, {SRC3_DIRECTION_OFFSET + 13, 0}, {SRC3_DIRECTION_OFFSET + 14, 0}, {SRC3_DIRECTION_OFFSET + 15, 0}, {SRC3_DIRECTION_OFFSET + 16, 50}, {SRC3_DIRECTION_OFFSET + 17, 8}, {SRC3_DIRECTION_OFFSET + 18, 0}, {SRC3_DIRECTION_OFFSET + 19, 0}, {SRC3_DIRECTION_OFFSET + 20, 0}, {SRC3_DIRECTION_OFFSET + 21, 0}, {SRC3_DIRECTION_OFFSET + 22, 0xff} }; M3_LOCK(sc); idx = sc->rch_cnt; 
/* adc instance number, no active reuse! */ M3_DEBUG(CHANGE, ("m3_rchan_init(adc=%d)\n", idx)); if (dir != PCMDIR_REC) { M3_UNLOCK(sc); device_printf(sc->dev, "m3_pchan_init not PCMDIR_REC\n"); return (NULL); } data_bytes = (((MINISRC_TMP_BUFFER_SIZE & ~1) + (MINISRC_IN_BUFFER_SIZE & ~1) + (MINISRC_OUT_BUFFER_SIZE & ~1) + 4) + 255) &~ 255; adc_data = 0x1100 + (data_bytes * idx) + data_bytes/2; dsp_in_size = MINISRC_IN_BUFFER_SIZE + (0x10 * 2); dsp_out_size = MINISRC_OUT_BUFFER_SIZE - (0x10 * 2); dsp_in_buf = adc_data + (MINISRC_TMP_BUFFER_SIZE / 2); dsp_out_buf = dsp_in_buf + (dsp_in_size / 2) + 1; ch = &sc->rch[idx]; ch->adc_idx = idx; ch->adc_data = adc_data; if (ch->adc_data + data_bytes/2 >= 0x1c00) { M3_UNLOCK(sc); device_printf(sc->dev, "m3_rchan_init: revb mem exhausted\n"); return (NULL); } ch->buffer = b; ch->parent = sc; ch->channel = c; ch->fmt = SND_FORMAT(AFMT_U8, 1, 0); ch->spd = DSP_DEFAULT_SPEED; M3_UNLOCK(sc); /* XXX */ if (sndbuf_alloc(ch->buffer, sc->parent_dmat, 0, sc->bufsz) != 0) { device_printf(sc->dev, "m3_rchan_init chn_allocbuf failed\n"); return (NULL); } M3_LOCK(sc); ch->bufsize = sndbuf_getsize(ch->buffer); /* host dma buffer pointers */ bus_addr = sndbuf_getbufaddr(ch->buffer); if (bus_addr & 3) { device_printf(sc->dev, "m3_rchan_init unaligned bus_addr\n"); bus_addr = (bus_addr + 4) & ~3; } m3_wr_assp_data(sc, ch->adc_data + CDATA_HOST_SRC_ADDRL, LO(bus_addr)); m3_wr_assp_data(sc, ch->adc_data + CDATA_HOST_SRC_ADDRH, HI(bus_addr)); m3_wr_assp_data(sc, ch->adc_data + CDATA_HOST_SRC_END_PLUS_1L, LO(bus_addr + ch->bufsize)); m3_wr_assp_data(sc, ch->adc_data + CDATA_HOST_SRC_END_PLUS_1H, HI(bus_addr + ch->bufsize)); m3_wr_assp_data(sc, ch->adc_data + CDATA_HOST_SRC_CURRENTL, LO(bus_addr)); m3_wr_assp_data(sc, ch->adc_data + CDATA_HOST_SRC_CURRENTH, HI(bus_addr)); /* dsp buffers */ m3_wr_assp_data(sc, ch->adc_data + CDATA_IN_BUF_BEGIN, dsp_in_buf); m3_wr_assp_data(sc, ch->adc_data + CDATA_IN_BUF_END_PLUS_1, dsp_in_buf + dsp_in_size/2); m3_wr_assp_data(sc, ch->adc_data + CDATA_IN_BUF_HEAD, dsp_in_buf); m3_wr_assp_data(sc, ch->adc_data + CDATA_IN_BUF_TAIL, dsp_in_buf); m3_wr_assp_data(sc, ch->adc_data + CDATA_OUT_BUF_BEGIN, dsp_out_buf); m3_wr_assp_data(sc, ch->adc_data + CDATA_OUT_BUF_END_PLUS_1, dsp_out_buf + dsp_out_size/2); m3_wr_assp_data(sc, ch->adc_data + CDATA_OUT_BUF_HEAD, dsp_out_buf); m3_wr_assp_data(sc, ch->adc_data + CDATA_OUT_BUF_TAIL, dsp_out_buf); /* some per client initializers */ m3_wr_assp_data(sc, ch->adc_data + SRC3_DIRECTION_OFFSET + 12, ch->adc_data + 40 + 8); m3_wr_assp_data(sc, ch->adc_data + CDATA_DMA_CONTROL, DMACONTROL_DIRECTION + DMACONTROL_AUTOREPEAT + DMAC_PAGE3_SELECTOR + DMAC_BLOCKF_SELECTOR); /* set an armload of static initializers */ for(i = 0 ; i < (sizeof(rv) / sizeof(rv[0])) ; i++) { m3_wr_assp_data(sc, ch->adc_data + rv[i].addr, rv[i].val); } /* put us in the packed task lists */ m3_wr_assp_data(sc, KDATA_INSTANCE0_MINISRC + (sc->pch_cnt + sc->rch_cnt), ch->adc_data >> DP_SHIFT_COUNT); m3_wr_assp_data(sc, KDATA_DMA_XFER0 + (sc->pch_cnt + sc->rch_cnt), ch->adc_data >> DP_SHIFT_COUNT); m3_wr_assp_data(sc, KDATA_ADC1_XFER0 + sc->rch_cnt, ch->adc_data >> DP_SHIFT_COUNT); /* gotta start before stop */ m3_rchan_trigger_locked(NULL, ch, PCMTRIG_START); /* stop on init */ m3_rchan_trigger_locked(NULL, ch, PCMTRIG_STOP); sc->rch_cnt++; M3_UNLOCK(sc); return (ch); } static int m3_rchan_free(kobj_t kobj, void *chdata) { struct sc_rchinfo *ch = chdata; struct sc_info *sc = ch->parent; M3_LOCK(sc); M3_DEBUG(CHANGE, 
("m3_rchan_free(adc=%d)\n", ch->adc_idx)); /* * should remove this exact instance from the packed lists, but all * are released at once (and in a stopped state) so this is ok. */ m3_wr_assp_data(sc, KDATA_INSTANCE0_MINISRC + (sc->rch_cnt - 1) + sc->pch_cnt, 0); m3_wr_assp_data(sc, KDATA_DMA_XFER0 + (sc->rch_cnt - 1) + sc->pch_cnt, 0); m3_wr_assp_data(sc, KDATA_ADC1_XFER0 + (sc->rch_cnt - 1), 0); sc->rch_cnt--; M3_UNLOCK(sc); return (0); } static int m3_rchan_setformat(kobj_t kobj, void *chdata, u_int32_t format) { struct sc_rchinfo *ch = chdata; struct sc_info *sc = ch->parent; u_int32_t data; M3_LOCK(sc); M3_DEBUG(CHANGE, ("m3_rchan_setformat(dac=%d, format=0x%x{%s-%s})\n", ch->adc_idx, format, format & (AFMT_U8|AFMT_S8) ? "8bit":"16bit", (AFMT_CHANNEL(format) > 1) ? "STEREO":"MONO")); /* mono word */ data = (AFMT_CHANNEL(format) > 1) ? 0 : 1; m3_wr_assp_data(sc, ch->adc_data + SRC3_MODE_OFFSET, data); /* 8bit word */ data = ((format & AFMT_U8) || (format & AFMT_S8)) ? 1 : 0; m3_wr_assp_data(sc, ch->adc_data + SRC3_WORD_LENGTH_OFFSET, data); ch->fmt = format; M3_UNLOCK(sc); return (0); } static u_int32_t m3_rchan_setspeed(kobj_t kobj, void *chdata, u_int32_t speed) { struct sc_rchinfo *ch = chdata; struct sc_info *sc = ch->parent; u_int32_t freq; M3_LOCK(sc); M3_DEBUG(CHANGE, ("m3_rchan_setspeed(adc=%d, speed=%d)\n", ch->adc_idx, speed)); if ((freq = ((speed << 15) + 24000) / 48000) != 0) { freq--; } m3_wr_assp_data(sc, ch->adc_data + CDATA_FREQUENCY, freq); ch->spd = speed; M3_UNLOCK(sc); /* return closest possible speed */ return (speed); } static u_int32_t m3_rchan_setblocksize(kobj_t kobj, void *chdata, u_int32_t blocksize) { struct sc_rchinfo *ch = chdata; M3_DEBUG(CHANGE, ("m3_rchan_setblocksize(adc=%d, blocksize=%d)\n", ch->adc_idx, blocksize)); return (sndbuf_getblksz(ch->buffer)); } static int m3_rchan_trigger(kobj_t kobj, void *chdata, int go) { struct sc_rchinfo *ch = chdata; struct sc_info *sc = ch->parent; int ret; if (!PCMTRIG_COMMON(go)) return (0); M3_LOCK(sc); ret = m3_rchan_trigger_locked(kobj, chdata, go); M3_UNLOCK(sc); return (ret); } static int m3_rchan_trigger_locked(kobj_t kobj, void *chdata, int go) { struct sc_rchinfo *ch = chdata; struct sc_info *sc = ch->parent; u_int32_t data; M3_LOCK_ASSERT(sc); M3_DEBUG(go == PCMTRIG_START ? CHANGE : go == PCMTRIG_STOP ? CHANGE : go == PCMTRIG_ABORT ? CHANGE : CALL, ("m3_rchan_trigger(adc=%d, go=0x%x{%s})\n", ch->adc_idx, go, go == PCMTRIG_START ? "PCMTRIG_START" : go == PCMTRIG_STOP ? "PCMTRIG_STOP" : go == PCMTRIG_ABORT ? 
"PCMTRIG_ABORT" : "ignore")); switch(go) { case PCMTRIG_START: if (ch->active) { return 0; } ch->active = 1; ch->ptr = 0; ch->prevptr = 0; /*[[inc_timer_users]]*/ if (m3_chan_active(sc) == 1) { m3_wr_assp_data(sc, KDATA_TIMER_COUNT_RELOAD, 240); m3_wr_assp_data(sc, KDATA_TIMER_COUNT_CURRENT, 240); data = m3_rd_2(sc, HOST_INT_CTRL); m3_wr_2(sc, HOST_INT_CTRL, data | CLKRUN_GEN_ENABLE); } m3_wr_assp_data(sc, KDATA_ADC1_REQUEST, 1); m3_wr_assp_data(sc, ch->adc_data + CDATA_INSTANCE_READY, 1); break; case PCMTRIG_STOP: case PCMTRIG_ABORT: if (ch->active == 0) { return 0; } ch->active = 0; /*[[dec_timer_users]]*/ if (m3_chan_active(sc) == 0) { m3_wr_assp_data(sc, KDATA_TIMER_COUNT_RELOAD, 0); m3_wr_assp_data(sc, KDATA_TIMER_COUNT_CURRENT, 0); data = m3_rd_2(sc, HOST_INT_CTRL); m3_wr_2(sc, HOST_INT_CTRL, data & ~CLKRUN_GEN_ENABLE); } m3_wr_assp_data(sc, ch->adc_data + CDATA_INSTANCE_READY, 0); m3_wr_assp_data(sc, KDATA_ADC1_REQUEST, 0); break; case PCMTRIG_EMLDMAWR: /* got play irq, transfer next buffer - ignore if using dma */ case PCMTRIG_EMLDMARD: /* got rec irq, transfer next buffer - ignore if using dma */ default: break; } return 0; } static u_int32_t m3_rchan_getptr_internal(struct sc_rchinfo *ch) { struct sc_info *sc = ch->parent; u_int32_t hi, lo, bus_base, bus_crnt; bus_base = sndbuf_getbufaddr(ch->buffer); hi = m3_rd_assp_data(sc, ch->adc_data + CDATA_HOST_SRC_CURRENTH); lo = m3_rd_assp_data(sc, ch->adc_data + CDATA_HOST_SRC_CURRENTL); bus_crnt = lo | (hi << 16); M3_DEBUG(CALL, ("m3_rchan_getptr(adc=%d) result=%d\n", ch->adc_idx, bus_crnt - bus_base)); return (bus_crnt - bus_base); /* current byte offset of channel */ } static u_int32_t m3_rchan_getptr(kobj_t kobj, void *chdata) { struct sc_rchinfo *ch = chdata; struct sc_info *sc = ch->parent; u_int32_t ptr; M3_LOCK(sc); ptr = ch->ptr; M3_UNLOCK(sc); return (ptr); } static struct pcmchan_caps * m3_rchan_getcaps(kobj_t kobj, void *chdata) { struct sc_rchinfo *ch = chdata; M3_DEBUG(CALL, ("m3_rchan_getcaps(adc=%d)\n", ch->adc_idx)); return &m3_reccaps; } /* -------------------------------------------------------------------- */ /* The interrupt handler */ static void m3_intr(void *p) { struct sc_info *sc = (struct sc_info *)p; struct sc_pchinfo *pch; struct sc_rchinfo *rch; u_int32_t status, ctl, i, delta; M3_DEBUG(INTR, ("m3_intr\n")); M3_LOCK(sc); status = m3_rd_1(sc, HOST_INT_STATUS); if (!status) { M3_UNLOCK(sc); return; } m3_wr_1(sc, HOST_INT_STATUS, 0xff); /* ack the int? 
*/
    if (status & HV_INT_PENDING) {
        u_int8_t event;

        event = m3_rd_1(sc, HW_VOL_COUNTER_MASTER);
        switch (event) {
        case 0x99:
            mixer_hwvol_mute(sc->dev);
            break;
        case 0xaa:
            mixer_hwvol_step(sc->dev, 1, 1);
            break;
        case 0x66:
            mixer_hwvol_step(sc->dev, -1, -1);
            break;
        case 0x88:
            break;
        default:
            device_printf(sc->dev, "Unknown HWVOL event\n");
        }
        m3_wr_1(sc, HW_VOL_COUNTER_MASTER, 0x88);
    }

    if (status & ASSP_INT_PENDING) {
        ctl = m3_rd_1(sc, ASSP_CONTROL_B);
        if (!(ctl & STOP_ASSP_CLOCK)) {
            ctl = m3_rd_1(sc, ASSP_HOST_INT_STATUS);
            if (ctl & DSP2HOST_REQ_TIMER) {
                m3_wr_1(sc, ASSP_HOST_INT_STATUS,
                    DSP2HOST_REQ_TIMER);
                /*[[ess_update_ptr]]*/
                goto m3_handle_channel_intr;
            }
        }
    }
    goto m3_handle_channel_intr_out;

m3_handle_channel_intr:
    for (i=0 ; i<sc->pch_cnt ; i++) {
        pch = &sc->pch[i];
        if (pch->active) {
            pch->ptr = m3_pchan_getptr_internal(pch);
            delta = pch->bufsize + pch->ptr - pch->prevptr;
            delta %= pch->bufsize;
            if (delta < sndbuf_getblksz(pch->buffer))
                continue;
            pch->prevptr = pch->ptr;
            M3_UNLOCK(sc);
            chn_intr(pch->channel);
            M3_LOCK(sc);
        }
    }
    for (i=0 ; i<sc->rch_cnt ; i++) {
        rch = &sc->rch[i];
        if (rch->active) {
            rch->ptr = m3_rchan_getptr_internal(rch);
            delta = rch->bufsize + rch->ptr - rch->prevptr;
            delta %= rch->bufsize;
            if (delta < sndbuf_getblksz(rch->buffer))
                continue;
            rch->prevptr = rch->ptr;
            M3_UNLOCK(sc);
            chn_intr(rch->channel);
            M3_LOCK(sc);
        }
    }
m3_handle_channel_intr_out:
    M3_UNLOCK(sc);
}

/* -------------------------------------------------------------------- */
/* stuff */

static int
m3_power(struct sc_info *sc, int state)
{
    u_int32_t data;

    M3_DEBUG(CHANGE, ("m3_power(%d)\n", state));
    M3_LOCK_ASSERT(sc);

    data = pci_read_config(sc->dev, 0x34, 1);
    if (pci_read_config(sc->dev, data, 1) == 1) {
        pci_write_config(sc->dev, data + 4, state, 1);
    }

    return 0;
}

static int
m3_init(struct sc_info *sc)
{
    u_int32_t data, i, size;
    u_int8_t reset_state;

    M3_LOCK_ASSERT(sc);
    M3_DEBUG(CHANGE, ("m3_init\n"));

    /* disable legacy emulations. */
    data = pci_read_config(sc->dev, PCI_LEGACY_AUDIO_CTRL, 2);
    data |= DISABLE_LEGACY;
    pci_write_config(sc->dev, PCI_LEGACY_AUDIO_CTRL, data, 2);

    m3_config(sc);

    reset_state = m3_assp_halt(sc);

    m3_codec_reset(sc);

    /* [m3_assp_init] */

    /* zero kernel data */
    size = REV_B_DATA_MEMORY_UNIT_LENGTH * NUM_UNITS_KERNEL_DATA;
    for(i = 0 ; i < size / 2 ; i++) {
        m3_wr_assp_data(sc, KDATA_BASE_ADDR + i, 0);
    }
    /* zero mixer data? */
    size = REV_B_DATA_MEMORY_UNIT_LENGTH * NUM_UNITS_KERNEL_DATA;
    for(i = 0 ; i < size / 2 ; i++) {
        m3_wr_assp_data(sc, KDATA_BASE_ADDR2 + i, 0);
    }
    /* init dma pointer */
    m3_wr_assp_data(sc, KDATA_CURRENT_DMA, KDATA_DMA_XFER0);

    /* write kernel into code memory */
    size = sizeof(gaw_kernel_vect_code);
    for(i = 0 ; i < size / 2; i++) {
        m3_wr_assp_code(sc, REV_B_CODE_MEMORY_BEGIN + i,
            gaw_kernel_vect_code[i]);
    }

    /*
     * We only have this one client and we know that 0x400 is free in
     * our kernel's mem map, so lets just drop it there. It seems that
     * the minisrc doesn't need vectors, so we won't bother with them..
     */
    size = sizeof(gaw_minisrc_code_0400);
    for(i = 0 ; i < size / 2; i++) {
        m3_wr_assp_code(sc, 0x400 + i, gaw_minisrc_code_0400[i]);
    }

    /* write the coefficients for the low pass filter?
*/ size = sizeof(minisrc_lpf); for(i = 0; i < size / 2 ; i++) { m3_wr_assp_code(sc,0x400 + MINISRC_COEF_LOC + i, minisrc_lpf[i]); } m3_wr_assp_code(sc, 0x400 + MINISRC_COEF_LOC + size, 0x8000); /* the minisrc is the only thing on our task list */ m3_wr_assp_data(sc, KDATA_TASK0, 0x400); /* init the mixer number */ m3_wr_assp_data(sc, KDATA_MIXER_TASK_NUMBER, 0); /* extreme kernel master volume */ m3_wr_assp_data(sc, KDATA_DAC_LEFT_VOLUME, M3_DEFAULT_VOL); m3_wr_assp_data(sc, KDATA_DAC_RIGHT_VOLUME, M3_DEFAULT_VOL); m3_amp_enable(sc); /* [m3_assp_client_init] (only one client at index 0) */ for (i=0x1100 ; i<0x1c00 ; i++) { m3_wr_assp_data(sc, i, 0); /* zero entire dac/adc area */ } /* [m3_assp_continue] */ m3_wr_1(sc, DSP_PORT_CONTROL_REG_B, reset_state | REGB_ENABLE_RESET); return 0; } static int m3_uninit(struct sc_info *sc) { M3_DEBUG(CHANGE, ("m3_uninit\n")); return 0; } /* -------------------------------------------------------------------- */ /* Probe and attach the card */ static int m3_pci_probe(device_t dev) { struct m3_card_type *card; M3_DEBUG(CALL, ("m3_pci_probe(0x%x)\n", pci_get_devid(dev))); for (card = m3_card_types ; card->pci_id ; card++) { if (pci_get_devid(dev) == card->pci_id) { device_set_desc(dev, card->name); return BUS_PROBE_DEFAULT; } } return ENXIO; } static int m3_pci_attach(device_t dev) { struct sc_info *sc; struct ac97_info *codec = NULL; char status[SND_STATUSLEN]; struct m3_card_type *card; int i, len, dacn, adcn; M3_DEBUG(CALL, ("m3_pci_attach\n")); sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO); sc->dev = dev; sc->type = pci_get_devid(dev); sc->sc_lock = snd_mtxcreate(device_get_nameunit(dev), "snd_maestro3 softc"); for (card = m3_card_types ; card->pci_id ; card++) { if (sc->type == card->pci_id) { sc->which = card->which; sc->delay1 = card->delay1; sc->delay2 = card->delay2; break; } } if (resource_int_value(device_get_name(dev), device_get_unit(dev), "dac", &i) == 0) { if (i < 1) dacn = 1; else if (i > M3_PCHANS) dacn = M3_PCHANS; else dacn = i; } else dacn = M3_PCHANS; adcn = M3_RCHANS; pci_enable_busmaster(dev); sc->regid = PCIR_BAR(0); sc->regtype = SYS_RES_MEMORY; sc->reg = bus_alloc_resource_any(dev, sc->regtype, &sc->regid, RF_ACTIVE); if (!sc->reg) { sc->regtype = SYS_RES_IOPORT; sc->reg = bus_alloc_resource_any(dev, sc->regtype, &sc->regid, RF_ACTIVE); } if (!sc->reg) { device_printf(dev, "unable to allocate register space\n"); goto bad; } sc->st = rman_get_bustag(sc->reg); sc->sh = rman_get_bushandle(sc->reg); sc->irqid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid, RF_ACTIVE | RF_SHAREABLE); if (!sc->irq) { device_printf(dev, "unable to allocate interrupt\n"); goto bad; } if (snd_setup_intr(dev, sc->irq, INTR_MPSAFE, m3_intr, sc, &sc->ih)) { device_printf(dev, "unable to setup interrupt\n"); goto bad; } sc->bufsz = pcm_getbuffersize(dev, M3_BUFSIZE_MIN, M3_BUFSIZE_DEFAULT, M3_BUFSIZE_MAX); if (bus_dma_tag_create( bus_get_dma_tag(dev), /* parent */ 2, 0, /* alignment, boundary */ M3_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ sc->bufsz, /* maxsize */ 1, /* nsegments */ 0x3ffff, /* maxsegz */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &sc->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto bad; } M3_LOCK(sc); m3_power(sc, 0); /* power up */ /* init chip */ i = m3_init(sc); M3_UNLOCK(sc); if (i == -1) { device_printf(dev, "unable to initialize the card\n"); goto bad; } /* create/init mixer */ codec = AC97_CREATE(dev, sc, 
m3_codec);
    if (codec == NULL) {
        device_printf(dev, "ac97_create error\n");
        goto bad;
    }
    if (mixer_init(dev, ac97_getmixerclass(), codec)) {
        device_printf(dev, "mixer_init error\n");
        goto bad;
    }

    m3_enable_ints(sc);

    if (pcm_register(dev, sc, dacn, adcn)) {
        device_printf(dev, "pcm_register error\n");
        goto bad;
    }
    for (i=0 ; i<dacn ; i++)
        pcm_addchan(dev, PCMDIR_PLAY, &m3_pch_class, sc);
    for (i=0 ; i<adcn ; i++)
        pcm_addchan(dev, PCMDIR_REC, &m3_rch_class, sc);

    snprintf(status, SND_STATUSLEN, "at %s 0x%jx irq %jd %s",
        (sc->regtype == SYS_RES_IOPORT)? "io" : "memory",
        rman_get_start(sc->reg), rman_get_start(sc->irq),
        PCM_KLDSTRING(snd_maestro3));
    if (pcm_setstatus(dev, status)) {
        device_printf(dev, "attach: pcm_setstatus error\n");
        goto bad;
    }

    mixer_hwvol_init(dev);

    /* Create the buffer for saving the card state during suspend */
    len = sizeof(u_int16_t) * (REV_B_CODE_MEMORY_LENGTH +
        REV_B_DATA_MEMORY_LENGTH);
    sc->savemem = (u_int16_t*)malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);

    return 0;

bad:
    if (codec)
        ac97_destroy(codec);
    if (sc->ih)
        bus_teardown_intr(dev, sc->irq, sc->ih);
    if (sc->irq)
        bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq);
    if (sc->reg)
        bus_release_resource(dev, sc->regtype, sc->regid, sc->reg);
    if (sc->parent_dmat)
        bus_dma_tag_destroy(sc->parent_dmat);
    if (sc->sc_lock)
        snd_mtxfree(sc->sc_lock);
    free(sc, M_DEVBUF);
    return ENXIO;
}

static int
m3_pci_detach(device_t dev)
{
    struct sc_info *sc = pcm_getdevinfo(dev);
    int r;

    M3_DEBUG(CALL, ("m3_pci_detach\n"));

    if ((r = pcm_unregister(dev)) != 0) {
        return r;
    }

    M3_LOCK(sc);
    m3_uninit(sc); /* shutdown chip */
    m3_power(sc, 3); /* power off */
    M3_UNLOCK(sc);

    bus_teardown_intr(dev, sc->irq, sc->ih);
    bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq);
    bus_release_resource(dev, sc->regtype, sc->regid, sc->reg);
    bus_dma_tag_destroy(sc->parent_dmat);

    free(sc->savemem, M_DEVBUF);
    snd_mtxfree(sc->sc_lock);
    free(sc, M_DEVBUF);

    return 0;
}

static int
m3_pci_suspend(device_t dev)
{
    struct sc_info *sc = pcm_getdevinfo(dev);
    int i, index = 0;

    M3_DEBUG(CHANGE, ("m3_pci_suspend\n"));

    M3_LOCK(sc);
    for (i=0 ; i<sc->pch_cnt ; i++) {
        if (sc->pch[i].active) {
            m3_pchan_trigger_locked(NULL, &sc->pch[i], PCMTRIG_STOP);
        }
    }
    for (i=0 ; i<sc->rch_cnt ; i++) {
        if (sc->rch[i].active) {
            m3_rchan_trigger_locked(NULL, &sc->rch[i], PCMTRIG_STOP);
        }
    }
    DELAY(10 * 1000); /* give things a chance to stop */

    /* Disable interrupts */
    m3_wr_2(sc, HOST_INT_CTRL, 0);
    m3_wr_1(sc, ASSP_CONTROL_C, 0);

    m3_assp_halt(sc);

    /* Save the state of the ASSP */
    for (i = REV_B_CODE_MEMORY_BEGIN; i <= REV_B_CODE_MEMORY_END; i++)
        sc->savemem[index++] = m3_rd_assp_code(sc, i);
    for (i = REV_B_DATA_MEMORY_BEGIN; i <= REV_B_DATA_MEMORY_END; i++)
        sc->savemem[index++] = m3_rd_assp_data(sc, i);

    /* Power down the card to D3 state */
    m3_power(sc, 3);
    M3_UNLOCK(sc);

    return 0;
}

static int
m3_pci_resume(device_t dev)
{
    struct sc_info *sc = pcm_getdevinfo(dev);
    int i, index = 0;
    u_int8_t reset_state;

    M3_DEBUG(CHANGE, ("m3_pci_resume\n"));

    M3_LOCK(sc);
    /* Power the card back to D0 */
    m3_power(sc, 0);

    m3_config(sc);

    reset_state = m3_assp_halt(sc);

    m3_codec_reset(sc);

    /* Restore the ASSP state */
    for (i = REV_B_CODE_MEMORY_BEGIN; i <= REV_B_CODE_MEMORY_END; i++)
        m3_wr_assp_code(sc, i, sc->savemem[index++]);
    for (i = REV_B_DATA_MEMORY_BEGIN; i <= REV_B_DATA_MEMORY_END; i++)
        m3_wr_assp_data(sc, i, sc->savemem[index++]);

    /* Restart the DMA engine */
    m3_wr_assp_data(sc, KDATA_DMA_ACTIVE, 0);

    /* [m3_assp_continue] */
    m3_wr_1(sc, DSP_PORT_CONTROL_REG_B, reset_state | REGB_ENABLE_RESET);

    m3_amp_enable(sc);

    m3_enable_ints(sc);
    M3_UNLOCK(sc);

    /* XXX */
    if (mixer_reinit(dev) == -1) {
        device_printf(dev, "unable to reinitialize the mixer\n");
        return (ENXIO);
    }

    M3_LOCK(sc);
    /* Turn the channels back on */
    for (i=0 ; i<sc->pch_cnt ; i++) {
        if (sc->pch[i].active) {
            m3_pchan_trigger_locked(NULL, &sc->pch[i], PCMTRIG_START);
        }
    }
    for (i=0 ; i<sc->rch_cnt ; i++) {
        if (sc->rch[i].active) {
            m3_rchan_trigger_locked(NULL, &sc->rch[i], PCMTRIG_START);
        }
    }
    M3_UNLOCK(sc);

    return 0;
}

static int
m3_pci_shutdown(device_t dev)
{
    struct sc_info *sc = pcm_getdevinfo(dev);

    M3_DEBUG(CALL, ("m3_pci_shutdown\n"));

    M3_LOCK(sc);
    m3_power(sc, 3); /* power off */
    M3_UNLOCK(sc);

    return 0;
}

static u_int8_t
m3_assp_halt(struct sc_info *sc)
{
    u_int8_t data, reset_state;

    M3_LOCK_ASSERT(sc);

    data = m3_rd_1(sc, DSP_PORT_CONTROL_REG_B);
    reset_state = data & ~REGB_STOP_CLOCK; /* remember for continue */
    DELAY(10 * 1000);
    m3_wr_1(sc, DSP_PORT_CONTROL_REG_B, reset_state & ~REGB_ENABLE_RESET);
    DELAY(10 * 1000); /* necessary? */

    return reset_state;
}

static void
m3_config(struct sc_info *sc)
{
    u_int32_t data, hv_cfg;
    int hint;

    M3_LOCK_ASSERT(sc);

    M3_UNLOCK(sc);
    /*
     * The volume buttons can be wired up via two different sets of pins.
     * This presents a problem since we can't tell which way it's
     * configured. Allow the user to set a hint in order to twiddle
     * the proper bits.
     */
    if (resource_int_value(device_get_name(sc->dev),
        device_get_unit(sc->dev), "hwvol_config", &hint) == 0)
        hv_cfg = (hint > 0) ? HV_BUTTON_FROM_GD : 0;
    else
        hv_cfg = HV_BUTTON_FROM_GD;
    M3_LOCK(sc);

    data = pci_read_config(sc->dev, PCI_ALLEGRO_CONFIG, 4);
    data &= ~HV_BUTTON_FROM_GD;
    data |= REDUCED_DEBOUNCE | HV_CTRL_ENABLE | hv_cfg;
    data |= PM_CTRL_ENABLE | CLK_DIV_BY_49 | USE_PCI_TIMING;
    pci_write_config(sc->dev, PCI_ALLEGRO_CONFIG, data, 4);

    m3_wr_1(sc, ASSP_CONTROL_B, RESET_ASSP);
    data = pci_read_config(sc->dev, PCI_ALLEGRO_CONFIG, 4);
    data &= ~INT_CLK_SELECT;
    if (sc->which == ESS_MAESTRO3) {
        data &= ~INT_CLK_MULT_ENABLE;
        data |= INT_CLK_SRC_NOT_PCI;
    }
    data &= ~(CLK_MULT_MODE_SELECT | CLK_MULT_MODE_SELECT_2);
    pci_write_config(sc->dev, PCI_ALLEGRO_CONFIG, data, 4);

    if (sc->which == ESS_ALLEGRO_1) {
        data = pci_read_config(sc->dev, PCI_USER_CONFIG, 4);
        data |= IN_CLK_12MHZ_SELECT;
        pci_write_config(sc->dev, PCI_USER_CONFIG, data, 4);
    }

    data = m3_rd_1(sc, ASSP_CONTROL_A);
    data &= ~(DSP_CLK_36MHZ_SELECT | ASSP_CLK_49MHZ_SELECT);
    data |= ASSP_CLK_49MHZ_SELECT; /*XXX assumes 49MHZ dsp XXX*/
    data |= ASSP_0_WS_ENABLE;
    m3_wr_1(sc, ASSP_CONTROL_A, data);

    m3_wr_1(sc, ASSP_CONTROL_B, RUN_ASSP);
}

static void
m3_enable_ints(struct sc_info *sc)
{
    u_int8_t data;

    m3_wr_2(sc, HOST_INT_CTRL, ASSP_INT_ENABLE | HV_INT_ENABLE);
    data = m3_rd_1(sc, ASSP_CONTROL_C);
    m3_wr_1(sc, ASSP_CONTROL_C, data | ASSP_HOST_INT_ENABLE);
}

static void
m3_amp_enable(struct sc_info *sc)
{
    u_int32_t gpo, polarity_port, polarity;
    u_int16_t data;

    M3_LOCK_ASSERT(sc);

    switch (sc->which) {
    case ESS_ALLEGRO_1:
        polarity_port = 0x1800;
        break;
    case ESS_MAESTRO3:
        polarity_port = 0x1100;
        break;
    default:
        panic("bad sc->which");
    }

    gpo = (polarity_port >> 8) & 0x0f;
    polarity = polarity_port >> 12;
    polarity = !polarity; /* enable */
    polarity = polarity << gpo;
    gpo = 1 << gpo;

    m3_wr_2(sc, GPIO_MASK, ~gpo);
    data = m3_rd_2(sc, GPIO_DIRECTION);
    m3_wr_2(sc, GPIO_DIRECTION, data | gpo);
    data = GPO_SECONDARY_AC97 | GPO_PRIMARY_AC97 | polarity;
    m3_wr_2(sc, GPIO_DATA, data);
    m3_wr_2(sc, GPIO_MASK, ~0);
}

static void
m3_codec_reset(struct sc_info *sc)
{
    u_int16_t data, dir;
    int retry = 0;

    M3_LOCK_ASSERT(sc);

    do {
        data = m3_rd_2(sc, GPIO_DIRECTION);
        dir = data | 0x10; /* assuming pci bus master?
*/ /* [[remote_codec_config]] */ data = m3_rd_2(sc, RING_BUS_CTRL_B); m3_wr_2(sc, RING_BUS_CTRL_B, data & ~SECOND_CODEC_ID_MASK); data = m3_rd_2(sc, SDO_OUT_DEST_CTRL); m3_wr_2(sc, SDO_OUT_DEST_CTRL, data & ~COMMAND_ADDR_OUT); data = m3_rd_2(sc, SDO_IN_DEST_CTRL); m3_wr_2(sc, SDO_IN_DEST_CTRL, data & ~STATUS_ADDR_IN); m3_wr_2(sc, RING_BUS_CTRL_A, IO_SRAM_ENABLE); DELAY(20); m3_wr_2(sc, GPIO_DIRECTION, dir & ~GPO_PRIMARY_AC97); m3_wr_2(sc, GPIO_MASK, ~GPO_PRIMARY_AC97); m3_wr_2(sc, GPIO_DATA, 0); m3_wr_2(sc, GPIO_DIRECTION, dir | GPO_PRIMARY_AC97); DELAY(sc->delay1 * 1000); /*delay1 (ALLEGRO:50, MAESTRO3:20)*/ m3_wr_2(sc, GPIO_DATA, GPO_PRIMARY_AC97); DELAY(5); m3_wr_2(sc, RING_BUS_CTRL_A, IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE); m3_wr_2(sc, GPIO_MASK, ~0); DELAY(sc->delay2 * 1000); /*delay2 (ALLEGRO:800, MAESTRO3:500)*/ /* [[try read vendor]] */ data = m3_rdcd(NULL, sc, 0x7c); if ((data == 0) || (data == 0xffff)) { retry++; if (retry > 3) { device_printf(sc->dev, "Codec reset failed\n"); break; } device_printf(sc->dev, "Codec reset retry\n"); } else retry = 0; } while (retry); } static device_method_t m3_methods[] = { DEVMETHOD(device_probe, m3_pci_probe), DEVMETHOD(device_attach, m3_pci_attach), DEVMETHOD(device_detach, m3_pci_detach), DEVMETHOD(device_suspend, m3_pci_suspend), DEVMETHOD(device_resume, m3_pci_resume), DEVMETHOD(device_shutdown, m3_pci_shutdown), { 0, 0 } }; static driver_t m3_driver = { "pcm", m3_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_maestro3, pci, m3_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_maestro3, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_maestro3, 1); Index: head/sys/dev/sound/pcm/dsp.c =================================================================== --- head/sys/dev/sound/pcm/dsp.c (revision 336756) +++ head/sys/dev/sound/pcm/dsp.c (revision 336757) @@ -1,3369 +1,3369 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005-2009 Ariff Abdullah * Portions Copyright (c) Ryan Beasley - GSoC 2006 * Copyright (c) 1999 Cameron Grant * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include #include #include #include SND_DECLARE_FILE("$FreeBSD$"); static int dsp_mmap_allow_prot_exec = 0; SYSCTL_INT(_hw_snd, OID_AUTO, compat_linux_mmap, CTLFLAG_RWTUN, &dsp_mmap_allow_prot_exec, 0, "linux mmap compatibility (-1=force disable 0=auto 1=force enable)"); static int dsp_basename_clone = 1; SYSCTL_INT(_hw_snd, OID_AUTO, basename_clone, CTLFLAG_RWTUN, &dsp_basename_clone, 0, "DSP basename cloning (0: Disable; 1: Enabled)"); struct dsp_cdevinfo { struct pcm_channel *rdch, *wrch; struct pcm_channel *volch; int busy, simplex; TAILQ_ENTRY(dsp_cdevinfo) link; }; #define PCM_RDCH(x) (((struct dsp_cdevinfo *)(x)->si_drv1)->rdch) #define PCM_WRCH(x) (((struct dsp_cdevinfo *)(x)->si_drv1)->wrch) #define PCM_VOLCH(x) (((struct dsp_cdevinfo *)(x)->si_drv1)->volch) #define PCM_SIMPLEX(x) (((struct dsp_cdevinfo *)(x)->si_drv1)->simplex) #define DSP_CDEVINFO_CACHESIZE 8 #define DSP_REGISTERED(x, y) (PCM_REGISTERED(x) && \ (y) != NULL && (y)->si_drv1 != NULL) #define OLDPCM_IOCTL static d_open_t dsp_open; static d_close_t dsp_close; static d_read_t dsp_read; static d_write_t dsp_write; static d_ioctl_t dsp_ioctl; static d_poll_t dsp_poll; static d_mmap_t dsp_mmap; static d_mmap_single_t dsp_mmap_single; struct cdevsw dsp_cdevsw = { .d_version = D_VERSION, .d_open = dsp_open, .d_close = dsp_close, .d_read = dsp_read, .d_write = dsp_write, .d_ioctl = dsp_ioctl, .d_poll = dsp_poll, .d_mmap = dsp_mmap, .d_mmap_single = dsp_mmap_single, .d_name = "dsp", }; static eventhandler_tag dsp_ehtag = NULL; static int dsp_umax = -1; static int dsp_cmax = -1; static int dsp_oss_syncgroup(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_syncgroup *group); static int dsp_oss_syncstart(int sg_id); static int dsp_oss_policy(struct pcm_channel *wrch, struct pcm_channel *rdch, int policy); static int dsp_oss_cookedmode(struct pcm_channel *wrch, struct pcm_channel *rdch, int enabled); static int dsp_oss_getchnorder(struct pcm_channel *wrch, struct pcm_channel *rdch, unsigned long long *map); static int dsp_oss_setchnorder(struct pcm_channel *wrch, struct pcm_channel *rdch, unsigned long long *map); static int dsp_oss_getchannelmask(struct pcm_channel *wrch, struct pcm_channel *rdch, int *mask); #ifdef OSSV4_EXPERIMENT static int dsp_oss_getlabel(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_label_t *label); static int dsp_oss_setlabel(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_label_t *label); static int dsp_oss_getsong(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_longname_t *song); static int dsp_oss_setsong(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_longname_t *song); static int dsp_oss_setname(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_longname_t *name); #endif static struct snddev_info * dsp_get_info(struct cdev *dev) { return (devclass_get_softc(pcm_devclass, PCMUNIT(dev))); } static uint32_t dsp_get_flags(struct cdev *dev) { device_t bdev; bdev = devclass_get_device(pcm_devclass, PCMUNIT(dev)); return ((bdev != NULL) ? pcm_getflags(bdev) : 0xffffffff); } static void dsp_set_flags(struct cdev *dev, uint32_t flags) { device_t bdev; bdev = devclass_get_device(pcm_devclass, PCMUNIT(dev)); if (bdev != NULL) pcm_setflags(bdev, flags); } /* * return the channels associated with an open device instance. * lock channels specified. 
*/ static int getchns(struct cdev *dev, struct pcm_channel **rdch, struct pcm_channel **wrch, uint32_t prio) { struct snddev_info *d; struct pcm_channel *ch; uint32_t flags; if (PCM_SIMPLEX(dev) != 0) { d = dsp_get_info(dev); if (!PCM_REGISTERED(d)) return (ENXIO); PCM_LOCK(d); PCM_WAIT(d); PCM_ACQUIRE(d); /* * Note: order is important - * pcm flags -> prio query flags -> wild guess */ ch = NULL; flags = dsp_get_flags(dev); if (flags & SD_F_PRIO_WR) { ch = PCM_RDCH(dev); PCM_RDCH(dev) = NULL; } else if (flags & SD_F_PRIO_RD) { ch = PCM_WRCH(dev); PCM_WRCH(dev) = NULL; } else if (prio & SD_F_PRIO_WR) { ch = PCM_RDCH(dev); PCM_RDCH(dev) = NULL; flags |= SD_F_PRIO_WR; } else if (prio & SD_F_PRIO_RD) { ch = PCM_WRCH(dev); PCM_WRCH(dev) = NULL; flags |= SD_F_PRIO_RD; } else if (PCM_WRCH(dev) != NULL) { ch = PCM_RDCH(dev); PCM_RDCH(dev) = NULL; flags |= SD_F_PRIO_WR; } else if (PCM_RDCH(dev) != NULL) { ch = PCM_WRCH(dev); PCM_WRCH(dev) = NULL; flags |= SD_F_PRIO_RD; } PCM_SIMPLEX(dev) = 0; dsp_set_flags(dev, flags); if (ch != NULL) { CHN_LOCK(ch); pcm_chnref(ch, -1); pcm_chnrelease(ch); } PCM_RELEASE(d); PCM_UNLOCK(d); } *rdch = PCM_RDCH(dev); *wrch = PCM_WRCH(dev); if (*rdch != NULL && (prio & SD_F_PRIO_RD)) CHN_LOCK(*rdch); if (*wrch != NULL && (prio & SD_F_PRIO_WR)) CHN_LOCK(*wrch); return (0); } /* unlock specified channels */ static void relchns(struct cdev *dev, struct pcm_channel *rdch, struct pcm_channel *wrch, uint32_t prio) { if (wrch != NULL && (prio & SD_F_PRIO_WR)) CHN_UNLOCK(wrch); if (rdch != NULL && (prio & SD_F_PRIO_RD)) CHN_UNLOCK(rdch); } static void dsp_cdevinfo_alloc(struct cdev *dev, struct pcm_channel *rdch, struct pcm_channel *wrch, struct pcm_channel *volch) { struct snddev_info *d; struct dsp_cdevinfo *cdi; int simplex; d = dsp_get_info(dev); KASSERT(PCM_REGISTERED(d) && dev != NULL && dev->si_drv1 == NULL && ((rdch == NULL && wrch == NULL) || rdch != wrch), ("bogus %s(), what are you trying to accomplish here?", __func__)); PCM_BUSYASSERT(d); PCM_LOCKASSERT(d); simplex = (dsp_get_flags(dev) & SD_F_SIMPLEX) ? 1 : 0; /* * Scan for free instance entry and put it into the end of list. * Create new one if necessary. */ TAILQ_FOREACH(cdi, &d->dsp_cdevinfo_pool, link) { if (cdi->busy != 0) break; cdi->rdch = rdch; cdi->wrch = wrch; cdi->volch = volch; cdi->simplex = simplex; cdi->busy = 1; TAILQ_REMOVE(&d->dsp_cdevinfo_pool, cdi, link); TAILQ_INSERT_TAIL(&d->dsp_cdevinfo_pool, cdi, link); dev->si_drv1 = cdi; return; } PCM_UNLOCK(d); cdi = malloc(sizeof(*cdi), M_DEVBUF, M_WAITOK | M_ZERO); PCM_LOCK(d); cdi->rdch = rdch; cdi->wrch = wrch; cdi->volch = volch; cdi->simplex = simplex; cdi->busy = 1; TAILQ_INSERT_TAIL(&d->dsp_cdevinfo_pool, cdi, link); dev->si_drv1 = cdi; } static void dsp_cdevinfo_free(struct cdev *dev) { struct snddev_info *d; struct dsp_cdevinfo *cdi, *tmp; uint32_t flags; int i; d = dsp_get_info(dev); KASSERT(PCM_REGISTERED(d) && dev != NULL && dev->si_drv1 != NULL && PCM_RDCH(dev) == NULL && PCM_WRCH(dev) == NULL && PCM_VOLCH(dev) == NULL, ("bogus %s(), what are you trying to accomplish here?", __func__)); PCM_BUSYASSERT(d); PCM_LOCKASSERT(d); cdi = dev->si_drv1; dev->si_drv1 = NULL; cdi->rdch = NULL; cdi->wrch = NULL; cdi->volch = NULL; cdi->simplex = 0; cdi->busy = 0; /* * Once it is free, move it back to the beginning of list for * faster new entry allocation. */ TAILQ_REMOVE(&d->dsp_cdevinfo_pool, cdi, link); TAILQ_INSERT_HEAD(&d->dsp_cdevinfo_pool, cdi, link); /* * Scan the list, cache free entries up to DSP_CDEVINFO_CACHESIZE. * Reset simplex flags. 
*/ flags = dsp_get_flags(dev) & ~SD_F_PRIO_SET; i = DSP_CDEVINFO_CACHESIZE; TAILQ_FOREACH_SAFE(cdi, &d->dsp_cdevinfo_pool, link, tmp) { if (cdi->busy != 0) { if (cdi->simplex == 0) { if (cdi->rdch != NULL) flags |= SD_F_PRIO_RD; if (cdi->wrch != NULL) flags |= SD_F_PRIO_WR; } } else { if (i == 0) { TAILQ_REMOVE(&d->dsp_cdevinfo_pool, cdi, link); free(cdi, M_DEVBUF); } else i--; } } dsp_set_flags(dev, flags); } void dsp_cdevinfo_init(struct snddev_info *d) { struct dsp_cdevinfo *cdi; int i; KASSERT(d != NULL, ("NULL snddev_info")); PCM_BUSYASSERT(d); PCM_UNLOCKASSERT(d); TAILQ_INIT(&d->dsp_cdevinfo_pool); for (i = 0; i < DSP_CDEVINFO_CACHESIZE; i++) { cdi = malloc(sizeof(*cdi), M_DEVBUF, M_WAITOK | M_ZERO); TAILQ_INSERT_HEAD(&d->dsp_cdevinfo_pool, cdi, link); } } void dsp_cdevinfo_flush(struct snddev_info *d) { struct dsp_cdevinfo *cdi, *tmp; KASSERT(d != NULL, ("NULL snddev_info")); PCM_BUSYASSERT(d); PCM_UNLOCKASSERT(d); cdi = TAILQ_FIRST(&d->dsp_cdevinfo_pool); while (cdi != NULL) { tmp = TAILQ_NEXT(cdi, link); free(cdi, M_DEVBUF); cdi = tmp; } TAILQ_INIT(&d->dsp_cdevinfo_pool); } /* duplex / simplex cdev type */ enum { DSP_CDEV_TYPE_RDONLY, /* simplex read-only (record) */ DSP_CDEV_TYPE_WRONLY, /* simplex write-only (play) */ DSP_CDEV_TYPE_RDWR /* duplex read, write, or both */ }; enum { DSP_CDEV_VOLCTL_NONE, DSP_CDEV_VOLCTL_READ, DSP_CDEV_VOLCTL_WRITE }; #define DSP_F_VALID(x) ((x) & (FREAD | FWRITE)) #define DSP_F_DUPLEX(x) (((x) & (FREAD | FWRITE)) == (FREAD | FWRITE)) #define DSP_F_SIMPLEX(x) (!DSP_F_DUPLEX(x)) #define DSP_F_READ(x) ((x) & FREAD) #define DSP_F_WRITE(x) ((x) & FWRITE) static const struct { int type; char *name; char *sep; char *alias; int use_sep; int hw; int max; int volctl; uint32_t fmt, spd; int query; } dsp_cdevs[] = { { SND_DEV_DSP, "dsp", ".", NULL, 0, 0, 0, 0, SND_FORMAT(AFMT_U8, 1, 0), DSP_DEFAULT_SPEED, DSP_CDEV_TYPE_RDWR }, { SND_DEV_AUDIO, "audio", ".", NULL, 0, 0, 0, 0, SND_FORMAT(AFMT_MU_LAW, 1, 0), DSP_DEFAULT_SPEED, DSP_CDEV_TYPE_RDWR }, { SND_DEV_DSP16, "dspW", ".", NULL, 0, 0, 0, 0, SND_FORMAT(AFMT_S16_LE, 1, 0), DSP_DEFAULT_SPEED, DSP_CDEV_TYPE_RDWR }, { SND_DEV_DSPHW_PLAY, "dsp", ".p", NULL, 1, 1, SND_MAXHWCHAN, 1, SND_FORMAT(AFMT_S16_LE, 2, 0), 48000, DSP_CDEV_TYPE_WRONLY }, { SND_DEV_DSPHW_VPLAY, "dsp", ".vp", NULL, 1, 1, SND_MAXVCHANS, 1, SND_FORMAT(AFMT_S16_LE, 2, 0), 48000, DSP_CDEV_TYPE_WRONLY }, { SND_DEV_DSPHW_REC, "dsp", ".r", NULL, 1, 1, SND_MAXHWCHAN, 1, SND_FORMAT(AFMT_S16_LE, 2, 0), 48000, DSP_CDEV_TYPE_RDONLY }, { SND_DEV_DSPHW_VREC, "dsp", ".vr", NULL, 1, 1, SND_MAXVCHANS, 1, SND_FORMAT(AFMT_S16_LE, 2, 0), 48000, DSP_CDEV_TYPE_RDONLY }, { SND_DEV_DSPHW_CD, "dspcd", ".", NULL, 0, 0, 0, 0, SND_FORMAT(AFMT_S16_LE, 2, 0), 44100, DSP_CDEV_TYPE_RDWR }, /* Low priority, OSSv4 aliases. 
*/ { SND_DEV_DSP, "dsp_ac3", ".", "dsp", 0, 0, 0, 0, SND_FORMAT(AFMT_U8, 1, 0), DSP_DEFAULT_SPEED, DSP_CDEV_TYPE_RDWR }, { SND_DEV_DSP, "dsp_mmap", ".", "dsp", 0, 0, 0, 0, SND_FORMAT(AFMT_U8, 1, 0), DSP_DEFAULT_SPEED, DSP_CDEV_TYPE_RDWR }, { SND_DEV_DSP, "dsp_multich", ".", "dsp", 0, 0, 0, 0, SND_FORMAT(AFMT_U8, 1, 0), DSP_DEFAULT_SPEED, DSP_CDEV_TYPE_RDWR }, { SND_DEV_DSP, "dsp_spdifout", ".", "dsp", 0, 0, 0, 0, SND_FORMAT(AFMT_U8, 1, 0), DSP_DEFAULT_SPEED, DSP_CDEV_TYPE_RDWR }, { SND_DEV_DSP, "dsp_spdifin", ".", "dsp", 0, 0, 0, 0, SND_FORMAT(AFMT_U8, 1, 0), DSP_DEFAULT_SPEED, DSP_CDEV_TYPE_RDWR }, }; #define DSP_FIXUP_ERROR() do { \ prio = dsp_get_flags(i_dev); \ if (!DSP_F_VALID(flags)) \ error = EINVAL; \ if (!DSP_F_DUPLEX(flags) && \ ((DSP_F_READ(flags) && d->reccount == 0) || \ (DSP_F_WRITE(flags) && d->playcount == 0))) \ error = ENOTSUP; \ else if (!DSP_F_DUPLEX(flags) && (prio & SD_F_SIMPLEX) && \ ((DSP_F_READ(flags) && (prio & SD_F_PRIO_WR)) || \ (DSP_F_WRITE(flags) && (prio & SD_F_PRIO_RD)))) \ error = EBUSY; \ else if (DSP_REGISTERED(d, i_dev)) \ error = EBUSY; \ } while (0) static int dsp_open(struct cdev *i_dev, int flags, int mode, struct thread *td) { struct pcm_channel *rdch, *wrch; struct snddev_info *d; uint32_t fmt, spd, prio, volctl; int i, error, rderror, wrerror, devtype, wdevunit, rdevunit; /* Kind of impossible.. */ if (i_dev == NULL || td == NULL) return (ENODEV); d = dsp_get_info(i_dev); if (!PCM_REGISTERED(d)) return (EBADF); PCM_GIANT_ENTER(d); /* Lock snddev so nobody else can monkey with it. */ PCM_LOCK(d); PCM_WAIT(d); /* * Try to acquire cloned device before someone else pick it. * ENODEV means this is not a cloned droids. */ error = snd_clone_acquire(i_dev); if (!(error == 0 || error == ENODEV)) { DSP_FIXUP_ERROR(); PCM_UNLOCK(d); PCM_GIANT_EXIT(d); return (error); } error = 0; DSP_FIXUP_ERROR(); if (error != 0) { (void)snd_clone_release(i_dev); PCM_UNLOCK(d); PCM_GIANT_EXIT(d); return (error); } /* * That is just enough. Acquire and unlock pcm lock so * the other will just have to wait until we finish doing * everything. */ PCM_ACQUIRE(d); PCM_UNLOCK(d); devtype = PCMDEV(i_dev); wdevunit = -1; rdevunit = -1; fmt = 0; spd = 0; volctl = DSP_CDEV_VOLCTL_NONE; for (i = 0; i < (sizeof(dsp_cdevs) / sizeof(dsp_cdevs[0])); i++) { if (devtype != dsp_cdevs[i].type || dsp_cdevs[i].alias != NULL) continue; /* * Volume control only valid for DSPHW devices, * and it must be opened in opposite direction be it * simplex or duplex. Anything else will be handled * as usual. */ if (dsp_cdevs[i].query == DSP_CDEV_TYPE_WRONLY) { if (dsp_cdevs[i].volctl != 0 && DSP_F_READ(flags)) { volctl = DSP_CDEV_VOLCTL_WRITE; flags &= ~FREAD; flags |= FWRITE; } if (DSP_F_READ(flags)) { (void)snd_clone_release(i_dev); PCM_RELEASE_QUICK(d); PCM_GIANT_EXIT(d); return (ENOTSUP); } wdevunit = dev2unit(i_dev); } else if (dsp_cdevs[i].query == DSP_CDEV_TYPE_RDONLY) { if (dsp_cdevs[i].volctl != 0 && DSP_F_WRITE(flags)) { volctl = DSP_CDEV_VOLCTL_READ; flags &= ~FWRITE; flags |= FREAD; } if (DSP_F_WRITE(flags)) { (void)snd_clone_release(i_dev); PCM_RELEASE_QUICK(d); PCM_GIANT_EXIT(d); return (ENOTSUP); } rdevunit = dev2unit(i_dev); } fmt = dsp_cdevs[i].fmt; spd = dsp_cdevs[i].spd; break; } /* No matching devtype? 
*/ if (fmt == 0 || spd == 0) panic("impossible devtype %d", devtype); rdch = NULL; wrch = NULL; rderror = 0; wrerror = 0; /* * if we get here, the open request is valid- either: * * we were previously not open * * we were open for play xor record and the opener wants * the non-open direction */ if (DSP_F_READ(flags)) { /* open for read */ rderror = pcm_chnalloc(d, &rdch, PCMDIR_REC, td->td_proc->p_pid, td->td_proc->p_comm, rdevunit); if (rderror == 0 && chn_reset(rdch, fmt, spd) != 0) rderror = ENXIO; if (volctl == DSP_CDEV_VOLCTL_READ) rderror = 0; if (rderror != 0) { if (rdch != NULL) pcm_chnrelease(rdch); if (!DSP_F_DUPLEX(flags)) { (void)snd_clone_release(i_dev); PCM_RELEASE_QUICK(d); PCM_GIANT_EXIT(d); return (rderror); } rdch = NULL; } else if (volctl == DSP_CDEV_VOLCTL_READ) { if (rdch != NULL) { pcm_chnref(rdch, 1); pcm_chnrelease(rdch); } } else { if (flags & O_NONBLOCK) rdch->flags |= CHN_F_NBIO; if (flags & O_EXCL) rdch->flags |= CHN_F_EXCLUSIVE; pcm_chnref(rdch, 1); if (volctl == DSP_CDEV_VOLCTL_NONE) chn_vpc_reset(rdch, SND_VOL_C_PCM, 0); CHN_UNLOCK(rdch); } } if (DSP_F_WRITE(flags)) { /* open for write */ wrerror = pcm_chnalloc(d, &wrch, PCMDIR_PLAY, td->td_proc->p_pid, td->td_proc->p_comm, wdevunit); if (wrerror == 0 && chn_reset(wrch, fmt, spd) != 0) wrerror = ENXIO; if (volctl == DSP_CDEV_VOLCTL_WRITE) wrerror = 0; if (wrerror != 0) { if (wrch != NULL) pcm_chnrelease(wrch); if (!DSP_F_DUPLEX(flags)) { if (rdch != NULL) { /* * Lock, deref and release previously * created record channel */ CHN_LOCK(rdch); pcm_chnref(rdch, -1); pcm_chnrelease(rdch); } (void)snd_clone_release(i_dev); PCM_RELEASE_QUICK(d); PCM_GIANT_EXIT(d); return (wrerror); } wrch = NULL; } else if (volctl == DSP_CDEV_VOLCTL_WRITE) { if (wrch != NULL) { pcm_chnref(wrch, 1); pcm_chnrelease(wrch); } } else { if (flags & O_NONBLOCK) wrch->flags |= CHN_F_NBIO; if (flags & O_EXCL) wrch->flags |= CHN_F_EXCLUSIVE; pcm_chnref(wrch, 1); if (volctl == DSP_CDEV_VOLCTL_NONE) chn_vpc_reset(wrch, SND_VOL_C_PCM, 0); CHN_UNLOCK(wrch); } } PCM_LOCK(d); /* * We're done. Allocate channels information for this cdev. */ switch (volctl) { case DSP_CDEV_VOLCTL_READ: KASSERT(wrch == NULL, ("wrch=%p not null!", wrch)); dsp_cdevinfo_alloc(i_dev, NULL, NULL, rdch); break; case DSP_CDEV_VOLCTL_WRITE: KASSERT(rdch == NULL, ("rdch=%p not null!", rdch)); dsp_cdevinfo_alloc(i_dev, NULL, NULL, wrch); break; case DSP_CDEV_VOLCTL_NONE: default: if (wrch == NULL && rdch == NULL) { (void)snd_clone_release(i_dev); PCM_RELEASE(d); PCM_UNLOCK(d); PCM_GIANT_EXIT(d); if (wrerror != 0) return (wrerror); if (rderror != 0) return (rderror); return (EINVAL); } dsp_cdevinfo_alloc(i_dev, rdch, wrch, NULL); if (rdch != NULL) CHN_INSERT_HEAD(d, rdch, channels.pcm.opened); if (wrch != NULL) CHN_INSERT_HEAD(d, wrch, channels.pcm.opened); break; } /* * Increase clone refcount for its automatic garbage collector. 
*/ (void)snd_clone_ref(i_dev); PCM_RELEASE(d); PCM_UNLOCK(d); PCM_GIANT_LEAVE(d); return (0); } static int dsp_close(struct cdev *i_dev, int flags, int mode, struct thread *td) { struct pcm_channel *rdch, *wrch, *volch; struct snddev_info *d; int sg_ids, rdref, wdref; d = dsp_get_info(i_dev); if (!DSP_REGISTERED(d, i_dev)) return (EBADF); PCM_GIANT_ENTER(d); PCM_LOCK(d); PCM_WAIT(d); PCM_ACQUIRE(d); rdch = PCM_RDCH(i_dev); wrch = PCM_WRCH(i_dev); volch = PCM_VOLCH(i_dev); PCM_RDCH(i_dev) = NULL; PCM_WRCH(i_dev) = NULL; PCM_VOLCH(i_dev) = NULL; rdref = -1; wdref = -1; if (volch != NULL) { if (volch == rdch) rdref--; else if (volch == wrch) wdref--; else { CHN_LOCK(volch); pcm_chnref(volch, -1); CHN_UNLOCK(volch); } } if (rdch != NULL) CHN_REMOVE(d, rdch, channels.pcm.opened); if (wrch != NULL) CHN_REMOVE(d, wrch, channels.pcm.opened); if (rdch != NULL || wrch != NULL) { PCM_UNLOCK(d); if (rdch != NULL) { /* * The channel itself need not be locked because: * a) Adding a channel to a syncgroup happens only * in dsp_ioctl(), which cannot run concurrently * to dsp_close(). * b) The syncmember pointer (sm) is protected by * the global syncgroup list lock. * c) A channel can't just disappear, invalidating * pointers, unless it's closed/dereferenced * first. */ PCM_SG_LOCK(); sg_ids = chn_syncdestroy(rdch); PCM_SG_UNLOCK(); if (sg_ids != 0) free_unr(pcmsg_unrhdr, sg_ids); CHN_LOCK(rdch); pcm_chnref(rdch, rdref); chn_abort(rdch); /* won't sleep */ rdch->flags &= ~(CHN_F_RUNNING | CHN_F_MMAP | CHN_F_DEAD | CHN_F_EXCLUSIVE); chn_reset(rdch, 0, 0); pcm_chnrelease(rdch); } if (wrch != NULL) { /* * Please see block above. */ PCM_SG_LOCK(); sg_ids = chn_syncdestroy(wrch); PCM_SG_UNLOCK(); if (sg_ids != 0) free_unr(pcmsg_unrhdr, sg_ids); CHN_LOCK(wrch); pcm_chnref(wrch, wdref); chn_flush(wrch); /* may sleep */ wrch->flags &= ~(CHN_F_RUNNING | CHN_F_MMAP | CHN_F_DEAD | CHN_F_EXCLUSIVE); chn_reset(wrch, 0, 0); pcm_chnrelease(wrch); } PCM_LOCK(d); } dsp_cdevinfo_free(i_dev); /* * Release clone busy state and unref it so the automatic * garbage collector will get the hint and do the remaining * cleanup process. */ (void)snd_clone_release(i_dev); /* * destroy_dev() might sleep, so release pcm lock * here and rely on pcm cv serialization. 
*/ PCM_UNLOCK(d); (void)snd_clone_unref(i_dev); PCM_LOCK(d); PCM_RELEASE(d); PCM_UNLOCK(d); PCM_GIANT_LEAVE(d); return (0); } static __inline int dsp_io_ops(struct cdev *i_dev, struct uio *buf) { struct snddev_info *d; struct pcm_channel **ch, *rdch, *wrch; int (*chn_io)(struct pcm_channel *, struct uio *); int prio, ret; pid_t runpid; KASSERT(i_dev != NULL && buf != NULL && (buf->uio_rw == UIO_READ || buf->uio_rw == UIO_WRITE), ("%s(): io train wreck!", __func__)); d = dsp_get_info(i_dev); if (!DSP_REGISTERED(d, i_dev)) return (EBADF); PCM_GIANT_ENTER(d); switch (buf->uio_rw) { case UIO_READ: prio = SD_F_PRIO_RD; ch = &rdch; chn_io = chn_read; break; case UIO_WRITE: prio = SD_F_PRIO_WR; ch = &wrch; chn_io = chn_write; break; default: panic("invalid/corrupted uio direction: %d", buf->uio_rw); break; } rdch = NULL; wrch = NULL; runpid = buf->uio_td->td_proc->p_pid; getchns(i_dev, &rdch, &wrch, prio); if (*ch == NULL || !((*ch)->flags & CHN_F_BUSY)) { PCM_GIANT_EXIT(d); return (EBADF); } if (((*ch)->flags & (CHN_F_MMAP | CHN_F_DEAD)) || (((*ch)->flags & CHN_F_RUNNING) && (*ch)->pid != runpid)) { relchns(i_dev, rdch, wrch, prio); PCM_GIANT_EXIT(d); return (EINVAL); } else if (!((*ch)->flags & CHN_F_RUNNING)) { (*ch)->flags |= CHN_F_RUNNING; (*ch)->pid = runpid; } /* * chn_read/write must give up channel lock in order to copy bytes * from/to userland, so up the "in progress" counter to make sure * someone else doesn't come along and muss up the buffer. */ ++(*ch)->inprog; ret = chn_io(*ch, buf); --(*ch)->inprog; CHN_BROADCAST(&(*ch)->cv); relchns(i_dev, rdch, wrch, prio); PCM_GIANT_LEAVE(d); return (ret); } static int dsp_read(struct cdev *i_dev, struct uio *buf, int flag) { return (dsp_io_ops(i_dev, buf)); } static int dsp_write(struct cdev *i_dev, struct uio *buf, int flag) { return (dsp_io_ops(i_dev, buf)); } static int dsp_get_volume_channel(struct cdev *dev, struct pcm_channel **volch) { struct snddev_info *d; struct pcm_channel *c; int unit; KASSERT(dev != NULL && volch != NULL, ("%s(): NULL query dev=%p volch=%p", __func__, dev, volch)); d = dsp_get_info(dev); if (!PCM_REGISTERED(d)) { *volch = NULL; return (EINVAL); } PCM_UNLOCKASSERT(d); *volch = NULL; c = PCM_VOLCH(dev); if (c != NULL) { if (!(c->feederflags & (1 << FEEDER_VOLUME))) return (-1); *volch = c; return (0); } PCM_LOCK(d); PCM_WAIT(d); PCM_ACQUIRE(d); unit = dev2unit(dev); CHN_FOREACH(c, d, channels.pcm) { CHN_LOCK(c); if (c->unit != unit) { CHN_UNLOCK(c); continue; } *volch = c; pcm_chnref(c, 1); PCM_VOLCH(dev) = c; CHN_UNLOCK(c); PCM_RELEASE(d); PCM_UNLOCK(d); return ((c->feederflags & (1 << FEEDER_VOLUME)) ? 
0 : -1); } PCM_RELEASE(d); PCM_UNLOCK(d); return (EINVAL); } static int dsp_ioctl_channel(struct cdev *dev, struct pcm_channel *volch, u_long cmd, caddr_t arg) { struct snddev_info *d; struct pcm_channel *rdch, *wrch; int j, devtype, ret; d = dsp_get_info(dev); if (!PCM_REGISTERED(d) || !(dsp_get_flags(dev) & SD_F_VPC)) return (-1); PCM_UNLOCKASSERT(d); j = cmd & 0xff; rdch = PCM_RDCH(dev); wrch = PCM_WRCH(dev); /* No specific channel, look into cache */ if (volch == NULL) volch = PCM_VOLCH(dev); /* Look harder */ if (volch == NULL) { if (j == SOUND_MIXER_RECLEV && rdch != NULL) volch = rdch; else if (j == SOUND_MIXER_PCM && wrch != NULL) volch = wrch; } devtype = PCMDEV(dev); /* Look super harder */ if (volch == NULL && (devtype == SND_DEV_DSPHW_PLAY || devtype == SND_DEV_DSPHW_VPLAY || devtype == SND_DEV_DSPHW_REC || devtype == SND_DEV_DSPHW_VREC)) { ret = dsp_get_volume_channel(dev, &volch); if (ret != 0) return (ret); if (volch == NULL) return (EINVAL); } /* Final validation */ if (volch != NULL) { CHN_LOCK(volch); if (!(volch->feederflags & (1 << FEEDER_VOLUME))) { CHN_UNLOCK(volch); return (-1); } if (volch->direction == PCMDIR_PLAY) wrch = volch; else rdch = volch; } ret = EINVAL; if (volch != NULL && ((j == SOUND_MIXER_PCM && volch->direction == PCMDIR_PLAY) || (j == SOUND_MIXER_RECLEV && volch->direction == PCMDIR_REC))) { if ((cmd & ~0xff) == MIXER_WRITE(0)) { int left, right, center; left = *(int *)arg & 0x7f; right = ((*(int *)arg) >> 8) & 0x7f; center = (left + right) >> 1; chn_setvolume_multi(volch, SND_VOL_C_PCM, left, right, center); } else if ((cmd & ~0xff) == MIXER_READ(0)) { *(int *)arg = CHN_GETVOLUME(volch, SND_VOL_C_PCM, SND_CHN_T_FL); *(int *)arg |= CHN_GETVOLUME(volch, SND_VOL_C_PCM, SND_CHN_T_FR) << 8; } ret = 0; } else if (rdch != NULL || wrch != NULL) { switch (j) { case SOUND_MIXER_DEVMASK: case SOUND_MIXER_CAPS: case SOUND_MIXER_STEREODEVS: if ((cmd & ~0xff) == MIXER_READ(0)) { *(int *)arg = 0; if (rdch != NULL) *(int *)arg |= SOUND_MASK_RECLEV; if (wrch != NULL) *(int *)arg |= SOUND_MASK_PCM; } ret = 0; break; case SOUND_MIXER_RECMASK: case SOUND_MIXER_RECSRC: if ((cmd & ~0xff) == MIXER_READ(0)) *(int *)arg = 0; ret = 0; break; default: break; } } if (volch != NULL) CHN_UNLOCK(volch); return (ret); } static int dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode, struct thread *td) { struct pcm_channel *chn, *rdch, *wrch; struct snddev_info *d; u_long xcmd; int *arg_i, ret, tmp; d = dsp_get_info(i_dev); if (!DSP_REGISTERED(d, i_dev)) return (EBADF); PCM_GIANT_ENTER(d); arg_i = (int *)arg; ret = 0; xcmd = 0; chn = NULL; if (IOCGROUP(cmd) == 'M') { if (cmd == OSS_GETVERSION) { *arg_i = SOUND_VERSION; PCM_GIANT_EXIT(d); return (0); } ret = dsp_ioctl_channel(i_dev, PCM_VOLCH(i_dev), cmd, arg); if (ret != -1) { PCM_GIANT_EXIT(d); return (ret); } if (d->mixer_dev != NULL) { PCM_ACQUIRE_QUICK(d); ret = mixer_ioctl_cmd(d->mixer_dev, cmd, arg, -1, td, MIXER_CMD_DIRECT); PCM_RELEASE_QUICK(d); } else ret = EBADF; PCM_GIANT_EXIT(d); return (ret); } /* * Certain ioctls may be made on any type of device (audio, mixer, * and MIDI). Handle those special cases here. 
*/ if (IOCGROUP(cmd) == 'X') { PCM_ACQUIRE_QUICK(d); switch(cmd) { case SNDCTL_SYSINFO: sound_oss_sysinfo((oss_sysinfo *)arg); break; case SNDCTL_CARDINFO: ret = sound_oss_card_info((oss_card_info *)arg); break; case SNDCTL_AUDIOINFO: case SNDCTL_AUDIOINFO_EX: case SNDCTL_ENGINEINFO: ret = dsp_oss_audioinfo(i_dev, (oss_audioinfo *)arg); break; case SNDCTL_MIXERINFO: ret = mixer_oss_mixerinfo(i_dev, (oss_mixerinfo *)arg); break; default: ret = EINVAL; } PCM_RELEASE_QUICK(d); PCM_GIANT_EXIT(d); return (ret); } getchns(i_dev, &rdch, &wrch, 0); if (wrch != NULL && (wrch->flags & CHN_F_DEAD)) wrch = NULL; if (rdch != NULL && (rdch->flags & CHN_F_DEAD)) rdch = NULL; if (wrch == NULL && rdch == NULL) { PCM_GIANT_EXIT(d); return (EINVAL); } switch(cmd) { #ifdef OLDPCM_IOCTL /* * we start with the new ioctl interface. */ case AIONWRITE: /* how many bytes can write ? */ if (wrch) { CHN_LOCK(wrch); /* if (wrch && wrch->bufhard.dl) while (chn_wrfeed(wrch) == 0); */ *arg_i = sndbuf_getfree(wrch->bufsoft); CHN_UNLOCK(wrch); } else { *arg_i = 0; ret = EINVAL; } break; case AIOSSIZE: /* set the current blocksize */ { struct snd_size *p = (struct snd_size *)arg; p->play_size = 0; p->rec_size = 0; PCM_ACQUIRE_QUICK(d); if (wrch) { CHN_LOCK(wrch); chn_setblocksize(wrch, 2, p->play_size); p->play_size = sndbuf_getblksz(wrch->bufsoft); CHN_UNLOCK(wrch); } if (rdch) { CHN_LOCK(rdch); chn_setblocksize(rdch, 2, p->rec_size); p->rec_size = sndbuf_getblksz(rdch->bufsoft); CHN_UNLOCK(rdch); } PCM_RELEASE_QUICK(d); } break; case AIOGSIZE: /* get the current blocksize */ { struct snd_size *p = (struct snd_size *)arg; if (wrch) { CHN_LOCK(wrch); p->play_size = sndbuf_getblksz(wrch->bufsoft); CHN_UNLOCK(wrch); } if (rdch) { CHN_LOCK(rdch); p->rec_size = sndbuf_getblksz(rdch->bufsoft); CHN_UNLOCK(rdch); } } break; case AIOSFMT: case AIOGFMT: { snd_chan_param *p = (snd_chan_param *)arg; if (cmd == AIOSFMT && ((p->play_format != 0 && p->play_rate == 0) || (p->rec_format != 0 && p->rec_rate == 0))) { ret = EINVAL; break; } PCM_ACQUIRE_QUICK(d); if (wrch) { CHN_LOCK(wrch); if (cmd == AIOSFMT && p->play_format != 0) { chn_setformat(wrch, SND_FORMAT(p->play_format, AFMT_CHANNEL(wrch->format), AFMT_EXTCHANNEL(wrch->format))); chn_setspeed(wrch, p->play_rate); } p->play_rate = wrch->speed; p->play_format = AFMT_ENCODING(wrch->format); CHN_UNLOCK(wrch); } else { p->play_rate = 0; p->play_format = 0; } if (rdch) { CHN_LOCK(rdch); if (cmd == AIOSFMT && p->rec_format != 0) { chn_setformat(rdch, SND_FORMAT(p->rec_format, AFMT_CHANNEL(rdch->format), AFMT_EXTCHANNEL(rdch->format))); chn_setspeed(rdch, p->rec_rate); } p->rec_rate = rdch->speed; p->rec_format = AFMT_ENCODING(rdch->format); CHN_UNLOCK(rdch); } else { p->rec_rate = 0; p->rec_format = 0; } PCM_RELEASE_QUICK(d); } break; case AIOGCAP: /* get capabilities */ { snd_capabilities *p = (snd_capabilities *)arg; struct pcmchan_caps *pcaps = NULL, *rcaps = NULL; struct cdev *pdev; PCM_LOCK(d); if (rdch) { CHN_LOCK(rdch); rcaps = chn_getcaps(rdch); } if (wrch) { CHN_LOCK(wrch); pcaps = chn_getcaps(wrch); } p->rate_min = max(rcaps? rcaps->minspeed : 0, pcaps? pcaps->minspeed : 0); p->rate_max = min(rcaps? rcaps->maxspeed : 1000000, pcaps? pcaps->maxspeed : 1000000); p->bufsize = min(rdch? sndbuf_getsize(rdch->bufsoft) : 1000000, wrch? sndbuf_getsize(wrch->bufsoft) : 1000000); /* XXX bad on sb16 */ p->formats = (rdch? chn_getformats(rdch) : 0xffffffff) & (wrch? chn_getformats(wrch) : 0xffffffff); if (rdch && wrch) p->formats |= (dsp_get_flags(i_dev) & SD_F_SIMPLEX)? 
0 : AFMT_FULLDUPLEX; pdev = d->mixer_dev; p->mixers = 1; /* default: one mixer */ p->inputs = pdev->si_drv1? mix_getdevs(pdev->si_drv1) : 0; p->left = p->right = 100; if (wrch) CHN_UNLOCK(wrch); if (rdch) CHN_UNLOCK(rdch); PCM_UNLOCK(d); } break; case AIOSTOP: if (*arg_i == AIOSYNC_PLAY && wrch) { CHN_LOCK(wrch); *arg_i = chn_abort(wrch); CHN_UNLOCK(wrch); } else if (*arg_i == AIOSYNC_CAPTURE && rdch) { CHN_LOCK(rdch); *arg_i = chn_abort(rdch); CHN_UNLOCK(rdch); } else { printf("AIOSTOP: bad channel 0x%x\n", *arg_i); *arg_i = 0; } break; case AIOSYNC: printf("AIOSYNC chan 0x%03lx pos %lu unimplemented\n", ((snd_sync_parm *)arg)->chan, ((snd_sync_parm *)arg)->pos); break; #endif /* * here follow the standard ioctls (filio.h etc.) */ case FIONREAD: /* get # bytes to read */ if (rdch) { CHN_LOCK(rdch); /* if (rdch && rdch->bufhard.dl) while (chn_rdfeed(rdch) == 0); */ *arg_i = sndbuf_getready(rdch->bufsoft); CHN_UNLOCK(rdch); } else { *arg_i = 0; ret = EINVAL; } break; case FIOASYNC: /*set/clear async i/o */ DEB( printf("FIOASYNC\n") ; ) break; case SNDCTL_DSP_NONBLOCK: /* set non-blocking i/o */ case FIONBIO: /* set/clear non-blocking i/o */ if (rdch) { CHN_LOCK(rdch); if (cmd == SNDCTL_DSP_NONBLOCK || *arg_i) rdch->flags |= CHN_F_NBIO; else rdch->flags &= ~CHN_F_NBIO; CHN_UNLOCK(rdch); } if (wrch) { CHN_LOCK(wrch); if (cmd == SNDCTL_DSP_NONBLOCK || *arg_i) wrch->flags |= CHN_F_NBIO; else wrch->flags &= ~CHN_F_NBIO; CHN_UNLOCK(wrch); } break; /* * Finally, here is the linux-compatible ioctl interface */ #define THE_REAL_SNDCTL_DSP_GETBLKSIZE _IOWR('P', 4, int) case THE_REAL_SNDCTL_DSP_GETBLKSIZE: case SNDCTL_DSP_GETBLKSIZE: chn = wrch ? wrch : rdch; if (chn) { CHN_LOCK(chn); *arg_i = sndbuf_getblksz(chn->bufsoft); CHN_UNLOCK(chn); } else { *arg_i = 0; ret = EINVAL; } break; case SNDCTL_DSP_SETBLKSIZE: RANGE(*arg_i, 16, 65536); PCM_ACQUIRE_QUICK(d); if (wrch) { CHN_LOCK(wrch); chn_setblocksize(wrch, 2, *arg_i); CHN_UNLOCK(wrch); } if (rdch) { CHN_LOCK(rdch); chn_setblocksize(rdch, 2, *arg_i); CHN_UNLOCK(rdch); } PCM_RELEASE_QUICK(d); break; case SNDCTL_DSP_RESET: DEB(printf("dsp reset\n")); if (wrch) { CHN_LOCK(wrch); chn_abort(wrch); chn_resetbuf(wrch); CHN_UNLOCK(wrch); } if (rdch) { CHN_LOCK(rdch); chn_abort(rdch); chn_resetbuf(rdch); CHN_UNLOCK(rdch); } break; case SNDCTL_DSP_SYNC: DEB(printf("dsp sync\n")); /* chn_sync may sleep */ if (wrch) { CHN_LOCK(wrch); chn_sync(wrch, 0); CHN_UNLOCK(wrch); } break; case SNDCTL_DSP_SPEED: /* chn_setspeed may sleep */ tmp = 0; PCM_ACQUIRE_QUICK(d); if (wrch) { CHN_LOCK(wrch); ret = chn_setspeed(wrch, *arg_i); tmp = wrch->speed; CHN_UNLOCK(wrch); } if (rdch && ret == 0) { CHN_LOCK(rdch); ret = chn_setspeed(rdch, *arg_i); if (tmp == 0) tmp = rdch->speed; CHN_UNLOCK(rdch); } PCM_RELEASE_QUICK(d); *arg_i = tmp; break; case SOUND_PCM_READ_RATE: chn = wrch ? wrch : rdch; if (chn) { CHN_LOCK(chn); *arg_i = chn->speed; CHN_UNLOCK(chn); } else { *arg_i = 0; ret = EINVAL; } break; case SNDCTL_DSP_STEREO: tmp = -1; *arg_i = (*arg_i)? 2 : 1; PCM_ACQUIRE_QUICK(d); if (wrch) { CHN_LOCK(wrch); ret = chn_setformat(wrch, SND_FORMAT(wrch->format, *arg_i, 0)); tmp = (AFMT_CHANNEL(wrch->format) > 1)? 1 : 0; CHN_UNLOCK(wrch); } if (rdch && ret == 0) { CHN_LOCK(rdch); ret = chn_setformat(rdch, SND_FORMAT(rdch->format, *arg_i, 0)); if (tmp == -1) tmp = (AFMT_CHANNEL(rdch->format) > 1)? 
1 : 0; CHN_UNLOCK(rdch); } PCM_RELEASE_QUICK(d); *arg_i = tmp; break; case SOUND_PCM_WRITE_CHANNELS: /* case SNDCTL_DSP_CHANNELS: ( == SOUND_PCM_WRITE_CHANNELS) */ if (*arg_i < 0) { *arg_i = 0; ret = EINVAL; break; } if (*arg_i != 0) { struct pcmchan_matrix *m; uint32_t ext; tmp = 0; if (*arg_i > SND_CHN_MAX) *arg_i = SND_CHN_MAX; m = feeder_matrix_default_channel_map(*arg_i); if (m != NULL) ext = m->ext; else ext = 0; PCM_ACQUIRE_QUICK(d); if (wrch) { CHN_LOCK(wrch); ret = chn_setformat(wrch, SND_FORMAT(wrch->format, *arg_i, ext)); tmp = AFMT_CHANNEL(wrch->format); CHN_UNLOCK(wrch); } if (rdch && ret == 0) { CHN_LOCK(rdch); ret = chn_setformat(rdch, SND_FORMAT(rdch->format, *arg_i, ext)); if (tmp == 0) tmp = AFMT_CHANNEL(rdch->format); CHN_UNLOCK(rdch); } PCM_RELEASE_QUICK(d); *arg_i = tmp; } else { chn = wrch ? wrch : rdch; CHN_LOCK(chn); *arg_i = AFMT_CHANNEL(chn->format); CHN_UNLOCK(chn); } break; case SOUND_PCM_READ_CHANNELS: chn = wrch ? wrch : rdch; if (chn) { CHN_LOCK(chn); *arg_i = AFMT_CHANNEL(chn->format); CHN_UNLOCK(chn); } else { *arg_i = 0; ret = EINVAL; } break; case SNDCTL_DSP_GETFMTS: /* returns a mask of supported fmts */ chn = wrch ? wrch : rdch; if (chn) { CHN_LOCK(chn); *arg_i = chn_getformats(chn); CHN_UNLOCK(chn); } else { *arg_i = 0; ret = EINVAL; } break; case SNDCTL_DSP_SETFMT: /* sets _one_ format */ if (*arg_i != AFMT_QUERY) { tmp = 0; PCM_ACQUIRE_QUICK(d); if (wrch) { CHN_LOCK(wrch); ret = chn_setformat(wrch, SND_FORMAT(*arg_i, AFMT_CHANNEL(wrch->format), AFMT_EXTCHANNEL(wrch->format))); tmp = wrch->format; CHN_UNLOCK(wrch); } if (rdch && ret == 0) { CHN_LOCK(rdch); ret = chn_setformat(rdch, SND_FORMAT(*arg_i, AFMT_CHANNEL(rdch->format), AFMT_EXTCHANNEL(rdch->format))); if (tmp == 0) tmp = rdch->format; CHN_UNLOCK(rdch); } PCM_RELEASE_QUICK(d); *arg_i = AFMT_ENCODING(tmp); } else { chn = wrch ? 
wrch : rdch; CHN_LOCK(chn); *arg_i = AFMT_ENCODING(chn->format); CHN_UNLOCK(chn); } break; case SNDCTL_DSP_SETFRAGMENT: DEB(printf("SNDCTL_DSP_SETFRAGMENT 0x%08x\n", *(int *)arg)); { uint32_t fragln = (*arg_i) & 0x0000ffff; uint32_t maxfrags = ((*arg_i) & 0xffff0000) >> 16; uint32_t fragsz; uint32_t r_maxfrags, r_fragsz; RANGE(fragln, 4, 16); fragsz = 1 << fragln; if (maxfrags == 0) maxfrags = CHN_2NDBUFMAXSIZE / fragsz; if (maxfrags < 2) maxfrags = 2; if (maxfrags * fragsz > CHN_2NDBUFMAXSIZE) maxfrags = CHN_2NDBUFMAXSIZE / fragsz; DEB(printf("SNDCTL_DSP_SETFRAGMENT %d frags, %d sz\n", maxfrags, fragsz)); PCM_ACQUIRE_QUICK(d); if (rdch) { CHN_LOCK(rdch); ret = chn_setblocksize(rdch, maxfrags, fragsz); r_maxfrags = sndbuf_getblkcnt(rdch->bufsoft); r_fragsz = sndbuf_getblksz(rdch->bufsoft); CHN_UNLOCK(rdch); } else { r_maxfrags = maxfrags; r_fragsz = fragsz; } if (wrch && ret == 0) { CHN_LOCK(wrch); ret = chn_setblocksize(wrch, maxfrags, fragsz); maxfrags = sndbuf_getblkcnt(wrch->bufsoft); fragsz = sndbuf_getblksz(wrch->bufsoft); CHN_UNLOCK(wrch); } else { /* use whatever came from the read channel */ maxfrags = r_maxfrags; fragsz = r_fragsz; } PCM_RELEASE_QUICK(d); fragln = 0; while (fragsz > 1) { fragln++; fragsz >>= 1; } *arg_i = (maxfrags << 16) | fragln; } break; case SNDCTL_DSP_GETISPACE: /* return the size of data available in the input queue */ { audio_buf_info *a = (audio_buf_info *)arg; if (rdch) { struct snd_dbuf *bs = rdch->bufsoft; CHN_LOCK(rdch); a->bytes = sndbuf_getready(bs); a->fragments = a->bytes / sndbuf_getblksz(bs); a->fragstotal = sndbuf_getblkcnt(bs); a->fragsize = sndbuf_getblksz(bs); CHN_UNLOCK(rdch); } else ret = EINVAL; } break; case SNDCTL_DSP_GETOSPACE: /* return space available in the output queue */ { audio_buf_info *a = (audio_buf_info *)arg; if (wrch) { struct snd_dbuf *bs = wrch->bufsoft; CHN_LOCK(wrch); /* XXX abusive DMA update: chn_wrupdate(wrch); */ a->bytes = sndbuf_getfree(bs); a->fragments = a->bytes / sndbuf_getblksz(bs); a->fragstotal = sndbuf_getblkcnt(bs); a->fragsize = sndbuf_getblksz(bs); CHN_UNLOCK(wrch); } else ret = EINVAL; } break; case SNDCTL_DSP_GETIPTR: { count_info *a = (count_info *)arg; if (rdch) { struct snd_dbuf *bs = rdch->bufsoft; CHN_LOCK(rdch); /* XXX abusive DMA update: chn_rdupdate(rdch); */ a->bytes = sndbuf_gettotal(bs); a->blocks = sndbuf_getblocks(bs) - rdch->blocks; a->ptr = sndbuf_getfreeptr(bs); rdch->blocks = sndbuf_getblocks(bs); CHN_UNLOCK(rdch); } else ret = EINVAL; } break; case SNDCTL_DSP_GETOPTR: { count_info *a = (count_info *)arg; if (wrch) { struct snd_dbuf *bs = wrch->bufsoft; CHN_LOCK(wrch); /* XXX abusive DMA update: chn_wrupdate(wrch); */ a->bytes = sndbuf_gettotal(bs); a->blocks = sndbuf_getblocks(bs) - wrch->blocks; a->ptr = sndbuf_getreadyptr(bs); wrch->blocks = sndbuf_getblocks(bs); CHN_UNLOCK(wrch); } else ret = EINVAL; } break; case SNDCTL_DSP_GETCAPS: PCM_LOCK(d); *arg_i = PCM_CAP_REALTIME | PCM_CAP_MMAP | PCM_CAP_TRIGGER; if (rdch && wrch && !(dsp_get_flags(i_dev) & SD_F_SIMPLEX)) *arg_i |= PCM_CAP_DUPLEX; PCM_UNLOCK(d); break; case SOUND_PCM_READ_BITS: chn = wrch ? 
wrch : rdch; if (chn) { CHN_LOCK(chn); if (chn->format & AFMT_8BIT) *arg_i = 8; else if (chn->format & AFMT_16BIT) *arg_i = 16; else if (chn->format & AFMT_24BIT) *arg_i = 24; else if (chn->format & AFMT_32BIT) *arg_i = 32; else ret = EINVAL; CHN_UNLOCK(chn); } else { *arg_i = 0; ret = EINVAL; } break; case SNDCTL_DSP_SETTRIGGER: if (rdch) { CHN_LOCK(rdch); rdch->flags &= ~CHN_F_NOTRIGGER; if (*arg_i & PCM_ENABLE_INPUT) chn_start(rdch, 1); else { chn_abort(rdch); chn_resetbuf(rdch); rdch->flags |= CHN_F_NOTRIGGER; } CHN_UNLOCK(rdch); } if (wrch) { CHN_LOCK(wrch); wrch->flags &= ~CHN_F_NOTRIGGER; if (*arg_i & PCM_ENABLE_OUTPUT) chn_start(wrch, 1); else { chn_abort(wrch); chn_resetbuf(wrch); wrch->flags |= CHN_F_NOTRIGGER; } CHN_UNLOCK(wrch); } break; case SNDCTL_DSP_GETTRIGGER: *arg_i = 0; if (wrch) { CHN_LOCK(wrch); if (wrch->flags & CHN_F_TRIGGERED) *arg_i |= PCM_ENABLE_OUTPUT; CHN_UNLOCK(wrch); } if (rdch) { CHN_LOCK(rdch); if (rdch->flags & CHN_F_TRIGGERED) *arg_i |= PCM_ENABLE_INPUT; CHN_UNLOCK(rdch); } break; case SNDCTL_DSP_GETODELAY: if (wrch) { struct snd_dbuf *bs = wrch->bufsoft; CHN_LOCK(wrch); /* XXX abusive DMA update: chn_wrupdate(wrch); */ *arg_i = sndbuf_getready(bs); CHN_UNLOCK(wrch); } else ret = EINVAL; break; case SNDCTL_DSP_POST: if (wrch) { CHN_LOCK(wrch); wrch->flags &= ~CHN_F_NOTRIGGER; chn_start(wrch, 1); CHN_UNLOCK(wrch); } break; case SNDCTL_DSP_SETDUPLEX: /* * switch to full-duplex mode if card is in half-duplex * mode and is able to work in full-duplex mode */ PCM_LOCK(d); if (rdch && wrch && (dsp_get_flags(i_dev) & SD_F_SIMPLEX)) dsp_set_flags(i_dev, dsp_get_flags(i_dev)^SD_F_SIMPLEX); PCM_UNLOCK(d); break; /* * The following four ioctls are simple wrappers around mixer_ioctl * with no further processing. xcmd is short for "translated * command". */ case SNDCTL_DSP_GETRECVOL: if (xcmd == 0) { xcmd = SOUND_MIXER_READ_RECLEV; chn = rdch; } /* FALLTHROUGH */ case SNDCTL_DSP_SETRECVOL: if (xcmd == 0) { xcmd = SOUND_MIXER_WRITE_RECLEV; chn = rdch; } /* FALLTHROUGH */ case SNDCTL_DSP_GETPLAYVOL: if (xcmd == 0) { xcmd = SOUND_MIXER_READ_PCM; chn = wrch; } /* FALLTHROUGH */ case SNDCTL_DSP_SETPLAYVOL: if (xcmd == 0) { xcmd = SOUND_MIXER_WRITE_PCM; chn = wrch; } ret = dsp_ioctl_channel(i_dev, chn, xcmd, arg); if (ret != -1) { PCM_GIANT_EXIT(d); return (ret); } if (d->mixer_dev != NULL) { PCM_ACQUIRE_QUICK(d); ret = mixer_ioctl_cmd(d->mixer_dev, xcmd, arg, -1, td, MIXER_CMD_DIRECT); PCM_RELEASE_QUICK(d); } else ret = ENOTSUP; break; case SNDCTL_DSP_GET_RECSRC_NAMES: case SNDCTL_DSP_GET_RECSRC: case SNDCTL_DSP_SET_RECSRC: if (d->mixer_dev != NULL) { PCM_ACQUIRE_QUICK(d); ret = mixer_ioctl_cmd(d->mixer_dev, cmd, arg, -1, td, MIXER_CMD_DIRECT); PCM_RELEASE_QUICK(d); } else ret = ENOTSUP; break; /* * The following 3 ioctls aren't very useful at the moment. For * now, only a single channel is associated with a cdev (/dev/dspN * instance), so there's only a single output routing to use (i.e., * the wrch bound to this cdev). 
*/ case SNDCTL_DSP_GET_PLAYTGT_NAMES: { oss_mixer_enuminfo *ei; ei = (oss_mixer_enuminfo *)arg; ei->dev = 0; ei->ctrl = 0; ei->version = 0; /* static for now */ ei->strindex[0] = 0; if (wrch != NULL) { ei->nvalues = 1; strlcpy(ei->strings, wrch->name, sizeof(ei->strings)); } else { ei->nvalues = 0; ei->strings[0] = '\0'; } } break; case SNDCTL_DSP_GET_PLAYTGT: case SNDCTL_DSP_SET_PLAYTGT: /* yes, they are the same for now */ /* * Re: SET_PLAYTGT * OSSv4: "The value that was accepted by the device will * be returned back in the variable pointed by the * argument." */ if (wrch != NULL) *arg_i = 0; else ret = EINVAL; break; case SNDCTL_DSP_SILENCE: /* * Flush the software (pre-feed) buffer, but try to minimize playback * interruption. (I.e., record unplayed samples with intent to * restore by SNDCTL_DSP_SKIP.) Intended for application "pause" * functionality. */ if (wrch == NULL) ret = EINVAL; else { struct snd_dbuf *bs; CHN_LOCK(wrch); while (wrch->inprog != 0) cv_wait(&wrch->cv, wrch->lock); bs = wrch->bufsoft; if ((bs->shadbuf != NULL) && (sndbuf_getready(bs) > 0)) { bs->sl = sndbuf_getready(bs); sndbuf_dispose(bs, bs->shadbuf, sndbuf_getready(bs)); sndbuf_fillsilence(bs); chn_start(wrch, 0); } CHN_UNLOCK(wrch); } break; case SNDCTL_DSP_SKIP: /* * OSSv4 docs: "This ioctl call discards all unplayed samples in the * playback buffer by moving the current write position immediately * before the point where the device is currently reading the samples." */ if (wrch == NULL) ret = EINVAL; else { struct snd_dbuf *bs; CHN_LOCK(wrch); while (wrch->inprog != 0) cv_wait(&wrch->cv, wrch->lock); bs = wrch->bufsoft; if ((bs->shadbuf != NULL) && (bs->sl > 0)) { sndbuf_softreset(bs); sndbuf_acquire(bs, bs->shadbuf, bs->sl); bs->sl = 0; chn_start(wrch, 0); } CHN_UNLOCK(wrch); } break; case SNDCTL_DSP_CURRENT_OPTR: case SNDCTL_DSP_CURRENT_IPTR: /** * @note Changing formats resets the buffer counters, which differs * from the 4Front drivers. However, I don't expect this to be * much of a problem. * * @note In a test where @c CURRENT_OPTR is called immediately after write * returns, this driver is about 32K samples behind whereas * 4Front's is about 8K samples behind. Should determine source * of discrepancy, even if only out of curiosity. * * @todo Actually test SNDCTL_DSP_CURRENT_IPTR. */ chn = (cmd == SNDCTL_DSP_CURRENT_OPTR) ? wrch : rdch; if (chn == NULL) ret = EINVAL; else { struct snd_dbuf *bs; /* int tmp; */ oss_count_t *oc = (oss_count_t *)arg; CHN_LOCK(chn); bs = chn->bufsoft; #if 0 tmp = (sndbuf_getsize(b) + chn_getptr(chn) - sndbuf_gethwptr(b)) % sndbuf_getsize(b); oc->samples = (sndbuf_gettotal(b) + tmp) / sndbuf_getalign(b); oc->fifo_samples = (sndbuf_getready(b) - tmp) / sndbuf_getalign(b); #else oc->samples = sndbuf_gettotal(bs) / sndbuf_getalign(bs); oc->fifo_samples = sndbuf_getready(bs) / sndbuf_getalign(bs); #endif CHN_UNLOCK(chn); } break; case SNDCTL_DSP_HALT_OUTPUT: case SNDCTL_DSP_HALT_INPUT: chn = (cmd == SNDCTL_DSP_HALT_OUTPUT) ? wrch : rdch; if (chn == NULL) ret = EINVAL; else { CHN_LOCK(chn); chn_abort(chn); CHN_UNLOCK(chn); } break; case SNDCTL_DSP_LOW_WATER: /* * Set the number of bytes required to attract attention by * select/poll. */ if (wrch != NULL) { CHN_LOCK(wrch); wrch->lw = (*arg_i > 1) ? *arg_i : 1; CHN_UNLOCK(wrch); } if (rdch != NULL) { CHN_LOCK(rdch); rdch->lw = (*arg_i > 1) ? 
*arg_i : 1; CHN_UNLOCK(rdch); } break; case SNDCTL_DSP_GETERROR: /* * OSSv4 docs: "All errors and counters will automatically be * cleared to zeroes after the call so each call will return only * the errors that occurred after the previous invocation. ... The * play_underruns and rec_overrun fields are the only useful fields * returned by OSS 4.0." */ { audio_errinfo *ei = (audio_errinfo *)arg; bzero((void *)ei, sizeof(*ei)); if (wrch != NULL) { CHN_LOCK(wrch); ei->play_underruns = wrch->xruns; wrch->xruns = 0; CHN_UNLOCK(wrch); } if (rdch != NULL) { CHN_LOCK(rdch); ei->rec_overruns = rdch->xruns; rdch->xruns = 0; CHN_UNLOCK(rdch); } } break; case SNDCTL_DSP_SYNCGROUP: PCM_ACQUIRE_QUICK(d); ret = dsp_oss_syncgroup(wrch, rdch, (oss_syncgroup *)arg); PCM_RELEASE_QUICK(d); break; case SNDCTL_DSP_SYNCSTART: PCM_ACQUIRE_QUICK(d); ret = dsp_oss_syncstart(*arg_i); PCM_RELEASE_QUICK(d); break; case SNDCTL_DSP_POLICY: PCM_ACQUIRE_QUICK(d); ret = dsp_oss_policy(wrch, rdch, *arg_i); PCM_RELEASE_QUICK(d); break; case SNDCTL_DSP_COOKEDMODE: PCM_ACQUIRE_QUICK(d); if (!(dsp_get_flags(i_dev) & SD_F_BITPERFECT)) ret = dsp_oss_cookedmode(wrch, rdch, *arg_i); PCM_RELEASE_QUICK(d); break; case SNDCTL_DSP_GET_CHNORDER: PCM_ACQUIRE_QUICK(d); ret = dsp_oss_getchnorder(wrch, rdch, (unsigned long long *)arg); PCM_RELEASE_QUICK(d); break; case SNDCTL_DSP_SET_CHNORDER: PCM_ACQUIRE_QUICK(d); ret = dsp_oss_setchnorder(wrch, rdch, (unsigned long long *)arg); PCM_RELEASE_QUICK(d); break; case SNDCTL_DSP_GETCHANNELMASK: /* XXX vlc */ PCM_ACQUIRE_QUICK(d); ret = dsp_oss_getchannelmask(wrch, rdch, (int *)arg); PCM_RELEASE_QUICK(d); break; case SNDCTL_DSP_BIND_CHANNEL: /* XXX what?!? */ ret = EINVAL; break; #ifdef OSSV4_EXPERIMENT /* * XXX The following ioctls are not yet supported and just return * EINVAL. */ case SNDCTL_DSP_GETOPEAKS: case SNDCTL_DSP_GETIPEAKS: chn = (cmd == SNDCTL_DSP_GETOPEAKS) ? wrch : rdch; if (chn == NULL) ret = EINVAL; else { oss_peaks_t *op = (oss_peaks_t *)arg; int lpeak, rpeak; CHN_LOCK(chn); ret = chn_getpeaks(chn, &lpeak, &rpeak); if (ret == -1) ret = EINVAL; else { (*op)[0] = lpeak; (*op)[1] = rpeak; } CHN_UNLOCK(chn); } break; /* * XXX Once implemented, revisit this for proper cv protection * (if necessary). */ case SNDCTL_GETLABEL: ret = dsp_oss_getlabel(wrch, rdch, (oss_label_t *)arg); break; case SNDCTL_SETLABEL: ret = dsp_oss_setlabel(wrch, rdch, (oss_label_t *)arg); break; case SNDCTL_GETSONG: ret = dsp_oss_getsong(wrch, rdch, (oss_longname_t *)arg); break; case SNDCTL_SETSONG: ret = dsp_oss_setsong(wrch, rdch, (oss_longname_t *)arg); break; case SNDCTL_SETNAME: ret = dsp_oss_setname(wrch, rdch, (oss_longname_t *)arg); break; #if 0 /** * @note The S/PDIF interface ioctls, @c SNDCTL_DSP_READCTL and * @c SNDCTL_DSP_WRITECTL have been omitted at the suggestion of * 4Front Technologies. 
*/ case SNDCTL_DSP_READCTL: case SNDCTL_DSP_WRITECTL: ret = EINVAL; break; #endif /* !0 (explicitly omitted ioctls) */ #endif /* !OSSV4_EXPERIMENT */ case SNDCTL_DSP_MAPINBUF: case SNDCTL_DSP_MAPOUTBUF: case SNDCTL_DSP_SETSYNCRO: /* undocumented */ case SNDCTL_DSP_SUBDIVIDE: case SOUND_PCM_WRITE_FILTER: case SOUND_PCM_READ_FILTER: /* dunno what these do, don't sound important */ default: DEB(printf("default ioctl fn 0x%08lx fail\n", cmd)); ret = EINVAL; break; } PCM_GIANT_LEAVE(d); return (ret); } static int dsp_poll(struct cdev *i_dev, int events, struct thread *td) { struct snddev_info *d; struct pcm_channel *wrch, *rdch; int ret, e; d = dsp_get_info(i_dev); if (!DSP_REGISTERED(d, i_dev)) return (EBADF); PCM_GIANT_ENTER(d); wrch = NULL; rdch = NULL; ret = 0; getchns(i_dev, &rdch, &wrch, SD_F_PRIO_RD | SD_F_PRIO_WR); if (wrch != NULL && !(wrch->flags & CHN_F_DEAD)) { e = (events & (POLLOUT | POLLWRNORM)); if (e) ret |= chn_poll(wrch, e, td); } if (rdch != NULL && !(rdch->flags & CHN_F_DEAD)) { e = (events & (POLLIN | POLLRDNORM)); if (e) ret |= chn_poll(rdch, e, td); } relchns(i_dev, rdch, wrch, SD_F_PRIO_RD | SD_F_PRIO_WR); PCM_GIANT_LEAVE(d); return (ret); } static int dsp_mmap(struct cdev *i_dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr) { /* XXX memattr is not honored */ *paddr = vtophys(offset); return (0); } static int dsp_mmap_single(struct cdev *i_dev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot) { struct snddev_info *d; struct pcm_channel *wrch, *rdch, *c; /* * Reject PROT_EXEC by default. It just doesn't makes sense. * Unfortunately, we have to give up this one due to linux_mmap * changes. * - * http://lists.freebsd.org/pipermail/freebsd-emulation/2007-June/003698.html + * https://lists.freebsd.org/pipermail/freebsd-emulation/2007-June/003698.html * */ #ifdef SV_ABI_LINUX if ((nprot & PROT_EXEC) && (dsp_mmap_allow_prot_exec < 0 || (dsp_mmap_allow_prot_exec == 0 && SV_CURPROC_ABI() != SV_ABI_LINUX))) #else if ((nprot & PROT_EXEC) && dsp_mmap_allow_prot_exec < 1) #endif return (EINVAL); /* * PROT_READ (alone) selects the input buffer. * PROT_WRITE (alone) selects the output buffer. * PROT_WRITE|PROT_READ together select the output buffer. */ if ((nprot & (PROT_READ | PROT_WRITE)) == 0) return (EINVAL); d = dsp_get_info(i_dev); if (!DSP_REGISTERED(d, i_dev)) return (EINVAL); PCM_GIANT_ENTER(d); getchns(i_dev, &rdch, &wrch, SD_F_PRIO_RD | SD_F_PRIO_WR); c = ((nprot & PROT_WRITE) != 0) ? 
wrch : rdch; if (c == NULL || (c->flags & CHN_F_MMAP_INVALID) || (*offset + size) > sndbuf_getsize(c->bufsoft) || (wrch != NULL && (wrch->flags & CHN_F_MMAP_INVALID)) || (rdch != NULL && (rdch->flags & CHN_F_MMAP_INVALID))) { relchns(i_dev, rdch, wrch, SD_F_PRIO_RD | SD_F_PRIO_WR); PCM_GIANT_EXIT(d); return (EINVAL); } if (wrch != NULL) wrch->flags |= CHN_F_MMAP; if (rdch != NULL) rdch->flags |= CHN_F_MMAP; *offset = (uintptr_t)sndbuf_getbufofs(c->bufsoft, *offset); relchns(i_dev, rdch, wrch, SD_F_PRIO_RD | SD_F_PRIO_WR); *object = vm_pager_allocate(OBJT_DEVICE, i_dev, size, nprot, *offset, curthread->td_ucred); PCM_GIANT_LEAVE(d); if (*object == NULL) return (EINVAL); return (0); } /* So much for dev_stdclone() */ static int dsp_stdclone(char *name, char *namep, char *sep, int use_sep, int *u, int *c) { size_t len; len = strlen(namep); if (bcmp(name, namep, len) != 0) return (ENODEV); name += len; if (isdigit(*name) == 0) return (ENODEV); len = strlen(sep); if (*name == '0' && !(name[1] == '\0' || bcmp(name + 1, sep, len) == 0)) return (ENODEV); for (*u = 0; isdigit(*name) != 0; name++) { *u *= 10; *u += *name - '0'; if (*u > dsp_umax) return (ENODEV); } if (*name == '\0') return ((use_sep == 0) ? 0 : ENODEV); if (bcmp(name, sep, len) != 0 || isdigit(name[len]) == 0) return (ENODEV); name += len; if (*name == '0' && name[1] != '\0') return (ENODEV); for (*c = 0; isdigit(*name) != 0; name++) { *c *= 10; *c += *name - '0'; if (*c > dsp_cmax) return (ENODEV); } if (*name != '\0') return (ENODEV); return (0); } static void dsp_clone(void *arg, struct ucred *cred, char *name, int namelen, struct cdev **dev) { struct snddev_info *d; struct snd_clone_entry *ce; struct pcm_channel *c; int i, unit, udcmask, cunit, devtype, devhw, devcmax, tumax; char *devname, *devcmp, *devsep; KASSERT(dsp_umax >= 0 && dsp_cmax >= 0, ("Uninitialized unit!")); if (*dev != NULL) return; unit = -1; cunit = -1; devtype = -1; devhw = 0; devcmax = -1; tumax = -1; devname = NULL; devsep = NULL; for (i = 0; unit == -1 && i < (sizeof(dsp_cdevs) / sizeof(dsp_cdevs[0])); i++) { devtype = dsp_cdevs[i].type; devcmp = dsp_cdevs[i].name; devsep = dsp_cdevs[i].sep; devname = dsp_cdevs[i].alias; if (devname == NULL) devname = devcmp; devhw = dsp_cdevs[i].hw; devcmax = dsp_cdevs[i].max - 1; if (strcmp(name, devcmp) == 0) { if (dsp_basename_clone != 0) unit = snd_unit; } else if (dsp_stdclone(name, devcmp, devsep, dsp_cdevs[i].use_sep, &unit, &cunit) != 0) { unit = -1; cunit = -1; } } d = devclass_get_softc(pcm_devclass, unit); if (!PCM_REGISTERED(d) || d->clones == NULL) return; /* XXX Need Giant magic entry ??? */ PCM_LOCK(d); if (snd_clone_disabled(d->clones)) { PCM_UNLOCK(d); return; } PCM_WAIT(d); PCM_ACQUIRE(d); PCM_UNLOCK(d); udcmask = snd_u2unit(unit) | snd_d2unit(devtype); if (devhw != 0) { KASSERT(devcmax <= dsp_cmax, ("overflow: devcmax=%d, dsp_cmax=%d", devcmax, dsp_cmax)); if (cunit > devcmax) { PCM_RELEASE_QUICK(d); return; } udcmask |= snd_c2unit(cunit); CHN_FOREACH(c, d, channels.pcm) { CHN_LOCK(c); if (c->unit != udcmask) { CHN_UNLOCK(c); continue; } CHN_UNLOCK(c); udcmask &= ~snd_c2unit(cunit); /* * Temporarily increase clone maxunit to overcome * vchan flexibility. 
* * # sysctl dev.pcm.0.play.vchans=256 * dev.pcm.0.play.vchans: 1 -> 256 * # cat /dev/zero > /dev/dsp0.vp255 & * [1] 17296 * # sysctl dev.pcm.0.play.vchans=0 * dev.pcm.0.play.vchans: 256 -> 1 * # fg * [1] + running cat /dev/zero > /dev/dsp0.vp255 * ^C * # cat /dev/zero > /dev/dsp0.vp255 * zsh: operation not supported: /dev/dsp0.vp255 */ tumax = snd_clone_getmaxunit(d->clones); if (cunit > tumax) snd_clone_setmaxunit(d->clones, cunit); else tumax = -1; goto dsp_clone_alloc; } /* * Ok, so we're requesting unallocated vchan, but still * within maximum vchan limit. */ if (((devtype == SND_DEV_DSPHW_VPLAY && d->pvchancount > 0) || (devtype == SND_DEV_DSPHW_VREC && d->rvchancount > 0)) && cunit < snd_maxautovchans) { udcmask &= ~snd_c2unit(cunit); tumax = snd_clone_getmaxunit(d->clones); if (cunit > tumax) snd_clone_setmaxunit(d->clones, cunit); else tumax = -1; goto dsp_clone_alloc; } PCM_RELEASE_QUICK(d); return; } dsp_clone_alloc: ce = snd_clone_alloc(d->clones, dev, &cunit, udcmask); if (tumax != -1) snd_clone_setmaxunit(d->clones, tumax); if (ce != NULL) { udcmask |= snd_c2unit(cunit); *dev = make_dev(&dsp_cdevsw, PCMMINOR(udcmask), UID_ROOT, GID_WHEEL, 0666, "%s%d%s%d", devname, unit, devsep, cunit); snd_clone_register(ce, *dev); } PCM_RELEASE_QUICK(d); if (*dev != NULL) dev_ref(*dev); } static void dsp_sysinit(void *p) { if (dsp_ehtag != NULL) return; /* initialize unit numbering */ snd_unit_init(); dsp_umax = PCMMAXUNIT; dsp_cmax = PCMMAXCHAN; dsp_ehtag = EVENTHANDLER_REGISTER(dev_clone, dsp_clone, 0, 1000); } static void dsp_sysuninit(void *p) { if (dsp_ehtag == NULL) return; EVENTHANDLER_DEREGISTER(dev_clone, dsp_ehtag); dsp_ehtag = NULL; } SYSINIT(dsp_sysinit, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, dsp_sysinit, NULL); SYSUNINIT(dsp_sysuninit, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, dsp_sysuninit, NULL); char * dsp_unit2name(char *buf, size_t len, int unit) { int i, dtype; KASSERT(buf != NULL && len != 0, ("bogus buf=%p len=%ju", buf, (uintmax_t)len)); dtype = snd_unit2d(unit); for (i = 0; i < (sizeof(dsp_cdevs) / sizeof(dsp_cdevs[0])); i++) { if (dtype != dsp_cdevs[i].type || dsp_cdevs[i].alias != NULL) continue; snprintf(buf, len, "%s%d%s%d", dsp_cdevs[i].name, snd_unit2u(unit), dsp_cdevs[i].sep, snd_unit2c(unit)); return (buf); } return (NULL); } /** * @brief Handler for SNDCTL_AUDIOINFO. * * Gathers information about the audio device specified in ai->dev. If * ai->dev == -1, then this function gathers information about the current * device. If the call comes in on a non-audio device and ai->dev == -1, * return EINVAL. * * This routine is supposed to go practically straight to the hardware, * getting capabilities directly from the sound card driver, side-stepping * the intermediate channel interface. * * Note, however, that the usefulness of this command is significantly * decreased when requesting info about any device other than the one serving * the request. While each snddev_channel refers to a specific device node, * the converse is *not* true. Currently, when a sound device node is opened, * the sound subsystem scans for an available audio channel (or channels, if * opened in read+write) and then assigns them to the si_drv[12] private * data fields. As a result, any information returned linking a channel to * a specific character device isn't necessarily accurate. * * @note * Calling threads must not hold any snddev_info or pcm_channel locks. 
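 *
 * @par
 * A minimal userland sketch of the expected calling convention, as a
 * hedged illustration only (hypothetical descriptor "fd" already opened
 * on a dsp device, <sys/soundcard.h> included, error handling omitted):
 *
 *	oss_audioinfo ai;
 *	ai.dev = -1;	// -1 = describe the device behind "fd" itself
 *	if (ioctl(fd, SNDCTL_AUDIOINFO, &ai) == 0)
 *		printf("%s: %d..%d Hz, %d..%d channels, node %s\n",
 *		    ai.name, ai.min_rate, ai.max_rate,
 *		    ai.min_channels, ai.max_channels, ai.devnode);
 *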
* * @param dev device on which the ioctl was issued * @param ai ioctl request data container * * @retval 0 success * @retval EINVAL ai->dev specifies an invalid device * * @todo Verify correctness of Doxygen tags. ;) */ int dsp_oss_audioinfo(struct cdev *i_dev, oss_audioinfo *ai) { struct pcmchan_caps *caps; struct pcm_channel *ch; struct snddev_info *d; uint32_t fmts; int i, nchan, *rates, minch, maxch; char *devname, buf[CHN_NAMELEN]; /* * If probing the device that received the ioctl, make sure it's a * DSP device. (Users may use this ioctl with /dev/mixer and * /dev/midi.) */ if (ai->dev == -1 && i_dev->si_devsw != &dsp_cdevsw) return (EINVAL); ch = NULL; devname = NULL; nchan = 0; bzero(buf, sizeof(buf)); /* * Search for the requested audio device (channel). Start by * iterating over pcm devices. */ for (i = 0; pcm_devclass != NULL && i < devclass_get_maxunit(pcm_devclass); i++) { d = devclass_get_softc(pcm_devclass, i); if (!PCM_REGISTERED(d)) continue; /* XXX Need Giant magic entry ??? */ /* See the note in function docblock */ PCM_UNLOCKASSERT(d); PCM_LOCK(d); CHN_FOREACH(ch, d, channels.pcm) { CHN_UNLOCKASSERT(ch); CHN_LOCK(ch); if (ai->dev == -1) { if (DSP_REGISTERED(d, i_dev) && (ch == PCM_RDCH(i_dev) || /* record ch */ ch == PCM_WRCH(i_dev))) { /* playback ch */ devname = dsp_unit2name(buf, sizeof(buf), ch->unit); } } else if (ai->dev == nchan) { devname = dsp_unit2name(buf, sizeof(buf), ch->unit); } if (devname != NULL) break; CHN_UNLOCK(ch); ++nchan; } if (devname != NULL) { /* * At this point, the following synchronization stuff * has happened: * - a specific PCM device is locked. * - a specific audio channel has been locked, so be * sure to unlock when exiting; */ caps = chn_getcaps(ch); /* * With all handles collected, zero out the user's * container and begin filling in its fields. */ bzero((void *)ai, sizeof(oss_audioinfo)); ai->dev = nchan; strlcpy(ai->name, ch->name, sizeof(ai->name)); if ((ch->flags & CHN_F_BUSY) == 0) ai->busy = 0; else ai->busy = (ch->direction == PCMDIR_PLAY) ? OPEN_WRITE : OPEN_READ; /** * @note * @c cmd - OSSv4 docs: "Only supported under Linux at * this moment." Cop-out, I know, but I'll save * running around in the process table for later. * Is there a risk of leaking information? */ ai->pid = ch->pid; /* * These flags stolen from SNDCTL_DSP_GETCAPS handler. * Note, however, that a single channel operates in * only one direction, so PCM_CAP_DUPLEX is out. */ /** * @todo @c SNDCTL_AUDIOINFO::caps - Make drivers keep * these in pcmchan::caps? */ ai->caps = PCM_CAP_REALTIME | PCM_CAP_MMAP | PCM_CAP_TRIGGER | ((ch->direction == PCMDIR_PLAY) ? PCM_CAP_OUTPUT : PCM_CAP_INPUT); /* * Collect formats supported @b natively by the * device. Also determine min/max channels. (I.e., * mono, stereo, or both?) * * If any channel is stereo, maxch = 2; * if all channels are stereo, minch = 2, too; * if any channel is mono, minch = 1; * and if all channels are mono, maxch = 1. */ minch = 0; maxch = 0; fmts = 0; for (i = 0; caps->fmtlist[i]; i++) { fmts |= caps->fmtlist[i]; if (AFMT_CHANNEL(caps->fmtlist[i]) > 1) { minch = (minch == 0) ? 2 : minch; maxch = 2; } else { minch = 1; maxch = (maxch == 0) ? 1 : maxch; } } if (ch->direction == PCMDIR_PLAY) ai->oformats = fmts; else ai->iformats = fmts; /** * @note * @c magic - OSSv4 docs: "Reserved for internal use * by OSS." * * @par * @c card_number - OSSv4 docs: "Number of the sound * card where this device belongs or -1 if this * information is not available. 
Applications * should normally not use this field for any * purpose." */ ai->card_number = -1; /** * @todo @c song_name - depends first on * SNDCTL_[GS]ETSONG @todo @c label - depends * on SNDCTL_[GS]ETLABEL * @todo @c port_number - routing information? */ ai->port_number = -1; ai->mixer_dev = (d->mixer_dev != NULL) ? PCMUNIT(d->mixer_dev) : -1; /** * @note * @c real_device - OSSv4 docs: "Obsolete." */ ai->real_device = -1; strlcpy(ai->devnode, "/dev/", sizeof(ai->devnode)); strlcat(ai->devnode, devname, sizeof(ai->devnode)); ai->enabled = device_is_attached(d->dev) ? 1 : 0; /** * @note * @c flags - OSSv4 docs: "Reserved for future use." * * @note * @c binding - OSSv4 docs: "Reserved for future use." * * @todo @c handle - haven't decided how to generate * this yet; bus, vendor, device IDs? */ ai->min_rate = caps->minspeed; ai->max_rate = caps->maxspeed; ai->min_channels = minch; ai->max_channels = maxch; ai->nrates = chn_getrates(ch, &rates); if (ai->nrates > OSS_MAX_SAMPLE_RATES) ai->nrates = OSS_MAX_SAMPLE_RATES; for (i = 0; i < ai->nrates; i++) ai->rates[i] = rates[i]; ai->next_play_engine = 0; ai->next_rec_engine = 0; CHN_UNLOCK(ch); } PCM_UNLOCK(d); if (devname != NULL) return (0); } /* Exhausted the search -- nothing is locked, so return. */ return (EINVAL); } /** * @brief Assigns a PCM channel to a sync group. * * Sync groups are used to enable audio operations on multiple devices * simultaneously. They may be used with any number of devices and may * span across applications. Devices are added to groups with * the SNDCTL_DSP_SYNCGROUP ioctl, and operations are triggered with the * SNDCTL_DSP_SYNCSTART ioctl. * * If the @c id field of the @c group parameter is set to zero, then a new * sync group is created. Otherwise, wrch and rdch (if set) are added to * the group specified. * * @todo As far as memory allocation, should we assume that things are * okay and allocate with M_WAITOK before acquiring channel locks, * freeing later if not? * * @param wrch output channel associated w/ device (if any) * @param rdch input channel associated w/ device (if any) * @param group Sync group parameters * * @retval 0 success * @retval non-zero error to be propagated upstream */ static int dsp_oss_syncgroup(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_syncgroup *group) { struct pcmchan_syncmember *smrd, *smwr; struct pcmchan_syncgroup *sg; int ret, sg_ids[3]; smrd = NULL; smwr = NULL; sg = NULL; ret = 0; /* * Free_unr() may sleep, so store released syncgroup IDs until after * all locks are released. */ sg_ids[0] = sg_ids[1] = sg_ids[2] = 0; PCM_SG_LOCK(); /* * - Insert channel(s) into group's member list. * - Set CHN_F_NOTRIGGER on channel(s). * - Stop channel(s). */ /* * If device's channels are already mapped to a group, unmap them. */ if (wrch) { CHN_LOCK(wrch); sg_ids[0] = chn_syncdestroy(wrch); } if (rdch) { CHN_LOCK(rdch); sg_ids[1] = chn_syncdestroy(rdch); } /* * Verify that mode matches character device properites. * - Bail if PCM_ENABLE_OUTPUT && wrch == NULL. * - Bail if PCM_ENABLE_INPUT && rdch == NULL. */ if (((wrch == NULL) && (group->mode & PCM_ENABLE_OUTPUT)) || ((rdch == NULL) && (group->mode & PCM_ENABLE_INPUT))) { ret = EINVAL; goto out; } /* * An id of zero indicates the user wants to create a new * syncgroup. 
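 *
 * A minimal userland sketch of the intended usage, as a hedged
 * illustration only (hypothetical playback descriptors "fd1" and "fd2",
 * <sys/soundcard.h> included, error handling omitted):
 *
 *	oss_syncgroup grp;
 *	grp.id = 0;		// 0 = ask the driver to allocate a new group
 *	grp.mode = PCM_ENABLE_OUTPUT;
 *	ioctl(fd1, SNDCTL_DSP_SYNCGROUP, &grp);	// creates group, id returned in grp.id
 *	ioctl(fd2, SNDCTL_DSP_SYNCGROUP, &grp);	// second device joins the same group
 *	ioctl(fd1, SNDCTL_DSP_SYNCSTART, &grp.id);	// trigger every member at once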
*/ if (group->id == 0) { sg = (struct pcmchan_syncgroup *)malloc(sizeof(*sg), M_DEVBUF, M_NOWAIT); if (sg != NULL) { SLIST_INIT(&sg->members); sg->id = alloc_unr(pcmsg_unrhdr); group->id = sg->id; SLIST_INSERT_HEAD(&snd_pcm_syncgroups, sg, link); } else ret = ENOMEM; } else { SLIST_FOREACH(sg, &snd_pcm_syncgroups, link) { if (sg->id == group->id) break; } if (sg == NULL) ret = EINVAL; } /* Couldn't create or find a syncgroup. Fail. */ if (sg == NULL) goto out; /* * Allocate a syncmember, assign it and a channel together, and * insert into syncgroup. */ if (group->mode & PCM_ENABLE_INPUT) { smrd = (struct pcmchan_syncmember *)malloc(sizeof(*smrd), M_DEVBUF, M_NOWAIT); if (smrd == NULL) { ret = ENOMEM; goto out; } SLIST_INSERT_HEAD(&sg->members, smrd, link); smrd->parent = sg; smrd->ch = rdch; chn_abort(rdch); rdch->flags |= CHN_F_NOTRIGGER; rdch->sm = smrd; } if (group->mode & PCM_ENABLE_OUTPUT) { smwr = (struct pcmchan_syncmember *)malloc(sizeof(*smwr), M_DEVBUF, M_NOWAIT); if (smwr == NULL) { ret = ENOMEM; goto out; } SLIST_INSERT_HEAD(&sg->members, smwr, link); smwr->parent = sg; smwr->ch = wrch; chn_abort(wrch); wrch->flags |= CHN_F_NOTRIGGER; wrch->sm = smwr; } out: if (ret != 0) { if (smrd != NULL) free(smrd, M_DEVBUF); if ((sg != NULL) && SLIST_EMPTY(&sg->members)) { sg_ids[2] = sg->id; SLIST_REMOVE(&snd_pcm_syncgroups, sg, pcmchan_syncgroup, link); free(sg, M_DEVBUF); } if (wrch) wrch->sm = NULL; if (rdch) rdch->sm = NULL; } if (wrch) CHN_UNLOCK(wrch); if (rdch) CHN_UNLOCK(rdch); PCM_SG_UNLOCK(); if (sg_ids[0]) free_unr(pcmsg_unrhdr, sg_ids[0]); if (sg_ids[1]) free_unr(pcmsg_unrhdr, sg_ids[1]); if (sg_ids[2]) free_unr(pcmsg_unrhdr, sg_ids[2]); return (ret); } /** * @brief Launch a sync group into action * * Sync groups are established via SNDCTL_DSP_SYNCGROUP. This function * iterates over all members, triggering them along the way. * * @note Caller must not hold any channel locks. * * @param sg_id sync group identifier * * @retval 0 success * @retval non-zero error worthy of propagating upstream to user */ static int dsp_oss_syncstart(int sg_id) { struct pcmchan_syncmember *sm, *sm_tmp; struct pcmchan_syncgroup *sg; struct pcm_channel *c; int ret, needlocks; /* Get the synclists lock */ PCM_SG_LOCK(); do { ret = 0; needlocks = 0; /* Search for syncgroup by ID */ SLIST_FOREACH(sg, &snd_pcm_syncgroups, link) { if (sg->id == sg_id) break; } /* Return EINVAL if not found */ if (sg == NULL) { ret = EINVAL; break; } /* Any removals resulting in an empty group should've handled this */ KASSERT(!SLIST_EMPTY(&sg->members), ("found empty syncgroup")); /* * Attempt to lock all member channels - if any are already * locked, unlock those acquired, sleep for a bit, and try * again. */ SLIST_FOREACH(sm, &sg->members, link) { if (CHN_TRYLOCK(sm->ch) == 0) { int timo = hz * 5/1000; if (timo < 1) timo = 1; /* Release all locked channels so far, retry */ SLIST_FOREACH(sm_tmp, &sg->members, link) { /* sm is the member already locked */ if (sm == sm_tmp) break; CHN_UNLOCK(sm_tmp->ch); } /** @todo Is PRIBIO correct/ */ ret = msleep(sm, &snd_pcm_syncgroups_mtx, PRIBIO | PCATCH, "pcmsg", timo); if (ret == EINTR || ret == ERESTART) break; needlocks = 1; ret = 0; /* Assumes ret == EAGAIN... */ } } } while (needlocks && ret == 0); /* Proceed only if no errors encountered. 
*/ if (ret == 0) { /* Launch channels */ while ((sm = SLIST_FIRST(&sg->members)) != NULL) { SLIST_REMOVE_HEAD(&sg->members, link); c = sm->ch; c->sm = NULL; chn_start(c, 1); c->flags &= ~CHN_F_NOTRIGGER; CHN_UNLOCK(c); free(sm, M_DEVBUF); } SLIST_REMOVE(&snd_pcm_syncgroups, sg, pcmchan_syncgroup, link); free(sg, M_DEVBUF); } PCM_SG_UNLOCK(); /* * Free_unr() may sleep, so be sure to give up the syncgroup lock * first. */ if (ret == 0) free_unr(pcmsg_unrhdr, sg_id); return (ret); } /** * @brief Handler for SNDCTL_DSP_POLICY * * The SNDCTL_DSP_POLICY ioctl is a simpler interface to control fragment * size and count like with SNDCTL_DSP_SETFRAGMENT. Instead of the user * specifying those two parameters, s/he simply selects a number from 0..10 * which corresponds to a buffer size. Smaller numbers request smaller * buffers with lower latencies (at greater overhead from more frequent * interrupts), while greater numbers behave in the opposite manner. * * The 4Front spec states that a value of 5 should be the default. However, * this implementation deviates slightly by using a linear scale without * consulting drivers. I.e., even though drivers may have different default * buffer sizes, a policy argument of 5 will have the same result across * all drivers. * * See http://manuals.opensound.com/developer/SNDCTL_DSP_POLICY.html for * more information. * * @todo When SNDCTL_DSP_COOKEDMODE is supported, it'll be necessary to * work with hardware drivers directly. * * @note PCM channel arguments must not be locked by caller. * * @param wrch Pointer to opened playback channel (optional; may be NULL) * @param rdch " recording channel (optional; may be NULL) * @param policy Integer from [0:10] * * @retval 0 constant (for now) */ static int dsp_oss_policy(struct pcm_channel *wrch, struct pcm_channel *rdch, int policy) { int ret; if (policy < CHN_POLICY_MIN || policy > CHN_POLICY_MAX) return (EIO); /* Default: success */ ret = 0; if (rdch) { CHN_LOCK(rdch); ret = chn_setlatency(rdch, policy); CHN_UNLOCK(rdch); } if (wrch && ret == 0) { CHN_LOCK(wrch); ret = chn_setlatency(wrch, policy); CHN_UNLOCK(wrch); } if (ret) ret = EIO; return (ret); } /** * @brief Enable or disable "cooked" mode * * This is a handler for @c SNDCTL_DSP_COOKEDMODE. When in cooked mode, which * is the default, the sound system handles rate and format conversions * automatically (ex: user writing 11025Hz/8 bit/unsigned but card only * operates with 44100Hz/16bit/signed samples). * * Disabling cooked mode is intended for applications wanting to mmap() * a sound card's buffer space directly, bypassing the FreeBSD 2-stage * feeder architecture, presumably to gain as much control over audio * hardware as possible. * * See @c http://manuals.opensound.com/developer/SNDCTL_DSP_COOKEDMODE.html * for more details. * * @param wrch playback channel (optional; may be NULL) * @param rdch recording channel (optional; may be NULL) * @param enabled 0 = raw mode, 1 = cooked mode * * @retval EINVAL Operation not yet supported. */ static int dsp_oss_cookedmode(struct pcm_channel *wrch, struct pcm_channel *rdch, int enabled) { /* * XXX I just don't get it. Why don't they call it * "BITPERFECT" ~ SNDCTL_DSP_BITPERFECT !?!?. * This is just plain so confusing, incoherent, * . */ if (!(enabled == 1 || enabled == 0)) return (EINVAL); /* * I won't give in. I'm inverting its logic here and now. * Brag all you want, but "BITPERFECT" should be the better * term here. 
	 */
*/ enabled ^= 0x00000001; if (wrch != NULL) { CHN_LOCK(wrch); wrch->flags &= ~CHN_F_BITPERFECT; wrch->flags |= (enabled != 0) ? CHN_F_BITPERFECT : 0x00000000; CHN_UNLOCK(wrch); } if (rdch != NULL) { CHN_LOCK(rdch); rdch->flags &= ~CHN_F_BITPERFECT; rdch->flags |= (enabled != 0) ? CHN_F_BITPERFECT : 0x00000000; CHN_UNLOCK(rdch); } return (0); } /** * @brief Retrieve channel interleaving order * * This is the handler for @c SNDCTL_DSP_GET_CHNORDER. * * See @c http://manuals.opensound.com/developer/SNDCTL_DSP_GET_CHNORDER.html * for more details. * * @note As the ioctl definition is still under construction, FreeBSD * does not currently support SNDCTL_DSP_GET_CHNORDER. * * @param wrch playback channel (optional; may be NULL) * @param rdch recording channel (optional; may be NULL) * @param map channel map (result will be stored there) * * @retval EINVAL Operation not yet supported. */ static int dsp_oss_getchnorder(struct pcm_channel *wrch, struct pcm_channel *rdch, unsigned long long *map) { struct pcm_channel *ch; int ret; ch = (wrch != NULL) ? wrch : rdch; if (ch != NULL) { CHN_LOCK(ch); ret = chn_oss_getorder(ch, map); CHN_UNLOCK(ch); } else ret = EINVAL; return (ret); } /** * @brief Specify channel interleaving order * * This is the handler for @c SNDCTL_DSP_SET_CHNORDER. * * @note As the ioctl definition is still under construction, FreeBSD * does not currently support @c SNDCTL_DSP_SET_CHNORDER. * * @param wrch playback channel (optional; may be NULL) * @param rdch recording channel (optional; may be NULL) * @param map channel map * * @retval EINVAL Operation not yet supported. */ static int dsp_oss_setchnorder(struct pcm_channel *wrch, struct pcm_channel *rdch, unsigned long long *map) { int ret; ret = 0; if (wrch != NULL) { CHN_LOCK(wrch); ret = chn_oss_setorder(wrch, map); CHN_UNLOCK(wrch); } if (ret == 0 && rdch != NULL) { CHN_LOCK(rdch); ret = chn_oss_setorder(rdch, map); CHN_UNLOCK(rdch); } return (ret); } static int dsp_oss_getchannelmask(struct pcm_channel *wrch, struct pcm_channel *rdch, int *mask) { struct pcm_channel *ch; uint32_t chnmask; int ret; chnmask = 0; ch = (wrch != NULL) ? wrch : rdch; if (ch != NULL) { CHN_LOCK(ch); ret = chn_oss_getmask(ch, &chnmask); CHN_UNLOCK(ch); } else ret = EINVAL; if (ret == 0) *mask = chnmask; return (ret); } #ifdef OSSV4_EXPERIMENT /** * @brief Retrieve an audio device's label * * This is a handler for the @c SNDCTL_GETLABEL ioctl. * * See @c http://manuals.opensound.com/developer/SNDCTL_GETLABEL.html * for more details. * * From Hannu@4Front: "For example ossxmix (just like some HW mixer * consoles) can show variable "labels" for certain controls. By default * the application name (say quake) is shown as the label but * applications may change the labels themselves." * * @note As the ioctl definition is still under construction, FreeBSD * does not currently support @c SNDCTL_GETLABEL. * * @param wrch playback channel (optional; may be NULL) * @param rdch recording channel (optional; may be NULL) * @param label label gets copied here * * @retval EINVAL Operation not yet supported. */ static int dsp_oss_getlabel(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_label_t *label) { return (EINVAL); } /** * @brief Specify an audio device's label * * This is a handler for the @c SNDCTL_SETLABEL ioctl. Please see the * comments for @c dsp_oss_getlabel immediately above. * * See @c http://manuals.opensound.com/developer/SNDCTL_GETLABEL.html * for more details. 
* * @note As the ioctl definition is still under construction, FreeBSD * does not currently support SNDCTL_SETLABEL. * * @param wrch playback channel (optional; may be NULL) * @param rdch recording channel (optional; may be NULL) * @param label label gets copied from here * * @retval EINVAL Operation not yet supported. */ static int dsp_oss_setlabel(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_label_t *label) { return (EINVAL); } /** * @brief Retrieve name of currently played song * * This is a handler for the @c SNDCTL_GETSONG ioctl. Audio players could * tell the system the name of the currently playing song, which would be * visible in @c /dev/sndstat. * * See @c http://manuals.opensound.com/developer/SNDCTL_GETSONG.html * for more details. * * @note As the ioctl definition is still under construction, FreeBSD * does not currently support SNDCTL_GETSONG. * * @param wrch playback channel (optional; may be NULL) * @param rdch recording channel (optional; may be NULL) * @param song song name gets copied here * * @retval EINVAL Operation not yet supported. */ static int dsp_oss_getsong(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_longname_t *song) { return (EINVAL); } /** * @brief Retrieve name of currently played song * * This is a handler for the @c SNDCTL_SETSONG ioctl. Audio players could * tell the system the name of the currently playing song, which would be * visible in @c /dev/sndstat. * * See @c http://manuals.opensound.com/developer/SNDCTL_SETSONG.html * for more details. * * @note As the ioctl definition is still under construction, FreeBSD * does not currently support SNDCTL_SETSONG. * * @param wrch playback channel (optional; may be NULL) * @param rdch recording channel (optional; may be NULL) * @param song song name gets copied from here * * @retval EINVAL Operation not yet supported. */ static int dsp_oss_setsong(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_longname_t *song) { return (EINVAL); } /** * @brief Rename a device * * This is a handler for the @c SNDCTL_SETNAME ioctl. * * See @c http://manuals.opensound.com/developer/SNDCTL_SETNAME.html for * more details. * * From Hannu@4Front: "This call is used to change the device name * reported in /dev/sndstat and ossinfo. So instead of using some generic * 'OSS loopback audio (MIDI) driver' the device may be given a meaningfull * name depending on the current context (for example 'OSS virtual wave table * synth' or 'VoIP link to London')." * * @note As the ioctl definition is still under construction, FreeBSD * does not currently support SNDCTL_SETNAME. * * @param wrch playback channel (optional; may be NULL) * @param rdch recording channel (optional; may be NULL) * @param name new device name gets copied from here * * @retval EINVAL Operation not yet supported. */ static int dsp_oss_setname(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_longname_t *name) { return (EINVAL); } #endif /* !OSSV4_EXPERIMENT */ Index: head/sys/fs/nfsclient/nfs_clstate.c =================================================================== --- head/sys/fs/nfsclient/nfs_clstate.c (revision 336756) +++ head/sys/fs/nfsclient/nfs_clstate.c (revision 336757) @@ -1,5459 +1,5458 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Rick Macklem, University of Guelph * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * These functions implement the client side state handling for NFSv4. * NFSv4 state handling: * - A lockowner is used to determine lock contention, so it * corresponds directly to a Posix pid. (1 to 1 mapping) * - The correct granularity of an OpenOwner is not nearly so * obvious. An OpenOwner does the following: * - provides a serial sequencing of Open/Close/Lock-with-new-lockowner * - is used to check for Open/Share contention (not applicable to * this client, since all Opens are Deny_None) * As such, I considered both extreme. * 1 OpenOwner per ClientID - Simple to manage, but fully serializes * all Open, Close and Lock (with a new lockowner) Ops. * 1 OpenOwner for each Open - This one results in an OpenConfirm for * every Open, for most servers. * So, I chose to use the same mapping as I did for LockOwnwers. * The main concern here is that you can end up with multiple Opens * for the same File Handle, but on different OpenOwners (opens * inherited from parents, grandparents...) and you do not know * which of these the vnodeop close applies to. This is handled by * delaying the Close Op(s) until all of the Opens have been closed. * (It is not yet obvious if this is the correct granularity.) * - How the code handles serialization: * - For the ClientId, it uses an exclusive lock while getting its * SetClientId and during recovery. Otherwise, it uses a shared * lock via a reference count. * - For the rest of the data structures, it uses an SMP mutex * (once the nfs client is SMP safe) and doesn't sleep while * manipulating the linked lists. * - The serialization of Open/Close/Lock/LockU falls out in the * "wash", since OpenOwners and LockOwners are both mapped from * Posix pid. In other words, there is only one Posix pid using * any given owner, so that owner is serialized. (If you change * the granularity of the OpenOwner, then code must be added to * serialize Ops on the OpenOwner.) * - When to get rid of OpenOwners and LockOwners. * - The function nfscl_cleanup_common() is executed after a process exits. * It goes through the client list looking for all Open and Lock Owners. * When one is found, it is marked "defunct" or in the case of * an OpenOwner without any Opens, freed. * The renew thread scans for defunct Owners and gets rid of them, * if it can. The LockOwners will also be deleted when the * associated Open is closed. 
* - If the LockU or Close Op(s) fail during close in a way * that could be recovered upon retry, they are relinked to the * ClientId's defunct open list and retried by the renew thread * until they succeed or an unmount/recovery occurs. * (Since we are done with them, they do not need to be recovered.) */ #ifndef APPLEKEXT #include /* * Global variables */ extern struct nfsstatsv1 nfsstatsv1; extern struct nfsreqhead nfsd_reqq; extern u_int32_t newnfs_false, newnfs_true; extern int nfscl_debuglevel; extern int nfscl_enablecallb; extern int nfs_numnfscbd; NFSREQSPINLOCK; NFSCLSTATEMUTEX; int nfscl_inited = 0; struct nfsclhead nfsclhead; /* Head of clientid list */ int nfscl_deleghighwater = NFSCLDELEGHIGHWATER; int nfscl_layouthighwater = NFSCLLAYOUTHIGHWATER; #endif /* !APPLEKEXT */ static int nfscl_delegcnt = 0; static int nfscl_layoutcnt = 0; static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *, u_int8_t *, u_int32_t, struct nfscllockowner **, struct nfsclopen **); static void nfscl_clrelease(struct nfsclclient *); static void nfscl_cleanclient(struct nfsclclient *); static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *, struct ucred *, NFSPROC_T *); static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *, struct nfsmount *, struct ucred *, NFSPROC_T *); static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *); static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *, struct nfscllock *, int); static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **, struct nfscllock **, int); static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *); static u_int32_t nfscl_nextcbident(void); static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **); static struct nfsclclient *nfscl_getclnt(u_int32_t); static struct nfsclclient *nfscl_getclntsess(uint8_t *); static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *, int); static void nfscl_retoncloselayout(vnode_t, struct nfsclclient *, uint8_t *, int, struct nfsclrecalllayout **); static void nfscl_reldevinfo_locked(struct nfscldevinfo *); static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *, int); static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *); static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *, u_int8_t *, struct nfscllock **); static void nfscl_freealllocks(struct nfscllockownerhead *, int); static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int, struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **); static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *, struct nfsclowner **, struct nfsclowner **, struct nfsclopen **, struct nfsclopen **, u_int8_t *, u_int8_t *, int, struct ucred *, int *); static int nfscl_moveopen(vnode_t , struct nfsclclient *, struct nfsmount *, struct nfsclopen *, struct nfsclowner *, struct nfscldeleg *, struct ucred *, NFSPROC_T *); static void nfscl_totalrecall(struct nfsclclient *); static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *, struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *); static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int, u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int, struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *); static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *, int, struct nfscllockowner *, int, int, u_int64_t, 
u_int64_t, short, struct ucred *, NFSPROC_T *); static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t, struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *); static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *); static int nfscl_errmap(struct nfsrv_descript *, u_int32_t); static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *); static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *, struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int); static void nfscl_freeopenowner(struct nfsclowner *, int); static void nfscl_cleandeleg(struct nfscldeleg *); static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *, struct nfsmount *, NFSPROC_T *); static void nfscl_emptylockowner(struct nfscllockowner *, struct nfscllockownerfhhead *); static void nfscl_mergeflayouts(struct nfsclflayouthead *, struct nfsclflayouthead *); static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t, uint64_t, uint32_t, uint32_t, uint32_t, char *, struct nfsclrecalllayout *); static int nfscl_seq(uint32_t, uint32_t); static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *, struct ucred *, NFSPROC_T *); static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *, struct ucred *, NFSPROC_T *); static short nfscberr_null[] = { 0, 0, }; static short nfscberr_getattr[] = { NFSERR_RESOURCE, NFSERR_BADHANDLE, NFSERR_BADXDR, NFSERR_RESOURCE, NFSERR_SERVERFAULT, 0, }; static short nfscberr_recall[] = { NFSERR_RESOURCE, NFSERR_BADHANDLE, NFSERR_BADSTATEID, NFSERR_BADXDR, NFSERR_RESOURCE, NFSERR_SERVERFAULT, 0, }; static short *nfscl_cberrmap[] = { nfscberr_null, nfscberr_null, nfscberr_null, nfscberr_getattr, nfscberr_recall }; #define NETFAMILY(clp) \ (((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET) /* * Called for an open operation. * If the nfhp argument is NULL, just get an openowner. */ APPLESTATIC int nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg, struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp, struct nfsclopen **opp, int *newonep, int *retp, int lockit) { struct nfsclclient *clp; struct nfsclowner *owp, *nowp; struct nfsclopen *op = NULL, *nop = NULL; struct nfscldeleg *dp; struct nfsclownerhead *ohp; u_int8_t own[NFSV4CL_LOCKNAMELEN]; int ret; if (newonep != NULL) *newonep = 0; if (opp != NULL) *opp = NULL; if (owpp != NULL) *owpp = NULL; /* * Might need one or both of these, so MALLOC them now, to * avoid a tsleep() in MALLOC later. */ nowp = malloc(sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK); if (nfhp != NULL) nop = malloc(sizeof (struct nfsclopen) + fhlen - 1, M_NFSCLOPEN, M_WAITOK); ret = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp); if (ret != 0) { free(nowp, M_NFSCLOWNER); if (nop != NULL) free(nop, M_NFSCLOPEN); return (ret); } /* * Get the Open iff it already exists. * If none found, add the new one or return error, depending upon * "create". */ NFSLOCKCLSTATE(); dp = NULL; /* First check the delegation list */ if (nfhp != NULL && usedeleg) { LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) { if (dp->nfsdl_fhlen == fhlen && !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) { if (!(amode & NFSV4OPEN_ACCESSWRITE) || (dp->nfsdl_flags & NFSCLDL_WRITE)) break; dp = NULL; break; } } } if (dp != NULL) { nfscl_filllockowner(p->td_proc, own, F_POSIX); ohp = &dp->nfsdl_owner; } else { /* For NFSv4.1 and this option, use a single open_owner. 
*/ if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp)))) nfscl_filllockowner(NULL, own, F_POSIX); else nfscl_filllockowner(p->td_proc, own, F_POSIX); ohp = &clp->nfsc_owner; } /* Now, search for an openowner */ LIST_FOREACH(owp, ohp, nfsow_list) { if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN)) break; } /* * Create a new open, as required. */ nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen, cred, newonep); /* * Now, check the mode on the open and return the appropriate * value. */ if (retp != NULL) { if (nfhp != NULL && dp != NULL && nop == NULL) /* new local open on delegation */ *retp = NFSCLOPEN_SETCRED; else *retp = NFSCLOPEN_OK; } if (op != NULL && (amode & ~(op->nfso_mode))) { op->nfso_mode |= amode; if (retp != NULL && dp == NULL) *retp = NFSCLOPEN_DOOPEN; } /* * Serialize modifications to the open owner for multiple threads * within the same process using a read/write sleep lock. * For NFSv4.1 and a single OpenOwner, allow concurrent open operations * by acquiring a shared lock. The close operations still use an * exclusive lock for this case. */ if (lockit != 0) { if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp)))) { /* * Get a shared lock on the OpenOwner, but first * wait for any pending exclusive lock, so that the * exclusive locker gets priority. */ nfsv4_lock(&owp->nfsow_rwlock, 0, NULL, NFSCLSTATEMUTEXPTR, NULL); nfsv4_getref(&owp->nfsow_rwlock, NULL, NFSCLSTATEMUTEXPTR, NULL); } else nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR); } NFSUNLOCKCLSTATE(); if (nowp != NULL) free(nowp, M_NFSCLOWNER); if (nop != NULL) free(nop, M_NFSCLOPEN); if (owpp != NULL) *owpp = owp; if (opp != NULL) *opp = op; return (0); } /* * Create a new open, as required. */ static void nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp, struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp, struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen, struct ucred *cred, int *newonep) { struct nfsclowner *owp = *owpp, *nowp; struct nfsclopen *op, *nop; if (nowpp != NULL) nowp = *nowpp; else nowp = NULL; if (nopp != NULL) nop = *nopp; else nop = NULL; if (owp == NULL && nowp != NULL) { NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN); LIST_INIT(&nowp->nfsow_open); nowp->nfsow_clp = clp; nowp->nfsow_seqid = 0; nowp->nfsow_defunct = 0; nfscl_lockinit(&nowp->nfsow_rwlock); if (dp != NULL) { nfsstatsv1.cllocalopenowners++; LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list); } else { nfsstatsv1.clopenowners++; LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list); } owp = *owpp = nowp; *nowpp = NULL; if (newonep != NULL) *newonep = 1; } /* If an fhp has been specified, create an Open as well. 
*/ if (fhp != NULL) { /* and look for the correct open, based upon FH */ LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { if (op->nfso_fhlen == fhlen && !NFSBCMP(op->nfso_fh, fhp, fhlen)) break; } if (op == NULL && nop != NULL) { nop->nfso_own = owp; nop->nfso_mode = 0; nop->nfso_opencnt = 0; nop->nfso_posixlock = 1; nop->nfso_fhlen = fhlen; NFSBCOPY(fhp, nop->nfso_fh, fhlen); LIST_INIT(&nop->nfso_lock); nop->nfso_stateid.seqid = 0; nop->nfso_stateid.other[0] = 0; nop->nfso_stateid.other[1] = 0; nop->nfso_stateid.other[2] = 0; KASSERT(cred != NULL, ("%s: cred NULL\n", __func__)); newnfs_copyincred(cred, &nop->nfso_cred); if (dp != NULL) { TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list); dp->nfsdl_timestamp = NFSD_MONOSEC + 120; nfsstatsv1.cllocalopens++; } else { nfsstatsv1.clopens++; } LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list); *opp = nop; *nopp = NULL; if (newonep != NULL) *newonep = 1; } else { *opp = op; } } } /* * Called to find/add a delegation to a client. */ APPLESTATIC int nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp, int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp) { struct nfscldeleg *dp = *dpp, *tdp; /* * First, if we have received a Read delegation for a file on a * read/write file system, just return it, because they aren't * useful, imho. */ if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) && (dp->nfsdl_flags & NFSCLDL_READ)) { (void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p); free(dp, M_NFSCLDELEG); *dpp = NULL; return (0); } /* Look for the correct deleg, based upon FH */ NFSLOCKCLSTATE(); tdp = nfscl_finddeleg(clp, nfhp, fhlen); if (tdp == NULL) { if (dp == NULL) { NFSUNLOCKCLSTATE(); return (NFSERR_BADSTATEID); } *dpp = NULL; TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list); LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp, nfsdl_hash); dp->nfsdl_timestamp = NFSD_MONOSEC + 120; nfsstatsv1.cldelegates++; nfscl_delegcnt++; } else { /* * Delegation already exists, what do we do if a new one?? */ if (dp != NULL) { printf("Deleg already exists!\n"); free(dp, M_NFSCLDELEG); *dpp = NULL; } else { *dpp = tdp; } } NFSUNLOCKCLSTATE(); return (0); } /* * Find a delegation for this file handle. Return NULL upon failure. */ static struct nfscldeleg * nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen) { struct nfscldeleg *dp; LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) { if (dp->nfsdl_fhlen == fhlen && !NFSBCMP(dp->nfsdl_fh, fhp, fhlen)) break; } return (dp); } /* * Get a stateid for an I/O operation. First, look for an open and iff * found, return either a lockowner stateid or the open stateid. * If no Open is found, just return error and the special stateid of all zeros. */ APPLESTATIC int nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode, int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp, void **lckpp) { struct nfsclclient *clp; struct nfsclowner *owp; struct nfsclopen *op = NULL, *top; struct nfscllockowner *lp; struct nfscldeleg *dp; struct nfsnode *np; struct nfsmount *nmp; u_int8_t own[NFSV4CL_LOCKNAMELEN]; int error, done; *lckpp = NULL; /* * Initially, just set the special stateid of all zeros. * (Don't do this for a DS, since the special stateid can't be used.) 
*/ if (fords == 0) { stateidp->seqid = 0; stateidp->other[0] = 0; stateidp->other[1] = 0; stateidp->other[2] = 0; } if (vnode_vtype(vp) != VREG) return (EISDIR); np = VTONFS(vp); nmp = VFSTONFS(vnode_mount(vp)); NFSLOCKCLSTATE(); clp = nfscl_findcl(nmp); if (clp == NULL) { NFSUNLOCKCLSTATE(); return (EACCES); } /* * Wait for recovery to complete. */ while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG)) (void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR, PZERO, "nfsrecvr", NULL); /* * First, look for a delegation. */ LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) { if (dp->nfsdl_fhlen == fhlen && !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) { if (!(mode & NFSV4OPEN_ACCESSWRITE) || (dp->nfsdl_flags & NFSCLDL_WRITE)) { stateidp->seqid = dp->nfsdl_stateid.seqid; stateidp->other[0] = dp->nfsdl_stateid.other[0]; stateidp->other[1] = dp->nfsdl_stateid.other[1]; stateidp->other[2] = dp->nfsdl_stateid.other[2]; if (!(np->n_flag & NDELEGRECALL)) { TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list); dp->nfsdl_timestamp = NFSD_MONOSEC + 120; dp->nfsdl_rwlock.nfslock_usecnt++; *lckpp = (void *)&dp->nfsdl_rwlock; } NFSUNLOCKCLSTATE(); return (0); } break; } } if (p != NULL) { /* * If p != NULL, we want to search the parentage tree * for a matching OpenOwner and use that. */ if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp)))) nfscl_filllockowner(NULL, own, F_POSIX); else nfscl_filllockowner(p->td_proc, own, F_POSIX); lp = NULL; error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, own, own, mode, &lp, &op); if (error == 0 && lp != NULL && fords == 0) { /* Don't return a lock stateid for a DS. */ stateidp->seqid = lp->nfsl_stateid.seqid; stateidp->other[0] = lp->nfsl_stateid.other[0]; stateidp->other[1] = lp->nfsl_stateid.other[1]; stateidp->other[2] = lp->nfsl_stateid.other[2]; NFSUNLOCKCLSTATE(); return (0); } } if (op == NULL) { /* If not found, just look for any OpenOwner that will work. */ top = NULL; done = 0; owp = LIST_FIRST(&clp->nfsc_owner); while (!done && owp != NULL) { LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { if (op->nfso_fhlen == fhlen && !NFSBCMP(op->nfso_fh, nfhp, fhlen)) { if (top == NULL && (op->nfso_mode & NFSV4OPEN_ACCESSWRITE) != 0 && (mode & NFSV4OPEN_ACCESSREAD) != 0) top = op; if ((mode & op->nfso_mode) == mode) { done = 1; break; } } } if (!done) owp = LIST_NEXT(owp, nfsow_list); } if (!done) { NFSCL_DEBUG(2, "openmode top=%p\n", top); if (top == NULL || NFSHASOPENMODE(nmp)) { NFSUNLOCKCLSTATE(); return (ENOENT); } else op = top; } /* * For read aheads or write behinds, use the open cred. * A read ahead or write behind is indicated by p == NULL. */ if (p == NULL) newnfs_copycred(&op->nfso_cred, cred); } /* * No lock stateid, so return the open stateid. */ stateidp->seqid = op->nfso_stateid.seqid; stateidp->other[0] = op->nfso_stateid.other[0]; stateidp->other[1] = op->nfso_stateid.other[1]; stateidp->other[2] = op->nfso_stateid.other[2]; NFSUNLOCKCLSTATE(); return (0); } /* * Search for a matching file, mode and, optionally, lockowner. */ static int nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen, u_int8_t *openown, u_int8_t *lockown, u_int32_t mode, struct nfscllockowner **lpp, struct nfsclopen **opp) { struct nfsclowner *owp; struct nfsclopen *op, *rop, *rop2; struct nfscllockowner *lp; int keep_looping; if (lpp != NULL) *lpp = NULL; /* * rop will be set to the open to be returned. There are three * variants of this, all for an open of the correct file: * 1 - A match of lockown. 
* 2 - A match of the openown, when no lockown match exists. * 3 - A match for any open, if no openown or lockown match exists. * Looking for #2 over #3 probably isn't necessary, but since * RFC3530 is vague w.r.t. the relationship between openowners and * lockowners, I think this is the safer way to go. */ rop = NULL; rop2 = NULL; keep_looping = 1; /* Search the client list */ owp = LIST_FIRST(ohp); while (owp != NULL && keep_looping != 0) { /* and look for the correct open */ op = LIST_FIRST(&owp->nfsow_open); while (op != NULL && keep_looping != 0) { if (op->nfso_fhlen == fhlen && !NFSBCMP(op->nfso_fh, nfhp, fhlen) && (op->nfso_mode & mode) == mode) { if (lpp != NULL) { /* Now look for a matching lockowner. */ LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { if (!NFSBCMP(lp->nfsl_owner, lockown, NFSV4CL_LOCKNAMELEN)) { *lpp = lp; rop = op; keep_looping = 0; break; } } } if (rop == NULL && !NFSBCMP(owp->nfsow_owner, openown, NFSV4CL_LOCKNAMELEN)) { rop = op; if (lpp == NULL) keep_looping = 0; } if (rop2 == NULL) rop2 = op; } op = LIST_NEXT(op, nfso_list); } owp = LIST_NEXT(owp, nfsow_list); } if (rop == NULL) rop = rop2; if (rop == NULL) return (EBADF); *opp = rop; return (0); } /* * Release use of an open owner. Called when open operations are done * with the open owner. */ APPLESTATIC void nfscl_ownerrelease(struct nfsmount *nmp, struct nfsclowner *owp, __unused int error, __unused int candelete, int unlocked) { if (owp == NULL) return; NFSLOCKCLSTATE(); if (unlocked == 0) { if (NFSHASONEOPENOWN(nmp)) nfsv4_relref(&owp->nfsow_rwlock); else nfscl_lockunlock(&owp->nfsow_rwlock); } nfscl_clrelease(owp->nfsow_clp); NFSUNLOCKCLSTATE(); } /* * Release use of an open structure under an open owner. */ APPLESTATIC void nfscl_openrelease(struct nfsmount *nmp, struct nfsclopen *op, int error, int candelete) { struct nfsclclient *clp; struct nfsclowner *owp; if (op == NULL) return; NFSLOCKCLSTATE(); owp = op->nfso_own; if (NFSHASONEOPENOWN(nmp)) nfsv4_relref(&owp->nfsow_rwlock); else nfscl_lockunlock(&owp->nfsow_rwlock); clp = owp->nfsow_clp; if (error && candelete && op->nfso_opencnt == 0) nfscl_freeopen(op, 0); nfscl_clrelease(clp); NFSUNLOCKCLSTATE(); } /* * Called to get a clientid structure. It will optionally lock the * client data structures to do the SetClientId/SetClientId_confirm, * but will release that lock and return the clientid with a reference * count on it. * If the "cred" argument is NULL, a new clientid should not be created. * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot * be done. * The start_renewthread argument tells nfscl_getcl() to start a renew * thread if this creates a new clp. * It always clpp with a reference count on it, unless returning an error. */ APPLESTATIC int nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p, int start_renewthread, struct nfsclclient **clpp) { struct nfsclclient *clp; struct nfsclclient *newclp = NULL; struct nfsmount *nmp; char uuid[HOSTUUIDLEN]; int igotlock = 0, error, trystalecnt, clidinusedelay, i; u_int16_t idlen = 0; nmp = VFSTONFS(mp); if (cred != NULL) { getcredhostuuid(cred, uuid, sizeof uuid); idlen = strlen(uuid); if (idlen > 0) idlen += sizeof (u_int64_t); else idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */ newclp = malloc( sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT, M_WAITOK | M_ZERO); } NFSLOCKCLSTATE(); /* * If a forced dismount is already in progress, don't * allocate a new clientid and get out now. For the case where * clp != NULL, this is a harmless optimization. 
*/ if (NFSCL_FORCEDISM(mp)) { NFSUNLOCKCLSTATE(); if (newclp != NULL) free(newclp, M_NFSCLCLIENT); return (EBADF); } clp = nmp->nm_clp; if (clp == NULL) { if (newclp == NULL) { NFSUNLOCKCLSTATE(); return (EACCES); } clp = newclp; clp->nfsc_idlen = idlen; LIST_INIT(&clp->nfsc_owner); TAILQ_INIT(&clp->nfsc_deleg); TAILQ_INIT(&clp->nfsc_layout); LIST_INIT(&clp->nfsc_devinfo); for (i = 0; i < NFSCLDELEGHASHSIZE; i++) LIST_INIT(&clp->nfsc_deleghash[i]); for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++) LIST_INIT(&clp->nfsc_layouthash[i]); clp->nfsc_flags = NFSCLFLAGS_INITED; clp->nfsc_clientidrev = 1; clp->nfsc_cbident = nfscl_nextcbident(); nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id, clp->nfsc_idlen); LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list); nmp->nm_clp = clp; clp->nfsc_nmp = nmp; NFSUNLOCKCLSTATE(); if (start_renewthread != 0) nfscl_start_renewthread(clp); } else { NFSUNLOCKCLSTATE(); if (newclp != NULL) free(newclp, M_NFSCLCLIENT); } NFSLOCKCLSTATE(); while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock && !NFSCL_FORCEDISM(mp)) igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, NFSCLSTATEMUTEXPTR, mp); if (igotlock == 0) { /* * Call nfsv4_lock() with "iwantlock == 0" so that it will * wait for a pending exclusive lock request. This gives the * exclusive lock request priority over this shared lock * request. * An exclusive lock on nfsc_lock is used mainly for server * crash recoveries. */ nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR, mp); nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp); } if (igotlock == 0 && NFSCL_FORCEDISM(mp)) { /* * Both nfsv4_lock() and nfsv4_getref() know to check * for NFSCL_FORCEDISM() and return without sleeping to * wait for the exclusive lock to be released, since it * might be held by nfscl_umount() and we need to get out * now for that case and not wait until nfscl_umount() * releases it. */ NFSUNLOCKCLSTATE(); return (EBADF); } NFSUNLOCKCLSTATE(); /* * If it needs a clientid, do the setclientid now. */ if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) { if (!igotlock) panic("nfscl_clget"); if (p == NULL || cred == NULL) { NFSLOCKCLSTATE(); nfsv4_unlock(&clp->nfsc_lock, 0); NFSUNLOCKCLSTATE(); return (EACCES); } /* * If RFC3530 Sec. 14.2.33 is taken literally, * NFSERR_CLIDINUSE will be returned persistently for the * case where a new mount of the same file system is using * a different principal. In practice, NFSERR_CLIDINUSE is * only returned when there is outstanding unexpired state * on the clientid. As such, try for twice the lease * interval, if we know what that is. Otherwise, make a * wild ass guess. * The case of returning NFSERR_STALECLIENTID is far less * likely, but might occur if there is a significant delay * between doing the SetClientID and SetClientIDConfirm Ops, * such that the server throws away the clientid before * receiving the SetClientIDConfirm. 
*/ if (clp->nfsc_renew > 0) clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2; else clidinusedelay = 120; trystalecnt = 3; do { error = nfsrpc_setclient(nmp, clp, 0, cred, p); if (error == NFSERR_STALECLIENTID || error == NFSERR_STALEDONTRECOVER || error == NFSERR_BADSESSION || error == NFSERR_CLIDINUSE) { (void) nfs_catnap(PZERO, error, "nfs_setcl"); } } while (((error == NFSERR_STALECLIENTID || error == NFSERR_BADSESSION || error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) || (error == NFSERR_CLIDINUSE && --clidinusedelay > 0)); if (error) { NFSLOCKCLSTATE(); nfsv4_unlock(&clp->nfsc_lock, 0); NFSUNLOCKCLSTATE(); return (error); } clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; } if (igotlock) { NFSLOCKCLSTATE(); nfsv4_unlock(&clp->nfsc_lock, 1); NFSUNLOCKCLSTATE(); } *clpp = clp; return (0); } /* * Get a reference to a clientid and return it, if valid. */ APPLESTATIC struct nfsclclient * nfscl_findcl(struct nfsmount *nmp) { struct nfsclclient *clp; clp = nmp->nm_clp; if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) return (NULL); return (clp); } /* * Release the clientid structure. It may be locked or reference counted. */ static void nfscl_clrelease(struct nfsclclient *clp) { if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK) nfsv4_unlock(&clp->nfsc_lock, 0); else nfsv4_relref(&clp->nfsc_lock); } /* * External call for nfscl_clrelease. */ APPLESTATIC void nfscl_clientrelease(struct nfsclclient *clp) { NFSLOCKCLSTATE(); if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK) nfsv4_unlock(&clp->nfsc_lock, 0); else nfsv4_relref(&clp->nfsc_lock); NFSUNLOCKCLSTATE(); } /* * Called when wanting to lock a byte region. */ APPLESTATIC int nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp, int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp, struct nfscllockowner **lpp, int *newonep, int *donelocallyp) { struct nfscllockowner *lp; struct nfsclopen *op; struct nfsclclient *clp; struct nfscllockowner *nlp; struct nfscllock *nlop, *otherlop; struct nfscldeleg *dp = NULL, *ldp = NULL; struct nfscllockownerhead *lhp = NULL; struct nfsnode *np; u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN]; u_int8_t *openownp; int error = 0, ret, donelocally = 0; u_int32_t mode; /* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */ mode = 0; np = VTONFS(vp); *lpp = NULL; lp = NULL; *newonep = 0; *donelocallyp = 0; /* * Might need these, so MALLOC them now, to * avoid a tsleep() in MALLOC later. 
*/ nlp = malloc( sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK); otherlop = malloc( sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK); nlop = malloc( sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK); nlop->nfslo_type = type; nlop->nfslo_first = off; if (len == NFS64BITSSET) { nlop->nfslo_end = NFS64BITSSET; } else { nlop->nfslo_end = off + len; if (nlop->nfslo_end <= nlop->nfslo_first) error = NFSERR_INVAL; } if (!error) { if (recovery) clp = rclp; else error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp); } if (error) { free(nlp, M_NFSCLLOCKOWNER); free(otherlop, M_NFSCLLOCK); free(nlop, M_NFSCLLOCK); return (error); } op = NULL; if (recovery) { ownp = rownp; openownp = ropenownp; } else { nfscl_filllockowner(id, own, flags); ownp = own; if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp)))) nfscl_filllockowner(NULL, openown, F_POSIX); else nfscl_filllockowner(p->td_proc, openown, F_POSIX); openownp = openown; } if (!recovery) { NFSLOCKCLSTATE(); /* * First, search for a delegation. If one exists for this file, * the lock can be done locally against it, so long as there * isn't a local lock conflict. */ ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); /* Just sanity check for correct type of delegation */ if (dp != NULL && ((dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 || (type == F_WRLCK && (dp->nfsdl_flags & NFSCLDL_WRITE) == 0))) dp = NULL; } if (dp != NULL) { /* Now, find an open and maybe a lockowner. */ ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op); if (ret) ret = nfscl_getopen(&clp->nfsc_owner, np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op); if (!ret) { lhp = &dp->nfsdl_lock; TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list); dp->nfsdl_timestamp = NFSD_MONOSEC + 120; donelocally = 1; } else { dp = NULL; } } if (!donelocally) { /* * Get the related Open and maybe lockowner. */ error = nfscl_getopen(&clp->nfsc_owner, np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp, ownp, mode, &lp, &op); if (!error) lhp = &op->nfso_lock; } if (!error && !recovery) error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len, nlop, ownp, ldp, NULL); if (error) { if (!recovery) { nfscl_clrelease(clp); NFSUNLOCKCLSTATE(); } free(nlp, M_NFSCLLOCKOWNER); free(otherlop, M_NFSCLLOCK); free(nlop, M_NFSCLLOCK); return (error); } /* * Ok, see if a lockowner exists and create one, as required. */ if (lp == NULL) LIST_FOREACH(lp, lhp, nfsl_list) { if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN)) break; } if (lp == NULL) { NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN); if (recovery) NFSBCOPY(ropenownp, nlp->nfsl_openowner, NFSV4CL_LOCKNAMELEN); else NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner, NFSV4CL_LOCKNAMELEN); nlp->nfsl_seqid = 0; nlp->nfsl_lockflags = flags; nlp->nfsl_inprog = NULL; nfscl_lockinit(&nlp->nfsl_rwlock); LIST_INIT(&nlp->nfsl_lock); if (donelocally) { nlp->nfsl_open = NULL; nfsstatsv1.cllocallockowners++; } else { nlp->nfsl_open = op; nfsstatsv1.cllockowners++; } LIST_INSERT_HEAD(lhp, nlp, nfsl_list); lp = nlp; nlp = NULL; *newonep = 1; } /* * Now, update the byte ranges for locks. */ ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally); if (!ret) donelocally = 1; if (donelocally) { *donelocallyp = 1; if (!recovery) nfscl_clrelease(clp); } else { /* * Serial modifications on the lock owner for multiple threads * for the same process using a read/write lock. 
*/ if (!recovery) nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR); } if (!recovery) NFSUNLOCKCLSTATE(); if (nlp) free(nlp, M_NFSCLLOCKOWNER); if (nlop) free(nlop, M_NFSCLLOCK); if (otherlop) free(otherlop, M_NFSCLLOCK); *lpp = lp; return (0); } /* * Called to unlock a byte range, for LockU. */ APPLESTATIC int nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len, __unused struct ucred *cred, NFSPROC_T *p, int callcnt, struct nfsclclient *clp, void *id, int flags, struct nfscllockowner **lpp, int *dorpcp) { struct nfscllockowner *lp; struct nfsclowner *owp; struct nfsclopen *op; struct nfscllock *nlop, *other_lop = NULL; struct nfscldeleg *dp; struct nfsnode *np; u_int8_t own[NFSV4CL_LOCKNAMELEN]; int ret = 0, fnd; np = VTONFS(vp); *lpp = NULL; *dorpcp = 0; /* * Might need these, so MALLOC them now, to * avoid a tsleep() in MALLOC later. */ nlop = malloc( sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK); nlop->nfslo_type = F_UNLCK; nlop->nfslo_first = off; if (len == NFS64BITSSET) { nlop->nfslo_end = NFS64BITSSET; } else { nlop->nfslo_end = off + len; if (nlop->nfslo_end <= nlop->nfslo_first) { free(nlop, M_NFSCLLOCK); return (NFSERR_INVAL); } } if (callcnt == 0) { other_lop = malloc( sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK); *other_lop = *nlop; } nfscl_filllockowner(id, own, flags); dp = NULL; NFSLOCKCLSTATE(); if (callcnt == 0) dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); /* * First, unlock any local regions on a delegation. */ if (dp != NULL) { /* Look for this lockowner. */ LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) break; } if (lp != NULL) /* Use other_lop, so nlop is still available */ (void)nfscl_updatelock(lp, &other_lop, NULL, 1); } /* * Now, find a matching open/lockowner that hasn't already been done, * as marked by nfsl_inprog. */ lp = NULL; fnd = 0; LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { if (op->nfso_fhlen == np->n_fhp->nfh_len && !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) { LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { if (lp->nfsl_inprog == NULL && !NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) { fnd = 1; break; } } if (fnd) break; } } if (fnd) break; } if (lp != NULL) { ret = nfscl_updatelock(lp, &nlop, NULL, 0); if (ret) *dorpcp = 1; /* * Serial modifications on the lock owner for multiple * threads for the same process using a read/write lock. */ lp->nfsl_inprog = p; nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR); *lpp = lp; } NFSUNLOCKCLSTATE(); if (nlop) free(nlop, M_NFSCLLOCK); if (other_lop) free(other_lop, M_NFSCLLOCK); return (0); } /* * Release all lockowners marked in progess for this process and file. 
*/ APPLESTATIC void nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p, void *id, int flags) { struct nfsclowner *owp; struct nfsclopen *op; struct nfscllockowner *lp; struct nfsnode *np; u_int8_t own[NFSV4CL_LOCKNAMELEN]; np = VTONFS(vp); nfscl_filllockowner(id, own, flags); NFSLOCKCLSTATE(); LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { if (op->nfso_fhlen == np->n_fhp->nfh_len && !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) { LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { if (lp->nfsl_inprog == p && !NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) { lp->nfsl_inprog = NULL; nfscl_lockunlock(&lp->nfsl_rwlock); } } } } } nfscl_clrelease(clp); NFSUNLOCKCLSTATE(); } /* * Called to find out if any bytes within the byte range specified are * write locked by the calling process. Used to determine if flushing * is required before a LockU. * If in doubt, return 1, so the flush will occur. */ APPLESTATIC int nfscl_checkwritelocked(vnode_t vp, struct flock *fl, struct ucred *cred, NFSPROC_T *p, void *id, int flags) { struct nfsclowner *owp; struct nfscllockowner *lp; struct nfsclopen *op; struct nfsclclient *clp; struct nfscllock *lop; struct nfscldeleg *dp; struct nfsnode *np; u_int64_t off, end; u_int8_t own[NFSV4CL_LOCKNAMELEN]; int error = 0; np = VTONFS(vp); switch (fl->l_whence) { case SEEK_SET: case SEEK_CUR: /* * Caller is responsible for adding any necessary offset * when SEEK_CUR is used. */ off = fl->l_start; break; case SEEK_END: off = np->n_size + fl->l_start; break; default: return (1); } if (fl->l_len != 0) { end = off + fl->l_len; if (end < off) return (1); } else { end = NFS64BITSSET; } error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp); if (error) return (1); nfscl_filllockowner(id, own, flags); NFSLOCKCLSTATE(); /* * First check the delegation locks. */ dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); if (dp != NULL) { LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) break; } if (lp != NULL) { LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { if (lop->nfslo_first >= end) break; if (lop->nfslo_end <= off) continue; if (lop->nfslo_type == F_WRLCK) { nfscl_clrelease(clp); NFSUNLOCKCLSTATE(); return (1); } } } } /* * Now, check state against the server. */ LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { if (op->nfso_fhlen == np->n_fhp->nfh_len && !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) { LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) break; } if (lp != NULL) { LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { if (lop->nfslo_first >= end) break; if (lop->nfslo_end <= off) continue; if (lop->nfslo_type == F_WRLCK) { nfscl_clrelease(clp); NFSUNLOCKCLSTATE(); return (1); } } } } } } nfscl_clrelease(clp); NFSUNLOCKCLSTATE(); return (0); } /* * Release a byte range lock owner structure. */ APPLESTATIC void nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete) { struct nfsclclient *clp; if (lp == NULL) return; NFSLOCKCLSTATE(); clp = lp->nfsl_open->nfso_own->nfsow_clp; if (error != 0 && candelete && (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0) nfscl_freelockowner(lp, 0); else nfscl_lockunlock(&lp->nfsl_rwlock); nfscl_clrelease(clp); NFSUNLOCKCLSTATE(); } /* * Free up an open structure and any associated byte range lock structures. 
*/ APPLESTATIC void nfscl_freeopen(struct nfsclopen *op, int local) { LIST_REMOVE(op, nfso_list); nfscl_freealllocks(&op->nfso_lock, local); free(op, M_NFSCLOPEN); if (local) nfsstatsv1.cllocalopens--; else nfsstatsv1.clopens--; } /* * Free up all lock owners and associated locks. */ static void nfscl_freealllocks(struct nfscllockownerhead *lhp, int local) { struct nfscllockowner *lp, *nlp; LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) { if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED)) panic("nfscllckw"); nfscl_freelockowner(lp, local); } } /* * Called for an Open when NFSERR_EXPIRED is received from the server. * If there are no byte range locks nor a Share Deny lost, try to do a * fresh Open. Otherwise, free the open. */ static int nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op, struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p) { struct nfscllockowner *lp; struct nfscldeleg *dp; int mustdelete = 0, error; /* * Look for any byte range lock(s). */ LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { if (!LIST_EMPTY(&lp->nfsl_lock)) { mustdelete = 1; break; } } /* * If no byte range lock(s) nor a Share deny, try to re-open. */ if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) { newnfs_copycred(&op->nfso_cred, cred); dp = NULL; error = nfsrpc_reopen(nmp, op->nfso_fh, op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p); if (error) { mustdelete = 1; if (dp != NULL) { free(dp, M_NFSCLDELEG); dp = NULL; } } if (dp != NULL) nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh, op->nfso_fhlen, cred, p, &dp); } /* * If a byte range lock or Share deny or couldn't re-open, free it. */ if (mustdelete) nfscl_freeopen(op, 0); return (mustdelete); } /* * Free up an open owner structure. */ static void nfscl_freeopenowner(struct nfsclowner *owp, int local) { LIST_REMOVE(owp, nfsow_list); free(owp, M_NFSCLOWNER); if (local) nfsstatsv1.cllocalopenowners--; else nfsstatsv1.clopenowners--; } /* * Free up a byte range lock owner structure. */ APPLESTATIC void nfscl_freelockowner(struct nfscllockowner *lp, int local) { struct nfscllock *lop, *nlop; LIST_REMOVE(lp, nfsl_list); LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) { nfscl_freelock(lop, local); } free(lp, M_NFSCLLOCKOWNER); if (local) nfsstatsv1.cllocallockowners--; else nfsstatsv1.cllockowners--; } /* * Free up a byte range lock structure. */ APPLESTATIC void nfscl_freelock(struct nfscllock *lop, int local) { LIST_REMOVE(lop, nfslo_list); free(lop, M_NFSCLLOCK); if (local) nfsstatsv1.cllocallocks--; else nfsstatsv1.cllocks--; } /* * Clean out the state related to a delegation. */ static void nfscl_cleandeleg(struct nfscldeleg *dp) { struct nfsclowner *owp, *nowp; struct nfsclopen *op; LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) { op = LIST_FIRST(&owp->nfsow_open); if (op != NULL) { if (LIST_NEXT(op, nfso_list) != NULL) panic("nfscleandel"); nfscl_freeopen(op, 1); } nfscl_freeopenowner(owp, 1); } nfscl_freealllocks(&dp->nfsdl_lock, 1); } /* * Free a delegation. */ static void nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp) { TAILQ_REMOVE(hdp, dp, nfsdl_list); LIST_REMOVE(dp, nfsdl_hash); free(dp, M_NFSCLDELEG); nfsstatsv1.cldelegates--; nfscl_delegcnt--; } /* * Free up all state related to this client structure. 
*/ static void nfscl_cleanclient(struct nfsclclient *clp) { struct nfsclowner *owp, *nowp; struct nfsclopen *op, *nop; struct nfscllayout *lyp, *nlyp; struct nfscldevinfo *dip, *ndip; TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) nfscl_freelayout(lyp); LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) nfscl_freedevinfo(dip); /* Now, all the OpenOwners, etc. */ LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) { nfscl_freeopen(op, 0); } nfscl_freeopenowner(owp, 0); } } /* * Called when an NFSERR_EXPIRED is received from the server. */ static void nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p) { struct nfsclowner *owp, *nowp, *towp; struct nfsclopen *op, *nop, *top; struct nfscldeleg *dp, *ndp; int ret, printed = 0; /* * First, merge locally issued Opens into the list for the server. */ dp = TAILQ_FIRST(&clp->nfsc_deleg); while (dp != NULL) { ndp = TAILQ_NEXT(dp, nfsdl_list); owp = LIST_FIRST(&dp->nfsdl_owner); while (owp != NULL) { nowp = LIST_NEXT(owp, nfsow_list); op = LIST_FIRST(&owp->nfsow_open); if (op != NULL) { if (LIST_NEXT(op, nfso_list) != NULL) panic("nfsclexp"); LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) { if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) break; } if (towp != NULL) { /* Merge opens in */ LIST_FOREACH(top, &towp->nfsow_open, nfso_list) { if (top->nfso_fhlen == op->nfso_fhlen && !NFSBCMP(top->nfso_fh, op->nfso_fh, op->nfso_fhlen)) { top->nfso_mode |= op->nfso_mode; top->nfso_opencnt += op->nfso_opencnt; break; } } if (top == NULL) { /* Just add the open to the owner list */ LIST_REMOVE(op, nfso_list); op->nfso_own = towp; LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list); nfsstatsv1.cllocalopens--; nfsstatsv1.clopens++; } } else { /* Just add the openowner to the client list */ LIST_REMOVE(owp, nfsow_list); owp->nfsow_clp = clp; LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list); nfsstatsv1.cllocalopenowners--; nfsstatsv1.clopenowners++; nfsstatsv1.cllocalopens--; nfsstatsv1.clopens++; } } owp = nowp; } if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) { printed = 1; printf("nfsv4 expired locks lost\n"); } nfscl_cleandeleg(dp); nfscl_freedeleg(&clp->nfsc_deleg, dp); dp = ndp; } if (!TAILQ_EMPTY(&clp->nfsc_deleg)) panic("nfsclexp"); /* * Now, try and reopen against the server. */ LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { owp->nfsow_seqid = 0; LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) { ret = nfscl_expireopen(clp, op, nmp, cred, p); if (ret && !printed) { printed = 1; printf("nfsv4 expired locks lost\n"); } } if (LIST_EMPTY(&owp->nfsow_open)) nfscl_freeopenowner(owp, 0); } } /* * This function must be called after the process represented by "own" has * exited. Must be called with CLSTATE lock held. */ static void nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own) { struct nfsclowner *owp, *nowp; struct nfscllockowner *lp, *nlp; struct nfscldeleg *dp; /* First, get rid of local locks on delegations. 
*/ TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) { if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) { if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED)) panic("nfscllckw"); nfscl_freelockowner(lp, 1); } } } owp = LIST_FIRST(&clp->nfsc_owner); while (owp != NULL) { nowp = LIST_NEXT(owp, nfsow_list); if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN)) { /* * If there are children that haven't closed the * file descriptors yet, the opens will still be * here. For that case, let the renew thread clear * out the OpenOwner later. */ if (LIST_EMPTY(&owp->nfsow_open)) nfscl_freeopenowner(owp, 0); else owp->nfsow_defunct = 1; } owp = nowp; } } /* * Find open/lock owners for processes that have exited. */ static void nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp) { struct nfsclowner *owp, *nowp; struct nfsclopen *op; struct nfscllockowner *lp, *nlp; struct nfscldeleg *dp; NFSPROCLISTLOCK(); NFSLOCKCLSTATE(); LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) { if (LIST_EMPTY(&lp->nfsl_lock)) nfscl_emptylockowner(lp, lhp); } } if (nfscl_procdoesntexist(owp->nfsow_owner)) nfscl_cleanup_common(clp, owp->nfsow_owner); } /* * For the single open_owner case, these lock owners need to be * checked to see if they still exist separately. * This is because nfscl_procdoesntexist() never returns true for * the single open_owner so that the above doesn't ever call * nfscl_cleanup_common(). */ TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) { if (nfscl_procdoesntexist(lp->nfsl_owner)) nfscl_cleanup_common(clp, lp->nfsl_owner); } } NFSUNLOCKCLSTATE(); NFSPROCLISTUNLOCK(); } /* * Take the empty lock owner and move it to the local lhp list if the * associated process no longer exists. */ static void nfscl_emptylockowner(struct nfscllockowner *lp, struct nfscllockownerfhhead *lhp) { struct nfscllockownerfh *lfhp, *mylfhp; struct nfscllockowner *nlp; int fnd_it; /* If not a Posix lock owner, just return. */ if ((lp->nfsl_lockflags & F_POSIX) == 0) return; fnd_it = 0; mylfhp = NULL; /* * First, search to see if this lock owner is already in the list. * If it is, then the associated process no longer exists. */ SLIST_FOREACH(lfhp, lhp, nfslfh_list) { if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen && !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh, lfhp->nfslfh_len)) mylfhp = lfhp; LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list) if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner, NFSV4CL_LOCKNAMELEN)) fnd_it = 1; } /* If not found, check if process still exists. */ if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0) return; /* Move the lock owner over to the local list. */ if (mylfhp == NULL) { mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP, M_NOWAIT); if (mylfhp == NULL) return; mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen; NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh, mylfhp->nfslfh_len); LIST_INIT(&mylfhp->nfslfh_lock); SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list); } LIST_REMOVE(lp, nfsl_list); LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list); } static int fake_global; /* Used to force visibility of MNTK_UNMOUNTF */ /* * Called from nfs umount to free up the clientid. 
 */
APPLESTATIC void
nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct ucred *cred;
	int igotlock;

	/*
	 * For the case that matters, this is the thread that set
	 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
	 * done to ensure that any thread executing nfscl_getcl() after
	 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
	 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
	 * explanation, courtesy of Alan Cox.
	 * What follows is a snippet from Alan Cox's email at:
-	 * http://docs.FreeBSD.org/cgi/
-	 * mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
+	 * https://docs.FreeBSD.org/cgi/mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
	 *
	 * 1. Set MNTK_UNMOUNTF
	 * 2. Acquire a standard FreeBSD mutex "m".
	 * 3. Update some data structures.
	 * 4. Release mutex "m".
	 *
	 * Then, other threads that acquire "m" after step 4 has occurred will
	 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X to
	 * step 2 may or may not see MNTK_UNMOUNTF as set.
	 */
	NFSLOCKCLSTATE();
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		fake_global++;
		NFSUNLOCKCLSTATE();
		NFSLOCKCLSTATE();
	}
	clp = nmp->nm_clp;
	if (clp != NULL) {
		if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
			panic("nfscl umount");
		/*
		 * First, handshake with the nfscl renew thread, to terminate
		 * it.
		 */
		clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
		while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
			(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,
			    "nfsclumnt", hz);
		/*
		 * Now, get the exclusive lock on the client state, so
		 * that no uses of the state are still in progress.
		 */
		do {
			igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
		} while (!igotlock);
		NFSUNLOCKCLSTATE();
		/*
		 * Free up all the state. It will expire on the server, but
		 * maybe we should do a SetClientId/SetClientIdConfirm so
		 * the server throws it away?
		 */
		LIST_REMOVE(clp, nfsc_list);
		nfscl_delegreturnall(clp, p);
		cred = newnfs_getcred();
		if (NFSHASNFSV4N(nmp)) {
			(void)nfsrpc_destroysession(nmp, clp, cred, p);
			(void)nfsrpc_destroyclient(nmp, clp, cred, p);
		} else
			(void)nfsrpc_setclient(nmp, clp, 0, cred, p);
		nfscl_cleanclient(clp);
		nmp->nm_clp = NULL;
		NFSFREECRED(cred);
		free(clp, M_NFSCLCLIENT);
	} else
		NFSUNLOCKCLSTATE();
}

/*
 * This function is called when a server replies with NFSERR_STALECLIENTID
 * NFSERR_STALESTATEID or NFSERR_BADSESSION. It traverses the clientid lists,
 * doing Opens and Locks with reclaim. If these fail, it deletes the
 * corresponding state.
 */
static void
nfscl_recover(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfscllockowner *lp, *nlp;
	struct nfscllock *lop, *nlop;
	struct nfscldeleg *dp, *ndp, *tdp;
	struct nfsmount *nmp;
	struct ucred *tcred;
	struct nfsclopenhead extra_open;
	struct nfscldeleghead extra_deleg;
	struct nfsreq *rep;
	u_int64_t len;
	u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
	int i, igotlock = 0, error, trycnt, firstlock;
	struct nfscllayout *lyp, *nlyp;

	/*
	 * First, lock the client structure, so everyone else will
	 * block when trying to use state.
	 */
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
	} while (!igotlock);
	NFSUNLOCKCLSTATE();

	nmp = clp->nfsc_nmp;
	if (nmp == NULL)
		panic("nfscl recover");

	/*
	 * For now, just get rid of all layouts. There may be a need
	 * to do LayoutCommit Ops with reclaim == true later.
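	 * (The layouts are not reclaimed here; they are simply freed and
	 * the client will fetch fresh layouts from the server as needed
	 * once the recovery completes.)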
*/ TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) nfscl_freelayout(lyp); TAILQ_INIT(&clp->nfsc_layout); for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++) LIST_INIT(&clp->nfsc_layouthash[i]); trycnt = 5; do { error = nfsrpc_setclient(nmp, clp, 1, cred, p); } while ((error == NFSERR_STALECLIENTID || error == NFSERR_BADSESSION || error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); if (error) { NFSLOCKCLSTATE(); clp->nfsc_flags &= ~(NFSCLFLAGS_RECOVER | NFSCLFLAGS_RECVRINPROG); wakeup(&clp->nfsc_flags); nfsv4_unlock(&clp->nfsc_lock, 0); NFSUNLOCKCLSTATE(); return; } clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; /* * Mark requests already queued on the server, so that they don't * initiate another recovery cycle. Any requests already in the * queue that handle state information will have the old stale * clientid/stateid and will get a NFSERR_STALESTATEID, * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server. * This will be translated to NFSERR_STALEDONTRECOVER when * R_DONTRECOVER is set. */ NFSLOCKREQ(); TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) { if (rep->r_nmp == nmp) rep->r_flags |= R_DONTRECOVER; } NFSUNLOCKREQ(); /* * Now, mark all delegations "need reclaim". */ TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM; TAILQ_INIT(&extra_deleg); LIST_INIT(&extra_open); /* * Now traverse the state lists, doing Open and Lock Reclaims. */ tcred = newnfs_getcred(); owp = LIST_FIRST(&clp->nfsc_owner); while (owp != NULL) { nowp = LIST_NEXT(owp, nfsow_list); owp->nfsow_seqid = 0; op = LIST_FIRST(&owp->nfsow_open); while (op != NULL) { nop = LIST_NEXT(op, nfso_list); if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) { /* Search for a delegation to reclaim with the open */ TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) continue; if ((dp->nfsdl_flags & NFSCLDL_WRITE)) { mode = NFSV4OPEN_ACCESSWRITE; delegtype = NFSV4OPEN_DELEGATEWRITE; } else { mode = NFSV4OPEN_ACCESSREAD; delegtype = NFSV4OPEN_DELEGATEREAD; } if ((op->nfso_mode & mode) == mode && op->nfso_fhlen == dp->nfsdl_fhlen && !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen)) break; } ndp = dp; if (dp == NULL) delegtype = NFSV4OPEN_DELEGATENONE; newnfs_copycred(&op->nfso_cred, tcred); error = nfscl_tryopen(nmp, NULL, op->nfso_fh, op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen, op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype, tcred, p); if (!error) { /* Handle any replied delegation */ if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE) || NFSMNT_RDONLY(nmp->nm_mountp))) { if ((ndp->nfsdl_flags & NFSCLDL_WRITE)) mode = NFSV4OPEN_ACCESSWRITE; else mode = NFSV4OPEN_ACCESSREAD; TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) continue; if ((op->nfso_mode & mode) == mode && op->nfso_fhlen == dp->nfsdl_fhlen && !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen)) { dp->nfsdl_stateid = ndp->nfsdl_stateid; dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit; dp->nfsdl_ace = ndp->nfsdl_ace; dp->nfsdl_change = ndp->nfsdl_change; dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM; if ((ndp->nfsdl_flags & NFSCLDL_RECALL)) dp->nfsdl_flags |= NFSCLDL_RECALL; free(ndp, M_NFSCLDELEG); ndp = NULL; break; } } } if (ndp != NULL) TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list); /* and reclaim all byte range locks */ lp = LIST_FIRST(&op->nfso_lock); while (lp != NULL) { nlp = LIST_NEXT(lp, nfsl_list); lp->nfsl_seqid = 0; firstlock = 1; lop = LIST_FIRST(&lp->nfsl_lock); while (lop != NULL) { 
nlop = LIST_NEXT(lop, nfslo_list); if (lop->nfslo_end == NFS64BITSSET) len = NFS64BITSSET; else len = lop->nfslo_end - lop->nfslo_first; error = nfscl_trylock(nmp, NULL, op->nfso_fh, op->nfso_fhlen, lp, firstlock, 1, lop->nfslo_first, len, lop->nfslo_type, tcred, p); if (error != 0) nfscl_freelock(lop, 0); else firstlock = 0; lop = nlop; } /* If no locks, but a lockowner, just delete it. */ if (LIST_EMPTY(&lp->nfsl_lock)) nfscl_freelockowner(lp, 0); lp = nlp; } } } if (error != 0 && error != NFSERR_BADSESSION) nfscl_freeopen(op, 0); op = nop; } owp = nowp; } /* * Now, try and get any delegations not yet reclaimed by cobbling * to-gether an appropriate open. */ nowp = NULL; dp = TAILQ_FIRST(&clp->nfsc_deleg); while (dp != NULL) { ndp = TAILQ_NEXT(dp, nfsdl_list); if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) { if (nowp == NULL) { nowp = malloc( sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK); /* * Name must be as long an largest possible * NFSV4CL_LOCKNAMELEN. 12 for now. */ NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN); LIST_INIT(&nowp->nfsow_open); nowp->nfsow_clp = clp; nowp->nfsow_seqid = 0; nowp->nfsow_defunct = 0; nfscl_lockinit(&nowp->nfsow_rwlock); } nop = NULL; if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) { nop = malloc(sizeof (struct nfsclopen) + dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK); nop->nfso_own = nowp; if ((dp->nfsdl_flags & NFSCLDL_WRITE)) { nop->nfso_mode = NFSV4OPEN_ACCESSWRITE; delegtype = NFSV4OPEN_DELEGATEWRITE; } else { nop->nfso_mode = NFSV4OPEN_ACCESSREAD; delegtype = NFSV4OPEN_DELEGATEREAD; } nop->nfso_opencnt = 0; nop->nfso_posixlock = 1; nop->nfso_fhlen = dp->nfsdl_fhlen; NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen); LIST_INIT(&nop->nfso_lock); nop->nfso_stateid.seqid = 0; nop->nfso_stateid.other[0] = 0; nop->nfso_stateid.other[1] = 0; nop->nfso_stateid.other[2] = 0; newnfs_copycred(&dp->nfsdl_cred, tcred); newnfs_copyincred(tcred, &nop->nfso_cred); tdp = NULL; error = nfscl_tryopen(nmp, NULL, nop->nfso_fh, nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen, nop->nfso_mode, nop, NULL, 0, &tdp, 1, delegtype, tcred, p); if (tdp != NULL) { if ((tdp->nfsdl_flags & NFSCLDL_WRITE)) mode = NFSV4OPEN_ACCESSWRITE; else mode = NFSV4OPEN_ACCESSREAD; if ((nop->nfso_mode & mode) == mode && nop->nfso_fhlen == tdp->nfsdl_fhlen && !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh, nop->nfso_fhlen)) { dp->nfsdl_stateid = tdp->nfsdl_stateid; dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit; dp->nfsdl_ace = tdp->nfsdl_ace; dp->nfsdl_change = tdp->nfsdl_change; dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM; if ((tdp->nfsdl_flags & NFSCLDL_RECALL)) dp->nfsdl_flags |= NFSCLDL_RECALL; free(tdp, M_NFSCLDELEG); } else { TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list); } } } if (error) { if (nop != NULL) free(nop, M_NFSCLOPEN); /* * Couldn't reclaim it, so throw the state * away. Ouch!! */ nfscl_cleandeleg(dp); nfscl_freedeleg(&clp->nfsc_deleg, dp); } else { LIST_INSERT_HEAD(&extra_open, nop, nfso_list); } } dp = ndp; } /* * Now, get rid of extra Opens and Delegations. 
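 * (The "extra" Opens are the ones cobbled together above solely to reclaim
 * delegations, and the "extra" Delegations are ones the server handed back
 * that do not correspond to existing client state; they are Closed or
 * DelegReturned below rather than kept.)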
*/ LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) { do { newnfs_copycred(&op->nfso_cred, tcred); error = nfscl_tryclose(op, tcred, nmp, p); if (error == NFSERR_GRACE) (void) nfs_catnap(PZERO, error, "nfsexcls"); } while (error == NFSERR_GRACE); LIST_REMOVE(op, nfso_list); free(op, M_NFSCLOPEN); } if (nowp != NULL) free(nowp, M_NFSCLOWNER); TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) { do { newnfs_copycred(&dp->nfsdl_cred, tcred); error = nfscl_trydelegreturn(dp, tcred, nmp, p); if (error == NFSERR_GRACE) (void) nfs_catnap(PZERO, error, "nfsexdlg"); } while (error == NFSERR_GRACE); TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list); free(dp, M_NFSCLDELEG); } /* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */ if (NFSHASNFSV4N(nmp)) (void)nfsrpc_reclaimcomplete(nmp, cred, p); NFSLOCKCLSTATE(); clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG; wakeup(&clp->nfsc_flags); nfsv4_unlock(&clp->nfsc_lock, 0); NFSUNLOCKCLSTATE(); NFSFREECRED(tcred); } /* * This function is called when a server replies with NFSERR_EXPIRED. * It deletes all state for the client and does a fresh SetClientId/confirm. * XXX Someday it should post a signal to the process(es) that hold the * state, so they know that lock state has been lost. */ APPLESTATIC int nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p) { struct nfsmount *nmp; struct ucred *cred; int igotlock = 0, error, trycnt; /* * If the clientid has gone away or a new SetClientid has already * been done, just return ok. */ if (clp == NULL || clidrev != clp->nfsc_clientidrev) return (0); /* * First, lock the client structure, so everyone else will * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so * that only one thread does the work. */ NFSLOCKCLSTATE(); clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT; do { igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, NFSCLSTATEMUTEXPTR, NULL); } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT)); if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) { if (igotlock) nfsv4_unlock(&clp->nfsc_lock, 0); NFSUNLOCKCLSTATE(); return (0); } clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG; NFSUNLOCKCLSTATE(); nmp = clp->nfsc_nmp; if (nmp == NULL) panic("nfscl expired"); cred = newnfs_getcred(); trycnt = 5; do { error = nfsrpc_setclient(nmp, clp, 0, cred, p); } while ((error == NFSERR_STALECLIENTID || error == NFSERR_BADSESSION || error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); if (error) { NFSLOCKCLSTATE(); clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; } else { /* * Expire the state for the client. */ nfscl_expireclient(clp, nmp, cred, p); NFSLOCKCLSTATE(); clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; } clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG); wakeup(&clp->nfsc_flags); nfsv4_unlock(&clp->nfsc_lock, 0); NFSUNLOCKCLSTATE(); NFSFREECRED(cred); return (error); } /* * This function inserts a lock in the list after insert_lop. */ static void nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop, struct nfscllock *insert_lop, int local) { if ((struct nfscllockowner *)insert_lop == lp) LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list); else LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list); if (local) nfsstatsv1.cllocallocks++; else nfsstatsv1.cllocks++; } /* * This function updates the locking for a lock owner and given file. It * maintains a list of lock ranges ordered on increasing file offset that * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style). 
* It always adds new_lop to the list and sometimes uses the one pointed * at by other_lopp. * Returns 1 if the locks were modified, 0 otherwise. */ static int nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp, struct nfscllock **other_lopp, int local) { struct nfscllock *new_lop = *new_lopp; struct nfscllock *lop, *tlop, *ilop; struct nfscllock *other_lop; int unlock = 0, modified = 0; u_int64_t tmp; /* * Work down the list until the lock is merged. */ if (new_lop->nfslo_type == F_UNLCK) unlock = 1; ilop = (struct nfscllock *)lp; lop = LIST_FIRST(&lp->nfsl_lock); while (lop != NULL) { /* * Only check locks for this file that aren't before the start of * new lock's range. */ if (lop->nfslo_end >= new_lop->nfslo_first) { if (new_lop->nfslo_end < lop->nfslo_first) { /* * If the new lock ends before the start of the * current lock's range, no merge, just insert * the new lock. */ break; } if (new_lop->nfslo_type == lop->nfslo_type || (new_lop->nfslo_first <= lop->nfslo_first && new_lop->nfslo_end >= lop->nfslo_end)) { /* * This lock can be absorbed by the new lock/unlock. * This happens when it covers the entire range * of the old lock or is contiguous * with the old lock and is of the same type or an * unlock. */ if (new_lop->nfslo_type != lop->nfslo_type || new_lop->nfslo_first != lop->nfslo_first || new_lop->nfslo_end != lop->nfslo_end) modified = 1; if (lop->nfslo_first < new_lop->nfslo_first) new_lop->nfslo_first = lop->nfslo_first; if (lop->nfslo_end > new_lop->nfslo_end) new_lop->nfslo_end = lop->nfslo_end; tlop = lop; lop = LIST_NEXT(lop, nfslo_list); nfscl_freelock(tlop, local); continue; } /* * All these cases are for contiguous locks that are not the * same type, so they can't be merged. */ if (new_lop->nfslo_first <= lop->nfslo_first) { /* * This case is where the new lock overlaps with the * first part of the old lock. Move the start of the * old lock to just past the end of the new lock. The * new lock will be inserted in front of the old, since * ilop hasn't been updated. (We are done now.) */ if (lop->nfslo_first != new_lop->nfslo_end) { lop->nfslo_first = new_lop->nfslo_end; modified = 1; } break; } if (new_lop->nfslo_end >= lop->nfslo_end) { /* * This case is where the new lock overlaps with the * end of the old lock's range. Move the old lock's * end to just before the new lock's first and insert * the new lock after the old lock. * Might not be done yet, since the new lock could * overlap further locks with higher ranges. */ if (lop->nfslo_end != new_lop->nfslo_first) { lop->nfslo_end = new_lop->nfslo_first; modified = 1; } ilop = lop; lop = LIST_NEXT(lop, nfslo_list); continue; } /* * The final case is where the new lock's range is in the * middle of the current lock's and splits the current lock * up. Use *other_lopp to handle the second part of the * split old lock range. (We are done now.) * For unlock, we use new_lop as other_lop and tmp, since * other_lop and new_lop are the same for this case. * We noted the unlock case above, so we don't need * new_lop->nfslo_type any longer. 
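 * As a worked example of this split case: with an existing write lock over
 * bytes 0-99, a new read lock over bytes 40-59 leaves write 0-39, read 40-59
 * and write 60-99, with *other_lopp consumed for the trailing piece; an
 * unlock of bytes 40-59 instead leaves just write 0-39 and write 60-99.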
*/ tmp = new_lop->nfslo_first; if (unlock) { other_lop = new_lop; *new_lopp = NULL; } else { other_lop = *other_lopp; *other_lopp = NULL; } other_lop->nfslo_first = new_lop->nfslo_end; other_lop->nfslo_end = lop->nfslo_end; other_lop->nfslo_type = lop->nfslo_type; lop->nfslo_end = tmp; nfscl_insertlock(lp, other_lop, lop, local); ilop = lop; modified = 1; break; } ilop = lop; lop = LIST_NEXT(lop, nfslo_list); if (lop == NULL) break; } /* * Insert the new lock in the list at the appropriate place. */ if (!unlock) { nfscl_insertlock(lp, new_lop, ilop, local); *new_lopp = NULL; modified = 1; } return (modified); } /* * This function must be run as a kernel thread. * It does Renew Ops and recovery, when required. */ APPLESTATIC void nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p) { struct nfsclowner *owp, *nowp; struct nfsclopen *op; struct nfscllockowner *lp, *nlp; struct nfscldeleghead dh; struct nfscldeleg *dp, *ndp; struct ucred *cred; u_int32_t clidrev; int error, cbpathdown, islept, igotlock, ret, clearok; uint32_t recover_done_time = 0; time_t mytime; static time_t prevsec = 0; struct nfscllockownerfh *lfhp, *nlfhp; struct nfscllockownerfhhead lfh; struct nfscllayout *lyp, *nlyp; struct nfscldevinfo *dip, *ndip; struct nfscllayouthead rlh; struct nfsclrecalllayout *recallp; struct nfsclds *dsp; cred = newnfs_getcred(); NFSLOCKCLSTATE(); clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD; NFSUNLOCKCLSTATE(); for(;;) { newnfs_setroot(cred); cbpathdown = 0; if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) { /* * Only allow one recover within 1/2 of the lease * duration (nfsc_renew). */ if (recover_done_time < NFSD_MONOSEC) { recover_done_time = NFSD_MONOSEC + clp->nfsc_renew; NFSCL_DEBUG(1, "Doing recovery..\n"); nfscl_recover(clp, cred, p); } else { NFSCL_DEBUG(1, "Clear Recovery dt=%u ms=%jd\n", recover_done_time, (intmax_t)NFSD_MONOSEC); NFSLOCKCLSTATE(); clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; NFSUNLOCKCLSTATE(); } } if (clp->nfsc_expire <= NFSD_MONOSEC && (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) { clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew; clidrev = clp->nfsc_clientidrev; error = nfsrpc_renew(clp, NULL, cred, p); if (error == NFSERR_CBPATHDOWN) cbpathdown = 1; else if (error == NFSERR_STALECLIENTID || error == NFSERR_BADSESSION) { NFSLOCKCLSTATE(); clp->nfsc_flags |= NFSCLFLAGS_RECOVER; NFSUNLOCKCLSTATE(); } else if (error == NFSERR_EXPIRED) (void) nfscl_hasexpired(clp, clidrev, p); } checkdsrenew: if (NFSHASNFSV4N(clp->nfsc_nmp)) { /* Do renews for any DS sessions. */ NFSLOCKMNT(clp->nfsc_nmp); /* Skip first entry, since the MDS is handled above. */ dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess); if (dsp != NULL) dsp = TAILQ_NEXT(dsp, nfsclds_list); while (dsp != NULL) { if (dsp->nfsclds_expire <= NFSD_MONOSEC && dsp->nfsclds_sess.nfsess_defunct == 0) { dsp->nfsclds_expire = NFSD_MONOSEC + clp->nfsc_renew; NFSUNLOCKMNT(clp->nfsc_nmp); (void)nfsrpc_renew(clp, dsp, cred, p); goto checkdsrenew; } dsp = TAILQ_NEXT(dsp, nfsclds_list); } NFSUNLOCKMNT(clp->nfsc_nmp); } TAILQ_INIT(&dh); NFSLOCKCLSTATE(); if (cbpathdown) /* It's a Total Recall! */ nfscl_totalrecall(clp); /* * Now, handle defunct owners. */ LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { if (LIST_EMPTY(&owp->nfsow_open)) { if (owp->nfsow_defunct != 0) nfscl_freeopenowner(owp, 0); } } /* * Do the recall on any delegations. To avoid trouble, always * come back up here after having slept. 
*/ igotlock = 0; tryagain: dp = TAILQ_FIRST(&clp->nfsc_deleg); while (dp != NULL) { ndp = TAILQ_NEXT(dp, nfsdl_list); if ((dp->nfsdl_flags & NFSCLDL_RECALL)) { /* * Wait for outstanding I/O ops to be done. */ if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { if (igotlock) { nfsv4_unlock(&clp->nfsc_lock, 0); igotlock = 0; } dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; (void) nfsmsleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); goto tryagain; } while (!igotlock) { igotlock = nfsv4_lock(&clp->nfsc_lock, 1, &islept, NFSCLSTATEMUTEXPTR, NULL); if (islept) goto tryagain; } NFSUNLOCKCLSTATE(); newnfs_copycred(&dp->nfsdl_cred, cred); ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp, NULL, cred, p, 1); if (!ret) { nfscl_cleandeleg(dp); TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); LIST_REMOVE(dp, nfsdl_hash); TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); nfscl_delegcnt--; nfsstatsv1.cldelegates--; } NFSLOCKCLSTATE(); } dp = ndp; } /* * Clear out old delegations, if we are above the high water * mark. Only clear out ones with no state related to them. * The tailq list is in LRU order. */ dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead); while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) { ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list); if (dp->nfsdl_rwlock.nfslock_usecnt == 0 && dp->nfsdl_rwlock.nfslock_lock == 0 && dp->nfsdl_timestamp < NFSD_MONOSEC && (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED | NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) { clearok = 1; LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { op = LIST_FIRST(&owp->nfsow_open); if (op != NULL) { clearok = 0; break; } } if (clearok) { LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { if (!LIST_EMPTY(&lp->nfsl_lock)) { clearok = 0; break; } } } if (clearok) { TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); LIST_REMOVE(dp, nfsdl_hash); TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); nfscl_delegcnt--; nfsstatsv1.cldelegates--; } } dp = ndp; } if (igotlock) nfsv4_unlock(&clp->nfsc_lock, 0); /* * Do the recall on any layouts. To avoid trouble, always * come back up here after having slept. */ TAILQ_INIT(&rlh); tryagain2: TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) { if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) { /* * Wait for outstanding I/O ops to be done. */ if (lyp->nfsly_lock.nfslock_usecnt > 0 || (lyp->nfsly_lock.nfslock_lock & NFSV4LOCK_LOCK) != 0) { lyp->nfsly_lock.nfslock_lock |= NFSV4LOCK_WANTED; nfsmsleep(&lyp->nfsly_lock.nfslock_lock, NFSCLSTATEMUTEXPTR, PZERO, "nfslyp", NULL); goto tryagain2; } /* Move the layout to the recall list. */ TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list); LIST_REMOVE(lyp, nfsly_hash); TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list); /* Handle any layout commits. */ if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) && (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) { lyp->nfsly_flags &= ~NFSLY_WRITTEN; NFSUNLOCKCLSTATE(); NFSCL_DEBUG(3, "do layoutcommit\n"); nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, cred, p); NFSLOCKCLSTATE(); goto tryagain2; } } } /* Now, look for stale layouts. 
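 * (A layout is treated as stale below when its timestamp has expired and it
 * has no users, no lock held and no recall already pending; such layouts are
 * scheduled for return to the server.)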
*/ lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead); while (lyp != NULL) { nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list); if (lyp->nfsly_timestamp < NFSD_MONOSEC && (lyp->nfsly_flags & NFSLY_RECALL) == 0 && lyp->nfsly_lock.nfslock_usecnt == 0 && lyp->nfsly_lock.nfslock_lock == 0) { NFSCL_DEBUG(4, "ret stale lay=%d\n", nfscl_layoutcnt); recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_NOWAIT); if (recallp == NULL) break; (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL, recallp); } lyp = nlyp; } /* * Free up any unreferenced device info structures. */ LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) { if (dip->nfsdi_layoutrefs == 0 && dip->nfsdi_refcnt == 0) { NFSCL_DEBUG(4, "freeing devinfo\n"); LIST_REMOVE(dip, nfsdi_list); nfscl_freedevinfo(dip); } } NFSUNLOCKCLSTATE(); /* Do layout return(s), as required. */ TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) { TAILQ_REMOVE(&rlh, lyp, nfsly_list); NFSCL_DEBUG(4, "ret layout\n"); nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p); nfscl_freelayout(lyp); } /* * Delegreturn any delegations cleaned out or recalled. */ TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) { newnfs_copycred(&dp->nfsdl_cred, cred); (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); TAILQ_REMOVE(&dh, dp, nfsdl_list); free(dp, M_NFSCLDELEG); } SLIST_INIT(&lfh); /* * Call nfscl_cleanupkext() once per second to check for * open/lock owners where the process has exited. */ mytime = NFSD_MONOSEC; if (prevsec != mytime) { prevsec = mytime; nfscl_cleanupkext(clp, &lfh); } /* * Do a ReleaseLockOwner for all lock owners where the * associated process no longer exists, as found by * nfscl_cleanupkext(). */ newnfs_setroot(cred); SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) { LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list, nlp) { (void)nfsrpc_rellockown(clp->nfsc_nmp, lp, lfhp->nfslfh_fh, lfhp->nfslfh_len, cred, p); nfscl_freelockowner(lp, 0); } free(lfhp, M_TEMP); } SLIST_INIT(&lfh); NFSLOCKCLSTATE(); if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0) (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl", hz); if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) { clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD; NFSUNLOCKCLSTATE(); NFSFREECRED(cred); wakeup((caddr_t)clp); return; } NFSUNLOCKCLSTATE(); } } /* * Initiate state recovery. Called when NFSERR_STALECLIENTID, * NFSERR_STALESTATEID or NFSERR_BADSESSION is received. */ APPLESTATIC void nfscl_initiate_recovery(struct nfsclclient *clp) { if (clp == NULL) return; NFSLOCKCLSTATE(); clp->nfsc_flags |= NFSCLFLAGS_RECOVER; NFSUNLOCKCLSTATE(); wakeup((caddr_t)clp); } /* * Dump out the state stuff for debugging. 
*/ APPLESTATIC void nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens, int lockowner, int locks) { struct nfsclclient *clp; struct nfsclowner *owp; struct nfsclopen *op; struct nfscllockowner *lp; struct nfscllock *lop; struct nfscldeleg *dp; clp = nmp->nm_clp; if (clp == NULL) { printf("nfscl dumpstate NULL clp\n"); return; } NFSLOCKCLSTATE(); TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { if (openowner && !LIST_EMPTY(&owp->nfsow_open)) printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", owp->nfsow_owner[0], owp->nfsow_owner[1], owp->nfsow_owner[2], owp->nfsow_owner[3], owp->nfsow_seqid); LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { if (opens) printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", op->nfso_stateid.other[0], op->nfso_stateid.other[1], op->nfso_stateid.other[2], op->nfso_opencnt, op->nfso_fh[12]); LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { if (lockowner) printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", lp->nfsl_owner[0], lp->nfsl_owner[1], lp->nfsl_owner[2], lp->nfsl_owner[3], lp->nfsl_seqid, lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], lp->nfsl_stateid.other[2]); LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { if (locks) #ifdef __FreeBSD__ printf("lck typ=%d fst=%ju end=%ju\n", lop->nfslo_type, (intmax_t)lop->nfslo_first, (intmax_t)lop->nfslo_end); #else printf("lck typ=%d fst=%qd end=%qd\n", lop->nfslo_type, lop->nfslo_first, lop->nfslo_end); #endif } } } } } LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { if (openowner && !LIST_EMPTY(&owp->nfsow_open)) printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", owp->nfsow_owner[0], owp->nfsow_owner[1], owp->nfsow_owner[2], owp->nfsow_owner[3], owp->nfsow_seqid); LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { if (opens) printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", op->nfso_stateid.other[0], op->nfso_stateid.other[1], op->nfso_stateid.other[2], op->nfso_opencnt, op->nfso_fh[12]); LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { if (lockowner) printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", lp->nfsl_owner[0], lp->nfsl_owner[1], lp->nfsl_owner[2], lp->nfsl_owner[3], lp->nfsl_seqid, lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], lp->nfsl_stateid.other[2]); LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { if (locks) #ifdef __FreeBSD__ printf("lck typ=%d fst=%ju end=%ju\n", lop->nfslo_type, (intmax_t)lop->nfslo_first, (intmax_t)lop->nfslo_end); #else printf("lck typ=%d fst=%qd end=%qd\n", lop->nfslo_type, lop->nfslo_first, lop->nfslo_end); #endif } } } } NFSUNLOCKCLSTATE(); } /* * Check for duplicate open owners and opens. * (Only used as a diagnostic aid.) */ APPLESTATIC void nfscl_dupopen(vnode_t vp, int dupopens) { struct nfsclclient *clp; struct nfsclowner *owp, *owp2; struct nfsclopen *op, *op2; struct nfsfh *nfhp; clp = VFSTONFS(vnode_mount(vp))->nm_clp; if (clp == NULL) { printf("nfscl dupopen NULL clp\n"); return; } nfhp = VTONFS(vp)->n_fhp; NFSLOCKCLSTATE(); /* * First, search for duplicate owners. * These should never happen! */ LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { if (owp != owp2 && !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { NFSUNLOCKCLSTATE(); printf("DUP OWNER\n"); nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0); return; } } } /* * Now, search for duplicate stateids. * These shouldn't happen, either. 
*/ LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { if (op != op2 && (op->nfso_stateid.other[0] != 0 || op->nfso_stateid.other[1] != 0 || op->nfso_stateid.other[2] != 0) && op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] && op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] && op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) { NFSUNLOCKCLSTATE(); printf("DUP STATEID\n"); nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0); return; } } } } } /* * Now search for duplicate opens. * Duplicate opens for the same owner * should never occur. Other duplicates are * possible and are checked for if "dupopens" * is true. */ LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { if (nfhp->nfh_len == op2->nfso_fhlen && !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) { LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { if (op != op2 && nfhp->nfh_len == op->nfso_fhlen && !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) && (!NFSBCMP(op->nfso_own->nfsow_owner, op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) || dupopens)) { if (!NFSBCMP(op->nfso_own->nfsow_owner, op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { NFSUNLOCKCLSTATE(); printf("BADDUP OPEN\n"); } else { NFSUNLOCKCLSTATE(); printf("DUP OPEN\n"); } nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0); return; } } } } } } NFSUNLOCKCLSTATE(); } /* * During close, find an open that needs to be dereferenced and * dereference it. If there are no more opens for this file, * log a message to that effect. * Opens aren't actually Close'd until VOP_INACTIVE() is performed * on the file's vnode. * This is the safe way, since it is difficult to identify * which open the close is for and I/O can be performed after the * close(2) system call when a file is mmap'd. * If it returns 0 for success, there will be a referenced * clp returned via clpp. */ APPLESTATIC int nfscl_getclose(vnode_t vp, struct nfsclclient **clpp) { struct nfsclclient *clp; struct nfsclowner *owp; struct nfsclopen *op; struct nfscldeleg *dp; struct nfsfh *nfhp; int error, notdecr; error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp); if (error) return (error); *clpp = clp; nfhp = VTONFS(vp)->n_fhp; notdecr = 1; NFSLOCKCLSTATE(); /* * First, look for one under a delegation that was locally issued * and just decrement the opencnt for it. Since all my Opens against * the server are DENY_NONE, I don't see a problem with hanging * onto them. (It is much easier to use one of the extant Opens * that I already have on the server when a Delegation is recalled * than to do fresh Opens.) Someday, I might need to rethink this, but. */ dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); if (dp != NULL) { LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { op = LIST_FIRST(&owp->nfsow_open); if (op != NULL) { /* * Since a delegation is for a file, there * should never be more than one open for * each openowner. */ if (LIST_NEXT(op, nfso_list) != NULL) panic("nfscdeleg opens"); if (notdecr && op->nfso_opencnt > 0) { notdecr = 0; op->nfso_opencnt--; break; } } } } /* Now process the opens against the server. 
*/ LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { if (op->nfso_fhlen == nfhp->nfh_len && !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, nfhp->nfh_len)) { /* Found an open, decrement cnt if possible */ if (notdecr && op->nfso_opencnt > 0) { notdecr = 0; op->nfso_opencnt--; } /* * There are more opens, so just return. */ if (op->nfso_opencnt > 0) { NFSUNLOCKCLSTATE(); return (0); } } } } NFSUNLOCKCLSTATE(); if (notdecr) printf("nfscl: never fnd open\n"); return (0); } APPLESTATIC int nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p) { struct nfsclclient *clp; struct nfsclowner *owp, *nowp; struct nfsclopen *op; struct nfscldeleg *dp; struct nfsfh *nfhp; struct nfsclrecalllayout *recallp; int error; error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp); if (error) return (error); *clpp = clp; nfhp = VTONFS(vp)->n_fhp; recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK); NFSLOCKCLSTATE(); /* * First get rid of the local Open structures, which should be no * longer in use. */ dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); if (dp != NULL) { LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) { op = LIST_FIRST(&owp->nfsow_open); if (op != NULL) { KASSERT((op->nfso_opencnt == 0), ("nfscl: bad open cnt on deleg")); nfscl_freeopen(op, 1); } nfscl_freeopenowner(owp, 1); } } /* Return any layouts marked return on close. */ nfscl_retoncloselayout(vp, clp, nfhp->nfh_fh, nfhp->nfh_len, &recallp); /* Now process the opens against the server. */ lookformore: LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { op = LIST_FIRST(&owp->nfsow_open); while (op != NULL) { if (op->nfso_fhlen == nfhp->nfh_len && !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, nfhp->nfh_len)) { /* Found an open, close it. */ KASSERT((op->nfso_opencnt == 0), ("nfscl: bad open cnt on server")); NFSUNLOCKCLSTATE(); nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), op, p); NFSLOCKCLSTATE(); goto lookformore; } op = LIST_NEXT(op, nfso_list); } } NFSUNLOCKCLSTATE(); /* * recallp has been set NULL by nfscl_retoncloselayout() if it was * used by the function, but calling free() with a NULL pointer is ok. */ free(recallp, M_NFSLAYRECALL); return (0); } /* * Return all delegations on this client. * (Must be called with client sleep lock.) */ static void nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p) { struct nfscldeleg *dp, *ndp; struct ucred *cred; cred = newnfs_getcred(); TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) { nfscl_cleandeleg(dp); (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); nfscl_freedeleg(&clp->nfsc_deleg, dp); } NFSFREECRED(cred); } /* * Do a callback RPC. 
*/ APPLESTATIC void nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p) { int clist, gotseq_ok, i, j, k, op, rcalls; u_int32_t *tl; struct nfsclclient *clp; struct nfscldeleg *dp = NULL; int numops, taglen = -1, error = 0, trunc; u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident; u_char tag[NFSV4_SMALLSTR + 1], *tagstr; vnode_t vp = NULL; struct nfsnode *np; struct vattr va; struct nfsfh *nfhp; mount_t mp; nfsattrbit_t attrbits, rattrbits; nfsv4stateid_t stateid; uint32_t seqid, slotid = 0, highslot, cachethis; uint8_t sessionid[NFSX_V4SESSIONID]; struct mbuf *rep; struct nfscllayout *lyp; uint64_t filesid[2], len, off; int changed, gotone, laytype, recalltype; uint32_t iomode; struct nfsclrecalllayout *recallp = NULL; struct nfsclsession *tsep; gotseq_ok = 0; nfsrvd_rephead(nd); NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); taglen = fxdr_unsigned(int, *tl); if (taglen < 0) { error = EBADRPC; goto nfsmout; } if (taglen <= NFSV4_SMALLSTR) tagstr = tag; else tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK); error = nfsrv_mtostr(nd, tagstr, taglen); if (error) { if (taglen > NFSV4_SMALLSTR) free(tagstr, M_TEMP); taglen = -1; goto nfsmout; } (void) nfsm_strtom(nd, tag, taglen); if (taglen > NFSV4_SMALLSTR) { free(tagstr, M_TEMP); } NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED); NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED); minorvers = fxdr_unsigned(u_int32_t, *tl++); if (minorvers != NFSV4_MINORVERSION && minorvers != NFSV41_MINORVERSION) nd->nd_repstat = NFSERR_MINORVERMISMATCH; cbident = fxdr_unsigned(u_int32_t, *tl++); if (nd->nd_repstat) numops = 0; else numops = fxdr_unsigned(int, *tl); /* * Loop around doing the sub ops. */ for (i = 0; i < numops; i++) { NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED); *repp++ = *tl; op = fxdr_unsigned(int, *tl); if (op < NFSV4OP_CBGETATTR || (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) || (op > NFSV4OP_CBNOTIFYDEVID && minorvers == NFSV41_MINORVERSION)) { nd->nd_repstat = NFSERR_OPILLEGAL; *repp = nfscl_errmap(nd, minorvers); retops++; break; } nd->nd_procnum = op; if (op < NFSV41_CBNOPS) nfsstatsv1.cbrpccnt[nd->nd_procnum]++; switch (op) { case NFSV4OP_CBGETATTR: NFSCL_DEBUG(4, "cbgetattr\n"); mp = NULL; vp = NULL; error = nfsm_getfh(nd, &nfhp); if (!error) error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL); if (error == 0 && i == 0 && minorvers != NFSV4_MINORVERSION) error = NFSERR_OPNOTINSESS; if (!error) { mp = nfscl_getmnt(minorvers, sessionid, cbident, &clp); if (mp == NULL) error = NFSERR_SERVERFAULT; } if (!error) { error = nfscl_ngetreopen(mp, nfhp->nfh_fh, nfhp->nfh_len, p, &np); if (!error) vp = NFSTOV(np); } if (!error) { NFSZERO_ATTRBIT(&rattrbits); NFSLOCKCLSTATE(); dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); if (dp != NULL) { if (NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_SIZE)) { if (vp != NULL) va.va_size = np->n_size; else va.va_size = dp->nfsdl_size; NFSSETBIT_ATTRBIT(&rattrbits, NFSATTRBIT_SIZE); } if (NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_CHANGE)) { va.va_filerev = dp->nfsdl_change; if (vp == NULL || (np->n_flag & NDELEGMOD)) va.va_filerev++; NFSSETBIT_ATTRBIT(&rattrbits, NFSATTRBIT_CHANGE); } } else error = NFSERR_SERVERFAULT; NFSUNLOCKCLSTATE(); } if (vp != NULL) vrele(vp); if (mp != NULL) vfs_unbusy(mp); if (nfhp != NULL) free(nfhp, M_NFSFH); if (!error) (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va, NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0, (uint64_t)0, NULL); break; case NFSV4OP_CBRECALL: NFSCL_DEBUG(4, "cbrecall\n"); 
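			/*
			 * The CB_RECALL arguments are the delegation stateid,
			 * a "truncate" boolean and the file handle.  The code
			 * below only marks the matching delegation with
			 * NFSCLDL_RECALL and wakes up the renew thread, which
			 * does the actual DelegReturn work.
			 */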
NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + NFSX_UNSIGNED); stateid.seqid = *tl++; NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other, NFSX_STATEIDOTHER); tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); trunc = fxdr_unsigned(int, *tl); error = nfsm_getfh(nd, &nfhp); if (error == 0 && i == 0 && minorvers != NFSV4_MINORVERSION) error = NFSERR_OPNOTINSESS; if (!error) { NFSLOCKCLSTATE(); if (minorvers == NFSV4_MINORVERSION) clp = nfscl_getclnt(cbident); else clp = nfscl_getclntsess(sessionid); if (clp != NULL) { dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0) { dp->nfsdl_flags |= NFSCLDL_RECALL; wakeup((caddr_t)clp); } } else { error = NFSERR_SERVERFAULT; } NFSUNLOCKCLSTATE(); } if (nfhp != NULL) free(nfhp, M_NFSFH); break; case NFSV4OP_CBLAYOUTRECALL: NFSCL_DEBUG(4, "cblayrec\n"); nfhp = NULL; NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED); laytype = fxdr_unsigned(int, *tl++); iomode = fxdr_unsigned(uint32_t, *tl++); if (newnfs_true == *tl++) changed = 1; else changed = 0; recalltype = fxdr_unsigned(int, *tl); NFSCL_DEBUG(4, "layt=%d iom=%d ch=%d rectyp=%d\n", laytype, iomode, changed, recalltype); recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK); if (laytype != NFSLAYOUT_NFSV4_1_FILES && laytype != NFSLAYOUT_FLEXFILE) error = NFSERR_NOMATCHLAYOUT; else if (recalltype == NFSLAYOUTRETURN_FILE) { error = nfsm_getfh(nd, &nfhp); NFSCL_DEBUG(4, "retfile getfh=%d\n", error); if (error != 0) goto nfsmout; NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER + NFSX_STATEID); off = fxdr_hyper(tl); tl += 2; len = fxdr_hyper(tl); tl += 2; stateid.seqid = fxdr_unsigned(uint32_t, *tl++); NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER); if (minorvers == NFSV4_MINORVERSION) error = NFSERR_NOTSUPP; else if (i == 0) error = NFSERR_OPNOTINSESS; NFSCL_DEBUG(4, "off=%ju len=%ju sq=%u err=%d\n", (uintmax_t)off, (uintmax_t)len, stateid.seqid, error); if (error == 0) { NFSLOCKCLSTATE(); clp = nfscl_getclntsess(sessionid); NFSCL_DEBUG(4, "cbly clp=%p\n", clp); if (clp != NULL) { lyp = nfscl_findlayout(clp, nfhp->nfh_fh, nfhp->nfh_len); NFSCL_DEBUG(4, "cblyp=%p\n", lyp); if (lyp != NULL && (lyp->nfsly_flags & (NFSLY_FILES | NFSLY_FLEXFILE)) != 0 && !NFSBCMP(stateid.other, lyp->nfsly_stateid.other, NFSX_STATEIDOTHER)) { error = nfscl_layoutrecall( recalltype, lyp, iomode, off, len, stateid.seqid, 0, 0, NULL, recallp); recallp = NULL; wakeup(clp); NFSCL_DEBUG(4, "aft layrcal=%d\n", error); } else error = NFSERR_NOMATCHLAYOUT; } else error = NFSERR_NOMATCHLAYOUT; NFSUNLOCKCLSTATE(); } free(nfhp, M_NFSFH); } else if (recalltype == NFSLAYOUTRETURN_FSID) { NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER); filesid[0] = fxdr_hyper(tl); tl += 2; filesid[1] = fxdr_hyper(tl); tl += 2; gotone = 0; NFSLOCKCLSTATE(); clp = nfscl_getclntsess(sessionid); if (clp != NULL) { TAILQ_FOREACH(lyp, &clp->nfsc_layout, nfsly_list) { if (lyp->nfsly_filesid[0] == filesid[0] && lyp->nfsly_filesid[1] == filesid[1]) { error = nfscl_layoutrecall( recalltype, lyp, iomode, 0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL, recallp); recallp = NULL; gotone = 1; } } if (gotone != 0) wakeup(clp); else error = NFSERR_NOMATCHLAYOUT; } else error = NFSERR_NOMATCHLAYOUT; NFSUNLOCKCLSTATE(); } else if (recalltype == NFSLAYOUTRETURN_ALL) { gotone = 0; NFSLOCKCLSTATE(); clp = nfscl_getclntsess(sessionid); if (clp != NULL) { TAILQ_FOREACH(lyp, &clp->nfsc_layout, nfsly_list) { error = nfscl_layoutrecall( recalltype, lyp, iomode, 0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL, recallp); recallp = 
NULL; gotone = 1; } if (gotone != 0) wakeup(clp); else error = NFSERR_NOMATCHLAYOUT; } else error = NFSERR_NOMATCHLAYOUT; NFSUNLOCKCLSTATE(); } else error = NFSERR_NOMATCHLAYOUT; if (recallp != NULL) { free(recallp, M_NFSLAYRECALL); recallp = NULL; } break; case NFSV4OP_CBSEQUENCE: NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID + 5 * NFSX_UNSIGNED); bcopy(tl, sessionid, NFSX_V4SESSIONID); tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; seqid = fxdr_unsigned(uint32_t, *tl++); slotid = fxdr_unsigned(uint32_t, *tl++); highslot = fxdr_unsigned(uint32_t, *tl++); cachethis = *tl++; /* Throw away the referring call stuff. */ clist = fxdr_unsigned(int, *tl); for (j = 0; j < clist; j++) { NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID + NFSX_UNSIGNED); tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; rcalls = fxdr_unsigned(int, *tl); for (k = 0; k < rcalls; k++) { NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED); } } NFSLOCKCLSTATE(); if (i == 0) { clp = nfscl_getclntsess(sessionid); if (clp == NULL) error = NFSERR_SERVERFAULT; } else error = NFSERR_SEQUENCEPOS; if (error == 0) { tsep = nfsmnt_mdssession(clp->nfsc_nmp); error = nfsv4_seqsession(seqid, slotid, highslot, tsep->nfsess_cbslots, &rep, tsep->nfsess_backslots); } NFSUNLOCKCLSTATE(); if (error == 0 || error == NFSERR_REPLYFROMCACHE) { gotseq_ok = 1; if (rep != NULL) { /* * Handle a reply for a retried * callback. The reply will be * re-inserted in the session cache * by the nfsv4_seqsess_cacherep() call * after out: */ KASSERT(error == NFSERR_REPLYFROMCACHE, ("cbsequence: non-NULL rep")); NFSCL_DEBUG(4, "Got cbretry\n"); m_freem(nd->nd_mreq); nd->nd_mreq = rep; rep = NULL; goto out; } NFSM_BUILD(tl, uint32_t *, NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED); bcopy(sessionid, tl, NFSX_V4SESSIONID); tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; *tl++ = txdr_unsigned(seqid); *tl++ = txdr_unsigned(slotid); *tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1); *tl = txdr_unsigned(NFSV4_CBSLOTS - 1); } break; default: if (i == 0 && minorvers == NFSV41_MINORVERSION) error = NFSERR_OPNOTINSESS; else { NFSCL_DEBUG(1, "unsupp callback %d\n", op); error = NFSERR_NOTSUPP; } break; } if (error) { if (error == EBADRPC || error == NFSERR_BADXDR) { nd->nd_repstat = NFSERR_BADXDR; } else { nd->nd_repstat = error; } error = 0; } retops++; if (nd->nd_repstat) { *repp = nfscl_errmap(nd, minorvers); break; } else *repp = 0; /* NFS4_OK */ } nfsmout: if (recallp != NULL) free(recallp, M_NFSLAYRECALL); if (error) { if (error == EBADRPC || error == NFSERR_BADXDR) nd->nd_repstat = NFSERR_BADXDR; else printf("nfsv4 comperr1=%d\n", error); } if (taglen == -1) { NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); *tl++ = 0; *tl = 0; } else { *retopsp = txdr_unsigned(retops); } *nd->nd_errp = nfscl_errmap(nd, minorvers); out: if (gotseq_ok != 0) { rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK); NFSLOCKCLSTATE(); clp = nfscl_getclntsess(sessionid); if (clp != NULL) { tsep = nfsmnt_mdssession(clp->nfsc_nmp); nfsv4_seqsess_cacherep(slotid, tsep->nfsess_cbslots, NFSERR_OK, &rep); NFSUNLOCKCLSTATE(); } else { NFSUNLOCKCLSTATE(); m_freem(rep); } } } /* * Generate the next cbident value. Basically just increment a static value * and then check that it isn't already in the list, if it has wrapped around. */ static u_int32_t nfscl_nextcbident(void) { struct nfsclclient *clp; int matched; static u_int32_t nextcbident = 0; static int haswrapped = 0; nextcbident++; if (nextcbident == 0) haswrapped = 1; if (haswrapped) { /* * Search the clientid list for one already using this cbident. 
*/ do { matched = 0; NFSLOCKCLSTATE(); LIST_FOREACH(clp, &nfsclhead, nfsc_list) { if (clp->nfsc_cbident == nextcbident) { matched = 1; break; } } NFSUNLOCKCLSTATE(); if (matched == 1) nextcbident++; } while (matched); } return (nextcbident); } /* * Get the mount point related to a given cbident or session and busy it. */ static mount_t nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident, struct nfsclclient **clpp) { struct nfsclclient *clp; mount_t mp; int error; struct nfsclsession *tsep; *clpp = NULL; NFSLOCKCLSTATE(); LIST_FOREACH(clp, &nfsclhead, nfsc_list) { tsep = nfsmnt_mdssession(clp->nfsc_nmp); if (minorvers == NFSV4_MINORVERSION) { if (clp->nfsc_cbident == cbident) break; } else if (!NFSBCMP(tsep->nfsess_sessionid, sessionid, NFSX_V4SESSIONID)) break; } if (clp == NULL) { NFSUNLOCKCLSTATE(); return (NULL); } mp = clp->nfsc_nmp->nm_mountp; vfs_ref(mp); NFSUNLOCKCLSTATE(); error = vfs_busy(mp, 0); vfs_rel(mp); if (error != 0) return (NULL); *clpp = clp; return (mp); } /* * Get the clientid pointer related to a given cbident. */ static struct nfsclclient * nfscl_getclnt(u_int32_t cbident) { struct nfsclclient *clp; LIST_FOREACH(clp, &nfsclhead, nfsc_list) if (clp->nfsc_cbident == cbident) break; return (clp); } /* * Get the clientid pointer related to a given sessionid. */ static struct nfsclclient * nfscl_getclntsess(uint8_t *sessionid) { struct nfsclclient *clp; struct nfsclsession *tsep; LIST_FOREACH(clp, &nfsclhead, nfsc_list) { tsep = nfsmnt_mdssession(clp->nfsc_nmp); if (!NFSBCMP(tsep->nfsess_sessionid, sessionid, NFSX_V4SESSIONID)) break; } return (clp); } /* * Search for a lock conflict locally on the client. A conflict occurs if * - not same owner and overlapping byte range and at least one of them is * a write lock or this is an unlock. */ static int nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen, struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp, struct nfscllock **lopp) { struct nfsclowner *owp; struct nfsclopen *op; int ret; if (dp != NULL) { ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp); if (ret) return (ret); } LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { if (op->nfso_fhlen == fhlen && !NFSBCMP(op->nfso_fh, fhp, fhlen)) { ret = nfscl_checkconflict(&op->nfso_lock, nlop, own, lopp); if (ret) return (ret); } } } return (0); } static int nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop, u_int8_t *own, struct nfscllock **lopp) { struct nfscllockowner *lp; struct nfscllock *lop; LIST_FOREACH(lp, lhp, nfsl_list) { if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) { LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { if (lop->nfslo_first >= nlop->nfslo_end) break; if (lop->nfslo_end <= nlop->nfslo_first) continue; if (lop->nfslo_type == F_WRLCK || nlop->nfslo_type == F_WRLCK || nlop->nfslo_type == F_UNLCK) { if (lopp != NULL) *lopp = lop; return (NFSERR_DENIED); } } } } return (0); } /* * Check for a local conflicting lock. 
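 * Returns -1 when no LOCKT RPC is required: either a conflicting lock was
 * found locally (and *fl has been filled in, F_GETLK style) or a delegation
 * held for the file guarantees that no other client can hold a conflicting
 * lock.  A return of 0 means the caller still has to ask the server.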
*/ APPLESTATIC int nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off, u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags) { struct nfscllock *lop, nlck; struct nfscldeleg *dp; struct nfsnode *np; u_int8_t own[NFSV4CL_LOCKNAMELEN]; int error; nlck.nfslo_type = fl->l_type; nlck.nfslo_first = off; if (len == NFS64BITSSET) { nlck.nfslo_end = NFS64BITSSET; } else { nlck.nfslo_end = off + len; if (nlck.nfslo_end <= nlck.nfslo_first) return (NFSERR_INVAL); } np = VTONFS(vp); nfscl_filllockowner(id, own, flags); NFSLOCKCLSTATE(); dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len, &nlck, own, dp, &lop); if (error != 0) { fl->l_whence = SEEK_SET; fl->l_start = lop->nfslo_first; if (lop->nfslo_end == NFS64BITSSET) fl->l_len = 0; else fl->l_len = lop->nfslo_end - lop->nfslo_first; fl->l_pid = (pid_t)0; fl->l_type = lop->nfslo_type; error = -1; /* no RPC required */ } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) || fl->l_type == F_RDLCK)) { /* * The delegation ensures that there isn't a conflicting * lock on the server, so return -1 to indicate an RPC * isn't required. */ fl->l_type = F_UNLCK; error = -1; } NFSUNLOCKCLSTATE(); return (error); } /* * Handle Recall of a delegation. * The clp must be exclusive locked when this is called. */ static int nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp, struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p, int called_from_renewthread) { struct nfsclowner *owp, *lowp, *nowp; struct nfsclopen *op, *lop; struct nfscllockowner *lp; struct nfscllock *lckp; struct nfsnode *np; int error = 0, ret, gotvp = 0; if (vp == NULL) { /* * First, get a vnode for the file. This is needed to do RPCs. */ ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh, dp->nfsdl_fhlen, p, &np); if (ret) { /* * File isn't open, so nothing to move over to the * server. */ return (0); } vp = NFSTOV(np); gotvp = 1; } else { np = VTONFS(vp); } dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET; /* * Ok, if it's a write delegation, flush data to the server, so * that close/open consistency is retained. */ ret = 0; NFSLOCKNODE(np); if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) { np->n_flag |= NDELEGRECALL; NFSUNLOCKNODE(np); ret = ncl_flush(vp, MNT_WAIT, p, 1, called_from_renewthread); NFSLOCKNODE(np); np->n_flag &= ~NDELEGRECALL; } NFSINVALATTRCACHE(np); NFSUNLOCKNODE(np); if (ret == EIO && called_from_renewthread != 0) { /* * If the flush failed with EIO for the renew thread, * return now, so that the dirty buffer will be flushed * later. */ if (gotvp != 0) vrele(vp); return (ret); } /* * Now, for each openowner with opens issued locally, move them * over to state against the server. */ LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) { lop = LIST_FIRST(&lowp->nfsow_open); if (lop != NULL) { if (LIST_NEXT(lop, nfso_list) != NULL) panic("nfsdlg mult opens"); /* * Look for the same openowner against the server. */ LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { if (!NFSBCMP(lowp->nfsow_owner, owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { newnfs_copycred(&dp->nfsdl_cred, cred); ret = nfscl_moveopen(vp, clp, nmp, lop, owp, dp, cred, p); if (ret == NFSERR_STALECLIENTID || ret == NFSERR_STALEDONTRECOVER || ret == NFSERR_BADSESSION) { if (gotvp) vrele(vp); return (ret); } if (ret) { nfscl_freeopen(lop, 1); if (!error) error = ret; } break; } } /* * If no openowner found, create one and get an open * for it. 
*/ if (owp == NULL) { nowp = malloc( sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK); nfscl_newopen(clp, NULL, &owp, &nowp, &op, NULL, lowp->nfsow_owner, dp->nfsdl_fh, dp->nfsdl_fhlen, NULL, NULL); newnfs_copycred(&dp->nfsdl_cred, cred); ret = nfscl_moveopen(vp, clp, nmp, lop, owp, dp, cred, p); if (ret) { nfscl_freeopenowner(owp, 0); if (ret == NFSERR_STALECLIENTID || ret == NFSERR_STALEDONTRECOVER || ret == NFSERR_BADSESSION) { if (gotvp) vrele(vp); return (ret); } if (ret) { nfscl_freeopen(lop, 1); if (!error) error = ret; } } } } } /* * Now, get byte range locks for any locks done locally. */ LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) { newnfs_copycred(&dp->nfsdl_cred, cred); ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p); if (ret == NFSERR_STALESTATEID || ret == NFSERR_STALEDONTRECOVER || ret == NFSERR_STALECLIENTID || ret == NFSERR_BADSESSION) { if (gotvp) vrele(vp); return (ret); } if (ret && !error) error = ret; } } if (gotvp) vrele(vp); return (error); } /* * Move a locally issued open over to an owner on the state list. * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and * returns with it unlocked. */ static int nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp, struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp, struct ucred *cred, NFSPROC_T *p) { struct nfsclopen *op, *nop; struct nfscldeleg *ndp; struct nfsnode *np; int error = 0, newone; /* * First, look for an appropriate open, If found, just increment the * opencnt in it. */ LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode && op->nfso_fhlen == lop->nfso_fhlen && !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) { op->nfso_opencnt += lop->nfso_opencnt; nfscl_freeopen(lop, 1); return (0); } } /* No appropriate open, so we have to do one against the server. */ np = VTONFS(vp); nop = malloc(sizeof (struct nfsclopen) + lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK); newone = 0; nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner, lop->nfso_fh, lop->nfso_fhlen, cred, &newone); ndp = dp; error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen, lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op, NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p); if (error) { if (newone) nfscl_freeopen(op, 0); } else { op->nfso_mode |= lop->nfso_mode; op->nfso_opencnt += lop->nfso_opencnt; nfscl_freeopen(lop, 1); } if (nop != NULL) free(nop, M_NFSCLOPEN); if (ndp != NULL) { /* * What should I do with the returned delegation, since the * delegation is being recalled? For now, just printf and * through it away. */ printf("Moveopen returned deleg\n"); free(ndp, M_NFSCLDELEG); } return (error); } /* * Recall all delegations on this client. */ static void nfscl_totalrecall(struct nfsclclient *clp) { struct nfscldeleg *dp; TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0) dp->nfsdl_flags |= NFSCLDL_RECALL; } } /* * Relock byte ranges. Called for delegation recall and state expiry. 
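 * Each lock is first re-registered locally via nfscl_getbytelock() and,
 * unless that could be satisfied entirely locally, pushed to the server
 * with a LOCK RPC through nfscl_trylock().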
*/ static int nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp, struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred, NFSPROC_T *p) { struct nfscllockowner *nlp; struct nfsfh *nfhp; u_int64_t off, len; int error, newone, donelocally; off = lop->nfslo_first; len = lop->nfslo_end - lop->nfslo_first; error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p, clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner, lp->nfsl_openowner, &nlp, &newone, &donelocally); if (error || donelocally) return (error); nfhp = VTONFS(vp)->n_fhp; error = nfscl_trylock(nmp, vp, nfhp->nfh_fh, nfhp->nfh_len, nlp, newone, 0, off, len, lop->nfslo_type, cred, p); if (error) nfscl_freelockowner(nlp, 0); return (error); } /* * Called to re-open a file. Basically get a vnode for the file handle * and then call nfsrpc_openrpc() to do the rest. */ static int nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen, u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp, struct ucred *cred, NFSPROC_T *p) { struct nfsnode *np; vnode_t vp; int error; error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np); if (error) return (error); vp = NFSTOV(np); if (np->n_v4 != NULL) { error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen, fhp, fhlen, mode, op, NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0, cred, p); } else { error = EINVAL; } vrele(vp); return (error); } /* * Try an open against the server. Just call nfsrpc_openrpc(), retrying while * NFSERR_DELAY. Also, try system credentials, if the passed in credentials * fail. */ static int nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen, u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op, u_int8_t *name, int namelen, struct nfscldeleg **ndpp, int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p) { int error; do { error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen, mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p, 0, 0); if (error == NFSERR_DELAY) (void) nfs_catnap(PZERO, error, "nfstryop"); } while (error == NFSERR_DELAY); if (error == EAUTH || error == EACCES) { /* Try again using system credentials */ newnfs_setroot(cred); do { error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen, mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p, 1, 0); if (error == NFSERR_DELAY) (void) nfs_catnap(PZERO, error, "nfstryop"); } while (error == NFSERR_DELAY); } return (error); } /* * Try a byte range lock. Just loop on nfsrpc_lock() while it returns * NFSERR_DELAY. Also, retry with system credentials, if the provided * cred don't work. 
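 * This is the same retry idiom used by nfscl_tryopen(), nfscl_trydelegreturn()
 * and nfscl_tryclose(): loop with nfs_catnap() while the server replies
 * NFSERR_DELAY and, if the RPC then fails with EAUTH or EACCES, switch to
 * system credentials with newnfs_setroot() and run the loop once more.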
*/ static int nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen, struct nfscllockowner *nlp, int newone, int reclaim, u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p) { struct nfsrv_descript nfsd, *nd = &nfsd; int error; do { error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone, reclaim, off, len, type, cred, p, 0); if (!error && nd->nd_repstat == NFSERR_DELAY) (void) nfs_catnap(PZERO, (int)nd->nd_repstat, "nfstrylck"); } while (!error && nd->nd_repstat == NFSERR_DELAY); if (!error) error = nd->nd_repstat; if (error == EAUTH || error == EACCES) { /* Try again using root credentials */ newnfs_setroot(cred); do { error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone, reclaim, off, len, type, cred, p, 1); if (!error && nd->nd_repstat == NFSERR_DELAY) (void) nfs_catnap(PZERO, (int)nd->nd_repstat, "nfstrylck"); } while (!error && nd->nd_repstat == NFSERR_DELAY); if (!error) error = nd->nd_repstat; } return (error); } /* * Try a delegreturn against the server. Just call nfsrpc_delegreturn(), * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in * credentials fail. */ static int nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred, struct nfsmount *nmp, NFSPROC_T *p) { int error; do { error = nfsrpc_delegreturn(dp, cred, nmp, p, 0); if (error == NFSERR_DELAY) (void) nfs_catnap(PZERO, error, "nfstrydp"); } while (error == NFSERR_DELAY); if (error == EAUTH || error == EACCES) { /* Try again using system credentials */ newnfs_setroot(cred); do { error = nfsrpc_delegreturn(dp, cred, nmp, p, 1); if (error == NFSERR_DELAY) (void) nfs_catnap(PZERO, error, "nfstrydp"); } while (error == NFSERR_DELAY); } return (error); } /* * Try a close against the server. Just call nfsrpc_closerpc(), * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in * credentials fail. */ APPLESTATIC int nfscl_tryclose(struct nfsclopen *op, struct ucred *cred, struct nfsmount *nmp, NFSPROC_T *p) { struct nfsrv_descript nfsd, *nd = &nfsd; int error; do { error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0); if (error == NFSERR_DELAY) (void) nfs_catnap(PZERO, error, "nfstrycl"); } while (error == NFSERR_DELAY); if (error == EAUTH || error == EACCES) { /* Try again using system credentials */ newnfs_setroot(cred); do { error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1); if (error == NFSERR_DELAY) (void) nfs_catnap(PZERO, error, "nfstrycl"); } while (error == NFSERR_DELAY); } return (error); } /* * Decide if a delegation on a file permits close without flushing writes * to the server. This might be a big performance win in some environments. * (Not useful until the client does caching on local stable storage.) */ APPLESTATIC int nfscl_mustflush(vnode_t vp) { struct nfsclclient *clp; struct nfscldeleg *dp; struct nfsnode *np; struct nfsmount *nmp; np = VTONFS(vp); nmp = VFSTONFS(vnode_mount(vp)); if (!NFSHASNFSV4(nmp)) return (1); NFSLOCKCLSTATE(); clp = nfscl_findcl(nmp); if (clp == NULL) { NFSUNLOCKCLSTATE(); return (1); } dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); if (dp != NULL && (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == NFSCLDL_WRITE && (dp->nfsdl_sizelimit >= np->n_size || !NFSHASSTRICT3530(nmp))) { NFSUNLOCKCLSTATE(); return (0); } NFSUNLOCKCLSTATE(); return (1); } /* * See if a (write) delegation exists for this file. 
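 * Note the inverted sense of the return value: 0 means a usable delegation
 * (a write delegation when writedeleg is non-zero) is held and is not being
 * recalled or returned; 1 means no such delegation exists.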
*/ APPLESTATIC int nfscl_nodeleg(vnode_t vp, int writedeleg) { struct nfsclclient *clp; struct nfscldeleg *dp; struct nfsnode *np; struct nfsmount *nmp; np = VTONFS(vp); nmp = VFSTONFS(vnode_mount(vp)); if (!NFSHASNFSV4(nmp)) return (1); NFSLOCKCLSTATE(); clp = nfscl_findcl(nmp); if (clp == NULL) { NFSUNLOCKCLSTATE(); return (1); } dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); if (dp != NULL && (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 && (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) == NFSCLDL_WRITE)) { NFSUNLOCKCLSTATE(); return (0); } NFSUNLOCKCLSTATE(); return (1); } /* * Look for an associated delegation that should be DelegReturned. */ APPLESTATIC int nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp) { struct nfsclclient *clp; struct nfscldeleg *dp; struct nfsclowner *owp; struct nfscllockowner *lp; struct nfsmount *nmp; struct ucred *cred; struct nfsnode *np; int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept; nmp = VFSTONFS(vnode_mount(vp)); np = VTONFS(vp); NFSLOCKCLSTATE(); /* * Loop around waiting for: * - outstanding I/O operations on delegations to complete * - for a delegation on vp that has state, lock the client and * do a recall * - return delegation with no state */ while (1) { clp = nfscl_findcl(nmp); if (clp == NULL) { NFSUNLOCKCLSTATE(); return (retcnt); } dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); if (dp != NULL) { /* * Wait for outstanding I/O ops to be done. */ if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { if (igotlock) { nfsv4_unlock(&clp->nfsc_lock, 0); igotlock = 0; } dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; (void) nfsmsleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); continue; } needsrecall = 0; LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { if (!LIST_EMPTY(&owp->nfsow_open)) { needsrecall = 1; break; } } if (!needsrecall) { LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { if (!LIST_EMPTY(&lp->nfsl_lock)) { needsrecall = 1; break; } } } if (needsrecall && !triedrecall) { dp->nfsdl_flags |= NFSCLDL_DELEGRET; islept = 0; while (!igotlock) { igotlock = nfsv4_lock(&clp->nfsc_lock, 1, &islept, NFSCLSTATEMUTEXPTR, NULL); if (islept) break; } if (islept) continue; NFSUNLOCKCLSTATE(); cred = newnfs_getcred(); newnfs_copycred(&dp->nfsdl_cred, cred); (void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0); NFSFREECRED(cred); triedrecall = 1; NFSLOCKCLSTATE(); nfsv4_unlock(&clp->nfsc_lock, 0); igotlock = 0; continue; } *stp = dp->nfsdl_stateid; retcnt = 1; nfscl_cleandeleg(dp); nfscl_freedeleg(&clp->nfsc_deleg, dp); } if (igotlock) nfsv4_unlock(&clp->nfsc_lock, 0); NFSUNLOCKCLSTATE(); return (retcnt); } } /* * Look for associated delegation(s) that should be DelegReturned. */ APPLESTATIC int nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp, nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p) { struct nfsclclient *clp; struct nfscldeleg *dp; struct nfsclowner *owp; struct nfscllockowner *lp; struct nfsmount *nmp; struct ucred *cred; struct nfsnode *np; int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept; nmp = VFSTONFS(vnode_mount(fvp)); *gotfdp = 0; *gottdp = 0; NFSLOCKCLSTATE(); /* * Loop around waiting for: * - outstanding I/O operations on delegations to complete * - for a delegation on fvp that has state, lock the client and * do a recall * - return delegation(s) with no state. 
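 * This mirrors nfscl_removedeleg() above, but handles both the source and
 * the optional target vnode of a rename, returning a stateid and setting
 * the corresponding *gotfdp/*gottdp flag for each delegation found.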
*/ while (1) { clp = nfscl_findcl(nmp); if (clp == NULL) { NFSUNLOCKCLSTATE(); return (retcnt); } np = VTONFS(fvp); dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); if (dp != NULL && *gotfdp == 0) { /* * Wait for outstanding I/O ops to be done. */ if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { if (igotlock) { nfsv4_unlock(&clp->nfsc_lock, 0); igotlock = 0; } dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; (void) nfsmsleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); continue; } needsrecall = 0; LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { if (!LIST_EMPTY(&owp->nfsow_open)) { needsrecall = 1; break; } } if (!needsrecall) { LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { if (!LIST_EMPTY(&lp->nfsl_lock)) { needsrecall = 1; break; } } } if (needsrecall && !triedrecall) { dp->nfsdl_flags |= NFSCLDL_DELEGRET; islept = 0; while (!igotlock) { igotlock = nfsv4_lock(&clp->nfsc_lock, 1, &islept, NFSCLSTATEMUTEXPTR, NULL); if (islept) break; } if (islept) continue; NFSUNLOCKCLSTATE(); cred = newnfs_getcred(); newnfs_copycred(&dp->nfsdl_cred, cred); (void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0); NFSFREECRED(cred); triedrecall = 1; NFSLOCKCLSTATE(); nfsv4_unlock(&clp->nfsc_lock, 0); igotlock = 0; continue; } *fstp = dp->nfsdl_stateid; retcnt++; *gotfdp = 1; nfscl_cleandeleg(dp); nfscl_freedeleg(&clp->nfsc_deleg, dp); } if (igotlock) { nfsv4_unlock(&clp->nfsc_lock, 0); igotlock = 0; } if (tvp != NULL) { np = VTONFS(tvp); dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); if (dp != NULL && *gottdp == 0) { /* * Wait for outstanding I/O ops to be done. */ if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; (void) nfsmsleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); continue; } LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { if (!LIST_EMPTY(&owp->nfsow_open)) { NFSUNLOCKCLSTATE(); return (retcnt); } } LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { if (!LIST_EMPTY(&lp->nfsl_lock)) { NFSUNLOCKCLSTATE(); return (retcnt); } } *tstp = dp->nfsdl_stateid; retcnt++; *gottdp = 1; nfscl_cleandeleg(dp); nfscl_freedeleg(&clp->nfsc_deleg, dp); } } NFSUNLOCKCLSTATE(); return (retcnt); } } /* * Get a reference on the clientid associated with the mount point. * Return 1 if success, 0 otherwise. */ APPLESTATIC int nfscl_getref(struct nfsmount *nmp) { struct nfsclclient *clp; NFSLOCKCLSTATE(); clp = nfscl_findcl(nmp); if (clp == NULL) { NFSUNLOCKCLSTATE(); return (0); } nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, NULL); NFSUNLOCKCLSTATE(); return (1); } /* * Release a reference on a clientid acquired with the above call. */ APPLESTATIC void nfscl_relref(struct nfsmount *nmp) { struct nfsclclient *clp; NFSLOCKCLSTATE(); clp = nfscl_findcl(nmp); if (clp == NULL) { NFSUNLOCKCLSTATE(); return; } nfsv4_relref(&clp->nfsc_lock); NFSUNLOCKCLSTATE(); } /* * Save the size attribute in the delegation, since the nfsnode * is going away. */ APPLESTATIC void nfscl_reclaimnode(vnode_t vp) { struct nfsclclient *clp; struct nfscldeleg *dp; struct nfsnode *np = VTONFS(vp); struct nfsmount *nmp; nmp = VFSTONFS(vnode_mount(vp)); if (!NFSHASNFSV4(nmp)) return; NFSLOCKCLSTATE(); clp = nfscl_findcl(nmp); if (clp == NULL) { NFSUNLOCKCLSTATE(); return; } dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) dp->nfsdl_size = np->n_size; NFSUNLOCKCLSTATE(); } /* * Get the saved size attribute in the delegation, since it is a * newly allocated nfsnode. 
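 * This is the counterpart of nfscl_reclaimnode() above: the size stashed in
 * the write delegation when the old nfsnode was reclaimed is copied back
 * into the new one.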
*/ APPLESTATIC void nfscl_newnode(vnode_t vp) { struct nfsclclient *clp; struct nfscldeleg *dp; struct nfsnode *np = VTONFS(vp); struct nfsmount *nmp; nmp = VFSTONFS(vnode_mount(vp)); if (!NFSHASNFSV4(nmp)) return; NFSLOCKCLSTATE(); clp = nfscl_findcl(nmp); if (clp == NULL) { NFSUNLOCKCLSTATE(); return; } dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) np->n_size = dp->nfsdl_size; NFSUNLOCKCLSTATE(); } /* * If there is a valid write delegation for this file, set the modtime * to the local clock time. */ APPLESTATIC void nfscl_delegmodtime(vnode_t vp) { struct nfsclclient *clp; struct nfscldeleg *dp; struct nfsnode *np = VTONFS(vp); struct nfsmount *nmp; nmp = VFSTONFS(vnode_mount(vp)); if (!NFSHASNFSV4(nmp)) return; NFSLOCKCLSTATE(); clp = nfscl_findcl(nmp); if (clp == NULL) { NFSUNLOCKCLSTATE(); return; } dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) { nanotime(&dp->nfsdl_modtime); dp->nfsdl_flags |= NFSCLDL_MODTIMESET; } NFSUNLOCKCLSTATE(); } /* * If there is a valid write delegation for this file with a modtime set, * put that modtime in mtime. */ APPLESTATIC void nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime) { struct nfsclclient *clp; struct nfscldeleg *dp; struct nfsnode *np = VTONFS(vp); struct nfsmount *nmp; nmp = VFSTONFS(vnode_mount(vp)); if (!NFSHASNFSV4(nmp)) return; NFSLOCKCLSTATE(); clp = nfscl_findcl(nmp); if (clp == NULL) { NFSUNLOCKCLSTATE(); return; } dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); if (dp != NULL && (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) == (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) *mtime = dp->nfsdl_modtime; NFSUNLOCKCLSTATE(); } static int nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers) { short *defaulterrp, *errp; if (!nd->nd_repstat) return (0); if (nd->nd_procnum == NFSPROC_NOOP) return (txdr_unsigned(nd->nd_repstat & 0xffff)); if (nd->nd_repstat == EBADRPC) return (txdr_unsigned(NFSERR_BADXDR)); if (nd->nd_repstat == NFSERR_MINORVERMISMATCH || nd->nd_repstat == NFSERR_OPILLEGAL) return (txdr_unsigned(nd->nd_repstat)); if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 && minorvers > NFSV4_MINORVERSION) { /* NFSv4.n error. */ return (txdr_unsigned(nd->nd_repstat)); } if (nd->nd_procnum < NFSV4OP_CBNOPS) errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum]; else return (txdr_unsigned(nd->nd_repstat)); while (*++errp) if (*errp == (short)nd->nd_repstat) return (txdr_unsigned(nd->nd_repstat)); return (txdr_unsigned(*defaulterrp)); } /* * Called to find/add a layout to a client. * This function returns the layout with a refcnt (shared lock) upon * success (returns 0) or with no lock/refcnt on the layout when an * error is returned. * If a layout is passed in via lypp, it is locked (exclusively locked). 
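 * Layouts are kept on the client in LRU order (most recently used at the
 * head of the TAILQ) and hashed by file handle.  Each use pushes the
 * timestamp roughly two minutes into the future (NFSD_MONOSEC + 120), so
 * the renew thread can return layouts that have not been used recently.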
*/ APPLESTATIC int nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen, nfsv4stateid_t *stateidp, int layouttype, int retonclose, struct nfsclflayouthead *fhlp, struct nfscllayout **lypp, struct ucred *cred, NFSPROC_T *p) { struct nfsclclient *clp; struct nfscllayout *lyp, *tlyp; struct nfsclflayout *flp; struct nfsnode *np = VTONFS(vp); mount_t mp; int layout_passed_in; mp = nmp->nm_mountp; layout_passed_in = 1; tlyp = NULL; lyp = *lypp; if (lyp == NULL) { layout_passed_in = 0; tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT, M_WAITOK | M_ZERO); } NFSLOCKCLSTATE(); clp = nmp->nm_clp; if (clp == NULL) { if (layout_passed_in != 0) nfsv4_unlock(&lyp->nfsly_lock, 0); NFSUNLOCKCLSTATE(); if (tlyp != NULL) free(tlyp, M_NFSLAYOUT); return (EPERM); } if (lyp == NULL) { /* * Although no lyp was passed in, another thread might have * allocated one. If one is found, just increment it's ref * count and return it. */ lyp = nfscl_findlayout(clp, fhp, fhlen); if (lyp == NULL) { lyp = tlyp; tlyp = NULL; lyp->nfsly_stateid.seqid = stateidp->seqid; lyp->nfsly_stateid.other[0] = stateidp->other[0]; lyp->nfsly_stateid.other[1] = stateidp->other[1]; lyp->nfsly_stateid.other[2] = stateidp->other[2]; lyp->nfsly_lastbyte = 0; LIST_INIT(&lyp->nfsly_flayread); LIST_INIT(&lyp->nfsly_flayrw); LIST_INIT(&lyp->nfsly_recall); lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0]; lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1]; lyp->nfsly_clp = clp; if (layouttype == NFSLAYOUT_FLEXFILE) lyp->nfsly_flags = NFSLY_FLEXFILE; else lyp->nfsly_flags = NFSLY_FILES; if (retonclose != 0) lyp->nfsly_flags |= NFSLY_RETONCLOSE; lyp->nfsly_fhlen = fhlen; NFSBCOPY(fhp, lyp->nfsly_fh, fhlen); TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list); LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp, nfsly_hash); lyp->nfsly_timestamp = NFSD_MONOSEC + 120; nfscl_layoutcnt++; } else { if (retonclose != 0) lyp->nfsly_flags |= NFSLY_RETONCLOSE; TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list); TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list); lyp->nfsly_timestamp = NFSD_MONOSEC + 120; } nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp); if (NFSCL_FORCEDISM(mp)) { NFSUNLOCKCLSTATE(); if (tlyp != NULL) free(tlyp, M_NFSLAYOUT); return (EPERM); } *lypp = lyp; } else lyp->nfsly_stateid.seqid = stateidp->seqid; /* Merge the new list of File Layouts into the list. */ flp = LIST_FIRST(fhlp); if (flp != NULL) { if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ) nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp); else nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp); } if (layout_passed_in != 0) nfsv4_unlock(&lyp->nfsly_lock, 1); NFSUNLOCKCLSTATE(); if (tlyp != NULL) free(tlyp, M_NFSLAYOUT); return (0); } /* * Search for a layout by MDS file handle. * If one is found, it is returned with a refcnt (shared lock) iff * retflpp returned non-NULL and locked (exclusive locked) iff retflpp is * returned NULL. 
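 * In other words, when a file layout covering the requested offset is found,
 * the layout is returned with a shared reference and *retflpp points at that
 * file layout; otherwise the layout is returned exclusively locked with
 * *retflpp NULL, so the caller can fetch the missing range (e.g., via
 * LAYOUTGET) and merge it in.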
*/ struct nfscllayout * nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen, uint64_t off, struct nfsclflayout **retflpp, int *recalledp) { struct nfscllayout *lyp; mount_t mp; int error, igotlock; mp = clp->nfsc_nmp->nm_mountp; *recalledp = 0; *retflpp = NULL; NFSLOCKCLSTATE(); lyp = nfscl_findlayout(clp, fhp, fhlen); if (lyp != NULL) { if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) { TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list); TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list); lyp->nfsly_timestamp = NFSD_MONOSEC + 120; error = nfscl_findlayoutforio(lyp, off, NFSV4OPEN_ACCESSREAD, retflpp); if (error == 0) nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp); else { do { igotlock = nfsv4_lock(&lyp->nfsly_lock, 1, NULL, NFSCLSTATEMUTEXPTR, mp); } while (igotlock == 0 && !NFSCL_FORCEDISM(mp)); *retflpp = NULL; } if (NFSCL_FORCEDISM(mp)) { lyp = NULL; *recalledp = 1; } } else { lyp = NULL; *recalledp = 1; } } NFSUNLOCKCLSTATE(); return (lyp); } /* * Search for a layout by MDS file handle. If one is found, mark in to be * recalled, if it already marked "return on close". */ static void nfscl_retoncloselayout(vnode_t vp, struct nfsclclient *clp, uint8_t *fhp, int fhlen, struct nfsclrecalllayout **recallpp) { struct nfscllayout *lyp; uint32_t iomode; if (vp->v_type != VREG || !NFSHASPNFS(VFSTONFS(vnode_mount(vp))) || nfscl_enablecallb == 0 || nfs_numnfscbd == 0 || (VTONFS(vp)->n_flag & NNOLAYOUT) != 0) return; lyp = nfscl_findlayout(clp, fhp, fhlen); if (lyp != NULL && (lyp->nfsly_flags & (NFSLY_RETONCLOSE | NFSLY_RECALL)) == NFSLY_RETONCLOSE) { iomode = 0; if (!LIST_EMPTY(&lyp->nfsly_flayread)) iomode |= NFSLAYOUTIOMODE_READ; if (!LIST_EMPTY(&lyp->nfsly_flayrw)) iomode |= NFSLAYOUTIOMODE_RW; (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode, 0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL, *recallpp); NFSCL_DEBUG(4, "retoncls recall iomode=%d\n", iomode); *recallpp = NULL; } } /* * Mark the layout to be recalled and with an error. * Also, disable the dsp from further use. */ void nfscl_dserr(uint32_t op, uint32_t stat, struct nfscldevinfo *dp, struct nfscllayout *lyp, struct nfsclds *dsp) { struct nfsclrecalllayout *recallp; uint32_t iomode; printf("DS being disabled, error=%d\n", stat); /* Set up the return of the layout. */ recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK); iomode = 0; NFSLOCKCLSTATE(); if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) { if (!LIST_EMPTY(&lyp->nfsly_flayread)) iomode |= NFSLAYOUTIOMODE_READ; if (!LIST_EMPTY(&lyp->nfsly_flayrw)) iomode |= NFSLAYOUTIOMODE_RW; (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode, 0, UINT64_MAX, lyp->nfsly_stateid.seqid, stat, op, dp->nfsdi_deviceid, recallp); NFSUNLOCKCLSTATE(); NFSCL_DEBUG(4, "nfscl_dserr recall iomode=%d\n", iomode); } else { NFSUNLOCKCLSTATE(); free(recallp, M_NFSLAYRECALL); } /* And shut the TCP connection down. */ nfscl_cancelreqs(dsp); } /* * Cancel all RPCs for this "dsp" by closing the connection. * Also, mark the session as defunct. * If NFSCLDS_SAMECONN is set, the connection is shared with other DSs and * cannot be shut down. 
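 * Closing the krpc client with CLNT_CLOSE() makes outstanding RPCs to this
 * DS fail, and marking the session defunct keeps new requests from being
 * sent on it.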
*/ APPLESTATIC void nfscl_cancelreqs(struct nfsclds *dsp) { struct __rpc_client *cl; static int non_event; NFSLOCKDS(dsp); if ((dsp->nfsclds_flags & (NFSCLDS_CLOSED | NFSCLDS_SAMECONN)) == 0 && dsp->nfsclds_sockp != NULL && dsp->nfsclds_sockp->nr_client != NULL) { dsp->nfsclds_flags |= NFSCLDS_CLOSED; cl = dsp->nfsclds_sockp->nr_client; dsp->nfsclds_sess.nfsess_defunct = 1; NFSUNLOCKDS(dsp); CLNT_CLOSE(cl); /* * This 1sec sleep is done to reduce the number of reconnect * attempts made on the DS while it has failed. */ tsleep(&non_event, PVFS, "ndscls", hz); return; } NFSUNLOCKDS(dsp); } /* * Dereference a layout. */ void nfscl_rellayout(struct nfscllayout *lyp, int exclocked) { NFSLOCKCLSTATE(); if (exclocked != 0) nfsv4_unlock(&lyp->nfsly_lock, 0); else nfsv4_relref(&lyp->nfsly_lock); NFSUNLOCKCLSTATE(); } /* * Search for a devinfo by deviceid. If one is found, return it after * acquiring a reference count on it. */ struct nfscldevinfo * nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid, struct nfscldevinfo *dip) { NFSLOCKCLSTATE(); if (dip == NULL) dip = nfscl_finddevinfo(clp, deviceid); if (dip != NULL) dip->nfsdi_refcnt++; NFSUNLOCKCLSTATE(); return (dip); } /* * Dereference a devinfo structure. */ static void nfscl_reldevinfo_locked(struct nfscldevinfo *dip) { dip->nfsdi_refcnt--; if (dip->nfsdi_refcnt == 0) wakeup(&dip->nfsdi_refcnt); } /* * Dereference a devinfo structure. */ void nfscl_reldevinfo(struct nfscldevinfo *dip) { NFSLOCKCLSTATE(); nfscl_reldevinfo_locked(dip); NFSUNLOCKCLSTATE(); } /* * Find a layout for this file handle. Return NULL upon failure. */ static struct nfscllayout * nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen) { struct nfscllayout *lyp; LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash) if (lyp->nfsly_fhlen == fhlen && !NFSBCMP(lyp->nfsly_fh, fhp, fhlen)) break; return (lyp); } /* * Find a devinfo for this deviceid. Return NULL upon failure. */ static struct nfscldevinfo * nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid) { struct nfscldevinfo *dip; LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list) if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID) == 0) break; return (dip); } /* * Merge the new file layout list into the main one, maintaining it in * increasing offset order. */ static void nfscl_mergeflayouts(struct nfsclflayouthead *fhlp, struct nfsclflayouthead *newfhlp) { struct nfsclflayout *flp, *nflp, *prevflp, *tflp; flp = LIST_FIRST(fhlp); prevflp = NULL; LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) { while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) { prevflp = flp; flp = LIST_NEXT(flp, nfsfl_list); } if (prevflp == NULL) LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list); else LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list); prevflp = nflp; } } /* * Add this nfscldevinfo to the client, if it doesn't already exist. * This function consumes the structure pointed at by dip, if not NULL. 
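 * If an entry with the same deviceid already exists, that entry just gains
 * a layout reference and the passed-in dip is freed.  A return of ENODEV
 * with dip == NULL tells the caller that it still has to acquire the device
 * info (e.g., with a GetDeviceInfo RPC) before retrying.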
*/ APPLESTATIC int nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip, int ind, struct nfsclflayout *flp) { struct nfsclclient *clp; struct nfscldevinfo *tdip; uint8_t *dev; NFSLOCKCLSTATE(); clp = nmp->nm_clp; if (clp == NULL) { NFSUNLOCKCLSTATE(); if (dip != NULL) free(dip, M_NFSDEVINFO); return (ENODEV); } if ((flp->nfsfl_flags & NFSFL_FILE) != 0) dev = flp->nfsfl_dev; else dev = flp->nfsfl_ffm[ind].dev; tdip = nfscl_finddevinfo(clp, dev); if (tdip != NULL) { tdip->nfsdi_layoutrefs++; if ((flp->nfsfl_flags & NFSFL_FILE) != 0) flp->nfsfl_devp = tdip; else flp->nfsfl_ffm[ind].devp = tdip; nfscl_reldevinfo_locked(tdip); NFSUNLOCKCLSTATE(); if (dip != NULL) free(dip, M_NFSDEVINFO); return (0); } if (dip != NULL) { LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list); dip->nfsdi_layoutrefs = 1; if ((flp->nfsfl_flags & NFSFL_FILE) != 0) flp->nfsfl_devp = dip; else flp->nfsfl_ffm[ind].devp = dip; } NFSUNLOCKCLSTATE(); if (dip == NULL) return (ENODEV); return (0); } /* * Free up a layout structure and associated file layout structure(s). */ APPLESTATIC void nfscl_freelayout(struct nfscllayout *layp) { struct nfsclflayout *flp, *nflp; struct nfsclrecalllayout *rp, *nrp; LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) { LIST_REMOVE(flp, nfsfl_list); nfscl_freeflayout(flp); } LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) { LIST_REMOVE(flp, nfsfl_list); nfscl_freeflayout(flp); } LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) { LIST_REMOVE(rp, nfsrecly_list); free(rp, M_NFSLAYRECALL); } nfscl_layoutcnt--; free(layp, M_NFSLAYOUT); } /* * Free up a file layout structure. */ APPLESTATIC void nfscl_freeflayout(struct nfsclflayout *flp) { int i, j; if ((flp->nfsfl_flags & NFSFL_FILE) != 0) { for (i = 0; i < flp->nfsfl_fhcnt; i++) free(flp->nfsfl_fh[i], M_NFSFH); if (flp->nfsfl_devp != NULL) flp->nfsfl_devp->nfsdi_layoutrefs--; } if ((flp->nfsfl_flags & NFSFL_FLEXFILE) != 0) for (i = 0; i < flp->nfsfl_mirrorcnt; i++) { for (j = 0; j < flp->nfsfl_ffm[i].fhcnt; j++) free(flp->nfsfl_ffm[i].fh[j], M_NFSFH); if (flp->nfsfl_ffm[i].devp != NULL) flp->nfsfl_ffm[i].devp->nfsdi_layoutrefs--; } free(flp, M_NFSFLAYOUT); } /* * Free up a file layout devinfo structure. */ APPLESTATIC void nfscl_freedevinfo(struct nfscldevinfo *dip) { free(dip, M_NFSDEVINFO); } /* * Mark any layouts that match as recalled. */ static int nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode, uint64_t off, uint64_t len, uint32_t stateseqid, uint32_t stat, uint32_t op, char *devid, struct nfsclrecalllayout *recallp) { struct nfsclrecalllayout *rp, *orp; recallp->nfsrecly_recalltype = recalltype; recallp->nfsrecly_iomode = iomode; recallp->nfsrecly_stateseqid = stateseqid; recallp->nfsrecly_off = off; recallp->nfsrecly_len = len; recallp->nfsrecly_stat = stat; recallp->nfsrecly_op = op; if (devid != NULL) NFSBCOPY(devid, recallp->nfsrecly_devid, NFSX_V4DEVICEID); /* * Order the list as file returns first, followed by fsid and any * returns, both in increasing stateseqid order. * Note that the seqids wrap around, so 1 is after 0xffffffff. * (I'm not sure this is correct because I find RFC5661 confusing * on this, but hopefully it will work ok.) 
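 * For example, with the nfscl_seq() ordering used here, seqid 0xffffffff is
 * considered to come before seqid 1 (the counter has wrapped), while plain
 * cases such as 5 before 7 behave as expected; two seqids more than
 * 0x7fffffff apart are assumed to lie on opposite sides of a wrap.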
*/ orp = NULL; LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) { orp = rp; if ((recalltype == NFSLAYOUTRETURN_FILE && (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE || nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) || (recalltype != NFSLAYOUTRETURN_FILE && rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE && nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) { LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list); break; } /* * Put any error return on all the file returns that will * preceed this one. */ if (rp->nfsrecly_recalltype == NFSLAYOUTRETURN_FILE && stat != 0 && rp->nfsrecly_stat == 0) { rp->nfsrecly_stat = stat; rp->nfsrecly_op = op; if (devid != NULL) NFSBCOPY(devid, rp->nfsrecly_devid, NFSX_V4DEVICEID); } } if (rp == NULL) { if (orp == NULL) LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp, nfsrecly_list); else LIST_INSERT_AFTER(orp, recallp, nfsrecly_list); } lyp->nfsly_flags |= NFSLY_RECALL; wakeup(lyp->nfsly_clp); return (0); } /* * Compare the two seqids for ordering. The trick is that the seqids can * wrap around from 0xffffffff->0, so check for the cases where one * has wrapped around. * Return 1 if seqid1 comes before seqid2, 0 otherwise. */ static int nfscl_seq(uint32_t seqid1, uint32_t seqid2) { if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff) /* seqid2 has wrapped around. */ return (0); if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff) /* seqid1 has wrapped around. */ return (1); if (seqid1 <= seqid2) return (1); return (0); } /* * Do a layout return for each of the recalls. */ static void nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp, struct ucred *cred, NFSPROC_T *p) { struct nfsclrecalllayout *rp; nfsv4stateid_t stateid; int layouttype; NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER); stateid.seqid = lyp->nfsly_stateid.seqid; if ((lyp->nfsly_flags & NFSLY_FILES) != 0) layouttype = NFSLAYOUT_NFSV4_1_FILES; else layouttype = NFSLAYOUT_FLEXFILE; LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) { (void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh, lyp->nfsly_fhlen, 0, layouttype, rp->nfsrecly_iomode, rp->nfsrecly_recalltype, rp->nfsrecly_off, rp->nfsrecly_len, &stateid, cred, p, rp->nfsrecly_stat, rp->nfsrecly_op, rp->nfsrecly_devid); } } /* * Do the layout commit for a file layout. */ static void nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp, struct ucred *cred, NFSPROC_T *p) { struct nfsclflayout *flp; uint64_t len; int error, layouttype; if ((lyp->nfsly_flags & NFSLY_FILES) != 0) layouttype = NFSLAYOUT_NFSV4_1_FILES; else layouttype = NFSLAYOUT_FLEXFILE; LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) { if (layouttype == NFSLAYOUT_FLEXFILE && (flp->nfsfl_fflags & NFSFLEXFLAG_NO_LAYOUTCOMMIT) != 0) { NFSCL_DEBUG(4, "Flex file: no layoutcommit\n"); /* If not supported, don't bother doing it. */ NFSLOCKMNT(nmp); nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT; NFSUNLOCKMNT(nmp); break; } else if (flp->nfsfl_off <= lyp->nfsly_lastbyte) { len = flp->nfsfl_end - flp->nfsfl_off; error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh, lyp->nfsly_fhlen, 0, flp->nfsfl_off, len, lyp->nfsly_lastbyte, &lyp->nfsly_stateid, layouttype, cred, p, NULL); NFSCL_DEBUG(4, "layoutcommit err=%d\n", error); if (error == NFSERR_NOTSUPP) { /* If not supported, don't bother doing it. */ NFSLOCKMNT(nmp); nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT; NFSUNLOCKMNT(nmp); break; } } } } /* * Commit all layouts for a file (vnode). 
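 * A LayoutCommit is only needed after writing through a layout (NFSLY_WRITTEN
 * set); it tells the MDS the last byte written so that the file's size and
 * change attributes can be updated.  Servers that reply NFSERR_NOTSUPP get
 * NFSSTA_NOLAYOUTCOMMIT set and are not asked again.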
 */
int
nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscllayout *lyp;
	struct nfsnode *np = VTONFS(vp);
	mount_t mp;
	struct nfsmount *nmp;

	mp = vnode_mount(vp);
	nmp = VFSTONFS(mp);
	if (NFSHASNOLAYOUTCOMMIT(nmp))
		return (0);
	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (lyp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	if (NFSCL_FORCEDISM(mp)) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
tryagain:
	if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
		lyp->nfsly_flags &= ~NFSLY_WRITTEN;
		NFSUNLOCKCLSTATE();
		NFSCL_DEBUG(4, "do layoutcommit2\n");
		nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p);
		NFSLOCKCLSTATE();
		goto tryagain;
	}
	nfsv4_relref(&lyp->nfsly_lock);
	NFSUNLOCKCLSTATE();
	return (0);
}