diff --git a/share/man/man9/pci.9 b/share/man/man9/pci.9 index 8f772e76ba99..eeb62a63a2bd 100644 --- a/share/man/man9/pci.9 +++ b/share/man/man9/pci.9 @@ -1,1155 +1,1159 @@ .\" .\" Copyright (c) 2005 Bruce M Simpson .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .Dd March 27, 2025 .Dt PCI 9 .Os .Sh NAME .Nm pci , .Nm pci_alloc_msi , .Nm pci_alloc_msix , .Nm pci_clear_pme , .Nm pci_disable_busmaster , .Nm pci_disable_io , .Nm pci_enable_busmaster , .Nm pci_enable_io , .Nm pci_enable_pme , .Nm pci_find_bsf , .Nm pci_find_cap , .Nm pci_find_dbsf , .Nm pci_find_device , .Nm pci_find_extcap , .Nm pci_find_htcap , .Nm pci_find_next_cap , .Nm pci_find_next_extcap , .Nm pci_find_next_htcap , .Nm pci_find_pcie_root_port , .Nm pci_get_id , .Nm pci_get_max_payload , .Nm pci_get_max_read_req , .Nm pci_get_powerstate , .Nm pci_get_vpd_ident , .Nm pci_get_vpd_readonly , .Nm pci_has_pm , .Nm pci_iov_attach , .Nm pci_iov_attach_name , .Nm pci_iov_detach , .Nm pci_msi_count , .Nm pci_msix_count , .Nm pci_msix_pba_bar , .Nm pci_msix_table_bar , .Nm pci_pending_msix , .Nm pci_read_config , .Nm pci_release_msi , .Nm pci_remap_msix , .Nm pci_restore_state , .Nm pci_save_state , .Nm pci_set_max_read_req , .Nm pci_set_powerstate , .Nm pci_write_config , .Nm pcie_adjust_config , .Nm pcie_flr , .Nm pcie_get_max_completion_timeout , .Nm pcie_read_config , .Nm pcie_wait_for_pending_transactions , .Nm pcie_write_config .Nd PCI bus interface .Sh SYNOPSIS .In sys/bus.h .In dev/pci/pcireg.h .In dev/pci/pcivar.h .Ft int .Fn pci_alloc_msi "device_t dev" "int *count" .Ft int .Fn pci_alloc_msix "device_t dev" "int *count" .Ft void .Fn pci_clear_pme "device_t dev" .Ft int .Fn pci_disable_busmaster "device_t dev" .Ft int .Fn pci_disable_io "device_t dev" "int space" .Ft int .Fn pci_enable_busmaster "device_t dev" .Ft int .Fn pci_enable_io "device_t dev" "int space" .Ft void .Fn pci_enable_pme "device_t dev" .Ft device_t .Fn pci_find_bsf "uint8_t bus" "uint8_t slot" "uint8_t func" .Ft int .Fn pci_find_cap "device_t dev" "int capability" "int *capreg" .Ft device_t .Fn pci_find_dbsf "uint32_t domain" "uint8_t bus" "uint8_t slot" "uint8_t func" .Ft device_t .Fn pci_find_device "uint16_t vendor" "uint16_t device" .Ft int .Fn pci_find_extcap "device_t dev" "int capability" 
"int *capreg" .Ft int .Fn pci_find_htcap "device_t dev" "int capability" "int *capreg" .Ft int .Fn pci_find_next_cap "device_t dev" "int capability" "int start" "int *capreg" .Ft int .Fn pci_find_next_extcap "device_t dev" "int capability" "int start" "int *capreg" .Ft int .Fn pci_find_next_htcap "device_t dev" "int capability" "int start" "int *capreg" .Ft device_t .Fn pci_find_pcie_root_port "device_t dev" .Ft int .Fn pci_get_id "device_t dev" "enum pci_id_type type" "uintptr_t *id" .Ft int .Fn pci_get_max_payload "device_t dev" .Ft int .Fn pci_get_max_read_req "device_t dev" .Ft int .Fn pci_get_powerstate "device_t dev" .Ft int .Fn pci_get_vpd_ident "device_t dev" "const char **identptr" .Ft int .Fn pci_get_vpd_readonly "device_t dev" "const char *kw" "const char **vptr" .Ft bool .Fn pci_has_pm "device_t dev" .Ft int .Fn pci_msi_count "device_t dev" .Ft int .Fn pci_msix_count "device_t dev" .Ft int .Fn pci_msix_pba_bar "device_t dev" .Ft int .Fn pci_msix_table_bar "device_t dev" .Ft int .Fn pci_pending_msix "device_t dev" "u_int index" .Ft uint32_t .Fn pci_read_config "device_t dev" "int reg" "int width" .Ft int .Fn pci_release_msi "device_t dev" .Ft int .Fn pci_remap_msix "device_t dev" "int count" "const u_int *vectors" .Ft void .Fn pci_restore_state "device_t dev" .Ft void .Fn pci_save_state "device_t dev" .Ft int .Fn pci_set_max_read_req "device_t dev" "int size" .Ft int .Fn pci_set_powerstate "device_t dev" "int state" .Ft void .Fn pci_write_config "device_t dev" "int reg" "uint32_t val" "int width" .Ft uint32_t .Fo pcie_adjust_config .Fa "device_t dev" .Fa "int reg" .Fa "uint32_t mask" .Fa "uint32_t val" .Fa "int width" .Fc .Ft bool .Fn pcie_flr "device_t dev" "u_int max_delay" "bool force" .Ft int .Fn pcie_get_max_completion_timeout "device_t dev" .Ft uint32_t .Fn pcie_read_config "device_t dev" "int reg" "int width" .Ft bool .Fn pcie_wait_for_pending_transactions "device_t dev" "u_int max_delay" .Ft void .Fn pcie_write_config "device_t dev" "int reg" "uint32_t val" "int width" .Ft void .Fn pci_event_fn "void *arg" "device_t dev" .Fn EVENTHANDLER_REGISTER "pci_add_device" "pci_event_fn" .Fn EVENTHANDLER_DEREGISTER "pci_delete_resource" "pci_event_fn" .In dev/pci/pci_iov.h .Ft int .Fn pci_iov_attach "device_t dev" "nvlist_t *pf_schema" "nvlist_t *vf_schema" .Ft int .Fo pci_iov_attach_name .Fa "device_t dev" .Fa "nvlist_t *pf_schema" .Fa "nvlist_t *vf_schema" .Fa "const char *fmt" .Fa "..." .Fc .Ft int .Fn pci_iov_detach "device_t dev" .Sh DESCRIPTION The .Nm set of functions are used for managing PCI devices. The functions are split into several groups: raw configuration access, locating devices, device information, device configuration, and message signaled interrupts. .Ss Raw Configuration Access The .Fn pci_read_config function is used to read data from the PCI configuration space of the device .Fa dev , at offset .Fa reg , with .Fa width specifying the size of the access. .Pp The .Fn pci_write_config function is used to write the value .Fa val to the PCI configuration space of the device .Fa dev , at offset .Fa reg , with .Fa width specifying the size of the access. .Pp The .Fn pcie_adjust_config function is used to modify the value of a register in the PCI-express capability register set of device .Fa dev . The offset .Fa reg specifies a relative offset in the register set with .Fa width specifying the size of the access. The new value of the register is computed by modifying bits set in .Fa mask to the value in .Fa val . Any bits not specified in .Fa mask are preserved. 
.Pp The .Fn pcie_read_config function is used to read the value of a register in the PCI-express capability register set of device .Fa dev . The offset .Fa reg specifies a relative offset in the register set with .Fa width specifying the size of the access. .Pp The .Fn pcie_write_config function is used to write the value .Fa val to a register in the PCI-express capability register set of device .Fa dev . The offset .Fa reg specifies a relative offset in the register set with .Fa width specifying the size of the access. .Pp .Em NOTE : Device drivers should only use these functions for functionality that is not available via another .Fn pci function. .Ss Locating Devices The .Fn pci_find_bsf function looks up the .Vt device_t of a PCI device, given its .Fa bus , .Fa slot , and .Fa func . The .Fa slot number actually refers to the number of the device on the bus, which does not necessarily indicate its geographic location in terms of a physical slot. Note that, if the system has multiple PCI domains, the .Fn pci_find_bsf function only searches the first one. It is equivalent to: .Bd -literal -offset indent pci_find_dbsf(0, bus, slot, func); .Ed .Pp The .Fn pci_find_dbsf function looks up the .Vt device_t of a PCI device, given its .Fa domain , .Fa bus , .Fa slot , and .Fa func . The .Fa slot number actually refers to the number of the device on the bus, which does not necessarily indicate its geographic location in terms of a physical slot. .Pp The .Fn pci_find_device function looks up the .Vt device_t of a PCI device, given its .Fa vendor and .Fa device IDs. Note that there can be multiple matches for this search; this function only returns the first matching device. .Ss Device Information The .Fn pci_find_cap function is used to locate the first instance of a PCI capability register set for the device .Fa dev . The capability to locate is specified by ID via .Fa capability . Constant macros of the form .Dv PCIY_xxx for standard capability IDs are defined in .In dev/pci/pcireg.h . If the capability is found, then .Fa *capreg is set to the offset in configuration space of the capability register set, and .Fn pci_find_cap returns zero. If the capability is not found or the device does not support capabilities, .Fn pci_find_cap returns an error. The .Fn pci_find_next_cap function is used to locate the next instance of a PCI capability register set for the device .Fa dev . The .Fa start should be the .Fa *capreg returned by a prior .Fn pci_find_cap or .Fn pci_find_next_cap . When no more instances are located .Fn pci_find_next_cap returns an error. .Pp The .Fn pci_has_pm function returns true if .Fa dev supports power management. .Pp The .Fn pci_find_extcap function is used to locate the first instance of a PCI-express extended capability register set for the device .Fa dev . The extended capability to locate is specified by ID via .Fa capability . Constant macros of the form .Dv PCIZ_xxx for standard extended capability IDs are defined in .In dev/pci/pcireg.h . If the extended capability is found, then .Fa *capreg is set to the offset in configuration space of the extended capability register set, and .Fn pci_find_extcap returns zero. If the extended capability is not found or the device is not a PCI-express device, .Fn pci_find_extcap returns an error. The .Fn pci_find_next_extcap function is used to locate the next instance of a PCI-express extended capability register set for the device .Fa dev . The .Fa start should be the .Fa *capreg returned by a prior .Fn pci_find_extcap or .Fn pci_find_next_extcap . When no more instances are located .Fn pci_find_next_extcap returns an error.
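.Pp
For example, a driver could visit every instance of the vendor-specific
capability with a loop such as the following sketch; the same pattern
applies to the extended and HyperTransport variants:
.Bd -literal -offset indent
int cap, error;

for (error = pci_find_cap(dev, PCIY_VENDOR, &cap); error == 0;
    error = pci_find_next_cap(dev, PCIY_VENDOR, cap, &cap)) {
	/* cap holds the configuration space offset of this instance. */
}
.Ed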
.Pp The .Fn pci_find_htcap function is used to locate the first instance of a HyperTransport capability register set for the device .Fa dev . The capability to locate is specified by type via .Fa capability . Constant macros of the form .Dv PCIM_HTCAP_xxx for standard HyperTransport capability types are defined in .In dev/pci/pcireg.h . If the capability is found, then .Fa *capreg is set to the offset in configuration space of the capability register set, and .Fn pci_find_htcap returns zero. If the capability is not found or the device is not a HyperTransport device, .Fn pci_find_htcap returns an error. The .Fn pci_find_next_htcap function is used to locate the next instance of a HyperTransport capability register set for the device .Fa dev . The .Fa start should be the .Fa *capreg returned by a prior .Fn pci_find_htcap or .Fn pci_find_next_htcap . When no more instances are located .Fn pci_find_next_htcap returns an error. .Pp The .Fn pci_find_pcie_root_port function walks up the PCI device hierarchy to locate the PCI-express root port upstream of .Fa dev . If a root port is not found, .Fn pci_find_pcie_root_port returns .Dv NULL . .Pp The .Fn pci_get_id function is used to read an identifier from a device. The .Fa type flag is used to specify which identifier to read. The following flags are supported: .Bl -hang -width ".Dv PCI_ID_RID" .It Dv PCI_ID_RID Read the routing identifier for the device. .It Dv PCI_ID_MSI Read the MSI routing ID. This is needed by some interrupt controllers to route MSI and MSI-X interrupts. .El .Pp The .Fn pci_get_vpd_ident function is used to fetch a device's Vital Product Data .Pq VPD identifier string. If the device .Fa dev supports VPD and provides an identifier string, then .Fa *identptr is set to point at a read-only, null-terminated copy of the identifier string, and .Fn pci_get_vpd_ident returns zero. If the device does not support VPD or does not provide an identifier string, then .Fn pci_get_vpd_ident returns an error. .Pp The .Fn pci_get_vpd_readonly function is used to fetch the value of a single VPD read-only keyword for the device .Fa dev . The keyword to fetch is identified by the two character string .Fa kw . If the device supports VPD and provides a read-only value for the requested keyword, then .Fa *vptr is set to point at a read-only, null-terminated copy of the value, and .Fn pci_get_vpd_readonly returns zero. If the device does not support VPD or does not provide the requested keyword, then .Fn pci_get_vpd_readonly returns an error. .Pp The .Fn pcie_get_max_completion_timeout function returns the maximum completion timeout configured for the device .Fa dev in microseconds. If the .Fa dev device is not a PCI-express device, .Fn pcie_get_max_completion_timeout returns zero. When completion timeouts are disabled for .Fa dev , this function returns the maximum timeout that would be used if timeouts were enabled. .Pp The .Fn pcie_wait_for_pending_transactions function waits for any pending transactions initiated by the .Fa dev device to complete. The function checks for pending transactions by polling the transactions pending flag in the PCI-express device status register. It returns .Dv true once the transaction pending flag is clear.
If transactions are still pending after .Fa max_delay milliseconds, .Fn pcie_wait_for_pending_transactions returns .Dv false . If .Fa max_delay is set to zero, .Fn pcie_wait_for_pending_transactions performs a single check; otherwise, this function may sleep while polling the transactions pending flag. .Nm pcie_wait_for_pending_transactions returns .Dv true if .Fa dev is not a PCI-express device. .Ss Device Configuration The .Fn pci_enable_busmaster function enables PCI bus mastering for the device .Fa dev , by setting the .Dv PCIM_CMD_BUSMASTEREN bit in the .Dv PCIR_COMMAND register. The .Fn pci_disable_busmaster function clears this bit. .Pp The .Fn pci_enable_io function enables memory or I/O port address decoding for the device .Fa dev , by setting the .Dv PCIM_CMD_MEMEN or .Dv PCIM_CMD_PORTEN bit in the .Dv PCIR_COMMAND register appropriately. The .Fn pci_disable_io function clears the appropriate bit. The .Fa space argument specifies which resource is affected; this can be either .Dv SYS_RES_MEMORY or .Dv SYS_RES_IOPORT as appropriate. Device drivers should generally not use these routines directly. The PCI bus will enable decoding automatically when a .Dv SYS_RES_MEMORY or .Dv SYS_RES_IOPORT resource is activated via .Xr bus_alloc_resource 9 or .Xr bus_activate_resource 9 . .Pp The .Fn pci_get_max_payload function returns the current maximum TLP payload size in bytes for a PCI-express device. If the .Fa dev device is not a PCI-express device, .Fn pci_get_max_payload returns zero. .Pp The .Fn pci_get_max_read_req function returns the current maximum read request size in bytes for a PCI-express device. If the .Fa dev device is not a PCI-express device, .Fn pci_get_max_read_req returns zero. .Pp The .Fn pci_set_max_read_req function sets the PCI-express maximum read request size for .Fa dev . The requested .Fa size may be adjusted, and .Fn pci_set_max_read_req returns the actual size set in bytes. If the .Fa dev device is not a PCI-express device, .Fn pci_set_max_read_req returns zero. .Pp The .Fn pci_get_powerstate function returns the current power state of the device .Fa dev . If the device does not support power management capabilities, then the default state of .Dv PCI_POWERSTATE_D0 is returned. The following power states are defined by PCI: .Bl -hang -width ".Dv PCI_POWERSTATE_UNKNOWN" .It Dv PCI_POWERSTATE_D0 State in which the device is on and running. It is receiving full power from the system and delivering full functionality to the user. .It Dv PCI_POWERSTATE_D1 Class-specific low-power state in which device context may or may not be lost. Buses in this state cannot do anything that would force devices to lose context. .It Dv PCI_POWERSTATE_D2 Class-specific low-power state in which device context may or may not be lost. Attains greater power savings than .Dv PCI_POWERSTATE_D1 . Buses in this state can cause devices to lose some context. Devices .Em must be prepared for the bus to be in this state or higher. -.It Dv PCI_POWERSTATE_D3 +.It Dv PCI_POWERSTATE_D3_HOT State in which the device is off and not running. Device context is lost, and power from the device can -be removed. +be (but is not necessarily) removed. +.It Dv PCI_POWERSTATE_D3_COLD +Same as +.Dv PCI_POWERSTATE_D3_HOT , +except power has been removed from the device. .It Dv PCI_POWERSTATE_UNKNOWN State of the device is unknown. .El .Pp The .Fn pci_set_powerstate function is used to transition the device .Fa dev to the PCI power state .Fa state . If the device does not support power management capabilities or it does not support the specific power state .Fa state , then the function will fail with .Er EOPNOTSUPP .
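.Pp
For example, a driver suspend method might save the device's
configuration registers and then place it in the D3hot state; error
handling is omitted in this sketch:
.Bd -literal -offset indent
pci_save_state(dev);
(void)pci_set_powerstate(dev, PCI_POWERSTATE_D3_HOT);
.Ed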
.Pp The .Fn pci_clear_pme function is used to clear any pending PME# signal and disable generation of power management events. .Pp The .Fn pci_enable_pme function is used to enable generation of power management events before suspending a device. .Pp The .Fn pci_iov_attach function is used to advertise that the given device .Pq and associated device driver supports PCI Single-Root I/O Virtualization .Pq SR-IOV . A driver that supports SR-IOV must implement the .Xr PCI_IOV_INIT 9 , .Xr PCI_IOV_ADD_VF 9 and .Xr PCI_IOV_UNINIT 9 methods. This function should be called during the .Xr DEVICE_ATTACH 9 method. If this function returns an error, it is recommended that the device driver still attach successfully, but run with SR-IOV disabled. The .Fa pf_schema and .Fa vf_schema parameters are used to define what device-specific configuration parameters the device driver accepts when SR-IOV is enabled for the Physical Function .Pq PF and for individual Virtual Functions .Pq VFs respectively. See .Xr pci_iov_schema 9 for details on how to construct the schema. If either the .Fa pf_schema or .Fa vf_schema is invalid or specifies parameter names that conflict with parameter names that are already in use, .Fn pci_iov_attach will return an error and SR-IOV will not be available on the PF device. If a driver does not accept configuration parameters for either the PF device or the VF devices, the driver must pass an empty schema for that device. The SR-IOV infrastructure takes ownership of the .Fa pf_schema and .Fa vf_schema and is responsible for freeing them. The driver must never free the schemas itself. .Pp The .Fn pci_iov_attach_name function is a variant of .Fn pci_iov_attach that allows the name of the associated character device in .Pa /dev/iov to be specified by .Fa fmt . The .Fn pci_iov_attach function uses the name of .Fa dev as the device name. .Pp The .Fn pci_iov_detach function is used to advise the SR-IOV infrastructure that the driver for the given device is attempting to detach and that all SR-IOV resources for the device must be released. This function must be called during the .Xr DEVICE_DETACH 9 method if .Fn pci_iov_attach was successfully called on the device and .Fn pci_iov_detach has not already been called successfully on it. If this function returns an error, the .Xr DEVICE_DETACH 9 method must fail and return an error, as detaching the PF driver while VF devices are active would cause system instability. This function is safe to call and will always succeed if .Fn pci_iov_attach previously failed with an error on the given device, or if .Fn pci_iov_attach was never called on the device. .Pp The .Fn pci_save_state and .Fn pci_restore_state functions can be used by a device driver to save and restore standard PCI config registers. The .Fn pci_save_state function must be invoked while the device has valid state before .Fn pci_restore_state can be used. If the device is not in the fully-powered state .Pq Dv PCI_POWERSTATE_D0 when .Fn pci_restore_state is invoked, then the device will be transitioned to .Dv PCI_POWERSTATE_D0 before any config registers are restored. .Pp The .Fn pcie_flr function requests a Function Level Reset .Pq FLR of .Fa dev .
If .Fa dev is not a PCI-express device or does not support Function Level Resets via the PCI-express device control register, .Dv false is returned. Pending transactions are drained by disabling busmastering and calling .Fn pcie_wait_for_pending_transactions before resetting the device. The .Fa max_delay argument specifies the maximum timeout to wait for pending transactions as described for .Fn pcie_wait_for_pending_transactions . If .Fn pcie_wait_for_pending_transactions fails with a timeout and .Fa force is .Dv false , busmastering is re-enabled and .Dv false is returned. If .Fn pcie_wait_for_pending_transactions fails with a timeout and .Fa force is .Dv true , the device is reset despite the timeout. After the reset has been requested, .Nm pcie_flr sleeps for at least 100 milliseconds before returning .Dv true . Note that .Nm pcie_flr does not save and restore any state around the reset. The caller should save and restore state as needed. .Ss Message Signaled Interrupts Message Signaled Interrupts .Pq MSI and Enhanced Message Signaled Interrupts .Pq MSI-X are PCI capabilities that provide an alternate method for PCI devices to signal interrupts. The legacy INTx interrupt is available to PCI devices as a .Dv SYS_RES_IRQ resource with a resource ID of zero. MSI and MSI-X interrupts are available to PCI devices as one or more .Dv SYS_RES_IRQ resources with resource IDs greater than zero. A driver must ask the PCI bus to allocate MSI or MSI-X interrupts using .Fn pci_alloc_msi or .Fn pci_alloc_msix before it can use MSI or MSI-X .Dv SYS_RES_IRQ resources. A driver is not allowed to use the legacy INTx .Dv SYS_RES_IRQ resource if MSI or MSI-X interrupts have been allocated, and attempts to allocate MSI or MSI-X interrupts will fail if the driver is currently using the legacy INTx .Dv SYS_RES_IRQ resource. A driver is only allowed to use either MSI or MSI-X, but not both. .Pp The .Fn pci_msi_count function returns the maximum number of MSI messages supported by the device .Fa dev . If the device does not support MSI, then .Fn pci_msi_count returns zero. .Pp The .Fn pci_alloc_msi function attempts to allocate .Fa *count MSI messages for the device .Fa dev . The .Fn pci_alloc_msi function may allocate fewer messages than requested for various reasons including requests for more messages than the device .Fa dev supports, or if the system has a shortage of available MSI messages. On success, .Fa *count is set to the number of messages allocated and .Fn pci_alloc_msi returns zero. The .Dv SYS_RES_IRQ resources for the allocated messages will be available at consecutive resource IDs beginning with one. If .Fn pci_alloc_msi is not able to allocate any messages, it returns an error. Note that MSI only supports message counts that are powers of two; requests to allocate a non-power of two count of messages will fail. .Pp The .Fn pci_release_msi function is used to release any allocated MSI or MSI-X messages back to the system. If any MSI or MSI-X .Dv SYS_RES_IRQ resources are allocated by the driver or have a configured interrupt handler, this function will fail with .Er EBUSY . The .Fn pci_release_msi function returns zero on success and an error on failure. .Pp The .Fn pci_msix_count function returns the maximum number of MSI-X messages supported by the device .Fa dev . If the device does not support MSI-X, then .Fn pci_msix_count returns zero. 
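.Pp
The following sketch illustrates the MSI allocation sequence described
above in a driver
.Xr DEVICE_ATTACH 9
method; the
.Va sc
softc pointer and its
.Va irq_res
field are hypothetical:
.Bd -literal -offset indent
int count, rid;

count = pci_msi_count(dev);
if (count != 0 && pci_alloc_msi(dev, &count) == 0) {
	/* MSI resource IDs begin with one. */
	rid = 1;
} else {
	/* Fall back to the legacy INTx interrupt. */
	rid = 0;
}
sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
    RF_ACTIVE | (rid == 0 ? RF_SHAREABLE : 0));
.Ed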
.Pp The .Fn pci_msix_pba_bar function returns the offset in configuration space of the Base Address Register .Pq BAR containing the MSI-X Pending Bit Array (PBA) for device .Fa dev . The returned value can be used as the resource ID with .Xr bus_alloc_resource 9 and .Xr bus_release_resource 9 to allocate the BAR. If the device does not support MSI-X, then .Fn pci_msix_pba_bar returns -1. .Pp The .Fn pci_msix_table_bar function returns the offset in configuration space of the BAR containing the MSI-X vector table for device .Fa dev . The returned value can be used as the resource ID with .Xr bus_alloc_resource 9 and .Xr bus_release_resource 9 to allocate the BAR. If the device does not support MSI-X, then .Fn pci_msix_table_bar returns -1. .Pp The .Fn pci_alloc_msix function attempts to allocate .Fa *count MSI-X messages for the device .Fa dev . The .Fn pci_alloc_msix function may allocate fewer messages than requested for various reasons including requests for more messages than the device .Fa dev supports, or if the system has a shortage of available MSI-X messages. On success, .Fa *count is set to the number of messages allocated and .Fn pci_alloc_msix returns zero. For MSI-X messages, the resource ID for each .Dv SYS_RES_IRQ resource identifies the index in the MSI-X table of the corresponding message. A resource ID of one maps to the first index of the MSI-X table; a resource ID of two identifies the second index in the table, etc. The .Fn pci_alloc_msix function assigns the .Fa *count messages allocated to the first .Fa *count table indices. If .Fn pci_alloc_msix is not able to allocate any messages, it returns an error. Unlike MSI, MSI-X does not require message counts that are powers of two. .Pp The BARs containing the MSI-X vector table and PBA must be allocated via .Xr bus_alloc_resource 9 before calling .Fn pci_alloc_msix and must not be released until after calling .Fn pci_release_msi . Note that the vector table and PBA may be stored in the same BAR or in different BARs. .Pp The .Fn pci_pending_msix function examines the .Fa dev device's PBA to determine the pending status of the MSI-X message at table index .Fa index . If the indicated message is pending, this function returns a non-zero value; otherwise, it returns zero. Passing an invalid .Fa index to this function will result in undefined behavior. .Pp As mentioned in the description of .Fn pci_alloc_msix , MSI-X messages are initially assigned to the first N table entries. A driver may use a different distribution of available messages to table entries via the .Fn pci_remap_msix function. Note that this function must be called after a successful call to .Fn pci_alloc_msix but before any of the .Dv SYS_RES_IRQ resources are allocated. The .Fn pci_remap_msix function returns zero on success, or an error on failure. .Pp The .Fa vectors array should contain .Fa count message vectors. The array maps directly to the MSI-X table in that the first entry in the array specifies the message used for the first entry in the MSI-X table, the second entry in the array corresponds to the second entry in the MSI-X table, etc. The vector value in each array index can either be zero to indicate that no message should be assigned to the corresponding MSI-X table entry, or it can be a number from one to N .Po where N is the count returned from the previous call to .Fn pci_alloc_msix .Pc to indicate which of the allocated messages should be assigned to the corresponding MSI-X table entry.
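.Pp
As an example, a hypothetical driver whose device has a four-entry
MSI-X table but which was granted only two messages by
.Fn pci_alloc_msix
could share the first message between the first two table entries and
leave the fourth entry without a message:
.Bd -literal -offset indent
static const u_int vectors[] = { 1, 1, 2, 0 };
int error;

error = pci_remap_msix(dev, nitems(vectors), vectors);
.Ed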
.Pp If .Fn pci_remap_msix succeeds, each MSI-X table entry with a non-zero vector will have an associated .Dv SYS_RES_IRQ resource whose resource ID corresponds to the table index as described above for .Fn pci_alloc_msix . MSI-X table entries with a vector of zero will not have an associated .Dv SYS_RES_IRQ resource. Additionally, if any of the original messages allocated by .Fn pci_alloc_msix are not used in the new distribution of messages in the MSI-X table, they will be released automatically. Note that if a driver wishes to use fewer messages than were allocated by .Fn pci_alloc_msix , the driver must use a single, contiguous range of messages beginning with one in the new distribution. The .Fn pci_remap_msix function will fail if this condition is not met. .Ss Device Events The .Va pci_add_device event handler is invoked every time a new PCI device is added to the system. This includes the creation of Virtual Functions via SR-IOV. .Pp The .Va pci_delete_device event handler is invoked every time a PCI device is removed from the system. .Pp Both event handlers pass the .Vt device_t object of the relevant PCI device as .Fa dev to each callback function. Both event handlers are invoked while .Fa dev is unattached but with valid instance variables. .Sh SEE ALSO .Xr pci 4 , .Xr pciconf 8 , .Xr bus_alloc_resource 9 , .Xr bus_dma 9 , .Xr bus_release_resource 9 , .Xr bus_setup_intr 9 , .Xr bus_teardown_intr 9 , .Xr devclass 9 , .Xr device 9 , .Xr driver 9 , .Xr eventhandler 9 , .Xr rman 9 .Rs .%B FreeBSD Developers' Handbook .%T NewBus .%U https://docs.freebsd.org/en/books/developers-handbook/ .Re .Rs .%A Shanley .%A Anderson .%B PCI System Architecture .%N 2nd Edition .%I Addison-Wesley .%O ISBN 0-201-30974-2 .Re .Sh AUTHORS .An -nosplit This manual page was written by .An Bruce M Simpson Aq Mt bms@FreeBSD.org and .An John Baldwin Aq Mt jhb@FreeBSD.org . .Sh BUGS The kernel PCI code has a number of references to .Dq "slot numbers" . These do not refer to the geographic location of PCI devices, but to the device number assigned by the combination of the PCI IDSEL mechanism and the platform firmware. This should be kept in mind when working with the kernel PCI code. .Pp The PCI bus driver should allocate the MSI-X vector table and PBA internally as necessary rather than requiring the caller to do so. diff --git a/sys/compat/linuxkpi/common/include/linux/pci.h b/sys/compat/linuxkpi/common/include/linux/pci.h index ba1c0d2ac99e..3fd4191b9917 100644 --- a/sys/compat/linuxkpi/common/include/linux/pci.h +++ b/sys/compat/linuxkpi/common/include/linux/pci.h @@ -1,1545 +1,1545 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. * Copyright (c) 2020-2025 The FreeBSD Foundation * * Portions of this software were developed by Björn Zeeb * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _LINUXKPI_LINUX_PCI_H_ #define _LINUXKPI_LINUX_PCI_H_ #define CONFIG_PCI_MSI #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* pr_debug */ struct pci_device_id { uint32_t vendor; uint32_t device; uint32_t subvendor; uint32_t subdevice; uint32_t class; uint32_t class_mask; uintptr_t driver_data; }; #define MODULE_DEVICE_TABLE_BUS_pci(_bus, _table) \ MODULE_PNP_INFO("U32:vendor;U32:device;V32:subvendor;V32:subdevice", \ _bus, lkpi_ ## _table, _table, nitems(_table) - 1) /* Linux has an empty element at the end of the ID table -> nitems() - 1. */ #define MODULE_DEVICE_TABLE(_bus, _table) \ \ static device_method_t _ ## _bus ## _ ## _table ## _methods[] = { \ DEVMETHOD_END \ }; \ \ static driver_t _ ## _bus ## _ ## _table ## _driver = { \ "lkpi_" #_bus #_table, \ _ ## _bus ## _ ## _table ## _methods, \ 0 \ }; \ \ DRIVER_MODULE(lkpi_ ## _table, _bus, _ ## _bus ## _ ## _table ## _driver,\ 0, 0); \ \ MODULE_DEVICE_TABLE_BUS_ ## _bus(_bus, _table) #define PCI_ANY_ID -1U #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) #define PCI_FUNC(devfn) ((devfn) & 0x07) #define PCI_BUS_NUM(devfn) (((devfn) >> 8) & 0xff) #define PCI_DEVID(bus, devfn) ((((uint16_t)(bus)) << 8) | (devfn)) #define PCI_VDEVICE(_vendor, _device) \ .vendor = PCI_VENDOR_ID_##_vendor, .device = (_device), \ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID #define PCI_DEVICE(_vendor, _device) \ .vendor = (_vendor), .device = (_device), \ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID #define to_pci_dev(n) container_of(n, struct pci_dev, dev) #define PCI_STD_NUM_BARS 6 #define PCI_BASE_ADDRESS_0 PCIR_BARS #define PCI_BASE_ADDRESS_MEM_TYPE_64 PCIM_BAR_MEM_64 #define PCI_VENDOR_ID PCIR_VENDOR #define PCI_DEVICE_ID PCIR_DEVICE #define PCI_COMMAND PCIR_COMMAND #define PCI_COMMAND_INTX_DISABLE PCIM_CMD_INTxDIS #define PCI_COMMAND_MEMORY PCIM_CMD_MEMEN #define PCI_PRIMARY_BUS PCIR_PRIBUS_1 #define PCI_SECONDARY_BUS PCIR_SECBUS_1 #define PCI_SUBORDINATE_BUS PCIR_SUBBUS_1 #define PCI_SEC_LATENCY_TIMER PCIR_SECLAT_1 #define PCI_EXP_DEVCTL PCIER_DEVICE_CTL /* Device Control */ #define PCI_EXP_LNKCTL PCIER_LINK_CTL /* Link Control */ #define PCI_EXP_LNKCTL_ASPM_L0S PCIEM_LINK_CTL_ASPMC_L0S #define PCI_EXP_LNKCTL_ASPM_L1 PCIEM_LINK_CTL_ASPMC_L1 #define PCI_EXP_LNKCTL_ASPMC PCIEM_LINK_CTL_ASPMC #define PCI_EXP_LNKCTL_CLKREQ_EN PCIEM_LINK_CTL_ECPM /* Enable clock PM */ #define PCI_EXP_LNKCTL_HAWD PCIEM_LINK_CTL_HAWD #define PCI_EXP_FLAGS_TYPE PCIEM_FLAGS_TYPE /* Device/Port type */ #define PCI_EXP_DEVCAP PCIER_DEVICE_CAP /* Device capabilities */ #define PCI_EXP_DEVSTA PCIER_DEVICE_STA /* Device Status */ 
#define PCI_EXP_LNKCAP PCIER_LINK_CAP /* Link Capabilities */ #define PCI_EXP_LNKSTA PCIER_LINK_STA /* Link Status */ #define PCI_EXP_SLTCAP PCIER_SLOT_CAP /* Slot Capabilities */ #define PCI_EXP_SLTCTL PCIER_SLOT_CTL /* Slot Control */ #define PCI_EXP_SLTSTA PCIER_SLOT_STA /* Slot Status */ #define PCI_EXP_RTCTL PCIER_ROOT_CTL /* Root Control */ #define PCI_EXP_RTCAP PCIER_ROOT_CAP /* Root Capabilities */ #define PCI_EXP_RTSTA PCIER_ROOT_STA /* Root Status */ #define PCI_EXP_DEVCAP2 PCIER_DEVICE_CAP2 /* Device Capabilities 2 */ #define PCI_EXP_DEVCTL2 PCIER_DEVICE_CTL2 /* Device Control 2 */ #define PCI_EXP_DEVCTL2_LTR_EN PCIEM_CTL2_LTR_ENABLE #define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS PCIEM_CTL2_COMP_TIMO_DISABLE #define PCI_EXP_LNKCAP2 PCIER_LINK_CAP2 /* Link Capabilities 2 */ #define PCI_EXP_LNKCTL2 PCIER_LINK_CTL2 /* Link Control 2 */ #define PCI_EXP_LNKSTA2 PCIER_LINK_STA2 /* Link Status 2 */ #define PCI_EXP_FLAGS PCIER_FLAGS /* Capabilities register */ #define PCI_EXP_FLAGS_VERS PCIEM_FLAGS_VERSION /* Capability version */ #define PCI_EXP_TYPE_ROOT_PORT PCIEM_TYPE_ROOT_PORT /* Root Port */ #define PCI_EXP_TYPE_ENDPOINT PCIEM_TYPE_ENDPOINT /* Express Endpoint */ #define PCI_EXP_TYPE_LEG_END PCIEM_TYPE_LEGACY_ENDPOINT /* Legacy Endpoint */ #define PCI_EXP_TYPE_DOWNSTREAM PCIEM_TYPE_DOWNSTREAM_PORT /* Downstream Port */ #define PCI_EXP_FLAGS_SLOT PCIEM_FLAGS_SLOT /* Slot implemented */ #define PCI_EXP_TYPE_RC_EC PCIEM_TYPE_ROOT_EC /* Root Complex Event Collector */ #define PCI_EXP_LNKSTA_CLS PCIEM_LINK_STA_SPEED #define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */ #define PCI_EXP_LNKCAP_SLS_2_5GB 0x01 /* Supported Link Speed 2.5GT/s */ #define PCI_EXP_LNKCAP_SLS_5_0GB 0x02 /* Supported Link Speed 5.0GT/s */ #define PCI_EXP_LNKCAP_SLS_8_0GB 0x03 /* Supported Link Speed 8.0GT/s */ #define PCI_EXP_LNKCAP_SLS_16_0GB 0x04 /* Supported Link Speed 16.0GT/s */ #define PCI_EXP_LNKCAP_SLS_32_0GB 0x05 /* Supported Link Speed 32.0GT/s */ #define PCI_EXP_LNKCAP_SLS_64_0GB 0x06 /* Supported Link Speed 64.0GT/s */ #define PCI_EXP_LNKCAP_MLW 0x03f0 /* Maximum Link Width */ #define PCI_EXP_LNKCAP2_SLS_2_5GB 0x02 /* Supported Link Speed 2.5GT/s */ #define PCI_EXP_LNKCAP2_SLS_5_0GB 0x04 /* Supported Link Speed 5.0GT/s */ #define PCI_EXP_LNKCAP2_SLS_8_0GB 0x08 /* Supported Link Speed 8.0GT/s */ #define PCI_EXP_LNKCAP2_SLS_16_0GB 0x10 /* Supported Link Speed 16.0GT/s */ #define PCI_EXP_LNKCAP2_SLS_32_0GB 0x20 /* Supported Link Speed 32.0GT/s */ #define PCI_EXP_LNKCAP2_SLS_64_0GB 0x40 /* Supported Link Speed 64.0GT/s */ #define PCI_EXP_LNKCTL2_TLS 0x000f #define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */ #define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */ #define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */ #define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */ #define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */ #define PCI_EXP_LNKCTL2_TLS_64_0GT 0x0006 /* Supported Speed 64GT/s */ #define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */ #define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */ #define PCI_MSI_ADDRESS_LO PCIR_MSI_ADDR #define PCI_MSI_ADDRESS_HI PCIR_MSI_ADDR_HIGH #define PCI_MSI_FLAGS PCIR_MSI_CTRL #define PCI_MSI_FLAGS_ENABLE PCIM_MSICTRL_MSI_ENABLE #define PCI_MSIX_FLAGS PCIR_MSIX_CTRL #define PCI_MSIX_FLAGS_ENABLE PCIM_MSIXCTRL_MSIX_ENABLE #define PCI_EXP_LNKCAP_CLKPM 0x00040000 #define PCI_EXP_DEVSTA_TRPND 0x0020 #define IORESOURCE_MEM (1 << SYS_RES_MEMORY) #define 
IORESOURCE_IO (1 << SYS_RES_IOPORT) #define IORESOURCE_IRQ (1 << SYS_RES_IRQ) enum pci_bus_speed { PCI_SPEED_UNKNOWN = -1, PCIE_SPEED_2_5GT, PCIE_SPEED_5_0GT, PCIE_SPEED_8_0GT, PCIE_SPEED_16_0GT, PCIE_SPEED_32_0GT, PCIE_SPEED_64_0GT, }; enum pcie_link_width { PCIE_LNK_WIDTH_RESRV = 0x00, PCIE_LNK_X1 = 0x01, PCIE_LNK_X2 = 0x02, PCIE_LNK_X4 = 0x04, PCIE_LNK_X8 = 0x08, PCIE_LNK_X12 = 0x0c, PCIE_LNK_X16 = 0x10, PCIE_LNK_X32 = 0x20, PCIE_LNK_WIDTH_UNKNOWN = 0xff, }; #define PCIE_LINK_STATE_L0S 0x00000001 #define PCIE_LINK_STATE_L1 0x00000002 #define PCIE_LINK_STATE_CLKPM 0x00000004 typedef int pci_power_t; -#define PCI_D0 PCI_POWERSTATE_D0 -#define PCI_D1 PCI_POWERSTATE_D1 -#define PCI_D2 PCI_POWERSTATE_D2 -#define PCI_D3hot PCI_POWERSTATE_D3 -#define PCI_D3cold 4 +#define PCI_D0 PCI_POWERSTATE_D0 +#define PCI_D1 PCI_POWERSTATE_D1 +#define PCI_D2 PCI_POWERSTATE_D2 +#define PCI_D3hot PCI_POWERSTATE_D3_HOT +#define PCI_D3cold PCI_POWERSTATE_D3_COLD #define PCI_POWER_ERROR PCI_POWERSTATE_UNKNOWN extern const char *pci_power_names[6]; #define PCI_ERR_UNCOR_STATUS PCIR_AER_UC_STATUS #define PCI_ERR_COR_STATUS PCIR_AER_COR_STATUS #define PCI_ERR_ROOT_COMMAND PCIR_AER_ROOTERR_CMD #define PCI_ERR_ROOT_ERR_SRC PCIR_AER_COR_SOURCE_ID #define PCI_EXT_CAP_ID_ERR PCIZ_AER #define PCI_EXT_CAP_ID_L1SS PCIZ_L1PM #define PCI_L1SS_CTL1 0x8 #define PCI_L1SS_CTL1_L1SS_MASK 0xf #define PCI_IRQ_INTX 0x01 #define PCI_IRQ_MSI 0x02 #define PCI_IRQ_MSIX 0x04 #define PCI_IRQ_ALL_TYPES (PCI_IRQ_MSIX|PCI_IRQ_MSI|PCI_IRQ_INTX) #if defined(LINUXKPI_VERSION) && (LINUXKPI_VERSION <= 61000) #define PCI_IRQ_LEGACY PCI_IRQ_INTX #endif struct pci_dev; struct pci_driver { struct list_head node; char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); void (*remove)(struct pci_dev *dev); int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ int (*resume) (struct pci_dev *dev); /* Device woken up */ void (*shutdown) (struct pci_dev *dev); /* Device shutdown */ driver_t bsddriver; devclass_t bsdclass; struct device_driver driver; const struct pci_error_handlers *err_handler; bool isdrm; int bsd_probe_return; int (*bsd_iov_init)(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config); void (*bsd_iov_uninit)(device_t dev); int (*bsd_iov_add_vf)(device_t dev, uint16_t vfnum, const nvlist_t *vf_config); }; struct pci_bus { struct pci_dev *self; /* struct pci_bus *parent */ int domain; int number; }; extern struct list_head pci_drivers; extern struct list_head pci_devices; extern spinlock_t pci_lock; #define __devexit_p(x) x #define module_pci_driver(_drv) \ module_driver(_drv, linux_pci_register_driver, linux_pci_unregister_driver) struct msi_msg { uint32_t data; }; struct pci_msi_desc { struct { bool is_64; } msi_attrib; }; struct msi_desc { struct msi_msg msg; struct pci_msi_desc pci; }; struct msix_entry { int entry; int vector; }; /* * If we find drivers accessing this from multiple KPIs we may have to * refcount objects of this structure. */ struct resource; struct pci_mmio_region { TAILQ_ENTRY(pci_mmio_region) next; struct resource *res; int rid; int type; }; struct pci_dev { struct device dev; struct list_head links; struct pci_driver *pdrv; struct pci_bus *bus; struct pci_dev *root; pci_power_t current_state; uint16_t device; uint16_t vendor; uint16_t subsystem_vendor; uint16_t subsystem_device; unsigned int irq; unsigned int devfn; uint32_t class; uint8_t revision; uint8_t msi_cap; uint8_t msix_cap; bool managed; /* devres "pcim_*(). 
*/ bool want_iomap_res; bool msi_enabled; bool msix_enabled; phys_addr_t rom; size_t romlen; struct msi_desc **msi_desc; char *path_name; spinlock_t pcie_cap_lock; TAILQ_HEAD(, pci_mmio_region) mmio; }; int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name); int pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv, unsigned int flags); bool pci_device_is_present(struct pci_dev *pdev); int linuxkpi_pcim_enable_device(struct pci_dev *pdev); void __iomem **linuxkpi_pcim_iomap_table(struct pci_dev *pdev); void *linuxkpi_pci_iomap_range(struct pci_dev *, int, unsigned long, unsigned long); void *linuxkpi_pci_iomap(struct pci_dev *, int, unsigned long); void linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res); int linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name); int linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name); void linuxkpi_pci_release_region(struct pci_dev *pdev, int bar); void linuxkpi_pci_release_regions(struct pci_dev *pdev); int linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries, int nreq); /* Internal helper function(s). */ struct pci_dev *lkpinew_pci_dev(device_t); void lkpi_pci_devres_release(struct device *, void *); struct pci_dev *lkpi_pci_get_device(uint32_t, uint32_t, struct pci_dev *); struct msi_desc *lkpi_pci_msi_desc_alloc(int); struct device *lkpi_pci_find_irq_dev(unsigned int irq); int _lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec); #define pci_err(pdev, fmt, ...) \ dev_err(&(pdev)->dev, fmt, ##__VA_ARGS__) #define pci_info(pdev, fmt, ...) \ dev_info(&(pdev)->dev, fmt, ##__VA_ARGS__) static inline bool dev_is_pci(struct device *dev) { return (device_get_devclass(dev->bsddev) == devclass_find("pci")); } static inline uint16_t pci_dev_id(struct pci_dev *pdev) { return (PCI_DEVID(pdev->bus->number, pdev->devfn)); } static inline int pci_resource_type(struct pci_dev *pdev, int bar) { struct pci_map *pm; pm = pci_find_bar(pdev->dev.bsddev, PCIR_BAR(bar)); if (!pm) return (-1); if (PCI_BAR_IO(pm->pm_value)) return (SYS_RES_IOPORT); else return (SYS_RES_MEMORY); } /* * All drivers just seem to want to inspect the type not flags. 
*/ static inline int pci_resource_flags(struct pci_dev *pdev, int bar) { int type; type = pci_resource_type(pdev, bar); if (type < 0) return (0); return (1 << type); } static inline const char * pci_name(struct pci_dev *d) { return d->path_name; } static inline void * pci_get_drvdata(struct pci_dev *pdev) { return dev_get_drvdata(&pdev->dev); } static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) { dev_set_drvdata(&pdev->dev, data); } static inline struct pci_dev * pci_dev_get(struct pci_dev *pdev) { if (pdev != NULL) get_device(&pdev->dev); return (pdev); } static __inline void pci_dev_put(struct pci_dev *pdev) { if (pdev != NULL) put_device(&pdev->dev); } static inline int pci_enable_device(struct pci_dev *pdev) { pci_enable_io(pdev->dev.bsddev, SYS_RES_IOPORT); pci_enable_io(pdev->dev.bsddev, SYS_RES_MEMORY); return (0); } static inline void pci_disable_device(struct pci_dev *pdev) { pci_disable_busmaster(pdev->dev.bsddev); } static inline int pci_set_master(struct pci_dev *pdev) { pci_enable_busmaster(pdev->dev.bsddev); return (0); } static inline int pci_set_power_state(struct pci_dev *pdev, int state) { pci_set_powerstate(pdev->dev.bsddev, state); return (0); } static inline int pci_clear_master(struct pci_dev *pdev) { pci_disable_busmaster(pdev->dev.bsddev); return (0); } static inline bool pci_is_root_bus(struct pci_bus *pbus) { return (pbus->self == NULL); } static inline struct pci_dev * pci_upstream_bridge(struct pci_dev *pdev) { if (pci_is_root_bus(pdev->bus)) return (NULL); /* * If we do not have a (proper) "upstream bridge" set, e.g., we point * to ourselves, try to handle this case on the fly like we do * for pcie_find_root_port(). */ if (pdev == pdev->bus->self) { device_t bridge; /* * In the case of DRM drivers, the passed device is a child of * `vgapci`. We want to start the lookup from `vgapci`, so the * parent of the passed `drmn`. * * We can use the `isdrm` flag to determine this. */ bridge = pdev->dev.bsddev; if (pdev->pdrv != NULL && pdev->pdrv->isdrm) bridge = device_get_parent(bridge); if (bridge == NULL) goto done; bridge = device_get_parent(bridge); if (bridge == NULL) goto done; bridge = device_get_parent(bridge); if (bridge == NULL) goto done; if (device_get_devclass(device_get_parent(bridge)) != devclass_find("pci")) goto done; /* * "bridge" is a PCI-to-PCI bridge. Create a Linux pci_dev * for it so it can be returned. */ pdev->bus->self = lkpinew_pci_dev(bridge); } done: return (pdev->bus->self); } #define pci_release_region(pdev, bar) \ linuxkpi_pci_release_region(pdev, bar) #define pci_release_regions(pdev) \ linuxkpi_pci_release_regions(pdev) #define pci_request_regions(pdev, res_name) \ linuxkpi_pci_request_regions(pdev, res_name) static inline void lkpi_pci_disable_msix(struct pci_dev *pdev) { pci_release_msi(pdev->dev.bsddev); /* * The MSIX IRQ numbers associated with this PCI device are no * longer valid and might be re-assigned. Make sure * lkpi_pci_find_irq_dev() does no longer see them by * resetting their references to zero: */ pdev->dev.irq_start = 0; pdev->dev.irq_end = 0; pdev->msix_enabled = false; } /* Only for consistency. No conflict on that one. 
*/ #define pci_disable_msix(pdev) lkpi_pci_disable_msix(pdev) static inline void lkpi_pci_disable_msi(struct pci_dev *pdev) { pci_release_msi(pdev->dev.bsddev); pdev->dev.irq_start = 0; pdev->dev.irq_end = 0; pdev->irq = pdev->dev.irq; pdev->msi_enabled = false; } #define pci_disable_msi(pdev) lkpi_pci_disable_msi(pdev) #define pci_free_irq_vectors(pdev) lkpi_pci_disable_msi(pdev) unsigned long pci_resource_start(struct pci_dev *pdev, int bar); unsigned long pci_resource_len(struct pci_dev *pdev, int bar); static inline bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) { return (pci_resource_start(pdev, bar)); } #define PCI_CAP_ID_EXP PCIY_EXPRESS #define PCI_CAP_ID_PCIX PCIY_PCIX #define PCI_CAP_ID_AGP PCIY_AGP #define PCI_CAP_ID_PM PCIY_PMG #define PCI_EXP_DEVCTL PCIER_DEVICE_CTL #define PCI_EXP_DEVCTL_PAYLOAD PCIEM_CTL_MAX_PAYLOAD #define PCI_EXP_DEVCTL_READRQ PCIEM_CTL_MAX_READ_REQUEST #define PCI_EXP_LNKCTL PCIER_LINK_CTL #define PCI_EXP_LNKSTA PCIER_LINK_STA static inline int pci_find_capability(struct pci_dev *pdev, int capid) { int reg; if (pci_find_cap(pdev->dev.bsddev, capid, ®)) return (0); return (reg); } static inline int pci_pcie_cap(struct pci_dev *dev) { return pci_find_capability(dev, PCI_CAP_ID_EXP); } static inline int pci_find_ext_capability(struct pci_dev *pdev, int capid) { int reg; if (pci_find_extcap(pdev->dev.bsddev, capid, ®)) return (0); return (reg); } #define PCIM_PCAP_PME_SHIFT 11 static __inline bool pci_pme_capable(struct pci_dev *pdev, uint32_t flag) { struct pci_devinfo *dinfo; pcicfgregs *cfg; if (flag > (PCIM_PCAP_D3PME_COLD >> PCIM_PCAP_PME_SHIFT)) return (false); dinfo = device_get_ivars(pdev->dev.bsddev); cfg = &dinfo->cfg; if (cfg->pp.pp_cap == 0) return (false); if ((cfg->pp.pp_cap & (1 << (PCIM_PCAP_PME_SHIFT + flag))) != 0) return (true); return (false); } static inline int pci_disable_link_state(struct pci_dev *pdev, uint32_t flags) { if (!pci_enable_aspm) return (-EPERM); return (-ENXIO); } static inline int pci_read_config_byte(const struct pci_dev *pdev, int where, u8 *val) { *val = (u8)pci_read_config(pdev->dev.bsddev, where, 1); return (0); } static inline int pci_read_config_word(const struct pci_dev *pdev, int where, u16 *val) { *val = (u16)pci_read_config(pdev->dev.bsddev, where, 2); return (0); } static inline int pci_read_config_dword(const struct pci_dev *pdev, int where, u32 *val) { *val = (u32)pci_read_config(pdev->dev.bsddev, where, 4); return (0); } static inline int pci_write_config_byte(const struct pci_dev *pdev, int where, u8 val) { pci_write_config(pdev->dev.bsddev, where, val, 1); return (0); } static inline int pci_write_config_word(const struct pci_dev *pdev, int where, u16 val) { pci_write_config(pdev->dev.bsddev, where, val, 2); return (0); } static inline int pci_write_config_dword(const struct pci_dev *pdev, int where, u32 val) { pci_write_config(pdev->dev.bsddev, where, val, 4); return (0); } int linux_pci_register_driver(struct pci_driver *pdrv); int linux_pci_register_drm_driver(struct pci_driver *pdrv); void linux_pci_unregister_driver(struct pci_driver *pdrv); void linux_pci_unregister_drm_driver(struct pci_driver *pdrv); #define pci_register_driver(pdrv) \ linux_pci_register_driver(pdrv) #define pci_unregister_driver(pdrv) \ linux_pci_unregister_driver(pdrv) /* * Enable msix, positive errors indicate actual number of available * vectors. Negative errors are failures. * * NB: define added to prevent this definition of pci_enable_msix from * clashing with the native FreeBSD version. 
*/ #define pci_enable_msix(...) \ linuxkpi_pci_enable_msix(__VA_ARGS__) #define pci_enable_msix_range(...) \ linux_pci_enable_msix_range(__VA_ARGS__) static inline int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec) { int nvec = maxvec; int rc; if (maxvec < minvec) return (-ERANGE); do { rc = pci_enable_msix(dev, entries, nvec); if (rc < 0) { return (rc); } else if (rc > 0) { if (rc < minvec) return (-ENOSPC); nvec = rc; } } while (rc); return (nvec); } #define pci_enable_msi(pdev) \ linux_pci_enable_msi(pdev) static inline int pci_enable_msi(struct pci_dev *pdev) { return (_lkpi_pci_enable_msi_range(pdev, 1, 1)); } static inline int pci_channel_offline(struct pci_dev *pdev) { return (pci_read_config(pdev->dev.bsddev, PCIR_VENDOR, 2) == PCIV_INVALID); } static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) { return -ENODEV; } static inline void pci_disable_sriov(struct pci_dev *dev) { } #define pci_iomap_range(pdev, mmio_bar, mmio_off, mmio_size) \ linuxkpi_pci_iomap_range(pdev, mmio_bar, mmio_off, mmio_size) #define pci_iomap(pdev, mmio_bar, mmio_size) \ linuxkpi_pci_iomap(pdev, mmio_bar, mmio_size) #define pci_iounmap(pdev, res) \ linuxkpi_pci_iounmap(pdev, res) static inline void lkpi_pci_save_state(struct pci_dev *pdev) { pci_save_state(pdev->dev.bsddev); } static inline void lkpi_pci_restore_state(struct pci_dev *pdev) { pci_restore_state(pdev->dev.bsddev); } #define pci_save_state(dev) lkpi_pci_save_state(dev) #define pci_restore_state(dev) lkpi_pci_restore_state(dev) static inline int pci_reset_function(struct pci_dev *pdev) { return (-ENOSYS); } #define DEFINE_PCI_DEVICE_TABLE(_table) \ const struct pci_device_id _table[] __devinitdata /* XXX This should not be necessary. */ #define pcix_set_mmrbc(d, v) 0 #define pcix_get_max_mmrbc(d) 0 #define pcie_set_readrq(d, v) pci_set_max_read_req((d)->dev.bsddev, (v)) #define PCI_DMA_BIDIRECTIONAL 0 #define PCI_DMA_TODEVICE 1 #define PCI_DMA_FROMDEVICE 2 #define PCI_DMA_NONE 3 #define pci_pool dma_pool #define pci_pool_destroy(...) dma_pool_destroy(__VA_ARGS__) #define pci_pool_alloc(...) dma_pool_alloc(__VA_ARGS__) #define pci_pool_free(...) dma_pool_free(__VA_ARGS__) #define pci_pool_create(_name, _pdev, _size, _align, _alloc) \ dma_pool_create(_name, &(_pdev)->dev, _size, _align, _alloc) #define pci_free_consistent(_hwdev, _size, _vaddr, _dma_handle) \ dma_free_coherent((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \ _size, _vaddr, _dma_handle) #define pci_map_sg(_hwdev, _sg, _nents, _dir) \ dma_map_sg((_hwdev) == NULL ? NULL : &(_hwdev->dev), \ _sg, _nents, (enum dma_data_direction)_dir) #define pci_map_single(_hwdev, _ptr, _size, _dir) \ dma_map_single((_hwdev) == NULL ? NULL : &(_hwdev->dev), \ (_ptr), (_size), (enum dma_data_direction)_dir) #define pci_unmap_single(_hwdev, _addr, _size, _dir) \ dma_unmap_single((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \ _addr, _size, (enum dma_data_direction)_dir) #define pci_unmap_sg(_hwdev, _sg, _nents, _dir) \ dma_unmap_sg((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \ _sg, _nents, (enum dma_data_direction)_dir) #define pci_map_page(_hwdev, _page, _offset, _size, _dir) \ dma_map_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, _page,\ _offset, _size, (enum dma_data_direction)_dir) #define pci_unmap_page(_hwdev, _dma_address, _size, _dir) \ dma_unmap_page((_hwdev) == NULL ? 
NULL : &(_hwdev)->dev, \ _dma_address, _size, (enum dma_data_direction)_dir) #define pci_set_dma_mask(_pdev, mask) dma_set_mask(&(_pdev)->dev, (mask)) #define pci_dma_mapping_error(_pdev, _dma_addr) \ dma_mapping_error(&(_pdev)->dev, _dma_addr) #define pci_set_consistent_dma_mask(_pdev, _mask) \ dma_set_coherent_mask(&(_pdev)->dev, (_mask)) #define DECLARE_PCI_UNMAP_ADDR(x) DEFINE_DMA_UNMAP_ADDR(x); #define DECLARE_PCI_UNMAP_LEN(x) DEFINE_DMA_UNMAP_LEN(x); #define pci_unmap_addr dma_unmap_addr #define pci_unmap_addr_set dma_unmap_addr_set #define pci_unmap_len dma_unmap_len #define pci_unmap_len_set dma_unmap_len_set typedef unsigned int __bitwise pci_channel_state_t; typedef unsigned int __bitwise pci_ers_result_t; enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3, }; enum pci_ers_result { PCI_ERS_RESULT_NONE = 1, PCI_ERS_RESULT_CAN_RECOVER = 2, PCI_ERS_RESULT_NEED_RESET = 3, PCI_ERS_RESULT_DISCONNECT = 4, PCI_ERS_RESULT_RECOVERED = 5, }; /* PCI bus error event callbacks */ struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *dev, enum pci_channel_state error); pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); pci_ers_result_t (*link_reset)(struct pci_dev *dev); pci_ers_result_t (*slot_reset)(struct pci_dev *dev); void (*resume)(struct pci_dev *dev); }; /* FreeBSD does not support SRIOV - yet */ static inline struct pci_dev *pci_physfn(struct pci_dev *dev) { return dev; } static inline bool pci_is_pcie(struct pci_dev *dev) { return !!pci_pcie_cap(dev); } static inline u16 pcie_flags_reg(struct pci_dev *dev) { int pos; u16 reg16; pos = pci_find_capability(dev, PCI_CAP_ID_EXP); if (!pos) return 0; pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); return reg16; } static inline int pci_pcie_type(struct pci_dev *dev) { return (pcie_flags_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; } static inline int pcie_cap_version(struct pci_dev *dev) { return pcie_flags_reg(dev) & PCI_EXP_FLAGS_VERS; } static inline bool pcie_cap_has_lnkctl(struct pci_dev *dev) { int type = pci_pcie_type(dev); return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT || type == PCI_EXP_TYPE_ENDPOINT || type == PCI_EXP_TYPE_LEG_END; } static inline bool pcie_cap_has_devctl(const struct pci_dev *dev) { return true; } static inline bool pcie_cap_has_sltctl(struct pci_dev *dev) { int type = pci_pcie_type(dev); return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT || (type == PCI_EXP_TYPE_DOWNSTREAM && pcie_flags_reg(dev) & PCI_EXP_FLAGS_SLOT); } static inline bool pcie_cap_has_rtctl(struct pci_dev *dev) { int type = pci_pcie_type(dev); return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT || type == PCI_EXP_TYPE_RC_EC; } static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) { if (!pci_is_pcie(dev)) return false; switch (pos) { case PCI_EXP_FLAGS_TYPE: return true; case PCI_EXP_DEVCAP: case PCI_EXP_DEVCTL: case PCI_EXP_DEVSTA: return pcie_cap_has_devctl(dev); case PCI_EXP_LNKCAP: case PCI_EXP_LNKCTL: case PCI_EXP_LNKSTA: return pcie_cap_has_lnkctl(dev); case PCI_EXP_SLTCAP: case PCI_EXP_SLTCTL: case PCI_EXP_SLTSTA: return pcie_cap_has_sltctl(dev); case PCI_EXP_RTCTL: case PCI_EXP_RTCAP: case PCI_EXP_RTSTA: return pcie_cap_has_rtctl(dev); case PCI_EXP_DEVCAP2: case PCI_EXP_DEVCTL2: case PCI_EXP_LNKCAP2: case PCI_EXP_LNKCTL2: case PCI_EXP_LNKSTA2: return pcie_cap_version(dev) > 1; default: return false; } } static inline int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 
*dst) { *dst = 0; if (pos & 3) return -EINVAL; if (!pcie_capability_reg_implemented(dev, pos)) return -EINVAL; return pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, dst); } static inline int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *dst) { *dst = 0; if (pos & 3) return -EINVAL; if (!pcie_capability_reg_implemented(dev, pos)) return -EINVAL; return pci_read_config_word(dev, pci_pcie_cap(dev) + pos, dst); } static inline int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) { if (pos & 1) return -EINVAL; if (!pcie_capability_reg_implemented(dev, pos)) return 0; return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); } static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, uint16_t clear, uint16_t set) { int error; uint16_t v; if (pos == PCI_EXP_LNKCTL || pos == PCI_EXP_RTCTL) spin_lock(&dev->pcie_cap_lock); error = pcie_capability_read_word(dev, pos, &v); if (error == 0) { v &= ~clear; v |= set; error = pcie_capability_write_word(dev, pos, v); } if (pos == PCI_EXP_LNKCTL || pos == PCI_EXP_RTCTL) spin_unlock(&dev->pcie_cap_lock); return (error); } static inline int pcie_capability_set_word(struct pci_dev *dev, int pos, uint16_t val) { return (pcie_capability_clear_and_set_word(dev, pos, 0, val)); } static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos, uint16_t val) { return (pcie_capability_clear_and_set_word(dev, pos, val, 0)); } static inline int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, enum pcie_link_width *width) { *speed = PCI_SPEED_UNKNOWN; *width = PCIE_LNK_WIDTH_UNKNOWN; return (0); } static inline int pci_num_vf(struct pci_dev *dev) { return (0); } static inline enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) { device_t root; uint32_t lnkcap, lnkcap2; int error, pos; root = device_get_parent(dev->dev.bsddev); if (root == NULL) return (PCI_SPEED_UNKNOWN); root = device_get_parent(root); if (root == NULL) return (PCI_SPEED_UNKNOWN); root = device_get_parent(root); if (root == NULL) return (PCI_SPEED_UNKNOWN); if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA || pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS) return (PCI_SPEED_UNKNOWN); if ((error = pci_find_cap(root, PCIY_EXPRESS, &pos)) != 0) return (PCI_SPEED_UNKNOWN); lnkcap2 = pci_read_config(root, pos + PCIER_LINK_CAP2, 4); if (lnkcap2) { /* PCIe r3.0-compliant */ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) return (PCIE_SPEED_2_5GT); if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) return (PCIE_SPEED_5_0GT); if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) return (PCIE_SPEED_8_0GT); if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) return (PCIE_SPEED_16_0GT); if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB) return (PCIE_SPEED_32_0GT); if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_64_0GB) return (PCIE_SPEED_64_0GT); } else { /* pre-r3.0 */ lnkcap = pci_read_config(root, pos + PCIER_LINK_CAP, 4); if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) return (PCIE_SPEED_2_5GT); if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) return (PCIE_SPEED_5_0GT); if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) return (PCIE_SPEED_8_0GT); if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) return (PCIE_SPEED_16_0GT); if (lnkcap & PCI_EXP_LNKCAP_SLS_32_0GB) return (PCIE_SPEED_32_0GT); if (lnkcap & PCI_EXP_LNKCAP_SLS_64_0GB) return (PCIE_SPEED_64_0GT); } return (PCI_SPEED_UNKNOWN); } static inline enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev) { uint32_t lnkcap; pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); if (lnkcap) return ((lnkcap & PCI_EXP_LNKCAP_MLW) >> 4); return 
(PCIE_LNK_WIDTH_UNKNOWN);
}

static inline int
pcie_get_mps(struct pci_dev *dev)
{
	return (pci_get_max_payload(dev->dev.bsddev));
}

static inline uint32_t
PCIE_SPEED2MBS_ENC(enum pci_bus_speed spd)
{
	switch (spd) {
	case PCIE_SPEED_64_0GT:
		return (64000 * 128 / 130);
	case PCIE_SPEED_32_0GT:
		return (32000 * 128 / 130);
	case PCIE_SPEED_16_0GT:
		return (16000 * 128 / 130);
	case PCIE_SPEED_8_0GT:
		return (8000 * 128 / 130);
	case PCIE_SPEED_5_0GT:
		return (5000 * 8 / 10);
	case PCIE_SPEED_2_5GT:
		return (2500 * 8 / 10);
	default:
		return (0);
	}
}

static inline uint32_t
pcie_bandwidth_available(struct pci_dev *pdev,
    struct pci_dev **limiting,
    enum pci_bus_speed *speed,
    enum pcie_link_width *width)
{
	enum pci_bus_speed nspeed = pcie_get_speed_cap(pdev);
	enum pcie_link_width nwidth = pcie_get_width_cap(pdev);

	if (speed)
		*speed = nspeed;
	if (width)
		*width = nwidth;

	return (nwidth * PCIE_SPEED2MBS_ENC(nspeed));
}

static inline bool
pcie_aspm_enabled(struct pci_dev *pdev)
{
	return (false);
}

static inline struct pci_dev *
pcie_find_root_port(struct pci_dev *pdev)
{
	device_t root;

	if (pdev->root != NULL)
		return (pdev->root);

	root = pci_find_pcie_root_port(pdev->dev.bsddev);
	if (root == NULL)
		return (NULL);

	pdev->root = lkpinew_pci_dev(root);
	return (pdev->root);
}

/* These stubs are needed when a driver rips out device "HotPlug" support. */
static inline void
pci_lock_rescan_remove(void)
{
}

static inline void
pci_unlock_rescan_remove(void)
{
}

static __inline void
pci_stop_and_remove_bus_device(struct pci_dev *pdev)
{
}

static inline int
pci_rescan_bus(struct pci_bus *pbus)
{
	device_t *devlist, parent;
	int devcount, error;

	if (!device_is_attached(pbus->self->dev.bsddev))
		return (0);
	/* pci_rescan_method() will work on the pcib (parent). */
	error = BUS_RESCAN(pbus->self->dev.bsddev);
	if (error != 0)
		return (0);

	parent = device_get_parent(pbus->self->dev.bsddev);
	error = device_get_children(parent, &devlist, &devcount);
	if (error != 0)
		return (0);
	if (devcount != 0)
		free(devlist, M_TEMP);

	return (devcount);
}

/*
 * The following functions can be used to attach/detach the LinuxKPI's
 * PCI device runtime.  The pci_driver and pci_device_id pointers are
 * allowed to be NULL; all other pointers must be valid.
 * The pci_dev structure should be zero-initialized before being passed
 * to the linux_pci_attach_device function.
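 *
 * Illustrative sketch only: a semi-native driver holding a device_t
 * might bridge into the LinuxKPI runtime roughly as follows (the
 * "bsddev" variable and the NULL driver/id arguments are assumptions
 * made for this example):
 *
 *	struct pci_dev *pdev;
 *	int error;
 *
 *	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
 *	error = linux_pci_attach_device(bsddev, NULL, NULL, pdev);
 *	if (error == 0) {
 *		... use the LinuxKPI pci_*() KPI on pdev ...
 *		error = linux_pci_detach_device(pdev);
 *	}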
*/ extern int linux_pci_attach_device(device_t, struct pci_driver *, const struct pci_device_id *, struct pci_dev *); extern int linux_pci_detach_device(struct pci_dev *); static inline int pci_dev_present(const struct pci_device_id *cur) { while (cur != NULL && (cur->vendor || cur->device)) { if (pci_find_device(cur->vendor, cur->device) != NULL) { return (1); } cur++; } return (0); } static inline const struct pci_device_id * pci_match_id(const struct pci_device_id *ids, struct pci_dev *pdev) { if (ids == NULL) return (NULL); for (; ids->vendor != 0 || ids->subvendor != 0 || ids->class_mask != 0; ids++) if ((ids->vendor == PCI_ANY_ID || ids->vendor == pdev->vendor) && (ids->device == PCI_ANY_ID || ids->device == pdev->device) && (ids->subvendor == PCI_ANY_ID || ids->subvendor == pdev->subsystem_vendor) && (ids->subdevice == PCI_ANY_ID || ids->subdevice == pdev->subsystem_device) && ((ids->class ^ pdev->class) & ids->class_mask) == 0) return (ids); return (NULL); } struct pci_dev *lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus, unsigned int devfn); #define pci_get_domain_bus_and_slot(domain, bus, devfn) \ lkpi_pci_get_domain_bus_and_slot(domain, bus, devfn) static inline int pci_domain_nr(struct pci_bus *pbus) { return (pbus->domain); } static inline int pci_bus_read_config(struct pci_bus *bus, unsigned int devfn, int pos, uint32_t *val, int len) { *val = pci_read_config(bus->self->dev.bsddev, pos, len); return (0); } static inline int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn, int pos, u16 *val) { uint32_t tmp; int ret; ret = pci_bus_read_config(bus, devfn, pos, &tmp, 2); *val = (u16)tmp; return (ret); } static inline int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn, int pos, u8 *val) { uint32_t tmp; int ret; ret = pci_bus_read_config(bus, devfn, pos, &tmp, 1); *val = (u8)tmp; return (ret); } static inline int pci_bus_write_config(struct pci_bus *bus, unsigned int devfn, int pos, uint32_t val, int size) { pci_write_config(bus->self->dev.bsddev, pos, val, size); return (0); } static inline int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn, int pos, uint8_t val) { return (pci_bus_write_config(bus, devfn, pos, val, 1)); } static inline int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, int pos, uint16_t val) { return (pci_bus_write_config(bus, devfn, pos, val, 2)); } struct pci_dev *lkpi_pci_get_class(unsigned int class, struct pci_dev *from); #define pci_get_class(class, from) lkpi_pci_get_class(class, from) struct pci_dev *lkpi_pci_get_base_class(unsigned int class, struct pci_dev *from); #define pci_get_base_class(class, from) lkpi_pci_get_base_class(class, from) /* -------------------------------------------------------------------------- */ #define pcim_enable_device(pdev) \ linuxkpi_pcim_enable_device(pdev) #define pcim_iomap_table(pdev) \ linuxkpi_pcim_iomap_table(pdev) #define pcim_iomap_regions(pdev, mask, name) \ linuxkpi_pcim_iomap_regions(pdev, mask, name) static inline int pcim_iomap_regions_request_all(struct pci_dev *pdev, uint32_t mask, char *name) { uint32_t requests, req_mask; int bar, error; /* Request all the BARs ("regions") we do not iomap. 
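 * For example, with PCIR_MAX_BAR_0 == 5 and mask == 0x3 (iomap BARs
 * 0-1), req_mask below becomes 0x3c: BARs 2-5 are merely requested,
 * while BARs 0-1 are requested and mapped via pcim_iomap_regions().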
*/ req_mask = ((1 << (PCIR_MAX_BAR_0 + 1)) - 1) & ~mask; for (bar = requests = 0; requests != req_mask; bar++) { if ((req_mask & (1 << bar)) == 0) continue; error = pci_request_region(pdev, bar, name); if (error != 0 && error != -ENODEV) goto err; requests |= (1 << bar); } error = pcim_iomap_regions(pdev, mask, name); if (error != 0) goto err; return (0); err: for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { if ((requests & (1 << bar)) != 0) pci_release_region(pdev, bar); } return (-EINVAL); } /* * We cannot simply re-define pci_get_device() as we would normally do * and then hide it in linux_pci.c as too many semi-native drivers still * include linux/pci.h and run into the conflict with native PCI. Linux drivers * using pci_get_device() need to be changed to call linuxkpi_pci_get_device(). */ static inline struct pci_dev * linuxkpi_pci_get_device(uint32_t vendor, uint32_t device, struct pci_dev *odev) { return (lkpi_pci_get_device(vendor, device, odev)); } /* This is a FreeBSD extension so we can use bus_*(). */ static inline void linuxkpi_pcim_want_to_use_bus_functions(struct pci_dev *pdev) { pdev->want_iomap_res = true; } static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) { return (false); } static inline void * pci_platform_rom(struct pci_dev *pdev, size_t *size) { return (NULL); } static inline void pci_ignore_hotplug(struct pci_dev *pdev) { } static inline const char * pci_power_name(pci_power_t state) { int pstate = state + 1; if (pstate >= 0 && pstate < nitems(pci_power_names)) return (pci_power_names[pstate]); else return (pci_power_names[0]); } static inline int pcie_get_readrq(struct pci_dev *dev) { u16 ctl; if (pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl)) return (-EINVAL); return (128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12)); } static inline bool pci_is_enabled(struct pci_dev *pdev) { return ((pci_read_config(pdev->dev.bsddev, PCIR_COMMAND, 2) & PCIM_CMD_BUSMASTEREN) != 0); } static inline int pci_wait_for_pending_transaction(struct pci_dev *pdev) { return (0); } static inline int pci_assign_resource(struct pci_dev *pdev, int bar) { return (0); } static inline int pci_irq_vector(struct pci_dev *pdev, unsigned int vector) { if (!pdev->msix_enabled && !pdev->msi_enabled) { if (vector != 0) return (-EINVAL); return (pdev->irq); } if (pdev->msix_enabled || pdev->msi_enabled) { if ((pdev->dev.irq_start + vector) >= pdev->dev.irq_end) return (-EINVAL); return (pdev->dev.irq_start + vector); } return (-ENXIO); } static inline int pci_wake_from_d3(struct pci_dev *pdev, bool enable) { pr_debug("%s: TODO\n", __func__); return (0); } #endif /* _LINUXKPI_LINUX_PCI_H_ */ diff --git a/sys/contrib/dev/acpica/include/actypes.h b/sys/contrib/dev/acpica/include/actypes.h index 66333243907b..3c95887b1678 100644 --- a/sys/contrib/dev/acpica/include/actypes.h +++ b/sys/contrib/dev/acpica/include/actypes.h @@ -1,1585 +1,1587 @@ /****************************************************************************** * * Name: actypes.h - Common data types for the entire ACPI subsystem * *****************************************************************************/ /****************************************************************************** * * 1. Copyright Notice * * Some or all of this work - Copyright (c) 1999 - 2025, Intel Corp. * All rights reserved. * * 2. License * * 2.1. This is your license from Intel Corp. under its intellectual property * rights. 
You may have additional license terms from the party that provided * you this software, covering your right to use that party's intellectual * property rights. * * 2.2. Intel grants, free of charge, to any person ("Licensee") obtaining a * copy of the source code appearing in this file ("Covered Code") an * irrevocable, perpetual, worldwide license under Intel's copyrights in the * base code distributed originally by Intel ("Original Intel Code") to copy, * make derivatives, distribute, use and display any portion of the Covered * Code in any form, with the right to sublicense such rights; and * * 2.3. Intel grants Licensee a non-exclusive and non-transferable patent * license (with the right to sublicense), under only those claims of Intel * patents that are infringed by the Original Intel Code, to make, use, sell, * offer to sell, and import the Covered Code and derivative works thereof * solely to the minimum extent necessary to exercise the above copyright * license, and in no event shall the patent license extend to any additions * to or modifications of the Original Intel Code. No other license or right * is granted directly or by implication, estoppel or otherwise; * * The above copyright and patent license is granted only if the following * conditions are met: * * 3. Conditions * * 3.1. Redistribution of Source with Rights to Further Distribute Source. * Redistribution of source code of any substantial portion of the Covered * Code or modification with rights to further distribute source must include * the above Copyright Notice, the above License, this list of Conditions, * and the following Disclaimer and Export Compliance provision. In addition, * Licensee must cause all Covered Code to which Licensee contributes to * contain a file documenting the changes Licensee made to create that Covered * Code and the date of any change. Licensee must include in that file the * documentation of any changes made by any predecessor Licensee. Licensee * must include a prominent statement that the modification is derived, * directly or indirectly, from Original Intel Code. * * 3.2. Redistribution of Source with no Rights to Further Distribute Source. * Redistribution of source code of any substantial portion of the Covered * Code or modification without rights to further distribute source must * include the following Disclaimer and Export Compliance provision in the * documentation and/or other materials provided with distribution. In * addition, Licensee may not authorize further sublicense of source of any * portion of the Covered Code, and must include terms to the effect that the * license from Licensee to its licensee is limited to the intellectual * property embodied in the software Licensee provides to its licensee, and * not to intellectual property embodied in modifications its licensee may * make. * * 3.3. Redistribution of Executable. Redistribution in executable form of any * substantial portion of the Covered Code or modification must reproduce the * above Copyright Notice, and the following Disclaimer and Export Compliance * provision in the documentation and/or other materials provided with the * distribution. * * 3.4. Intel retains all right, title, and interest in and to the Original * Intel Code. * * 3.5. Neither the name Intel nor any other trademark owned or controlled by * Intel shall be used in advertising or otherwise to promote the sale, use or * other dealings in products derived from or relating to the Covered Code * without prior written authorization from Intel. 
* * 4. Disclaimer and Export Compliance * * 4.1. INTEL MAKES NO WARRANTY OF ANY KIND REGARDING ANY SOFTWARE PROVIDED * HERE. ANY SOFTWARE ORIGINATING FROM INTEL OR DERIVED FROM INTEL SOFTWARE * IS PROVIDED "AS IS," AND INTEL WILL NOT PROVIDE ANY SUPPORT, ASSISTANCE, * INSTALLATION, TRAINING OR OTHER SERVICES. INTEL WILL NOT PROVIDE ANY * UPDATES, ENHANCEMENTS OR EXTENSIONS. INTEL SPECIFICALLY DISCLAIMS ANY * IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT AND FITNESS FOR A * PARTICULAR PURPOSE. * * 4.2. IN NO EVENT SHALL INTEL HAVE ANY LIABILITY TO LICENSEE, ITS LICENSEES * OR ANY OTHER THIRD PARTY, FOR ANY LOST PROFITS, LOST DATA, LOSS OF USE OR * COSTS OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY INDIRECT, * SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, UNDER ANY * CAUSE OF ACTION OR THEORY OF LIABILITY, AND IRRESPECTIVE OF WHETHER INTEL * HAS ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS * SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE ESSENTIAL PURPOSE OF ANY * LIMITED REMEDY. * * 4.3. Licensee shall not export, either directly or indirectly, any of this * software or system incorporating such software without first obtaining any * required license or other approval from the U. S. Department of Commerce or * any other agency or department of the United States Government. In the * event Licensee exports any such software from the United States or * re-exports any such software from a foreign destination, Licensee shall * ensure that the distribution and export/re-export of the software is in * compliance with all laws, regulations, orders, or other restrictions of the * U.S. Export Administration Regulations. Licensee agrees that neither it nor * any of its subsidiaries will export/re-export any technical data, process, * software, or service, directly or indirectly, to any country for which the * United States government or any agency thereof requires an export license, * other governmental approval, or letter of assurance, without first obtaining * such license, approval or letter. * ***************************************************************************** * * Alternatively, you may choose to be licensed under the terms of the * following license: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, you may choose to be licensed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * *****************************************************************************/ #ifndef __ACTYPES_H__ #define __ACTYPES_H__ /* acpisrc:StructDefs -- for acpisrc conversion */ /* * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent * header and must be either 32 or 64. 16-bit ACPICA is no longer * supported, as of 12/2006. */ #ifndef ACPI_MACHINE_WIDTH #error ACPI_MACHINE_WIDTH not defined #endif /* * Data type ranges * Note: These macros are designed to be compiler independent as well as * working around problems that some 32-bit compilers have with 64-bit * constants. */ #define ACPI_UINT8_MAX (UINT8) (~((UINT8) 0)) /* 0xFF */ #define ACPI_UINT16_MAX (UINT16)(~((UINT16) 0)) /* 0xFFFF */ #define ACPI_UINT32_MAX (UINT32)(~((UINT32) 0)) /* 0xFFFFFFFF */ #define ACPI_UINT64_MAX (UINT64)(~((UINT64) 0)) /* 0xFFFFFFFFFFFFFFFF */ #define ACPI_ASCII_MAX 0x7F /* * Architecture-specific ACPICA Subsystem Data Types * * The goal of these types is to provide source code portability across * 16-bit, 32-bit, and 64-bit targets. * * 1) The following types are of fixed size for all targets (16/32/64): * * BOOLEAN Logical boolean * * UINT8 8-bit (1 byte) unsigned value * UINT16 16-bit (2 byte) unsigned value * UINT32 32-bit (4 byte) unsigned value * UINT64 64-bit (8 byte) unsigned value * * INT16 16-bit (2 byte) signed value * INT32 32-bit (4 byte) signed value * INT64 64-bit (8 byte) signed value * * COMPILER_DEPENDENT_UINT64/INT64 - These types are defined in the * compiler-dependent header(s) and were introduced because there is no * common 64-bit integer type across the various compilation models, as * shown in the table below. * * Datatype LP64 ILP64 LLP64 ILP32 LP32 16bit * char 8 8 8 8 8 8 * short 16 16 16 16 16 16 * _int32 32 * int 32 64 32 32 16 16 * long 64 64 32 32 32 32 * long long 64 64 * pointer 64 64 64 32 32 32 * * Note: ILP64 and LP32 are currently not supported. * * * 2) These types represent the native word size of the target mode of the * processor, and may be 16-bit, 32-bit, or 64-bit as required. They are * usually used for memory allocation, efficient loop counters, and array * indexes. The types are similar to the size_t type in the C library and * are required because there is no C type that consistently represents the * native data width. ACPI_SIZE is needed because there is no guarantee * that a kernel-level C library is present. 
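 *
 * For example, on an ILP32 target ACPI_SIZE is 32 bits wide, while on
 * an LP64 target it is 64 bits wide, matching the native word size.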
 *
 * ACPI_SIZE        16/32/64-bit unsigned value
 * ACPI_NATIVE_INT  16/32/64-bit signed value
 */

/*******************************************************************************
 *
 * Common types for all compilers, all targets
 *
 ******************************************************************************/

#ifndef ACPI_USE_SYSTEM_INTTYPES

typedef unsigned char                   BOOLEAN;
typedef unsigned char                   UINT8;
typedef unsigned short                  UINT16;
typedef short                           INT16;
typedef COMPILER_DEPENDENT_UINT64       UINT64;
typedef COMPILER_DEPENDENT_INT64        INT64;

#endif /* ACPI_USE_SYSTEM_INTTYPES */

/*
 * Value returned by AcpiOsGetThreadId. There is no standard "thread_id"
 * across operating systems or even the various UNIX systems. Since ACPICA
 * only needs the thread ID as a unique thread identifier, we use a UINT64
 * as the only common data type - it will accommodate any type of pointer or
 * any type of integer. It is up to the host-dependent OSL to cast the
 * native thread ID type to a UINT64 (in AcpiOsGetThreadId).
 */
#define ACPI_THREAD_ID                  UINT64

/*******************************************************************************
 *
 * Types specific to 64-bit targets
 *
 ******************************************************************************/

#if ACPI_MACHINE_WIDTH == 64

#ifndef ACPI_USE_SYSTEM_INTTYPES

typedef unsigned int                    UINT32;
typedef int                             INT32;

#endif /* ACPI_USE_SYSTEM_INTTYPES */

typedef INT64                           ACPI_NATIVE_INT;
typedef UINT64                          ACPI_SIZE;
typedef UINT64                          ACPI_IO_ADDRESS;
typedef UINT64                          ACPI_PHYSICAL_ADDRESS;

#define ACPI_MAX_PTR                    ACPI_UINT64_MAX
#define ACPI_SIZE_MAX                   ACPI_UINT64_MAX
#define ACPI_USE_NATIVE_DIVIDE          /* Has native 64-bit integer support */
#define ACPI_USE_NATIVE_MATH64          /* Has native 64-bit integer support */

/*
 * In the case of the Itanium Processor Family (IPF), the hardware does not
 * support misaligned memory transfers. Set the MISALIGNMENT_NOT_SUPPORTED
 * flag to indicate that special precautions must be taken to avoid alignment
 * faults. (IA64 or ia64 is currently used by existing compilers to indicate
 * IPF.)
 *
 * Note: EM64T and other X86-64 processors support misaligned transfers,
 * so there is no need to define this flag.
 */
#if defined (__IA64__) || defined (__ia64__)
#define ACPI_MISALIGNMENT_NOT_SUPPORTED
#endif

/*******************************************************************************
 *
 * Types specific to 32-bit targets
 *
 ******************************************************************************/

#elif ACPI_MACHINE_WIDTH == 32

#ifndef ACPI_USE_SYSTEM_INTTYPES

typedef unsigned int                    UINT32;
typedef int                             INT32;

#endif /* ACPI_USE_SYSTEM_INTTYPES */

typedef INT32                           ACPI_NATIVE_INT;
typedef UINT32                          ACPI_SIZE;

#ifdef ACPI_32BIT_PHYSICAL_ADDRESS

/*
 * OSPMs can define this to shrink the size of the structures for 32-bit
 * non-PAE environments. The ASL compiler may always define this to generate
 * 32-bit OSPM-compliant tables.
 */
typedef UINT32                          ACPI_IO_ADDRESS;
typedef UINT32                          ACPI_PHYSICAL_ADDRESS;

#else /* ACPI_32BIT_PHYSICAL_ADDRESS */

/*
 * It has been reported that, after some calculations, physical addresses
 * can wrap past the 32-bit boundary in a 32-bit PAE environment.
* https://bugzilla.kernel.org/show_bug.cgi?id=87971 */ typedef UINT64 ACPI_IO_ADDRESS; typedef UINT64 ACPI_PHYSICAL_ADDRESS; #endif /* ACPI_32BIT_PHYSICAL_ADDRESS */ #define ACPI_MAX_PTR ACPI_UINT32_MAX #define ACPI_SIZE_MAX ACPI_UINT32_MAX #else /* ACPI_MACHINE_WIDTH must be either 64 or 32 */ #error unknown ACPI_MACHINE_WIDTH #endif /******************************************************************************* * * OS-dependent types * * If the defaults below are not appropriate for the host system, they can * be defined in the OS-specific header, and this will take precedence. * ******************************************************************************/ /* Flags for AcpiOsAcquireLock/AcpiOsReleaseLock */ #ifndef ACPI_CPU_FLAGS #define ACPI_CPU_FLAGS ACPI_SIZE #endif /* Object returned from AcpiOsCreateCache */ #ifndef ACPI_CACHE_T #ifdef ACPI_USE_LOCAL_CACHE #define ACPI_CACHE_T ACPI_MEMORY_LIST #else #define ACPI_CACHE_T void * #endif #endif /* * Synchronization objects - Mutexes, Semaphores, and SpinLocks */ #if (ACPI_MUTEX_TYPE == ACPI_BINARY_SEMAPHORE) /* * These macros are used if the host OS does not support a mutex object. * Map the OSL Mutex interfaces to binary semaphores. */ #define ACPI_MUTEX ACPI_SEMAPHORE #define AcpiOsCreateMutex(OutHandle) AcpiOsCreateSemaphore (1, 1, OutHandle) #define AcpiOsDeleteMutex(Handle) (void) AcpiOsDeleteSemaphore (Handle) #define AcpiOsAcquireMutex(Handle,Time) AcpiOsWaitSemaphore (Handle, 1, Time) #define AcpiOsReleaseMutex(Handle) (void) AcpiOsSignalSemaphore (Handle, 1) #endif /* Configurable types for synchronization objects */ #ifndef ACPI_SPINLOCK #define ACPI_SPINLOCK void * #endif #ifndef ACPI_SEMAPHORE #define ACPI_SEMAPHORE void * #endif #ifndef ACPI_MUTEX #define ACPI_MUTEX void * #endif /******************************************************************************* * * Compiler-dependent types * * If the defaults below are not appropriate for the host compiler, they can * be defined in the compiler-specific header, and this will take precedence. * ******************************************************************************/ /* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ #ifndef ACPI_UINTPTR_T #define ACPI_UINTPTR_T void * #endif /* * ACPI_PRINTF_LIKE is used to tag functions as "printf-like" because * some compilers can catch printf format string problems */ #ifndef ACPI_PRINTF_LIKE #define ACPI_PRINTF_LIKE(c) #endif /* * Some compilers complain about unused variables. Sometimes we don't want * to use all the variables (for example, _AcpiModuleName). This allows us * to tell the compiler in a per-variable manner that a variable * is unused */ #ifndef ACPI_UNUSED_VAR #define ACPI_UNUSED_VAR #endif /* * All ACPICA external functions that are available to the rest of the * kernel are tagged with these macros which can be defined as appropriate * for the host. * * Notes: * ACPI_EXPORT_SYMBOL_INIT is used for initialization and termination * interfaces that may need special processing. * ACPI_EXPORT_SYMBOL is used for all other public external functions. */ #ifndef ACPI_EXPORT_SYMBOL_INIT #define ACPI_EXPORT_SYMBOL_INIT(Symbol) #endif #ifndef ACPI_EXPORT_SYMBOL #define ACPI_EXPORT_SYMBOL(Symbol) #endif /* * Compiler/Clibrary-dependent debug initialization. Used for ACPICA * utilities only. 
*/ #ifndef ACPI_DEBUG_INITIALIZE #define ACPI_DEBUG_INITIALIZE() #endif /******************************************************************************* * * Configuration * ******************************************************************************/ #ifdef ACPI_NO_MEM_ALLOCATIONS #define ACPI_ALLOCATE(a) NULL #define ACPI_ALLOCATE_ZEROED(a) NULL #define ACPI_FREE(a) #define ACPI_MEM_TRACKING(a) #else /* ACPI_NO_MEM_ALLOCATIONS */ #ifdef ACPI_DBG_TRACK_ALLOCATIONS /* * Memory allocation tracking (used by AcpiExec to detect memory leaks) */ #define ACPI_MEM_PARAMETERS _COMPONENT, _AcpiModuleName, __LINE__ #define ACPI_ALLOCATE(a) AcpiUtAllocateAndTrack ((ACPI_SIZE) (a), ACPI_MEM_PARAMETERS) #define ACPI_ALLOCATE_ZEROED(a) AcpiUtAllocateZeroedAndTrack ((ACPI_SIZE) (a), ACPI_MEM_PARAMETERS) #define ACPI_FREE(a) AcpiUtFreeAndTrack (a, ACPI_MEM_PARAMETERS) #define ACPI_MEM_TRACKING(a) a #else /* * Normal memory allocation directly via the OS services layer */ #define ACPI_ALLOCATE(a) AcpiOsAllocate ((ACPI_SIZE) (a)) #define ACPI_ALLOCATE_ZEROED(a) AcpiOsAllocateZeroed ((ACPI_SIZE) (a)) #define ACPI_FREE(a) AcpiOsFree (a) #define ACPI_MEM_TRACKING(a) #endif /* ACPI_DBG_TRACK_ALLOCATIONS */ #endif /* ACPI_NO_MEM_ALLOCATIONS */ /****************************************************************************** * * ACPI Specification constants (Do not change unless the specification * changes) * *****************************************************************************/ /* Number of distinct FADT-based GPE register blocks (GPE0 and GPE1) */ #define ACPI_MAX_GPE_BLOCKS 2 /* Default ACPI register widths */ #define ACPI_GPE_REGISTER_WIDTH 8 #define ACPI_PM1_REGISTER_WIDTH 16 #define ACPI_PM2_REGISTER_WIDTH 8 #define ACPI_PM_TIMER_WIDTH 32 #define ACPI_RESET_REGISTER_WIDTH 8 /* Names within the namespace are 4 bytes long */ #define ACPI_NAMESEG_SIZE 4 /* Fixed by ACPI spec */ #define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */ #define ACPI_PATH_SEPARATOR '.' 
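
/*
 * For example, in the pathname "\_SB_.PCI0.ISA_" each name segment is
 * exactly ACPI_NAMESEG_SIZE (4) characters long, and a segment plus its
 * '.' separator accounts for ACPI_PATH_SEGMENT_LENGTH (5) characters.
 */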
/* Sizes for ACPI table headers */ #define ACPI_OEM_ID_SIZE 6 #define ACPI_OEM_TABLE_ID_SIZE 8 /* ACPI/PNP hardware IDs */ #define PCI_ROOT_HID_STRING "PNP0A03" #define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08" /* PM Timer ticks per second (HZ) */ #define ACPI_PM_TIMER_FREQUENCY 3579545 /******************************************************************************* * * Independent types * ******************************************************************************/ /* Logical defines and NULL */ #ifdef FALSE #undef FALSE #endif #define FALSE (1 == 0) #ifdef TRUE #undef TRUE #endif #define TRUE (1 == 1) #ifndef NULL #define NULL (void *) 0 #endif /* * Miscellaneous types */ typedef UINT32 ACPI_STATUS; /* All ACPI Exceptions */ typedef UINT32 ACPI_NAME; /* 4-byte ACPI name */ typedef char * ACPI_STRING; /* Null terminated ASCII string */ typedef void * ACPI_HANDLE; /* Actually a ptr to a NS Node */ /* Time constants for timer calculations */ #define ACPI_MSEC_PER_SEC 1000L #define ACPI_USEC_PER_MSEC 1000L #define ACPI_USEC_PER_SEC 1000000L #define ACPI_100NSEC_PER_USEC 10L #define ACPI_100NSEC_PER_MSEC 10000L #define ACPI_100NSEC_PER_SEC 10000000L #define ACPI_NSEC_PER_USEC 1000L #define ACPI_NSEC_PER_MSEC 1000000L #define ACPI_NSEC_PER_SEC 1000000000L #define ACPI_TIME_AFTER(a, b) ((INT64)((b) - (a)) < 0) /* Owner IDs are used to track namespace nodes for selective deletion */ typedef UINT16 ACPI_OWNER_ID; #define ACPI_OWNER_ID_MAX 0xFFF /* 4095 possible owner IDs */ #define ACPI_INTEGER_BIT_SIZE 64 #define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */ #define ACPI_MAX64_DECIMAL_DIGITS 20 #define ACPI_MAX32_DECIMAL_DIGITS 10 #define ACPI_MAX16_DECIMAL_DIGITS 5 #define ACPI_MAX8_DECIMAL_DIGITS 3 /* * Constants with special meanings */ #define ACPI_ROOT_OBJECT ((ACPI_HANDLE) ACPI_TO_POINTER (ACPI_MAX_PTR)) #define ACPI_WAIT_FOREVER 0xFFFF /* UINT16, as per ACPI spec */ #define ACPI_DO_NOT_WAIT 0 /* * Obsolete: Acpi integer width. In ACPI version 1 (1996), integers are * 32 bits. In ACPI version 2 (2000) and later, integers are max 64 bits. * Note that this pertains to the ACPI integer type only, not to other * integers used in the implementation of the ACPICA subsystem. * * 01/2010: This type is obsolete and has been removed from the entire ACPICA * code base. It remains here for compatibility with device drivers that use * the type. However, it will be removed in the future. 
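 * New code should use UINT64 directly instead of this typedef.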
*/ typedef UINT64 ACPI_INTEGER; #define ACPI_INTEGER_MAX ACPI_UINT64_MAX /******************************************************************************* * * Commonly used macros * ******************************************************************************/ /* Data manipulation */ #define ACPI_LOBYTE(Integer) ((UINT8) (UINT16)(Integer)) #define ACPI_HIBYTE(Integer) ((UINT8) (((UINT16)(Integer)) >> 8)) #define ACPI_LOWORD(Integer) ((UINT16) (UINT32)(Integer)) #define ACPI_HIWORD(Integer) ((UINT16)(((UINT32)(Integer)) >> 16)) #define ACPI_LODWORD(Integer64) ((UINT32) (UINT64)(Integer64)) #define ACPI_HIDWORD(Integer64) ((UINT32)(((UINT64)(Integer64)) >> 32)) #define ACPI_SET_BIT(target,bit) ((target) |= (bit)) #define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit)) #define ACPI_MIN(a,b) (((a)<(b))?(a):(b)) #define ACPI_MAX(a,b) (((a)>(b))?(a):(b)) /* Size calculation */ #define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) /* Pointer manipulation */ #define ACPI_CAST_PTR(t, p) ((t *) (ACPI_UINTPTR_T) (p)) #define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (ACPI_UINTPTR_T) (p)) #define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (UINT8, (a)) + (ACPI_SIZE)(b))) #define ACPI_SUB_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (UINT8, (a)) - (ACPI_SIZE)(b))) #define ACPI_PTR_DIFF(a, b) ((ACPI_SIZE) (ACPI_CAST_PTR (UINT8, (a)) - ACPI_CAST_PTR (UINT8, (b)))) /* Pointer/Integer type conversions */ #define ACPI_TO_POINTER(i) ACPI_CAST_PTR (void, (ACPI_SIZE) (i)) #ifndef ACPI_TO_INTEGER #define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) 0) #endif #ifndef ACPI_OFFSET #define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0) #endif #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) /* Optimizations for 4-character (32-bit) ACPI_NAME manipulation */ #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED #define ACPI_COMPARE_NAMESEG(a,b) (*ACPI_CAST_PTR (UINT32, (a)) == *ACPI_CAST_PTR (UINT32, (b))) #define ACPI_COPY_NAMESEG(dest,src) (*ACPI_CAST_PTR (UINT32, (dest)) = *ACPI_CAST_PTR (UINT32, (src))) #else #define ACPI_COMPARE_NAMESEG(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAMESEG_SIZE)) #define ACPI_COPY_NAMESEG(dest,src) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAMESEG_SIZE)) #endif /* Support for the special RSDP signature (8 characters) */ #define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, (sizeof(a) < 8) ? ACPI_NAMESEG_SIZE : 8)) #define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8)) /* Support for OEMx signature (x can be any character) */ #define ACPI_IS_OEM_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_OEM_NAME, 3) &&\ strnlen (a, ACPI_NAMESEG_SIZE) == ACPI_NAMESEG_SIZE) /* * Algorithm to obtain access bit or byte width. * Can be used with AccessSize field of ACPI_GENERIC_ADDRESS and * ACPI_RESOURCE_GENERIC_REGISTER. 
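 *
 * For example, an AccessSize of 1 (byte access) yields
 * ACPI_ACCESS_BIT_WIDTH(1) = 1 << (1 + 2) = 8 bits and
 * ACPI_ACCESS_BYTE_WIDTH(1) = 1 << (1 - 1) = 1 byte, while an
 * AccessSize of 3 (dword access) yields 32 bits and 4 bytes.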
 */
#define ACPI_ACCESS_BIT_SHIFT           2
#define ACPI_ACCESS_BYTE_SHIFT          -1
#define ACPI_ACCESS_BIT_MAX             (31 - ACPI_ACCESS_BIT_SHIFT)
#define ACPI_ACCESS_BYTE_MAX            (31 - ACPI_ACCESS_BYTE_SHIFT)
#define ACPI_ACCESS_BIT_DEFAULT         (8 - ACPI_ACCESS_BIT_SHIFT)
#define ACPI_ACCESS_BYTE_DEFAULT        (8 - ACPI_ACCESS_BYTE_SHIFT)
#define ACPI_ACCESS_BIT_WIDTH(size)     (1 << ((size) + ACPI_ACCESS_BIT_SHIFT))
#define ACPI_ACCESS_BYTE_WIDTH(size)    (1 << ((size) + ACPI_ACCESS_BYTE_SHIFT))

/*******************************************************************************
 *
 * Miscellaneous constants
 *
 ******************************************************************************/

/*
 * Initialization sequence options
 */
#define ACPI_FULL_INITIALIZATION        0x0000
#define ACPI_NO_FACS_INIT               0x0001
#define ACPI_NO_ACPI_ENABLE             0x0002
#define ACPI_NO_HARDWARE_INIT           0x0004
#define ACPI_NO_EVENT_INIT              0x0008
#define ACPI_NO_HANDLER_INIT            0x0010
#define ACPI_NO_OBJECT_INIT             0x0020
#define ACPI_NO_DEVICE_INIT             0x0040
#define ACPI_NO_ADDRESS_SPACE_INIT      0x0080

/*
 * Initialization state
 */
#define ACPI_SUBSYSTEM_INITIALIZE       0x01
#define ACPI_INITIALIZED_OK             0x02

/*
 * Power state values
 */
#define ACPI_STATE_UNKNOWN              (UINT8) 0xFF

#define ACPI_STATE_S0                   (UINT8) 0
#define ACPI_STATE_S1                   (UINT8) 1
#define ACPI_STATE_S2                   (UINT8) 2
#define ACPI_STATE_S3                   (UINT8) 3
#define ACPI_STATE_S4                   (UINT8) 4
#define ACPI_STATE_S5                   (UINT8) 5
#define ACPI_S_STATES_MAX               ACPI_STATE_S5
#define ACPI_S_STATE_COUNT              6

#define ACPI_STATE_D0                   (UINT8) 0
#define ACPI_STATE_D1                   (UINT8) 1
#define ACPI_STATE_D2                   (UINT8) 2
-#define ACPI_STATE_D3                   (UINT8) 3
-#define ACPI_D_STATES_MAX               ACPI_STATE_D3
-#define ACPI_D_STATE_COUNT              4
+#define ACPI_STATE_D3_HOT               (UINT8) 3
+#define ACPI_STATE_D3_COLD              (UINT8) 4
+#define ACPI_STATE_D3                   ACPI_STATE_D3_COLD
+#define ACPI_D_STATES_MAX               ACPI_STATE_D3_COLD
+#define ACPI_D_STATE_COUNT              5

#define ACPI_STATE_C0                   (UINT8) 0
#define ACPI_STATE_C1                   (UINT8) 1
#define ACPI_STATE_C2                   (UINT8) 2
#define ACPI_STATE_C3                   (UINT8) 3
#define ACPI_C_STATES_MAX               ACPI_STATE_C3
#define ACPI_C_STATE_COUNT              4

/*
 * Sleep type invalid value
 */
#define ACPI_SLEEP_TYPE_MAX             0x7
#define ACPI_SLEEP_TYPE_INVALID         0xFF

/*
 * Standard notify values
 */
#define ACPI_NOTIFY_BUS_CHECK           (UINT8) 0x00
#define ACPI_NOTIFY_DEVICE_CHECK        (UINT8) 0x01
#define ACPI_NOTIFY_DEVICE_WAKE         (UINT8) 0x02
#define ACPI_NOTIFY_EJECT_REQUEST       (UINT8) 0x03
#define ACPI_NOTIFY_DEVICE_CHECK_LIGHT  (UINT8) 0x04
#define ACPI_NOTIFY_FREQUENCY_MISMATCH  (UINT8) 0x05
#define ACPI_NOTIFY_BUS_MODE_MISMATCH   (UINT8) 0x06
#define ACPI_NOTIFY_POWER_FAULT         (UINT8) 0x07
#define ACPI_NOTIFY_CAPABILITIES_CHECK  (UINT8) 0x08
#define ACPI_NOTIFY_DEVICE_PLD_CHECK    (UINT8) 0x09
#define ACPI_NOTIFY_RESERVED            (UINT8) 0x0A
#define ACPI_NOTIFY_LOCALITY_UPDATE     (UINT8) 0x0B
#define ACPI_NOTIFY_SHUTDOWN_REQUEST    (UINT8) 0x0C
#define ACPI_NOTIFY_AFFINITY_UPDATE     (UINT8) 0x0D
#define ACPI_NOTIFY_MEMORY_UPDATE       (UINT8) 0x0E
#define ACPI_NOTIFY_DISCONNECT_RECOVER  (UINT8) 0x0F

#define ACPI_GENERIC_NOTIFY_MAX         0x0F
#define ACPI_SPECIFIC_NOTIFY_MAX        0x84

/*
 * Types associated with ACPI names and objects. The first group of
 * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition
 * of the ACPI ObjectType() operator (See the ACPI Spec). Therefore,
 * only add to the first group if the spec changes.
 *
 * NOTE: Types must be kept in sync with the global AcpiNsProperties
 * and AcpiNsTypeNames arrays.
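 *
 * For example, evaluating the ObjectType() operator on a Device yields
 * 6 (ACPI_TYPE_DEVICE below); values above ACPI_TYPE_EXTERNAL_MAX are
 * ACPICA-internal and are never returned by the operator.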
*/ typedef UINT32 ACPI_OBJECT_TYPE; #define ACPI_TYPE_ANY 0x00 #define ACPI_TYPE_INTEGER 0x01 /* Byte/Word/Dword/Zero/One/Ones */ #define ACPI_TYPE_STRING 0x02 #define ACPI_TYPE_BUFFER 0x03 #define ACPI_TYPE_PACKAGE 0x04 /* ByteConst, multiple DataTerm/Constant/SuperName */ #define ACPI_TYPE_FIELD_UNIT 0x05 #define ACPI_TYPE_DEVICE 0x06 /* Name, multiple Node */ #define ACPI_TYPE_EVENT 0x07 #define ACPI_TYPE_METHOD 0x08 /* Name, ByteConst, multiple Code */ #define ACPI_TYPE_MUTEX 0x09 #define ACPI_TYPE_REGION 0x0A #define ACPI_TYPE_POWER 0x0B /* Name,ByteConst,WordConst,multi Node */ #define ACPI_TYPE_PROCESSOR 0x0C /* Name,ByteConst,DWordConst,ByteConst,multi NmO */ #define ACPI_TYPE_THERMAL 0x0D /* Name, multiple Node */ #define ACPI_TYPE_BUFFER_FIELD 0x0E #define ACPI_TYPE_DDB_HANDLE 0x0F #define ACPI_TYPE_DEBUG_OBJECT 0x10 #define ACPI_TYPE_EXTERNAL_MAX 0x10 #define ACPI_NUM_TYPES (ACPI_TYPE_EXTERNAL_MAX + 1) /* * These are object types that do not map directly to the ACPI * ObjectType() operator. They are used for various internal purposes * only. If new predefined ACPI_TYPEs are added (via the ACPI * specification), these internal types must move upwards. (There * is code that depends on these values being contiguous with the * external types above.) */ #define ACPI_TYPE_LOCAL_REGION_FIELD 0x11 #define ACPI_TYPE_LOCAL_BANK_FIELD 0x12 #define ACPI_TYPE_LOCAL_INDEX_FIELD 0x13 #define ACPI_TYPE_LOCAL_REFERENCE 0x14 /* Arg#, Local#, Name, Debug, RefOf, Index */ #define ACPI_TYPE_LOCAL_ALIAS 0x15 #define ACPI_TYPE_LOCAL_METHOD_ALIAS 0x16 #define ACPI_TYPE_LOCAL_NOTIFY 0x17 #define ACPI_TYPE_LOCAL_ADDRESS_HANDLER 0x18 #define ACPI_TYPE_LOCAL_RESOURCE 0x19 #define ACPI_TYPE_LOCAL_RESOURCE_FIELD 0x1A #define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple ObjectList Nodes */ #define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */ #define ACPI_TOTAL_TYPES (ACPI_TYPE_NS_NODE_MAX + 1) /* * These are special object types that never appear in * a Namespace node, only in an object of ACPI_OPERAND_OBJECT */ #define ACPI_TYPE_LOCAL_EXTRA 0x1C #define ACPI_TYPE_LOCAL_DATA 0x1D #define ACPI_TYPE_LOCAL_MAX 0x1D /* All types above here are invalid */ #define ACPI_TYPE_INVALID 0x1E #define ACPI_TYPE_NOT_FOUND 0xFF #define ACPI_NUM_NS_TYPES (ACPI_TYPE_INVALID + 1) /* * All I/O */ #define ACPI_READ 0 #define ACPI_WRITE 1 #define ACPI_IO_MASK 1 /* * Event Types: Fixed & General Purpose */ typedef UINT32 ACPI_EVENT_TYPE; /* * Fixed events */ #define ACPI_EVENT_PMTIMER 0 #define ACPI_EVENT_GLOBAL 1 #define ACPI_EVENT_POWER_BUTTON 2 #define ACPI_EVENT_SLEEP_BUTTON 3 #define ACPI_EVENT_RTC 4 #define ACPI_EVENT_MAX 4 #define ACPI_NUM_FIXED_EVENTS ACPI_EVENT_MAX + 1 /* * Event Status - Per event * ------------- * The encoding of ACPI_EVENT_STATUS is illustrated below. * Note that a set bit (1) indicates the property is TRUE * (e.g. if bit 0 is set then the event is enabled). * +-------------+-+-+-+-+-+-+ * | Bits 31:6 |5|4|3|2|1|0| * +-------------+-+-+-+-+-+-+ * | | | | | | | * | | | | | | +- Enabled? * | | | | | +--- Enabled for wake? * | | | | +----- Status bit set? * | | | +------- Enable bit set? * | | +--------- Has a handler? * | +----------- Masked? 
* +----------------- */ typedef UINT32 ACPI_EVENT_STATUS; #define ACPI_EVENT_FLAG_DISABLED (ACPI_EVENT_STATUS) 0x00 #define ACPI_EVENT_FLAG_ENABLED (ACPI_EVENT_STATUS) 0x01 #define ACPI_EVENT_FLAG_WAKE_ENABLED (ACPI_EVENT_STATUS) 0x02 #define ACPI_EVENT_FLAG_STATUS_SET (ACPI_EVENT_STATUS) 0x04 #define ACPI_EVENT_FLAG_ENABLE_SET (ACPI_EVENT_STATUS) 0x08 #define ACPI_EVENT_FLAG_HAS_HANDLER (ACPI_EVENT_STATUS) 0x10 #define ACPI_EVENT_FLAG_MASKED (ACPI_EVENT_STATUS) 0x20 #define ACPI_EVENT_FLAG_SET ACPI_EVENT_FLAG_STATUS_SET /* Actions for AcpiSetGpe, AcpiGpeWakeup, AcpiHwLowSetGpe */ #define ACPI_GPE_ENABLE 0 #define ACPI_GPE_DISABLE 1 #define ACPI_GPE_CONDITIONAL_ENABLE 2 /* * GPE info flags - Per GPE * +---+-+-+-+---+ * |7:6|5|4|3|2:0| * +---+-+-+-+---+ * | | | | | * | | | | +-- Type of dispatch:to method, handler, notify, or none * | | | +----- Interrupt type: edge or level triggered * | | +------- Is a Wake GPE * | +--------- Has been enabled automatically at init time * +------------ */ #define ACPI_GPE_DISPATCH_NONE (UINT8) 0x00 #define ACPI_GPE_DISPATCH_METHOD (UINT8) 0x01 #define ACPI_GPE_DISPATCH_HANDLER (UINT8) 0x02 #define ACPI_GPE_DISPATCH_NOTIFY (UINT8) 0x03 #define ACPI_GPE_DISPATCH_RAW_HANDLER (UINT8) 0x04 #define ACPI_GPE_DISPATCH_MASK (UINT8) 0x07 #define ACPI_GPE_DISPATCH_TYPE(flags) ((UINT8) ((flags) & ACPI_GPE_DISPATCH_MASK)) #define ACPI_GPE_LEVEL_TRIGGERED (UINT8) 0x08 #define ACPI_GPE_EDGE_TRIGGERED (UINT8) 0x00 #define ACPI_GPE_XRUPT_TYPE_MASK (UINT8) 0x08 #define ACPI_GPE_CAN_WAKE (UINT8) 0x10 #define ACPI_GPE_AUTO_ENABLED (UINT8) 0x20 #define ACPI_GPE_INITIALIZED (UINT8) 0x40 /* * Flags for GPE and Lock interfaces */ #define ACPI_NOT_ISR 0x1 #define ACPI_ISR 0x0 /* Notify types */ #define ACPI_SYSTEM_NOTIFY 0x1 #define ACPI_DEVICE_NOTIFY 0x2 #define ACPI_ALL_NOTIFY (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY) #define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3 #define ACPI_NUM_NOTIFY_TYPES 2 #define ACPI_MAX_SYS_NOTIFY 0x7F #define ACPI_MAX_DEVICE_SPECIFIC_NOTIFY 0xBF #define ACPI_SYSTEM_HANDLER_LIST 0 /* Used as index, must be SYSTEM_NOTIFY -1 */ #define ACPI_DEVICE_HANDLER_LIST 1 /* Used as index, must be DEVICE_NOTIFY -1 */ /* Address Space (Operation Region) Types */ typedef UINT8 ACPI_ADR_SPACE_TYPE; #define ACPI_ADR_SPACE_SYSTEM_MEMORY (ACPI_ADR_SPACE_TYPE) 0 #define ACPI_ADR_SPACE_SYSTEM_IO (ACPI_ADR_SPACE_TYPE) 1 #define ACPI_ADR_SPACE_PCI_CONFIG (ACPI_ADR_SPACE_TYPE) 2 #define ACPI_ADR_SPACE_EC (ACPI_ADR_SPACE_TYPE) 3 #define ACPI_ADR_SPACE_SMBUS (ACPI_ADR_SPACE_TYPE) 4 #define ACPI_ADR_SPACE_CMOS (ACPI_ADR_SPACE_TYPE) 5 #define ACPI_ADR_SPACE_PCI_BAR_TARGET (ACPI_ADR_SPACE_TYPE) 6 #define ACPI_ADR_SPACE_IPMI (ACPI_ADR_SPACE_TYPE) 7 #define ACPI_ADR_SPACE_GPIO (ACPI_ADR_SPACE_TYPE) 8 #define ACPI_ADR_SPACE_GSBUS (ACPI_ADR_SPACE_TYPE) 9 #define ACPI_ADR_SPACE_PLATFORM_COMM (ACPI_ADR_SPACE_TYPE) 10 #define ACPI_ADR_SPACE_PLATFORM_RT (ACPI_ADR_SPACE_TYPE) 11 #define ACPI_NUM_PREDEFINED_REGIONS 12 /* * Special Address Spaces * * Note: A Data Table region is a special type of operation region * that has its own AML opcode. However, internally, the AML * interpreter simply creates an operation region with an address * space type of ACPI_ADR_SPACE_DATA_TABLE. 
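 * (This is the operation region created by the ASL DataTableRegion()
 * operator.)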
*/ #define ACPI_ADR_SPACE_DATA_TABLE (ACPI_ADR_SPACE_TYPE) 0x7E /* Internal to ACPICA only */ #define ACPI_ADR_SPACE_FIXED_HARDWARE (ACPI_ADR_SPACE_TYPE) 0x7F /* Values for _REG connection code */ #define ACPI_REG_DISCONNECT 0 #define ACPI_REG_CONNECT 1 /* * BitRegister IDs * * These values are intended to be used by the hardware interfaces * and are mapped to individual bitfields defined within the ACPI * registers. See the AcpiGbl_BitRegisterInfo global table in utglobal.c * for this mapping. */ /* PM1 Status register */ #define ACPI_BITREG_TIMER_STATUS 0x00 #define ACPI_BITREG_BUS_MASTER_STATUS 0x01 #define ACPI_BITREG_GLOBAL_LOCK_STATUS 0x02 #define ACPI_BITREG_POWER_BUTTON_STATUS 0x03 #define ACPI_BITREG_SLEEP_BUTTON_STATUS 0x04 #define ACPI_BITREG_RT_CLOCK_STATUS 0x05 #define ACPI_BITREG_WAKE_STATUS 0x06 #define ACPI_BITREG_PCIEXP_WAKE_STATUS 0x07 /* PM1 Enable register */ #define ACPI_BITREG_TIMER_ENABLE 0x08 #define ACPI_BITREG_GLOBAL_LOCK_ENABLE 0x09 #define ACPI_BITREG_POWER_BUTTON_ENABLE 0x0A #define ACPI_BITREG_SLEEP_BUTTON_ENABLE 0x0B #define ACPI_BITREG_RT_CLOCK_ENABLE 0x0C #define ACPI_BITREG_PCIEXP_WAKE_DISABLE 0x0D /* PM1 Control register */ #define ACPI_BITREG_SCI_ENABLE 0x0E #define ACPI_BITREG_BUS_MASTER_RLD 0x0F #define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10 #define ACPI_BITREG_SLEEP_TYPE 0x11 #define ACPI_BITREG_SLEEP_ENABLE 0x12 /* PM2 Control register */ #define ACPI_BITREG_ARB_DISABLE 0x13 #define ACPI_BITREG_MAX 0x13 #define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1 /* Status register values. A 1 clears a status bit. 0 = no effect */ #define ACPI_CLEAR_STATUS 1 /* Enable and Control register values */ #define ACPI_ENABLE_EVENT 1 #define ACPI_DISABLE_EVENT 0 /* Sleep function dispatch */ typedef ACPI_STATUS (*ACPI_SLEEP_FUNCTION) ( UINT8 SleepState); typedef struct acpi_sleep_functions { ACPI_SLEEP_FUNCTION LegacyFunction; ACPI_SLEEP_FUNCTION ExtendedFunction; } ACPI_SLEEP_FUNCTIONS; /* * External ACPI object definition */ /* * Note: Type == ACPI_TYPE_ANY (0) is used to indicate a NULL package * element or an unresolved named reference. 
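 *
 * Illustrative consumer sketch (assumes "MethodHandle" names a control
 * method returning an Integer; "UseValue" is a hypothetical consumer):
 *
 *	ACPI_BUFFER buf = { ACPI_ALLOCATE_BUFFER, NULL };
 *	ACPI_OBJECT *obj;
 *
 *	if (ACPI_SUCCESS (AcpiEvaluateObject (MethodHandle, NULL, NULL, &buf))) {
 *	    obj = (ACPI_OBJECT *) buf.Pointer;
 *	    if (obj != NULL && obj->Type == ACPI_TYPE_INTEGER)
 *	        UseValue (obj->Integer.Value);
 *	    AcpiOsFree (buf.Pointer);
 *	}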
*/ typedef union acpi_object { ACPI_OBJECT_TYPE Type; /* See definition of AcpiNsType for values */ struct { ACPI_OBJECT_TYPE Type; /* ACPI_TYPE_INTEGER */ UINT64 Value; /* The actual number */ } Integer; struct { ACPI_OBJECT_TYPE Type; /* ACPI_TYPE_STRING */ UINT32 Length; /* # of bytes in string, excluding trailing null */ char *Pointer; /* points to the string value */ } String; struct { ACPI_OBJECT_TYPE Type; /* ACPI_TYPE_BUFFER */ UINT32 Length; /* # of bytes in buffer */ UINT8 *Pointer; /* points to the buffer */ } Buffer; struct { ACPI_OBJECT_TYPE Type; /* ACPI_TYPE_PACKAGE */ UINT32 Count; /* # of elements in package */ union acpi_object *Elements; /* Pointer to an array of ACPI_OBJECTs */ } Package; struct { ACPI_OBJECT_TYPE Type; /* ACPI_TYPE_LOCAL_REFERENCE */ ACPI_OBJECT_TYPE ActualType; /* Type associated with the Handle */ ACPI_HANDLE Handle; /* object reference */ } Reference; struct { ACPI_OBJECT_TYPE Type; /* ACPI_TYPE_PROCESSOR */ UINT32 ProcId; ACPI_IO_ADDRESS PblkAddress; UINT32 PblkLength; } Processor; struct { ACPI_OBJECT_TYPE Type; /* ACPI_TYPE_POWER */ UINT32 SystemLevel; UINT32 ResourceOrder; } PowerResource; } ACPI_OBJECT; /* * List of objects, used as a parameter list for control method evaluation */ typedef struct acpi_object_list { UINT32 Count; ACPI_OBJECT *Pointer; } ACPI_OBJECT_LIST; /* * Miscellaneous common Data Structures used by the interfaces */ #define ACPI_NO_BUFFER 0 #ifdef ACPI_NO_MEM_ALLOCATIONS #define ACPI_ALLOCATE_BUFFER (ACPI_SIZE) (0) #define ACPI_ALLOCATE_LOCAL_BUFFER (ACPI_SIZE) (0) #else /* ACPI_NO_MEM_ALLOCATIONS */ #define ACPI_ALLOCATE_BUFFER (ACPI_SIZE) (-1) /* Let ACPICA allocate buffer */ #define ACPI_ALLOCATE_LOCAL_BUFFER (ACPI_SIZE) (-2) /* For internal use only (enables tracking) */ #endif /* ACPI_NO_MEM_ALLOCATIONS */ typedef struct acpi_buffer { ACPI_SIZE Length; /* Length in bytes of the buffer */ void *Pointer; /* pointer to buffer */ } ACPI_BUFFER; /* * NameType for AcpiGetName */ #define ACPI_FULL_PATHNAME 0 #define ACPI_SINGLE_NAME 1 #define ACPI_FULL_PATHNAME_NO_TRAILING 2 #define ACPI_NAME_TYPE_MAX 2 /* * Predefined Namespace items */ typedef struct acpi_predefined_names { const char *Name; UINT8 Type; char *Val; } ACPI_PREDEFINED_NAMES; /* * Structure and flags for AcpiGetSystemInfo */ #define ACPI_SYS_MODE_UNKNOWN 0x0000 #define ACPI_SYS_MODE_ACPI 0x0001 #define ACPI_SYS_MODE_LEGACY 0x0002 #define ACPI_SYS_MODES_MASK 0x0003 /* * System info returned by AcpiGetSystemInfo() */ typedef struct acpi_system_info { UINT32 AcpiCaVersion; UINT32 Flags; UINT32 TimerResolution; UINT32 Reserved1; UINT32 Reserved2; UINT32 DebugLevel; UINT32 DebugLayer; } ACPI_SYSTEM_INFO; /* * System statistics returned by AcpiGetStatistics() */ typedef struct acpi_statistics { UINT32 SciCount; UINT32 GpeCount; UINT32 FixedEventCount[ACPI_NUM_FIXED_EVENTS]; UINT32 MethodCount; } ACPI_STATISTICS; /* * Types specific to the OS service interfaces */ typedef UINT32 (ACPI_SYSTEM_XFACE *ACPI_OSD_HANDLER) ( void *Context); typedef void (ACPI_SYSTEM_XFACE *ACPI_OSD_EXEC_CALLBACK) ( void *Context); /* * Various handlers and callback procedures */ typedef UINT32 (*ACPI_SCI_HANDLER) ( void *Context); typedef void (*ACPI_GBL_EVENT_HANDLER) ( UINT32 EventType, ACPI_HANDLE Device, UINT32 EventNumber, void *Context); #define ACPI_EVENT_TYPE_GPE 0 #define ACPI_EVENT_TYPE_FIXED 1 typedef UINT32 (*ACPI_EVENT_HANDLER) ( void *Context); typedef UINT32 (*ACPI_GPE_HANDLER) ( ACPI_HANDLE GpeDevice, UINT32 GpeNumber, void *Context); typedef void (*ACPI_NOTIFY_HANDLER) ( 
ACPI_HANDLE Device, UINT32 Value, void *Context); typedef void (*ACPI_OBJECT_HANDLER) ( ACPI_HANDLE Object, void *Data); typedef ACPI_STATUS (*ACPI_INIT_HANDLER) ( ACPI_HANDLE Object, UINT32 Function); #define ACPI_INIT_DEVICE_INI 1 typedef ACPI_STATUS (*ACPI_EXCEPTION_HANDLER) ( ACPI_STATUS AmlStatus, ACPI_NAME Name, UINT16 Opcode, UINT32 AmlOffset, void *Context); /* Table Event handler (Load, LoadTable, etc.) and types */ typedef ACPI_STATUS (*ACPI_TABLE_HANDLER) ( UINT32 Event, void *Table, void *Context); /* Table Event Types */ #define ACPI_TABLE_EVENT_LOAD 0x0 #define ACPI_TABLE_EVENT_UNLOAD 0x1 #define ACPI_TABLE_EVENT_INSTALL 0x2 #define ACPI_TABLE_EVENT_UNINSTALL 0x3 #define ACPI_NUM_TABLE_EVENTS 4 /* Address Spaces (For Operation Regions) */ typedef ACPI_STATUS (*ACPI_ADR_SPACE_HANDLER) ( UINT32 Function, ACPI_PHYSICAL_ADDRESS Address, UINT32 BitWidth, UINT64 *Value, void *HandlerContext, void *RegionContext); #define ACPI_DEFAULT_HANDLER NULL /* Special Context data for GenericSerialBus/GeneralPurposeIo (ACPI 5.0) */ typedef struct acpi_connection_info { UINT8 *Connection; UINT16 Length; UINT8 AccessLength; } ACPI_CONNECTION_INFO; /* Special Context data for PCC Opregion (ACPI 6.3) */ typedef struct acpi_pcc_info { UINT8 SubspaceId; UINT16 Length; UINT8 *InternalBuffer; } ACPI_PCC_INFO; /* Special Context data for FFH Opregion (ACPI 6.5) */ typedef struct acpi_ffh_info { UINT64 Offset; UINT64 Length; } ACPI_FFH_INFO; typedef ACPI_STATUS (*ACPI_ADR_SPACE_SETUP) ( ACPI_HANDLE RegionHandle, UINT32 Function, void *HandlerContext, void **RegionContext); #define ACPI_REGION_ACTIVATE 0 #define ACPI_REGION_DEACTIVATE 1 typedef ACPI_STATUS (*ACPI_WALK_CALLBACK) ( ACPI_HANDLE Object, UINT32 NestingLevel, void *Context, void **ReturnValue); typedef UINT32 (*ACPI_INTERFACE_HANDLER) ( ACPI_STRING InterfaceName, UINT32 Supported); /* Interrupt handler return values */ #define ACPI_INTERRUPT_NOT_HANDLED 0x00 #define ACPI_INTERRUPT_HANDLED 0x01 /* GPE handler return values */ #define ACPI_REENABLE_GPE 0x80 /* Length of 32-bit EISAID values when converted back to a string */ #define ACPI_EISAID_STRING_SIZE 8 /* Includes null terminator */ /* Length of UUID (string) values */ #define ACPI_UUID_LENGTH 16 /* Length of 3-byte PCI class code values when converted back to a string */ #define ACPI_PCICLS_STRING_SIZE 7 /* Includes null terminator */ /* Structures used for device/processor HID, UID, CID */ typedef struct acpi_pnp_device_id { UINT32 Length; /* Length of string + null */ char *String; } ACPI_PNP_DEVICE_ID; typedef struct acpi_pnp_device_id_list { UINT32 Count; /* Number of IDs in Ids array */ UINT32 ListSize; /* Size of list, including ID strings */ ACPI_PNP_DEVICE_ID Ids[]; /* ID array */ } ACPI_PNP_DEVICE_ID_LIST; /* * Structure returned from AcpiGetObjectInfo. * Optimized for both 32-bit and 64-bit builds. 
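 *
 * Illustrative sketch (test the Valid bits before using any optional
 * field; the returned info is released with ACPI_FREE):
 *
 *	ACPI_DEVICE_INFO *info;
 *
 *	if (ACPI_SUCCESS (AcpiGetObjectInfo (Handle, &info))) {
 *	    if (info->Valid & ACPI_VALID_HID)
 *	        AcpiOsPrintf ("HID: %s\n", info->HardwareId.String);
 *	    ACPI_FREE (info);
 *	}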
*/ typedef struct acpi_device_info { UINT32 InfoSize; /* Size of info, including ID strings */ UINT32 Name; /* ACPI object Name */ ACPI_OBJECT_TYPE Type; /* ACPI object Type */ UINT8 ParamCount; /* If a method, required parameter count */ UINT16 Valid; /* Indicates which optional fields are valid */ UINT8 Flags; /* Miscellaneous info */ UINT8 HighestDstates[4]; /* _SxD values: 0xFF indicates not valid */ UINT8 LowestDstates[5]; /* _SxW values: 0xFF indicates not valid */ UINT64 Address; /* _ADR value */ ACPI_PNP_DEVICE_ID HardwareId; /* _HID value */ ACPI_PNP_DEVICE_ID UniqueId; /* _UID value */ ACPI_PNP_DEVICE_ID ClassCode; /* _CLS value */ ACPI_PNP_DEVICE_ID_LIST CompatibleIdList; /* _CID list */ } ACPI_DEVICE_INFO; /* Values for Flags field above (AcpiGetObjectInfo) */ #define ACPI_PCI_ROOT_BRIDGE 0x01 /* Flags for Valid field above (AcpiGetObjectInfo) */ #define ACPI_VALID_ADR 0x0002 #define ACPI_VALID_HID 0x0004 #define ACPI_VALID_UID 0x0008 #define ACPI_VALID_CID 0x0020 #define ACPI_VALID_CLS 0x0040 #define ACPI_VALID_SXDS 0x0100 #define ACPI_VALID_SXWS 0x0200 /* Flags for _STA method */ #define ACPI_STA_DEVICE_PRESENT 0x01 #define ACPI_STA_DEVICE_ENABLED 0x02 #define ACPI_STA_DEVICE_UI 0x04 #define ACPI_STA_DEVICE_FUNCTIONING 0x08 #define ACPI_STA_DEVICE_OK 0x08 /* Synonym */ #define ACPI_STA_BATTERY_PRESENT 0x10 /* Context structs for address space handlers */ typedef struct acpi_pci_id { UINT16 Segment; UINT16 Bus; UINT16 Device; UINT16 Function; } ACPI_PCI_ID; typedef struct acpi_mem_mapping { ACPI_PHYSICAL_ADDRESS PhysicalAddress; UINT8 *LogicalAddress; ACPI_SIZE Length; struct acpi_mem_mapping *NextMm; } ACPI_MEM_MAPPING; typedef struct acpi_mem_space_context { UINT32 Length; ACPI_PHYSICAL_ADDRESS Address; ACPI_MEM_MAPPING *CurMm; ACPI_MEM_MAPPING *FirstMm; } ACPI_MEM_SPACE_CONTEXT; typedef struct acpi_data_table_mapping { void *Pointer; } ACPI_DATA_TABLE_MAPPING; /* * ACPI_MEMORY_LIST is used only if the ACPICA local cache is enabled */ typedef struct acpi_memory_list { const char *ListName; void *ListHead; UINT16 ObjectSize; UINT16 MaxDepth; UINT16 CurrentDepth; #ifdef ACPI_DBG_TRACK_ALLOCATIONS /* Statistics for debug memory tracking only */ UINT32 TotalAllocated; UINT32 TotalFreed; UINT32 MaxOccupied; UINT32 TotalSize; UINT32 CurrentTotalSize; UINT32 Requests; UINT32 Hits; #endif } ACPI_MEMORY_LIST; /* Definitions of trace event types */ typedef enum { ACPI_TRACE_AML_METHOD, ACPI_TRACE_AML_OPCODE, ACPI_TRACE_AML_REGION } ACPI_TRACE_EVENT_TYPE; /* Definitions of _OSI support */ #define ACPI_VENDOR_STRINGS 0x01 #define ACPI_FEATURE_STRINGS 0x02 #define ACPI_ENABLE_INTERFACES 0x00 #define ACPI_DISABLE_INTERFACES 0x04 #define ACPI_DISABLE_ALL_VENDOR_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_VENDOR_STRINGS) #define ACPI_DISABLE_ALL_FEATURE_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_FEATURE_STRINGS) #define ACPI_DISABLE_ALL_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_VENDOR_STRINGS | ACPI_FEATURE_STRINGS) #define ACPI_ENABLE_ALL_VENDOR_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_VENDOR_STRINGS) #define ACPI_ENABLE_ALL_FEATURE_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_FEATURE_STRINGS) #define ACPI_ENABLE_ALL_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_VENDOR_STRINGS | ACPI_FEATURE_STRINGS) #define ACPI_OSI_WIN_2000 0x01 #define ACPI_OSI_WIN_XP 0x02 #define ACPI_OSI_WIN_XP_SP1 0x03 #define ACPI_OSI_WINSRV_2003 0x04 #define ACPI_OSI_WIN_XP_SP2 0x05 #define ACPI_OSI_WINSRV_2003_SP1 0x06 #define ACPI_OSI_WIN_VISTA 0x07 #define ACPI_OSI_WINSRV_2008 0x08 #define ACPI_OSI_WIN_VISTA_SP1 0x09 #define 
ACPI_OSI_WIN_VISTA_SP2 0x0A #define ACPI_OSI_WIN_7 0x0B #define ACPI_OSI_WIN_8 0x0C #define ACPI_OSI_WIN_8_1 0x0D #define ACPI_OSI_WIN_10 0x0E #define ACPI_OSI_WIN_10_RS1 0x0F #define ACPI_OSI_WIN_10_RS2 0x10 #define ACPI_OSI_WIN_10_RS3 0x11 #define ACPI_OSI_WIN_10_RS4 0x12 #define ACPI_OSI_WIN_10_RS5 0x13 #define ACPI_OSI_WIN_10_19H1 0x14 #define ACPI_OSI_WIN_10_20H1 0x15 #define ACPI_OSI_WIN_11 0x16 #define ACPI_OSI_WIN_11_22H2 0x17 /* Definitions of getopt */ #define ACPI_OPT_END -1 /* Definitions for explicit fallthrough */ #ifndef ACPI_FALLTHROUGH #define ACPI_FALLTHROUGH do {} while(0) #endif #ifndef ACPI_FLEX_ARRAY #define ACPI_FLEX_ARRAY(TYPE, NAME) TYPE NAME[0] #endif #ifndef ACPI_NONSTRING #define ACPI_NONSTRING /* No terminating NUL character */ #endif #endif /* __ACTYPES_H__ */ diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c index f2ff1d59ccc7..a2159b12876f 100644 --- a/sys/dev/acpica/acpi.c +++ b/sys/dev/acpica/acpi.c @@ -1,4679 +1,4680 @@ /*- * Copyright (c) 2000 Takanori Watanabe * Copyright (c) 2000 Mitsuru IWASAKI * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #include #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices"); /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("ACPI") static d_open_t acpiopen; static d_close_t acpiclose; static d_ioctl_t acpiioctl; static struct cdevsw acpi_cdevsw = { .d_version = D_VERSION, .d_open = acpiopen, .d_close = acpiclose, .d_ioctl = acpiioctl, .d_name = "acpi", }; struct acpi_interface { ACPI_STRING *data; int num; }; static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL }; /* Global mutex for locking access to the ACPI subsystem. */ struct mtx acpi_mutex; struct callout acpi_sleep_timer; /* Bitmap of device quirks. */ int acpi_quirks; /* Supported sleep states. 
*/ static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT]; static void acpi_lookup(void *arg, const char *name, device_t *dev); static int acpi_modevent(struct module *mod, int event, void *junk); static device_probe_t acpi_probe; static device_attach_t acpi_attach; static device_suspend_t acpi_suspend; static device_resume_t acpi_resume; static device_shutdown_t acpi_shutdown; static bus_add_child_t acpi_add_child; static bus_print_child_t acpi_print_child; static bus_probe_nomatch_t acpi_probe_nomatch; static bus_driver_added_t acpi_driver_added; static bus_child_deleted_t acpi_child_deleted; static bus_read_ivar_t acpi_read_ivar; static bus_write_ivar_t acpi_write_ivar; static bus_get_resource_list_t acpi_get_rlist; static bus_get_rman_t acpi_get_rman; static bus_set_resource_t acpi_set_resource; static bus_alloc_resource_t acpi_alloc_resource; static bus_adjust_resource_t acpi_adjust_resource; static bus_release_resource_t acpi_release_resource; static bus_delete_resource_t acpi_delete_resource; static bus_activate_resource_t acpi_activate_resource; static bus_deactivate_resource_t acpi_deactivate_resource; static bus_map_resource_t acpi_map_resource; static bus_unmap_resource_t acpi_unmap_resource; static bus_child_pnpinfo_t acpi_child_pnpinfo_method; static bus_child_location_t acpi_child_location_method; static bus_hint_device_unit_t acpi_hint_device_unit; static bus_get_property_t acpi_bus_get_prop; static bus_get_device_path_t acpi_get_device_path; static bus_get_domain_t acpi_get_domain_method; static acpi_id_probe_t acpi_device_id_probe; static acpi_evaluate_object_t acpi_device_eval_obj; static acpi_get_property_t acpi_device_get_prop; static acpi_scan_children_t acpi_device_scan_children; static isa_pnp_probe_t acpi_isa_pnp_probe; static void acpi_reserve_resources(device_t dev); static int acpi_sysres_alloc(device_t dev); static uint32_t acpi_isa_get_logicalid(device_t dev); static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count); static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *context, void **retval); static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad); static void acpi_platform_osc(device_t dev); static void acpi_probe_children(device_t bus); static void acpi_probe_order(ACPI_HANDLE handle, int *order); static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static void acpi_sleep_enable(void *arg); static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc); static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state); static void acpi_shutdown_final(void *arg, int howto); static void acpi_enable_fixed_events(struct acpi_softc *sc); static void acpi_resync_clock(struct acpi_softc *sc); static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_prep_walk(int sstate); static int acpi_wake_sysctl_walk(device_t dev); static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS); static void acpi_system_eventhandler_sleep(void *arg, int state); static void acpi_system_eventhandler_wakeup(void *arg, int state); static int acpi_sname2sstate(const char *sname); static const char *acpi_sstate2sname(int sstate); static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_pm_func(u_long cmd, void *arg, ...); static void acpi_enable_pcie(void); static void 
acpi_reset_interfaces(device_t dev); static device_method_t acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_probe), DEVMETHOD(device_attach, acpi_attach), DEVMETHOD(device_shutdown, acpi_shutdown), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_suspend, acpi_suspend), DEVMETHOD(device_resume, acpi_resume), /* Bus interface */ DEVMETHOD(bus_add_child, acpi_add_child), DEVMETHOD(bus_print_child, acpi_print_child), DEVMETHOD(bus_probe_nomatch, acpi_probe_nomatch), DEVMETHOD(bus_driver_added, acpi_driver_added), DEVMETHOD(bus_child_deleted, acpi_child_deleted), DEVMETHOD(bus_read_ivar, acpi_read_ivar), DEVMETHOD(bus_write_ivar, acpi_write_ivar), DEVMETHOD(bus_get_resource_list, acpi_get_rlist), DEVMETHOD(bus_get_rman, acpi_get_rman), DEVMETHOD(bus_set_resource, acpi_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_alloc_resource, acpi_alloc_resource), DEVMETHOD(bus_adjust_resource, acpi_adjust_resource), DEVMETHOD(bus_release_resource, acpi_release_resource), DEVMETHOD(bus_delete_resource, acpi_delete_resource), DEVMETHOD(bus_activate_resource, acpi_activate_resource), DEVMETHOD(bus_deactivate_resource, acpi_deactivate_resource), DEVMETHOD(bus_map_resource, acpi_map_resource), DEVMETHOD(bus_unmap_resource, acpi_unmap_resource), DEVMETHOD(bus_child_pnpinfo, acpi_child_pnpinfo_method), DEVMETHOD(bus_child_location, acpi_child_location_method), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit), DEVMETHOD(bus_get_cpus, acpi_get_cpus), DEVMETHOD(bus_get_domain, acpi_get_domain_method), DEVMETHOD(bus_get_property, acpi_bus_get_prop), DEVMETHOD(bus_get_device_path, acpi_get_device_path), /* ACPI bus */ DEVMETHOD(acpi_id_probe, acpi_device_id_probe), DEVMETHOD(acpi_evaluate_object, acpi_device_eval_obj), DEVMETHOD(acpi_get_property, acpi_device_get_prop), DEVMETHOD(acpi_pwr_for_sleep, acpi_device_pwr_for_sleep), DEVMETHOD(acpi_scan_children, acpi_device_scan_children), /* ISA emulation */ DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe), DEVMETHOD_END }; static driver_t acpi_driver = { "acpi", acpi_methods, sizeof(struct acpi_softc), }; EARLY_DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_modevent, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(acpi, 1); ACPI_SERIAL_DECL(acpi, "ACPI root bus"); /* Local pools for managing system resources for ACPI child devices. */ static struct rman acpi_rman_io, acpi_rman_mem; #define ACPI_MINIMUM_AWAKETIME 5 /* Holds the description of the acpi0 device. */ static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2]; SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ACPI debugging"); static char acpi_ca_version[12]; SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD, acpi_ca_version, 0, "Version of Intel ACPI-CA"); /* * Allow overriding _OSI methods. */ static char acpi_install_interface[256]; TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface, sizeof(acpi_install_interface)); static char acpi_remove_interface[256]; TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface, sizeof(acpi_remove_interface)); /* Allow users to dump Debug objects without ACPI debugger. 
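* For example, setting the loader tunable debug.acpi.enable_debug_objects=1 (or flipping the sysctl of the same name at run time) causes AML stores to the Debug object to be printed on the console; the tunable and sysctl pair is declared just below.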
*/ static int acpi_debug_objects; TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects); SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects, CTLFLAG_RW | CTLTYPE_INT | CTLFLAG_MPSAFE, NULL, 0, acpi_debug_objects_sysctl, "I", "Enable Debug objects"); /* Allow the interpreter to ignore common mistakes in BIOS. */ static int acpi_interpreter_slack = 1; TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack); SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN, &acpi_interpreter_slack, 1, "Turn on interpreter slack mode."); /* Ignore register widths set by FADT and use default widths instead. */ static int acpi_ignore_reg_width = 1; TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width); SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN, &acpi_ignore_reg_width, 1, "Ignore register widths set by FADT"); /* Allow users to override quirks. */ TUNABLE_INT("debug.acpi.quirks", &acpi_quirks); int acpi_susp_bounce; SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW, &acpi_susp_bounce, 0, "Don't actually suspend, just test devices."); #if defined(__amd64__) || defined(__i386__) int acpi_override_isa_irq_polarity; #endif /* * ACPI standard UUID for Device Specific Data Package * "Device Properties UUID for _DSD" Rev. 2.0 */ static const struct uuid acpi_dsd_uuid = { 0xdaffd814, 0x6eba, 0x4d8c, 0x8a, 0x91, { 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01 } }; /* * ACPI can only be loaded as a module by the loader; activating it after * system bootstrap time is not useful, and can be fatal to the system. * It also cannot be unloaded, since the entire system bus hierarchy hangs * off it. */ static int acpi_modevent(struct module *mod, int event, void *junk) { switch (event) { case MOD_LOAD: if (!cold) { printf("The ACPI driver cannot be loaded after boot.\n"); return (EPERM); } break; case MOD_UNLOAD: if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI) return (EBUSY); break; default: break; } return (0); } /* * Perform early initialization. */ ACPI_STATUS acpi_Startup(void) { static int started = 0; ACPI_STATUS status; int val; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Only run the startup code once. The MADT driver also calls this. */ if (started) return_VALUE (AE_OK); started = 1; /* * Initialize the ACPICA subsystem. */ if (ACPI_FAILURE(status = AcpiInitializeSubsystem())) { printf("ACPI: Could not initialize Subsystem: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing * if more tables exist. */ if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) { printf("ACPI: Table initialisation failed: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* Set up any quirks we have for this system. */ if (acpi_quirks == ACPI_Q_OK) acpi_table_quirks(&acpi_quirks); /* If the user manually set the disabled hint to 0, force-enable ACPI. */ if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0) acpi_quirks &= ~ACPI_Q_BROKEN; if (acpi_quirks & ACPI_Q_BROKEN) { printf("ACPI disabled by blacklist. Contact your BIOS vendor.\n"); status = AE_SUPPORT; } return_VALUE (status); } /* * Detect ACPI and perform early initialisation. */ int acpi_identify(void) { ACPI_TABLE_RSDP *rsdp; ACPI_TABLE_HEADER *rsdt; ACPI_PHYSICAL_ADDRESS paddr; struct sbuf sb; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (!cold) return (ENXIO); /* Check that we haven't been disabled with a hint. 
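* For example, the loader hint hint.acpi.0.disabled="1" makes the resource_disabled() check below succeed and the probe bail out.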
*/ if (resource_disabled("acpi", 0)) return (ENXIO); /* Check for other PM systems. */ if (power_pm_get_type() != POWER_PM_TYPE_NONE && power_pm_get_type() != POWER_PM_TYPE_ACPI) { printf("ACPI identify failed, other PM system enabled.\n"); return (ENXIO); } /* Initialize root tables. */ if (ACPI_FAILURE(acpi_Startup())) { printf("ACPI: Try disabling either ACPI or apic support.\n"); return (ENXIO); } if ((paddr = AcpiOsGetRootPointer()) == 0 || (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL) return (ENXIO); if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0) paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress; else paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress; AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP)); if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL) return (ENXIO); sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN); sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE); sbuf_trim(&sb); sbuf_putc(&sb, ' '); sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE); sbuf_trim(&sb); sbuf_finish(&sb); sbuf_delete(&sb); AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER)); snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION); return (0); } /* * Fetch some descriptive data from ACPI to put in our attach message. */ static int acpi_probe(device_t dev) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); device_set_desc(dev, acpi_desc); return_VALUE (BUS_PROBE_NOWILDCARD); } static int acpi_attach(device_t dev) { struct acpi_softc *sc; ACPI_STATUS status; int error, state; UINT32 flags; UINT8 TypeA, TypeB; char *env; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->acpi_dev = dev; callout_init(&sc->susp_force_to, 1); error = ENXIO; /* Initialize resource manager. */ acpi_rman_io.rm_type = RMAN_ARRAY; acpi_rman_io.rm_start = 0; acpi_rman_io.rm_end = 0xffff; acpi_rman_io.rm_descr = "ACPI I/O ports"; if (rman_init(&acpi_rman_io) != 0) panic("acpi rman_init IO ports failed"); acpi_rman_mem.rm_type = RMAN_ARRAY; acpi_rman_mem.rm_descr = "ACPI I/O memory addresses"; if (rman_init(&acpi_rman_mem) != 0) panic("acpi rman_init memory failed"); resource_list_init(&sc->sysres_rl); /* Initialise the ACPI mutex */ mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF); /* * Set the globals from our tunables. This is needed because ACPI-CA * uses UINT8 for some values and we have no tunable_byte. */ AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE; AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE; AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE; #ifndef ACPI_DEBUG /* * Disable all debugging layers and levels. */ AcpiDbgLayer = 0; AcpiDbgLevel = 0; #endif /* Override OS interfaces if the user requested. */ acpi_reset_interfaces(dev); /* Load ACPI name space. */ status = AcpiLoadTables(); if (ACPI_FAILURE(status)) { device_printf(dev, "Could not load Namespace: %s\n", AcpiFormatException(status)); goto out; } /* Handle MCFG table if present. */ acpi_enable_pcie(); /* * Note that some systems (specifically, those with namespace evaluation * issues that require the avoidance of parts of the namespace) must * avoid running _INI and _STA on everything, as well as dodging the final * object init pass. * * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT. * * XXX We should arrange for the object init pass after we have attached * all our child devices, but on many systems it works here.
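* The testenv() check below only asks whether the debug.acpi.avoid tunable is set at all; the namespace paths listed in it (for instance debug.acpi.avoid="\\_SB_.PCI0.LPC0" in loader.conf(5), an illustrative path only) are honored later by acpi_avoid().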
*/ flags = 0; if (testenv("debug.acpi.avoid")) flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT; /* Bring the hardware and basic handlers online. */ if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) { device_printf(dev, "Could not enable ACPI: %s\n", AcpiFormatException(status)); goto out; } /* * Call the ECDT probe function to provide EC functionality before * the namespace has been evaluated. * * XXX This happens before the sysresource devices have been probed and * attached so its resources come from nexus0. In practice, this isn't * a problem but should be addressed eventually. */ acpi_ec_ecdt_probe(dev); /* Bring device objects and regions online. */ if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) { device_printf(dev, "Could not initialize ACPI objects: %s\n", AcpiFormatException(status)); goto out; } /* * Setup our sysctl tree. * * XXX: This doesn't check to make sure that none of these fail. */ sysctl_ctx_init(&sc->acpi_sysctl_ctx); sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_name(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, acpi_supported_sleep_state_sysctl, "A", "List supported ACPI sleep states."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", "Power button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", "Sleep button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", "Lid ACPI sleep state. 
Set to S3 if you want to suspend your laptop when you close the lid."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0, "sleep delay in seconds"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "disable_on_reboot", CTLFLAG_RW, &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "handle_reboot", CTLFLAG_RW, &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot"); #if defined(__amd64__) || defined(__i386__) /* * Enable workaround for incorrect ISA IRQ polarity by default on * systems with Intel CPUs. */ if (cpu_vendor_id == CPU_VENDOR_INTEL) acpi_override_isa_irq_polarity = 1; SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "override_isa_irq_polarity", CTLFLAG_RDTUN, &acpi_override_isa_irq_polarity, 0, "Force active-hi polarity for edge-triggered ISA IRQs"); #endif /* * Default to 1 second before sleeping to give some machines time to * stabilize. */ sc->acpi_sleep_delay = 1; if (bootverbose) sc->acpi_verbose = 1; if ((env = kern_getenv("hw.acpi.verbose")) != NULL) { if (strcmp(env, "0") != 0) sc->acpi_verbose = 1; freeenv(env); } /* Only enable reboot by default if the FADT says it is available. */ if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER) sc->acpi_handle_reboot = 1; #if !ACPI_REDUCED_HARDWARE /* Only enable S4BIOS by default if the FACS says it is available. */ if (AcpiGbl_FACS != NULL && AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT) sc->acpi_s4bios = 1; #endif /* Probe all supported sleep states. */ acpi_sleep_states[ACPI_STATE_S0] = TRUE; for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT, __DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) && ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) acpi_sleep_states[state] = TRUE; /* * Dispatch the default sleep state to devices. The lid switch is set * to UNKNOWN by default to avoid surprising users. */ sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ? ACPI_STATE_S5 : ACPI_STATE_UNKNOWN; sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN; sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ? ACPI_STATE_S1 : ACPI_STATE_UNKNOWN; sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ? ACPI_STATE_S3 : ACPI_STATE_UNKNOWN; /* Pick the first valid sleep state for the sleep button default. */ sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN; for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++) if (acpi_sleep_states[state]) { sc->acpi_sleep_button_sx = state; break; } acpi_enable_fixed_events(sc); /* * Scan the namespace and attach/initialise children; this happens * below in acpi_probe_children() once the event handlers are registered.
*/ /* Register our shutdown handler. */ EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc, SHUTDOWN_PRI_LAST + 150); /* * Register our acpi event handlers. * XXX should be configurable, e.g. via a userland policy manager. */ EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep, sc, ACPI_EVENT_PRI_LAST); EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup, sc, ACPI_EVENT_PRI_LAST); /* Flag our initial states. */ sc->acpi_enabled = TRUE; sc->acpi_sstate = ACPI_STATE_S0; sc->acpi_sleep_disabled = TRUE; /* Create the control device */ sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0664, "acpi"); sc->acpi_dev_t->si_drv1 = sc; if ((error = acpi_machdep_init(dev))) goto out; /* Register ACPI again to pass the correct argument of pm_func. */ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc); acpi_platform_osc(dev); if (!acpi_disabled("bus")) { EVENTHANDLER_REGISTER(dev_lookup, acpi_lookup, NULL, 1000); acpi_probe_children(dev); } /* Update all GPEs and enable runtime GPEs. */ status = AcpiUpdateAllGpes(); if (ACPI_FAILURE(status)) device_printf(dev, "Could not update all GPEs: %s\n", AcpiFormatException(status)); /* Allow sleep request after a while. */ callout_init_mtx(&acpi_sleep_timer, &acpi_mutex, 0); callout_reset(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME, acpi_sleep_enable, sc); error = 0; out: return_VALUE (error); } static void acpi_set_power_children(device_t dev, int state) { device_t child; device_t *devlist; int dstate, i, numdevs; if (device_get_children(dev, &devlist, &numdevs) != 0) return; /* * Retrieve and set D-state for the sleep state if _SxD is present. * Skip children that aren't attached since they are handled separately. */ for (i = 0; i < numdevs; i++) { child = devlist[i]; dstate = state; if (device_is_attached(child) && acpi_device_pwr_for_sleep(dev, child, &dstate) == 0) acpi_set_powerstate(child, dstate); } free(devlist, M_TEMP); } static int acpi_suspend(device_t dev) { int error; bus_topo_assert(); error = bus_generic_suspend(dev); if (error == 0) acpi_set_power_children(dev, ACPI_STATE_D3); return (error); } static int acpi_resume(device_t dev) { bus_topo_assert(); acpi_set_power_children(dev, ACPI_STATE_D0); return (bus_generic_resume(dev)); } static int acpi_shutdown(device_t dev) { bus_topo_assert(); /* Allow children to shut down first. */ bus_generic_shutdown(dev); /* * Enable any GPEs that are able to power on the system (e.g., the RTC). * Also, disable any that are not valid for this state (most).
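* acpi_wake_prep_walk() below visits every handle that has a _PRW (wake) package, arming the wake GPE only for devices allowed to wake the system from S5 and disabling the rest.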
*/ acpi_wake_prep_walk(ACPI_STATE_S5); return (0); } /* * Handle a new device being added */ static device_t acpi_add_child(device_t bus, u_int order, const char *name, int unit) { struct acpi_device *ad; device_t child; if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL) return (NULL); ad->ad_domain = ACPI_DEV_DOMAIN_UNKNOWN; resource_list_init(&ad->ad_rl); child = device_add_child_ordered(bus, order, name, unit); if (child != NULL) device_set_ivars(child, ad); else free(ad, M_ACPIDEV); return (child); } static int acpi_print_child(device_t bus, device_t child) { struct acpi_device *adev = device_get_ivars(child); struct resource_list *rl = &adev->ad_rl; int retval = 0; retval += bus_print_child_header(bus, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%jd"); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += bus_print_child_domain(bus, child); retval += bus_print_child_footer(bus, child); return (retval); } /* * If this device is an ACPI child but no one claimed it, attempt * to power it off. We'll power it back up when a driver is added. * * XXX Disabled for now since many necessary devices (like fdc and * ATA) don't claim the devices we created for them but still expect * them to be powered up. */ static void acpi_probe_nomatch(device_t bus, device_t child) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D3); #endif } /* * If a new driver has a chance to probe a child, first power it up. * * XXX Disabled for now (see acpi_probe_nomatch for details). */ static void acpi_driver_added(device_t dev, driver_t *driver) { device_t child, *devlist; int i, numdevs; DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs)) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) == DS_NOTPRESENT) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D0); if (device_probe_and_attach(child) != 0) acpi_set_powerstate(child, ACPI_STATE_D3); #else device_probe_and_attach(child); #endif } } free(devlist, M_TEMP); } /* Location hint for devctl(8) */ static int acpi_child_location_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); int pxm; if (dinfo->ad_handle) { sbuf_printf(sb, "handle=%s", acpi_name(dinfo->ad_handle)); if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ad_handle, "_PXM", &pxm))) { sbuf_printf(sb, " _PXM=%d", pxm); } } return (0); } /* PnP information for devctl(8) */ int acpi_pnpinfo(ACPI_HANDLE handle, struct sbuf *sb) { ACPI_DEVICE_INFO *adinfo; if (ACPI_FAILURE(AcpiGetObjectInfo(handle, &adinfo))) { sbuf_printf(sb, "unknown"); return (0); } sbuf_printf(sb, "_HID=%s _UID=%lu _CID=%s", (adinfo->Valid & ACPI_VALID_HID) ? adinfo->HardwareId.String : "none", (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL, ((adinfo->Valid & ACPI_VALID_CID) && adinfo->CompatibleIdList.Count > 0) ? adinfo->CompatibleIdList.Ids[0].String : "none"); AcpiOsFree(adinfo); return (0); } static int acpi_child_pnpinfo_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); return (acpi_pnpinfo(dinfo->ad_handle, sb)); } /* * Note: the check for ACPI locator may be redundant. 
However, this routine is * suitable both for busses whose only locator is ACPI and as a building block * for busses that have multiple locators to cope with. */ int acpi_get_acpi_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) { ACPI_HANDLE *handle = acpi_get_handle(child); if (handle != NULL) sbuf_printf(sb, "%s", acpi_name(handle)); return (0); } return (bus_generic_get_device_path(bus, child, locator, sb)); } static int acpi_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) return (acpi_get_acpi_device_path(bus, child, locator, sb)); if (strcmp(locator, BUS_LOCATOR_UEFI) == 0) { ACPI_DEVICE_INFO *adinfo; if (!ACPI_FAILURE(AcpiGetObjectInfo(dinfo->ad_handle, &adinfo)) && dinfo->ad_handle != 0 && (adinfo->Valid & ACPI_VALID_HID)) { const char *hid = adinfo->HardwareId.String; u_long uid = (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL; u_long hidval; /* * In UEFI Standard Version 2.6, Section 9.6.1.6 Text * Device Node Reference, there's an insanely long Table * 98. This implements the relevant bits from that * table. Newer versions do not appear to require * anything new. The EDK2 firmware presents both PciRoot * and PcieRoot as PciRoot. Follow the EDK2 standard. */ if (strncmp("PNP", hid, 3) != 0) goto nomatch; hidval = strtoul(hid + 3, NULL, 16); switch (hidval) { case 0x0301: sbuf_printf(sb, "Keyboard(0x%lx)", uid); break; case 0x0401: sbuf_printf(sb, "ParallelPort(0x%lx)", uid); break; case 0x0501: sbuf_printf(sb, "Serial(0x%lx)", uid); break; case 0x0604: sbuf_printf(sb, "Floppy(0x%lx)", uid); break; case 0x0a03: case 0x0a08: sbuf_printf(sb, "PciRoot(0x%lx)", uid); break; default: /* Everything else gets a generic encode */ nomatch: sbuf_printf(sb, "Acpi(%s,0x%lx)", hid, uid); break; } } /* Not handled: AcpiAdr... unsure how to know it's one */ } /* For the rest, punt to the default handler */ return (bus_generic_get_device_path(bus, child, locator, sb)); } /* * Handle device deletion.
*/ static void acpi_child_deleted(device_t dev, device_t child) { struct acpi_device *dinfo = device_get_ivars(child); if (acpi_get_device(dinfo->ad_handle) == child) AcpiDetachData(dinfo->ad_handle, acpi_fake_objhandler); } /* * Handle per-device ivars */ static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } /* ACPI and ISA compatibility ivars */ switch(index) { case ACPI_IVAR_HANDLE: *(ACPI_HANDLE *)result = ad->ad_handle; break; case ACPI_IVAR_PRIVATE: *(void **)result = ad->ad_private; break; case ACPI_IVAR_FLAGS: *(int *)result = ad->ad_flags; break; case ACPI_IVAR_DOMAIN: *(int *)result = ad->ad_domain; break; case ISA_IVAR_VENDORID: case ISA_IVAR_SERIAL: case ISA_IVAR_COMPATID: *(int *)result = -1; break; case ISA_IVAR_LOGICALID: *(int *)result = acpi_isa_get_logicalid(child); break; case PCI_IVAR_CLASS: *(uint8_t*)result = (ad->ad_cls_class >> 16) & 0xff; break; case PCI_IVAR_SUBCLASS: *(uint8_t*)result = (ad->ad_cls_class >> 8) & 0xff; break; case PCI_IVAR_PROGIF: *(uint8_t*)result = (ad->ad_cls_class >> 0) & 0xff; break; default: return (ENOENT); } return (0); } static int acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } switch(index) { case ACPI_IVAR_HANDLE: ad->ad_handle = (ACPI_HANDLE)value; break; case ACPI_IVAR_PRIVATE: ad->ad_private = (void *)value; break; case ACPI_IVAR_FLAGS: ad->ad_flags = (int)value; break; case ACPI_IVAR_DOMAIN: ad->ad_domain = (int)value; break; default: panic("bad ivar write request (%d)", index); return (ENOENT); } return (0); } /* * Handle child resource allocation/removal */ static struct resource_list * acpi_get_rlist(device_t dev, device_t child) { struct acpi_device *ad; ad = device_get_ivars(child); return (&ad->ad_rl); } static int acpi_match_resource_hint(device_t dev, int type, long value) { struct acpi_device *ad = device_get_ivars(dev); struct resource_list *rl = &ad->ad_rl; struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->start <= value && rle->end >= value) return (1); } return (0); } /* * Does this device match because the resources match? */ static bool acpi_hint_device_matches_resources(device_t child, const char *name, int unit) { long value; bool matches; /* * Check for matching resources. We must have at least one match. * Since I/O and memory resources cannot be shared, if we get a * match on either of those, ignore any mismatches in IRQs or DRQs. * * XXX: We may want to revisit this to be more lenient and wire * as long as it gets one match. */ matches = false; if (resource_long_value(name, unit, "port", &value) == 0) { /* * Floppy drive controllers are notorious for having a * wide variety of resources not all of which include the * first port that is specified by the hint (typically * 0x3f0) (see the comment above fdc_isa_alloc_resources() * in fdc_isa.c). However, they do all seem to include * port + 2 (e.g. 0x3f2) so for a floppy device, look for * 'value + 2' in the port resources instead of the hint * value. 
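* Concretely: with hint.fdc.0.port="0x3f0" set, the code below searches the child's I/O port resources for 0x3f2 instead of 0x3f0.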
*/ if (strcmp(name, "fdc") == 0) value += 2; if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value)) matches = true; else return (false); } if (resource_long_value(name, unit, "maddr", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value)) matches = true; else return (false); } /* * If either the I/O address or the memory address matched, then * assume this device matches and that any mismatch in other resources * will be resolved by silently ignoring those other resources. Otherwise * all further resources must match. */ if (matches) { return (true); } if (resource_long_value(name, unit, "irq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_IRQ, value)) matches = true; else return (false); } if (resource_long_value(name, unit, "drq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_DRQ, value)) matches = true; else return (false); } return (matches); } /* * Wire device unit numbers based on resource matches in hints. */ static void acpi_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp) { device_location_cache_t *cache; const char *s; int line, unit; bool matches; /* * Iterate over all the hints for the devices with the specified * name to see if one's resources are a subset of this device. */ line = 0; cache = dev_wired_cache_init(); while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) { /* Must have an "at" for acpi or isa. */ resource_string_value(name, unit, "at", &s); matches = false; if (strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 || strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0) matches = acpi_hint_device_matches_resources(child, name, unit); else matches = dev_wired_cache_match(cache, child, s); if (matches) { /* We have a winner! */ *unitp = unit; break; } } dev_wired_cache_fini(cache); } /* * Fetch the NUMA domain for a device by mapping the value returned by * _PXM to a NUMA domain. If the device does not have a _PXM method, * -2 is returned. If any other error occurs, -1 is returned. */ int acpi_pxm_parse(device_t dev) { #ifdef NUMA #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) ACPI_HANDLE handle; ACPI_STATUS status; int pxm; handle = acpi_get_handle(dev); if (handle == NULL) return (-2); status = acpi_GetInteger(handle, "_PXM", &pxm); if (ACPI_SUCCESS(status)) return (acpi_map_pxm_to_vm_domainid(pxm)); if (status == AE_NOT_FOUND) return (-2); #endif #endif return (-1); } int acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { int d, error; d = acpi_pxm_parse(child); if (d < 0) return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); switch (op) { case LOCAL_CPUS: if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = cpuset_domain[d]; return (0); case INTR_CPUS: error = bus_generic_get_cpus(dev, child, op, setsize, cpuset); if (error != 0) return (error); if (setsize != sizeof(cpuset_t)) return (EINVAL); CPU_AND(cpuset, cpuset, &cpuset_domain[d]); return (0); default: return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); } } static int acpi_get_domain_method(device_t dev, device_t child, int *domain) { int error; error = acpi_read_ivar(dev, child, ACPI_IVAR_DOMAIN, (uintptr_t *)domain); if (error == 0 && *domain != ACPI_DEV_DOMAIN_UNKNOWN) return (0); return (ENOENT); } static struct rman * acpi_get_rman(device_t bus, int type, u_int flags) { /* Only memory and IO resources are managed.
*/ switch (type) { case SYS_RES_IOPORT: return (&acpi_rman_io); case SYS_RES_MEMORY: return (&acpi_rman_mem); default: return (NULL); } } /* * Pre-allocate/manage all memory and IO resources. Since rman can't handle * duplicates, we merge any in the sysresource attach routine. */ static int acpi_sysres_alloc(device_t dev) { struct acpi_softc *sc = device_get_softc(dev); struct resource *res; struct resource_list_entry *rle; struct rman *rm; device_t *children; int child_count, i; /* * Probe/attach any sysresource devices. This would be unnecessary if we * had multi-pass probe/attach. */ if (device_get_children(dev, &children, &child_count) != 0) return (ENXIO); for (i = 0; i < child_count; i++) { if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) device_probe_and_attach(children[i]); } free(children, M_TEMP); STAILQ_FOREACH(rle, &sc->sysres_rl, link) { if (rle->res != NULL) { device_printf(dev, "duplicate resource for %jx\n", rle->start); continue; } /* Only memory and IO resources are valid here. */ rm = acpi_get_rman(dev, rle->type, 0); if (rm == NULL) continue; /* Pre-allocate resource and add to our rman pool. */ res = bus_alloc_resource(dev, rle->type, &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, RF_ACTIVE | RF_UNMAPPED); if (res != NULL) { rman_manage_region(rm, rman_get_start(res), rman_get_end(res)); rle->res = res; } else if (bootverbose) device_printf(dev, "reservation of %jx, %jx (%d) failed\n", rle->start, rle->count, rle->type); } return (0); } /* * Reserve declared resources for active devices found during the * namespace scan once the boot-time attach of devices has completed. * * Ideally reserving firmware-assigned resources would work in a * depth-first traversal of the device namespace, but this is * complicated. In particular, not all resources are enumerated by * ACPI (e.g. PCI bridges and devices enumerate their resources via * other means). Some systems also enumerate devices via ACPI behind * PCI bridges but without a matching PCI device_t enumerated via * PCI bus scanning; such device_t's end up as direct children of * acpi0. Doing this scan late is not ideal, but works for now. */ static void acpi_reserve_resources(device_t dev) { struct resource_list_entry *rle; struct resource_list *rl; struct acpi_device *ad; device_t *children; int child_count, i; if (device_get_children(dev, &children, &child_count) != 0) return; for (i = 0; i < child_count; i++) { ad = device_get_ivars(children[i]); rl = &ad->ad_rl; /* Don't reserve system resources. */ if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) continue; STAILQ_FOREACH(rle, rl, link) { /* * Don't reserve IRQ resources. There are many sticky things * to get right otherwise (e.g. IRQs for psm, atkbd, and HPET * when using legacy routing). */ if (rle->type == SYS_RES_IRQ) continue; /* * Don't reserve the resource if it is already allocated. * The acpi_ec(4) driver can allocate its resources early * if ECDT is present. */ if (rle->res != NULL) continue; /* * Try to reserve the resource from our parent. If this * fails because the resource is a system resource, just * let it be. The resource range is already reserved so * that other devices will not use it. If the driver * needs to allocate the resource, then * acpi_alloc_resource() will sub-alloc from the system * resource.
*/ resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid, rle->start, rle->end, rle->count, 0); } } free(children, M_TEMP); } static int acpi_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; rman_res_t end; #ifdef INTRNG /* map with default for now */ if (type == SYS_RES_IRQ) start = (rman_res_t)acpi_map_intr(child, (u_int)start, acpi_get_handle(child)); #endif /* If the resource is already allocated, fail. */ if (resource_list_busy(rl, type, rid)) return (EBUSY); /* If the resource is already reserved, release it. */ if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, dev, child, type, rid); /* Add the resource. */ end = (start + count - 1); resource_list_add(rl, type, rid, start, end, count); return (0); } static struct resource * acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifndef INTRNG ACPI_RESOURCE ares; #endif struct acpi_device *ad; struct resource_list_entry *rle; struct resource_list *rl; struct resource *res; int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); /* * First attempt at allocating the resource. For direct children, * use resource_list_alloc() to handle reserved resources. For * other devices, pass the request up to our parent. */ if (bus == device_get_parent(child)) { ad = device_get_ivars(child); rl = &ad->ad_rl; /* * Simulate the behavior of the ISA bus for direct children * devices. That is, if a non-default range is specified for * a resource that doesn't exist, use bus_set_resource() to * add the resource before allocating it. Note that these * resources will not be reserved. */ if (!isdefault && resource_list_find(rl, type, *rid) == NULL) resource_list_add(rl, type, *rid, start, end, count); res = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); #ifndef INTRNG if (res != NULL && type == SYS_RES_IRQ) { /* * Since bus_config_intr() takes immediate effect, we cannot * configure the interrupt associated with a device when we * parse the resources but have to defer it until a driver * actually allocates the interrupt via bus_alloc_resource(). * * XXX: Should we handle the lookup failing? */ if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares))) acpi_config_intr(child, &ares); } #endif /* * If this is an allocation of the "default" range for a given * RID, fetch the exact bounds for this resource from the * resource list entry to try to allocate the range from the * system resource regions. */ if (res == NULL && isdefault) { rle = resource_list_find(rl, type, *rid); if (rle != NULL) { start = rle->start; end = rle->end; count = rle->count; } } } else res = bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags); /* * If the first attempt failed and this is an allocation of a * specific range, try to satisfy the request via a suballocation * from our system resource regions. 
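* The start + count - 1 == end test below is what identifies a request for one specific range, as opposed to the wildcard case already handled above.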
*/ if (res == NULL && start + count - 1 == end) res = bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags); return (res); } static bool acpi_is_resource_managed(device_t bus, struct resource *r) { struct rman *rm; rm = acpi_get_rman(bus, rman_get_type(r), rman_get_flags(r)); if (rm == NULL) return (false); return (rman_is_region_manager(r, rm)); } static struct resource * acpi_managed_resource(device_t bus, struct resource *r) { struct acpi_softc *sc = device_get_softc(bus); struct resource_list_entry *rle; KASSERT(acpi_is_resource_managed(bus, r), ("resource %p is not suballocated", r)); STAILQ_FOREACH(rle, &sc->sysres_rl, link) { if (rle->type != rman_get_type(r) || rle->res == NULL) continue; if (rman_get_start(r) >= rman_get_start(rle->res) && rman_get_end(r) <= rman_get_end(rle->res)) return (rle->res); } return (NULL); } static int acpi_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { if (acpi_is_resource_managed(bus, r)) return (rman_adjust_resource(r, start, end)); return (bus_generic_adjust_resource(bus, child, r, start, end)); } static int acpi_release_resource(device_t bus, device_t child, struct resource *r) { /* * If this resource belongs to one of our internal managers, * deactivate it and release it to the local pool. */ if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_release_resource(bus, child, r)); return (bus_generic_rl_release_resource(bus, child, r)); } static void acpi_delete_resource(device_t bus, device_t child, int type, int rid) { struct resource_list *rl; rl = acpi_get_rlist(bus, child); if (resource_list_busy(rl, type, rid)) { device_printf(bus, "delete_resource: Resource still owned by child" " (type=%d, rid=%d)\n", type, rid); return; } if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, bus, child, type, rid); resource_list_delete(rl, type, rid); } static int acpi_activate_resource(device_t bus, device_t child, struct resource *r) { if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_activate_resource(bus, child, r)); return (bus_generic_activate_resource(bus, child, r)); } static int acpi_deactivate_resource(device_t bus, device_t child, struct resource *r) { if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_deactivate_resource(bus, child, r)); return (bus_generic_deactivate_resource(bus, child, r)); } static int acpi_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct resource *sysres; rman_res_t length, start; int error; if (!acpi_is_resource_managed(bus, r)) return (bus_generic_map_resource(bus, child, r, argsp, map)); /* Resources must be active to be mapped. 
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); sysres = acpi_managed_resource(bus, r); if (sysres == NULL) return (ENOENT); args.offset = start - rman_get_start(sysres); args.length = length; return (bus_map_resource(bus, sysres, &args, map)); } static int acpi_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { struct resource *sysres; if (!acpi_is_resource_managed(bus, r)) return (bus_generic_unmap_resource(bus, child, r, map)); sysres = acpi_managed_resource(bus, r); if (sysres == NULL) return (ENOENT); return (bus_unmap_resource(bus, sysres, map)); } /* Allocate an IO port or memory resource, given its GAS. */ int acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas, struct resource **res, u_int flags) { int error, res_type; error = ENOMEM; if (type == NULL || rid == NULL || gas == NULL || res == NULL) return (EINVAL); /* We only support memory and IO spaces. */ switch (gas->SpaceId) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: res_type = SYS_RES_MEMORY; break; case ACPI_ADR_SPACE_SYSTEM_IO: res_type = SYS_RES_IOPORT; break; default: return (EOPNOTSUPP); } /* * If the register width is less than 8, assume the BIOS author means * it is a bit field and just allocate a byte. */ if (gas->BitWidth && gas->BitWidth < 8) gas->BitWidth = 8; /* Validate the address after we're sure we support the space. */ if (gas->Address == 0 || gas->BitWidth == 0) return (EINVAL); bus_set_resource(dev, res_type, *rid, gas->Address, gas->BitWidth / 8); *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags); if (*res != NULL) { *type = res_type; error = 0; } else bus_delete_resource(dev, res_type, *rid); return (error); } /* Probe _HID and _CID for compatible ISA PNP ids. */ static uint32_t acpi_isa_get_logicalid(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; uint32_t pnpid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Fetch and validate the HID. */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 && devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ? 
PNP_EISAID(devinfo->HardwareId.String) : 0; AcpiOsFree(devinfo); return_VALUE (pnpid); } static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count) { ACPI_DEVICE_INFO *devinfo; ACPI_PNP_DEVICE_ID *ids; ACPI_HANDLE h; uint32_t *pnpid; int i, valid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); pnpid = cids; /* Fetch and validate the CID */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); if ((devinfo->Valid & ACPI_VALID_CID) == 0) { AcpiOsFree(devinfo); return_VALUE (0); } if (devinfo->CompatibleIdList.Count < count) count = devinfo->CompatibleIdList.Count; ids = devinfo->CompatibleIdList.Ids; for (i = 0, valid = 0; i < count; i++) if (ids[i].Length >= ACPI_EISAID_STRING_SIZE && strncmp(ids[i].String, "PNP", 3) == 0) { *pnpid++ = PNP_EISAID(ids[i].String); valid++; } AcpiOsFree(devinfo); return_VALUE (valid); } static int acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match) { ACPI_HANDLE h; ACPI_OBJECT_TYPE t; int rv; int i; h = acpi_get_handle(dev); if (ids == NULL || h == NULL) return (ENXIO); t = acpi_get_type(dev); if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR) return (ENXIO); /* Try to match one of the array of IDs with a HID or CID. */ for (i = 0; ids[i] != NULL; i++) { rv = acpi_MatchHid(h, ids[i]); if (rv == ACPI_MATCHHID_NOMATCH) continue; if (match != NULL) { *match = ids[i]; } return ((rv == ACPI_MATCHHID_HID)? BUS_PROBE_DEFAULT : BUS_PROBE_LOW_PRIORITY); } return (ENXIO); } static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret) { ACPI_HANDLE h; if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); return (AcpiEvaluateObject(h, pathname, parameters, ret)); } static ACPI_STATUS acpi_device_get_prop(device_t bus, device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value) { const ACPI_OBJECT *pkg, *name, *val; struct acpi_device *ad; ACPI_STATUS status; int i; ad = device_get_ivars(dev); if (ad == NULL || propname == NULL) return (AE_BAD_PARAMETER); if (ad->dsd_pkg == NULL) { if (ad->dsd.Pointer == NULL) { status = acpi_find_dsd(ad); if (ACPI_FAILURE(status)) return (status); } else { return (AE_NOT_FOUND); } } for (i = 0; i < ad->dsd_pkg->Package.Count; i ++) { pkg = &ad->dsd_pkg->Package.Elements[i]; if (pkg->Type != ACPI_TYPE_PACKAGE || pkg->Package.Count != 2) continue; name = &pkg->Package.Elements[0]; val = &pkg->Package.Elements[1]; if (name->Type != ACPI_TYPE_STRING) continue; if (strncmp(propname, name->String.Pointer, name->String.Length) == 0) { if (value != NULL) *value = val; return (AE_OK); } } return (AE_NOT_FOUND); } static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad) { const ACPI_OBJECT *dsd, *guid, *pkg; ACPI_STATUS status; ad->dsd.Length = ACPI_ALLOCATE_BUFFER; ad->dsd.Pointer = NULL; ad->dsd_pkg = NULL; status = AcpiEvaluateObject(ad->ad_handle, "_DSD", NULL, &ad->dsd); if (ACPI_FAILURE(status)) return (status); dsd = ad->dsd.Pointer; guid = &dsd->Package.Elements[0]; pkg = &dsd->Package.Elements[1]; if (guid->Type != ACPI_TYPE_BUFFER || pkg->Type != ACPI_TYPE_PACKAGE || guid->Buffer.Length != sizeof(acpi_dsd_uuid)) return (AE_NOT_FOUND); if (memcmp(guid->Buffer.Pointer, &acpi_dsd_uuid, sizeof(acpi_dsd_uuid)) == 0) { ad->dsd_pkg = pkg; return (AE_OK); } return (AE_NOT_FOUND); } static ssize_t acpi_bus_get_prop_handle(const ACPI_OBJECT *hobj, void *propvalue, size_t size) { ACPI_OBJECT *pobj; ACPI_HANDLE h; if 
(hobj->Type != ACPI_TYPE_PACKAGE) goto err; if (hobj->Package.Count != 1) goto err; pobj = &hobj->Package.Elements[0]; if (pobj == NULL) goto err; if (pobj->Type != ACPI_TYPE_LOCAL_REFERENCE) goto err; h = acpi_GetReference(NULL, pobj); if (h == NULL) goto err; if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) *(ACPI_HANDLE *)propvalue = h; return (sizeof(ACPI_HANDLE)); err: return (-1); } static ssize_t acpi_bus_get_prop(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { ACPI_STATUS status; const ACPI_OBJECT *obj; status = acpi_device_get_prop(bus, child, __DECONST(char *, propname), &obj); if (ACPI_FAILURE(status)) return (-1); switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_UINT32: case DEVICE_PROP_UINT64: break; case DEVICE_PROP_HANDLE: return (acpi_bus_get_prop_handle(obj, propvalue, size)); default: return (-1); } switch (obj->Type) { case ACPI_TYPE_INTEGER: if (type == DEVICE_PROP_UINT32) { if (propvalue != NULL && size >= sizeof(uint32_t)) *((uint32_t *)propvalue) = obj->Integer.Value; return (sizeof(uint32_t)); } if (propvalue != NULL && size >= sizeof(uint64_t)) *((uint64_t *) propvalue) = obj->Integer.Value; return (sizeof(uint64_t)); case ACPI_TYPE_STRING: if (type != DEVICE_PROP_ANY && type != DEVICE_PROP_BUFFER) return (-1); if (propvalue != NULL && size > 0) memcpy(propvalue, obj->String.Pointer, MIN(size, obj->String.Length)); return (obj->String.Length); case ACPI_TYPE_BUFFER: if (propvalue != NULL && size > 0) memcpy(propvalue, obj->Buffer.Pointer, MIN(size, obj->Buffer.Length)); return (obj->Buffer.Length); case ACPI_TYPE_PACKAGE: if (propvalue != NULL && size >= sizeof(ACPI_OBJECT *)) { *((ACPI_OBJECT **) propvalue) = __DECONST(ACPI_OBJECT *, obj); } return (sizeof(ACPI_OBJECT *)); case ACPI_TYPE_LOCAL_REFERENCE: if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) { ACPI_HANDLE h; h = acpi_GetReference(NULL, __DECONST(ACPI_OBJECT *, obj)); memcpy(propvalue, h, sizeof(ACPI_HANDLE)); } return (sizeof(ACPI_HANDLE)); default: return (0); } } int acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate) { struct acpi_softc *sc; ACPI_HANDLE handle; ACPI_STATUS status; char sxd[8]; handle = acpi_get_handle(dev); /* * XXX If we find these devices, don't try to power them down. * The serial and IRDA ports on my T23 hang the system when * set to D3 and it appears that such legacy devices may * need special handling in their drivers. */ if (dstate == NULL || handle == NULL || acpi_MatchHid(handle, "PNP0500") || acpi_MatchHid(handle, "PNP0501") || acpi_MatchHid(handle, "PNP0502") || acpi_MatchHid(handle, "PNP0510") || acpi_MatchHid(handle, "PNP0511")) return (ENXIO); /* * Override next state with the value from _SxD, if present. * Note illegal _S0D is evaluated because some systems expect this. */ sc = device_get_softc(bus); snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate); status = acpi_GetInteger(handle, sxd, dstate); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { device_printf(dev, "failed to get %s on %s: %s\n", sxd, acpi_name(handle), AcpiFormatException(status)); return (ENXIO); } return (0); } /* Callback arg for our implementation of walking the namespace. 
*/ struct acpi_device_scan_ctx { acpi_scan_cb_t user_fn; void *arg; ACPI_HANDLE parent; }; static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval) { struct acpi_device_scan_ctx *ctx; device_t dev, old_dev; ACPI_STATUS status; ACPI_OBJECT_TYPE type; /* * Skip this device if we think we'll have trouble with it or it is * the parent where the scan began. */ ctx = (struct acpi_device_scan_ctx *)arg; if (acpi_avoid(h) || h == ctx->parent) return (AE_OK); /* If this is not a valid device type (e.g., a method), skip it. */ if (ACPI_FAILURE(AcpiGetType(h, &type))) return (AE_OK); if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER) return (AE_OK); /* * Call the user function with the current device. If it is unchanged * afterwards, return. Otherwise, we update the handle to the new dev. */ old_dev = acpi_get_device(h); dev = old_dev; status = ctx->user_fn(h, &dev, level, ctx->arg); if (ACPI_FAILURE(status) || old_dev == dev) return (status); /* Remove the old child and its connection to the handle. */ if (old_dev != NULL) device_delete_child(device_get_parent(old_dev), old_dev); /* Recreate the handle association if the user created a device. */ if (dev != NULL) AcpiAttachData(h, acpi_fake_objhandler, dev); return (AE_OK); } static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev, int max_depth, acpi_scan_cb_t user_fn, void *arg) { ACPI_HANDLE h; struct acpi_device_scan_ctx ctx; if (acpi_disabled("children")) return (AE_OK); if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); ctx.user_fn = user_fn; ctx.arg = arg; ctx.parent = h; return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth, acpi_device_scan_cb, NULL, &ctx, NULL)); } /* * Even though ACPI devices are not PCI, we use the PCI approach for setting * device power states since it's close enough to ACPI. */ int acpi_set_powerstate(device_t child, int state) { ACPI_HANDLE h; ACPI_STATUS status; h = acpi_get_handle(child); if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX) return (EINVAL); if (h == NULL) return (0); /* Ignore errors if the power methods aren't present. */ status = acpi_pwr_switch_consumer(h, state); if (ACPI_SUCCESS(status)) { if (bootverbose) - device_printf(child, "set ACPI power state D%d on %s\n", - state, acpi_name(h)); + device_printf(child, "set ACPI power state %s on %s\n", + acpi_d_state_to_str(state), acpi_name(h)); } else if (status != AE_NOT_FOUND) device_printf(child, - "failed to set ACPI power state D%d on %s: %s\n", state, - acpi_name(h), AcpiFormatException(status)); + "failed to set ACPI power state %s on %s: %s\n", + acpi_d_state_to_str(state), acpi_name(h), + AcpiFormatException(status)); return (0); } static int acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids) { int result, cid_count, i; uint32_t lid, cids[8]; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * ISA-style drivers attached to ACPI may persist and * probe manually if we return ENOENT. We never want * that to happen, so don't ever return it. 
*/ result = ENXIO; /* Scan the supplied IDs for a match */ lid = acpi_isa_get_logicalid(child); cid_count = acpi_isa_get_compatid(child, cids, 8); while (ids && ids->ip_id) { if (lid == ids->ip_id) { result = 0; goto out; } for (i = 0; i < cid_count; i++) { if (cids[i] == ids->ip_id) { result = 0; goto out; } } ids++; } out: if (result == 0 && ids->ip_desc) device_set_desc(child, ids->ip_desc); return_VALUE (result); } /* * Look for a MCFG table. If it is present, use the settings for * domain (segment) 0 to setup PCI config space access via the memory * map. * * On non-x86 architectures (arm64 for now), this will be done from the * PCI host bridge driver. */ static void acpi_enable_pcie(void) { #if defined(__i386__) || defined(__amd64__) ACPI_TABLE_HEADER *hdr; ACPI_MCFG_ALLOCATION *alloc, *end; ACPI_STATUS status; status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr); if (ACPI_FAILURE(status)) return; end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length); alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1); while (alloc < end) { pcie_cfgregopen(alloc->Address, alloc->PciSegment, alloc->StartBusNumber, alloc->EndBusNumber); alloc++; } #endif } static void acpi_platform_osc(device_t dev) { ACPI_HANDLE sb_handle; ACPI_STATUS status; uint32_t cap_set[2]; /* 0811B06E-4A27-44F9-8D60-3CBBC22E7B48 */ static uint8_t acpi_platform_uuid[ACPI_UUID_LENGTH] = { 0x6e, 0xb0, 0x11, 0x08, 0x27, 0x4a, 0xf9, 0x44, 0x8d, 0x60, 0x3c, 0xbb, 0xc2, 0x2e, 0x7b, 0x48 }; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) return; cap_set[1] = 0x10; /* APEI Support */ status = acpi_EvaluateOSC(sb_handle, acpi_platform_uuid, 1, nitems(cap_set), cap_set, cap_set, false); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) return; device_printf(dev, "_OSC failed: %s\n", AcpiFormatException(status)); return; } } /* * Scan all of the ACPI namespace and attach child devices. * * We should only expect to find devices in the \_PR, \_TZ, \_SI, and * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec. * However, in violation of the spec, some systems place their PCI link * devices in \, so we have to walk the whole namespace. We check the * type of namespace nodes, so this should be ok. */ static void acpi_probe_children(device_t bus) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * Scan the namespace and insert placeholders for all the devices that * we find. We also probe/attach any early devices. * * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because * we want to create nodes for all devices, not just those that are * currently present. (This assumes that we don't want to create/remove * devices as they appear, which might be smarter.) */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n")); AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child, NULL, bus, NULL); /* Pre-allocate resources for our rman from any sysresource devices. */ acpi_sysres_alloc(bus); /* Create any static children by calling device identify methods. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n")); bus_identify_children(bus); /* Probe/attach all children, created statically and from the namespace. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_attach_children\n")); bus_attach_children(bus); /* * Reserve resources allocated to children but not yet allocated * by a driver. */ acpi_reserve_resources(bus); /* Attach wake sysctls. 
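 *
 * Each wake-capable child gains a per-device "wake" integer sysctl that
 * can then be flipped from userland, e.g. (device name hypothetical):
 *
 *    sysctl dev.mydev.0.wake=1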
*/ acpi_wake_sysctl_walk(bus); ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n")); return_VOID; } /* * Determine the probe order for a given device. */ static void acpi_probe_order(ACPI_HANDLE handle, int *order) { ACPI_OBJECT_TYPE type; /* * 0. CPUs * 1. I/O port and memory system resource holders * 2. Clocks and timers (to handle early accesses) * 3. Embedded controllers (to handle early accesses) * 4. PCI Link Devices */ AcpiGetType(handle, &type); if (type == ACPI_TYPE_PROCESSOR) *order = 0; else if (acpi_MatchHid(handle, "PNP0C01") || acpi_MatchHid(handle, "PNP0C02")) *order = 1; else if (acpi_MatchHid(handle, "PNP0100") || acpi_MatchHid(handle, "PNP0103") || acpi_MatchHid(handle, "PNP0B00")) *order = 2; else if (acpi_MatchHid(handle, "PNP0C09")) *order = 3; else if (acpi_MatchHid(handle, "PNP0C0F")) *order = 4; } /* * Evaluate a child device and determine whether we might attach a device to * it. */ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_DEVICE_INFO *devinfo; struct acpi_device *ad; struct acpi_prw_data prw; ACPI_OBJECT_TYPE type; ACPI_HANDLE h; device_t bus, child; char *handle_str; int d, order; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (acpi_disabled("children")) return_ACPI_STATUS (AE_OK); /* Skip this device if we think we'll have trouble with it. */ if (acpi_avoid(handle)) return_ACPI_STATUS (AE_OK); bus = (device_t)context; if (ACPI_SUCCESS(AcpiGetType(handle, &type))) { handle_str = acpi_name(handle); switch (type) { case ACPI_TYPE_DEVICE: /* * Since we scan from \, be sure to skip system scope objects. * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around * BIOS bugs. For example, \_SB_ is to allow \_SB_._INI to be run * during the initialization and \_TZ_ is to support Notify() on it. */ if (strcmp(handle_str, "\\_SB_") == 0 || strcmp(handle_str, "\\_TZ_") == 0) break; if (acpi_parse_prw(handle, &prw) == 0) AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit); /* * Ignore devices that do not have a _HID or _CID. They should * be discovered by other buses (e.g. the PCI bus driver). */ if (!acpi_has_hid(handle)) break; /* FALLTHROUGH */ case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: case ACPI_TYPE_POWER: /* * Create a placeholder device for this node. Sort the * placeholder so that the probe/attach passes will run * breadth-first. Orders less than ACPI_DEV_BASE_ORDER * are reserved for special objects (i.e., system * resources). */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str)); order = level * 10 + ACPI_DEV_BASE_ORDER; acpi_probe_order(handle, &order); child = BUS_ADD_CHILD(bus, order, NULL, DEVICE_UNIT_ANY); if (child == NULL) break; /* Associate the handle with the device_t and vice versa. */ acpi_set_handle(child, handle); AcpiAttachData(handle, acpi_fake_objhandler, child); /* * Check that the device is present. If it's not present, * leave it disabled (so that we have a device_t attached to * the handle, but we don't probe it). * * XXX PCI link devices sometimes report "present" but not * "functional" (i.e. if disabled). Go ahead and probe them * anyway since we may enable them later. */ if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) { /* Never disable PCI link devices. */ if (acpi_MatchHid(handle, "PNP0C0F")) break; /* * RTC Device should be enabled for CMOS register space * unless FADT indicate it is not present. * (checked in RTC probe routine.) 
*/ if (acpi_MatchHid(handle, "PNP0B00")) break; /* * Docking stations should remain enabled since the system * may be undocked at boot. */ if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h))) break; device_disable(child); break; } /* * Get the device's resource settings and attach them. * Note that if the device has _PRS but no _CRS, we need * to decide when it's appropriate to try to configure the * device. Ignore the return value here; it's OK for the * device not to have any resources. */ acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL); ad = device_get_ivars(child); ad->ad_cls_class = 0xffffff; if (ACPI_SUCCESS(AcpiGetObjectInfo(handle, &devinfo))) { if ((devinfo->Valid & ACPI_VALID_CLS) != 0 && devinfo->ClassCode.Length >= ACPI_PCICLS_STRING_SIZE) { ad->ad_cls_class = strtoul(devinfo->ClassCode.String, NULL, 16); } AcpiOsFree(devinfo); } d = acpi_pxm_parse(child); if (d >= 0) ad->ad_domain = d; break; } } return_ACPI_STATUS (AE_OK); } /* * AcpiAttachData() requires an object handler but never uses it. This is a * placeholder object handler so we can store a device_t in an ACPI_HANDLE. */ void acpi_fake_objhandler(ACPI_HANDLE h, void *data) { } static void acpi_shutdown_final(void *arg, int howto) { struct acpi_softc *sc = (struct acpi_softc *)arg; register_t intr; ACPI_STATUS status; /* * XXX Shutdown code should only run on the BSP (cpuid 0). * Some chipsets do not power off the system correctly if called from * an AP. */ if ((howto & RB_POWEROFF) != 0) { status = AcpiEnterSleepStatePrep(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); return; } device_printf(sc->acpi_dev, "Powering system off\n"); intr = intr_disable(); status = AcpiEnterSleepState(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - %s\n", AcpiFormatException(status)); } else { DELAY(1000000); intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - timeout\n"); } } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) { /* Reboot using the reset register. */ status = AcpiReset(); if (ACPI_SUCCESS(status)) { DELAY(1000000); device_printf(sc->acpi_dev, "reset failed - timeout\n"); } else if (status != AE_NOT_EXIST) device_printf(sc->acpi_dev, "reset failed - %s\n", AcpiFormatException(status)); } else if (sc->acpi_do_disable && !KERNEL_PANICKED()) { /* * Only disable ACPI if the user requested. On some systems, writing * the disable value to SMI_CMD hangs the system. */ device_printf(sc->acpi_dev, "Shutting down\n"); AcpiTerminate(); } } static void acpi_enable_fixed_events(struct acpi_softc *sc) { static int first_time = 1; /* Enable and clear fixed events and install handlers. */ if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, acpi_event_power_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Power Button (fixed)\n"); } if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON, acpi_event_sleep_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Sleep Button (fixed)\n"); } first_time = 0; } /* * Returns true if the device is actually present and should * be attached to. This requires the present, enabled, UI-visible * and diagnostics-passed bits to be set. 
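 *
 * For reference, the _STA bits defined by the ACPI specification are:
 *
 *    bit 0    device is present
 *    bit 1    device is enabled and decoding its resources
 *    bit 2    device should be shown in the UI
 *    bit 3    device is functioning properly (diagnostics passed)
 *    bit 4    battery is present (battery devices only)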
 */
BOOLEAN
acpi_DeviceIsPresent(device_t dev)
{
    ACPI_HANDLE h;
    UINT32 s;
    ACPI_STATUS status;

    h = acpi_get_handle(dev);
    if (h == NULL)
	return (FALSE);

#ifdef ACPI_EARLY_EPYC_WAR
    /*
     * Certain Threadripper boards always return 0 for FreeBSD because
     * _STA only returns non-zero for the OS string "Windows 2015".
     * Force them to always be treated as present.
     * Beta versions were worse: they always returned 0.
     */
    if (acpi_MatchHid(h, "AMDI0020") || acpi_MatchHid(h, "AMDI0010"))
	return (TRUE);
#endif

    status = acpi_GetInteger(h, "_STA", &s);

    /*
     * If there is no _STA method or it failed, assume that the device is
     * present.
     */
    if (ACPI_FAILURE(status))
	return (TRUE);

    return (ACPI_DEVICE_PRESENT(s) ? TRUE : FALSE);
}

/*
 * Returns true if the battery is actually present and inserted.
 */
BOOLEAN
acpi_BatteryIsPresent(device_t dev)
{
    ACPI_HANDLE h;
    UINT32 s;
    ACPI_STATUS status;

    h = acpi_get_handle(dev);
    if (h == NULL)
	return (FALSE);
    status = acpi_GetInteger(h, "_STA", &s);

    /*
     * If there is no _STA method or it failed, assume that the battery is
     * present.
     */
    if (ACPI_FAILURE(status))
	return (TRUE);

    return (ACPI_BATTERY_PRESENT(s) ? TRUE : FALSE);
}

/*
 * Returns true if a device has at least one valid device ID.
 */
BOOLEAN
acpi_has_hid(ACPI_HANDLE h)
{
    ACPI_DEVICE_INFO *devinfo;
    BOOLEAN ret;

    if (h == NULL ||
	ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
	return (FALSE);

    ret = FALSE;
    if ((devinfo->Valid & ACPI_VALID_HID) != 0)
	ret = TRUE;
    else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
	if (devinfo->CompatibleIdList.Count > 0)
	    ret = TRUE;

    AcpiOsFree(devinfo);
    return (ret);
}

/*
 * Match a HID string against a handle.
 * Returns ACPI_MATCHHID_HID if the _HID matches,
 *         ACPI_MATCHHID_CID if a _CID matches and the _HID does not, and
 *         ACPI_MATCHHID_NOMATCH (0) if neither matches.
 */
int
acpi_MatchHid(ACPI_HANDLE h, const char *hid)
{
    ACPI_DEVICE_INFO *devinfo;
    int ret;
    int i;

    if (hid == NULL || h == NULL ||
	ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
	return (ACPI_MATCHHID_NOMATCH);

    ret = ACPI_MATCHHID_NOMATCH;
    if ((devinfo->Valid & ACPI_VALID_HID) != 0 &&
	strcmp(hid, devinfo->HardwareId.String) == 0)
	ret = ACPI_MATCHHID_HID;
    else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
	for (i = 0; i < devinfo->CompatibleIdList.Count; i++) {
	    if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) {
		ret = ACPI_MATCHHID_CID;
		break;
	    }
	}

    AcpiOsFree(devinfo);
    return (ret);
}

/*
 * Return the handle of a named object within our scope, i.e., that of
 * (parent) or one of its parents.
 */
ACPI_STATUS
acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result)
{
    ACPI_HANDLE r;
    ACPI_STATUS status;

    /* Walk back up the tree to the root. */
    for (;;) {
	status = AcpiGetHandle(parent, path, &r);
	if (ACPI_SUCCESS(status)) {
	    *result = r;
	    return (AE_OK);
	}
	/* XXX Return error here? */
	if (status != AE_NOT_FOUND)
	    return (AE_OK);
	if (ACPI_FAILURE(AcpiGetParent(parent, &r)))
	    return (AE_NOT_FOUND);
	parent = r;
    }
}

ACPI_STATUS
acpi_GetProperty(device_t dev, ACPI_STRING propname,
    const ACPI_OBJECT **value)
{
    device_t bus = device_get_parent(dev);

    return (ACPI_GET_PROPERTY(bus, dev, propname, value));
}
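/*
 * A hedged usage sketch for acpi_AllocBuffer() below: the header and its
 * data area come from one allocation, so a single free() releases both
 * (the "_FOO" method name is hypothetical):
 *
 *    ACPI_BUFFER *buf;
 *
 *    if ((buf = acpi_AllocBuffer(256)) == NULL)
 *        return (ENOMEM);
 *    status = AcpiEvaluateObject(handle, "_FOO", NULL, buf);
 *    ...
 *    free(buf, M_ACPIDEV);
 */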
/*
 * Allocate a buffer with a preset data size.
 */
ACPI_BUFFER *
acpi_AllocBuffer(int size)
{
    ACPI_BUFFER *buf;

    if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL)
	return (NULL);
    buf->Length = size;
    buf->Pointer = (void *)(buf + 1);
    return (buf);
}

ACPI_STATUS
acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number)
{
    ACPI_OBJECT arg1;
    ACPI_OBJECT_LIST args;

    arg1.Type = ACPI_TYPE_INTEGER;
    arg1.Integer.Value = number;
    args.Count = 1;
    args.Pointer = &arg1;

    return (AcpiEvaluateObject(handle, path, &args, NULL));
}

/*
 * Evaluate a path that should return an integer.
 */
ACPI_STATUS
acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number)
{
    ACPI_STATUS status;
    ACPI_BUFFER buf;
    ACPI_OBJECT param;

    if (handle == NULL)
	handle = ACPI_ROOT_OBJECT;

    /*
     * Assume that what we've been pointed at is an Integer object, or
     * a method that will return an Integer.
     */
    buf.Pointer = &param;
    buf.Length = sizeof(param);
    status = AcpiEvaluateObject(handle, path, NULL, &buf);
    if (ACPI_SUCCESS(status)) {
	if (param.Type == ACPI_TYPE_INTEGER)
	    *number = param.Integer.Value;
	else
	    status = AE_TYPE;
    }

    /*
     * In some applications, a method that's expected to return an Integer
     * may instead return a Buffer (probably to simplify some internal
     * arithmetic).  We'll try to fetch whatever it is, and if it's a
     * Buffer, convert it into an Integer as best we can.
     *
     * This is a hack.
     */
    if (status == AE_BUFFER_OVERFLOW) {
	if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) {
	    status = AE_NO_MEMORY;
	} else {
	    status = AcpiEvaluateObject(handle, path, NULL, &buf);
	    if (ACPI_SUCCESS(status))
		status = acpi_ConvertBufferToInteger(&buf, number);
	    AcpiOsFree(buf.Pointer);
	}
    }
    return (status);
}

ACPI_STATUS
acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number)
{
    ACPI_OBJECT *p;
    UINT8 *val;
    int i;

    p = (ACPI_OBJECT *)bufp->Pointer;
    if (p->Type == ACPI_TYPE_INTEGER) {
	*number = p->Integer.Value;
	return (AE_OK);
    }
    if (p->Type != ACPI_TYPE_BUFFER)
	return (AE_TYPE);
    if (p->Buffer.Length > sizeof(int))
	return (AE_BAD_DATA);

    *number = 0;
    val = p->Buffer.Pointer;
    for (i = 0; i < p->Buffer.Length; i++)
	*number += val[i] << (i * 8);
    return (AE_OK);
}

/*
 * Iterate over the elements of a package object, calling the supplied
 * function for each element.
 *
 * XXX possible enhancement might be to abort traversal on error.
 */
ACPI_STATUS
acpi_ForeachPackageObject(ACPI_OBJECT *pkg,
    void (*func)(ACPI_OBJECT *comp, void *arg), void *arg)
{
    ACPI_OBJECT *comp;
    int i;

    if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE)
	return (AE_BAD_PARAMETER);

    /* Iterate over components. */
    i = 0;
    comp = pkg->Package.Elements;
    for (; i < pkg->Package.Count; i++, comp++)
	func(comp, arg);

    return (AE_OK);
}

/*
 * Find the (index)th resource object in a set.
 */
ACPI_STATUS
acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp)
{
    ACPI_RESOURCE *rp;
    int i;

    rp = (ACPI_RESOURCE *)buf->Pointer;
    i = index;
    while (i-- > 0) {
	/* Range check */
	if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
	    return (AE_BAD_PARAMETER);

	/* Check for terminator */
	if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
	    return (AE_NOT_FOUND);
	rp = ACPI_NEXT_RESOURCE(rp);
    }
    if (resp != NULL)
	*resp = rp;

    return (AE_OK);
}

/*
 * Append an ACPI_RESOURCE to an ACPI_BUFFER.
 *
 * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER
 * provided to contain it.  If the ACPI_BUFFER is empty, allocate a sensible
 * backing block.  If the ACPI_RESOURCE is NULL, return an empty set of
 * resources.
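 *
 * A typical calling pattern, sketched under the assumption that the caller
 * owns the final buffer and releases it with AcpiOsFree():
 *
 *    ACPI_BUFFER buf;
 *
 *    buf.Pointer = NULL;
 *    buf.Length = 0;
 *    acpi_AppendBufferResource(&buf, NULL);    (creates an empty set)
 *    acpi_AppendBufferResource(&buf, res);     (appends one resource)
 *    ...
 *    AcpiOsFree(buf.Pointer);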
*/ #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512 ACPI_STATUS acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res) { ACPI_RESOURCE *rp; void *newp; /* Initialise the buffer if necessary. */ if (buf->Pointer == NULL) { buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE; if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL) return (AE_NO_MEMORY); rp = (ACPI_RESOURCE *)buf->Pointer; rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; } if (res == NULL) return (AE_OK); /* * Scan the current buffer looking for the terminator. * This will either find the terminator or hit the end * of the buffer and return an error. */ rp = (ACPI_RESOURCE *)buf->Pointer; for (;;) { /* Range check, don't go outside the buffer */ if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) break; rp = ACPI_NEXT_RESOURCE(rp); } /* * Check the size of the buffer and expand if required. * * Required size is: * size of existing resources before terminator + * size of new resource and header + * size of terminator. * * Note that this loop should really only run once, unless * for some reason we are stuffing a *really* huge resource. */ while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + res->Length + ACPI_RS_SIZE_NO_DATA + ACPI_RS_SIZE_MIN) >= buf->Length) { if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL) return (AE_NO_MEMORY); bcopy(buf->Pointer, newp, buf->Length); rp = (ACPI_RESOURCE *)((u_int8_t *)newp + ((u_int8_t *)rp - (u_int8_t *)buf->Pointer)); AcpiOsFree(buf->Pointer); buf->Pointer = newp; buf->Length += buf->Length; } /* Insert the new resource. */ bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA); /* And add the terminator. */ rp = ACPI_NEXT_RESOURCE(rp); rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; return (AE_OK); } UINT64 acpi_DSMQuery(ACPI_HANDLE h, const uint8_t *uuid, int revision) { /* * ACPI spec 9.1.1 defines this. * * "Arg2: Function Index Represents a specific function whose meaning is * specific to the UUID and Revision ID. Function indices should start * with 1. Function number zero is a query function (see the special * return code defined below)." */ ACPI_BUFFER buf; ACPI_OBJECT *obj; UINT64 ret = 0; int i; if (!ACPI_SUCCESS(acpi_EvaluateDSM(h, uuid, revision, 0, NULL, &buf))) { ACPI_INFO(("Failed to enumerate DSM functions\n")); return (0); } obj = (ACPI_OBJECT *)buf.Pointer; KASSERT(obj, ("Object not allowed to be NULL\n")); /* * From ACPI 6.2 spec 9.1.1: * If Function Index = 0, a Buffer containing a function index bitfield. * Otherwise, the return value and type depends on the UUID and revision * ID (see below). */ switch (obj->Type) { case ACPI_TYPE_BUFFER: for (i = 0; i < MIN(obj->Buffer.Length, sizeof(ret)); i++) ret |= (((uint64_t)obj->Buffer.Pointer[i]) << (i * 8)); break; case ACPI_TYPE_INTEGER: ACPI_BIOS_WARNING((AE_INFO, "Possibly buggy BIOS with ACPI_TYPE_INTEGER for function enumeration\n")); ret = obj->Integer.Value; break; default: ACPI_WARNING((AE_INFO, "Unexpected return type %u\n", obj->Type)); }; AcpiOsFree(obj); return ret; } /* * DSM may return multiple types depending on the function. It is therefore * unsafe to use the typed evaluation. It is highly recommended that the caller * check the type of the returned object. 
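 *
 * A minimal sketch of the recommended pattern (the uuid, revision and
 * function index here are hypothetical):
 *
 *    ACPI_BUFFER buf;
 *    ACPI_OBJECT *obj;
 *
 *    if (ACPI_FAILURE(acpi_EvaluateDSM(handle, uuid, 1, 4, NULL, &buf)))
 *        return (ENXIO);
 *    obj = (ACPI_OBJECT *)buf.Pointer;
 *    if (obj->Type == ACPI_TYPE_INTEGER)
 *        val = obj->Integer.Value;
 *    AcpiOsFree(buf.Pointer);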
*/ ACPI_STATUS acpi_EvaluateDSM(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf) { return (acpi_EvaluateDSMTyped(handle, uuid, revision, function, package, out_buf, ACPI_TYPE_ANY)); } ACPI_STATUS acpi_EvaluateDSMTyped(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf, ACPI_OBJECT_TYPE type) { ACPI_OBJECT arg[4]; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; if (out_buf == NULL) return (AE_NO_MEMORY); arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = __DECONST(uint8_t *, uuid); arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = function; if (package) { arg[3] = *package; } else { arg[3].Type = ACPI_TYPE_PACKAGE; arg[3].Package.Count = 0; arg[3].Package.Elements = NULL; } arglist.Pointer = arg; arglist.Count = 4; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_DSM", &arglist, &buf, type); if (ACPI_FAILURE(status)) return (status); KASSERT(ACPI_SUCCESS(status), ("Unexpected status")); *out_buf = buf; return (status); } ACPI_STATUS acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count, uint32_t *caps_in, uint32_t *caps_out, bool query) { ACPI_OBJECT arg[4], *ret; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; arglist.Pointer = arg; arglist.Count = 4; arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = uuid; arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = count; arg[3].Type = ACPI_TYPE_BUFFER; arg[3].Buffer.Length = count * sizeof(*caps_in); arg[3].Buffer.Pointer = (uint8_t *)caps_in; caps_in[0] = query ? 1 : 0; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_OSC", &arglist, &buf, ACPI_TYPE_BUFFER); if (ACPI_FAILURE(status)) return (status); if (caps_out != NULL) { ret = buf.Pointer; if (ret->Buffer.Length != count * sizeof(*caps_out)) { AcpiOsFree(buf.Pointer); return (AE_BUFFER_OVERFLOW); } bcopy(ret->Buffer.Pointer, caps_out, ret->Buffer.Length); } AcpiOsFree(buf.Pointer); return (status); } /* * Set interrupt model. */ ACPI_STATUS acpi_SetIntrModel(int model) { return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model)); } /* * Walk subtables of a table and call a callback routine for each * subtable. The caller should provide the first subtable and a * pointer to the end of the table. This can be used to walk tables * such as MADT and SRAT that use subtable entries. */ void acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler, void *arg) { ACPI_SUBTABLE_HEADER *entry; for (entry = first; (void *)entry < end; ) { /* Avoid an infinite loop if we hit a bogus entry. */ if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER)) return; handler(entry, arg); entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length); } } /* * DEPRECATED. This interface has serious deficiencies and will be * removed. * * Immediately enter the sleep state. In the old model, acpiconf(8) ran * rc.suspend and rc.resume so we don't have to notify devd(8) to do this. 
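 *
 * Modern userland goes through the request/acknowledge path instead,
 * e.g. acpiconf(8) issuing ACPIIO_REQSLPSTATE:
 *
 *    acpiconf -s 3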
*/ ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state) { static int once; if (!once) { device_printf(sc->acpi_dev, "warning: acpi_SetSleepState() deprecated, need to update your software\n"); once = 1; } return (acpi_EnterSleepState(sc, state)); } #if defined(__amd64__) || defined(__i386__) static void acpi_sleep_force_task(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) device_printf(sc->acpi_dev, "force sleep state S%d failed\n", sc->acpi_next_sstate); } static void acpi_sleep_force(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; device_printf(sc->acpi_dev, "suspend request timed out, forcing sleep now\n"); /* * XXX Suspending from callout causes freezes in DEVICE_SUSPEND(). * Suspend from acpi_task thread instead. */ if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_sleep_force_task, sc))) device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n"); } #endif /* * Request that the system enter the given suspend state. All /dev/apm * devices and devd(8) will be notified. Userland then has a chance to * save state and acknowledge the request. The system sleeps once all * acks are in. */ int acpi_ReqSleepState(struct acpi_softc *sc, int state) { #if defined(__amd64__) || defined(__i386__) struct apm_clone_data *clone; ACPI_STATUS status; if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); /* * If a reboot/shutdown/suspend request is already in progress or * suspend is blocked due to an upcoming shutdown, just return. */ if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) { return (0); } /* Wait until sleep is enabled. */ while (sc->acpi_sleep_disabled) { AcpiOsSleep(1000); } ACPI_LOCK(acpi); sc->acpi_next_sstate = state; /* S5 (soft-off) should be entered directly with no waiting. */ if (state == ACPI_STATE_S5) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* Record the pending state and notify all apm devices. */ STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { clone->notify_status = APM_EV_NONE; if ((clone->flags & ACPI_EVF_DEVD) == 0) { selwakeuppri(&clone->sel_read, PZERO); KNOTE_LOCKED(&clone->sel_read.si_note, 0); } } /* If devd(8) is not running, immediately enter the sleep state. */ if (!devctl_process_running()) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* * Set a timeout to fire if userland doesn't ack the suspend request * in time. This way we still eventually go to sleep if we were * overheating or running low on battery, even if userland is hung. * We cancel this timeout once all userland acks are in or the * suspend request is aborted. */ callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc); ACPI_UNLOCK(acpi); /* Now notify devd(8) also. */ acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state); return (0); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } /* * Acknowledge (or reject) a pending sleep state. The caller has * prepared for suspend and is now ready for it to proceed. If the * error argument is non-zero, it indicates suspend should be cancelled * and gives an errno value describing why. Once all votes are in, * we suspend the system. 
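 *
 * Userland listeners normally acknowledge through acpiconf(8) once their
 * preparation (e.g. rc.suspend) is done:
 *
 *    acpiconf -k 0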
*/ int acpi_AckSleepState(struct apm_clone_data *clone, int error) { #if defined(__amd64__) || defined(__i386__) struct acpi_softc *sc; int ret, sleeping; /* If no pending sleep state, return an error. */ ACPI_LOCK(acpi); sc = clone->acpi_sc; if (sc->acpi_next_sstate == 0) { ACPI_UNLOCK(acpi); return (ENXIO); } /* Caller wants to abort suspend process. */ if (error) { sc->acpi_next_sstate = 0; callout_stop(&sc->susp_force_to); device_printf(sc->acpi_dev, "listener on %s cancelled the pending suspend\n", devtoname(clone->cdev)); ACPI_UNLOCK(acpi); return (0); } /* * Mark this device as acking the suspend request. Then, walk through * all devices, seeing if they agree yet. We only count devices that * are writable since read-only devices couldn't ack the request. */ sleeping = TRUE; clone->notify_status = APM_EV_ACKED; STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { if ((clone->flags & ACPI_EVF_WRITE) != 0 && clone->notify_status != APM_EV_ACKED) { sleeping = FALSE; break; } } /* If all devices have voted "yes", we will suspend now. */ if (sleeping) callout_stop(&sc->susp_force_to); ACPI_UNLOCK(acpi); ret = 0; if (sleeping) { if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) ret = ENODEV; } return (ret); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } static void acpi_sleep_enable(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; ACPI_LOCK_ASSERT(acpi); /* Reschedule if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) { callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); return; } sc->acpi_sleep_disabled = FALSE; } static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc) { ACPI_STATUS status; /* Fail if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) return (AE_ERROR); ACPI_LOCK(acpi); status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK; sc->acpi_sleep_disabled = TRUE; ACPI_UNLOCK(acpi); return (status); } enum acpi_sleep_state { ACPI_SS_NONE, ACPI_SS_GPE_SET, ACPI_SS_DEV_SUSPEND, ACPI_SS_SLP_PREP, ACPI_SS_SLEPT, }; /* * Enter the desired system sleep state. * * Currently we support S1-S5 but S4 is only S4BIOS */ static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state) { register_t intr; ACPI_STATUS status; ACPI_EVENT_STATUS power_button_status; enum acpi_sleep_state slp_state; int sleep_result; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return_ACPI_STATUS (AE_BAD_PARAMETER); if (!acpi_sleep_states[state]) { device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n", state); return (AE_SUPPORT); } /* Re-entry once we're suspending is not allowed. */ status = acpi_sleep_disable(sc); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "suspend request ignored (not ready yet)\n"); return (status); } if (state == ACPI_STATE_S5) { /* * Shut down cleanly and power off. This will call us back through the * shutdown handlers. 
*/ shutdown_nice(RB_POWEROFF); return_ACPI_STATUS (AE_OK); } EVENTHANDLER_INVOKE(power_suspend_early); stop_all_proc(); suspend_all_fs(); EVENTHANDLER_INVOKE(power_suspend); #ifdef EARLY_AP_STARTUP MPASS(mp_ncpus == 1 || smp_started); thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); } #endif /* * Be sure to hold Giant across DEVICE_SUSPEND/RESUME */ bus_topo_lock(); slp_state = ACPI_SS_NONE; sc->acpi_sstate = state; /* Enable any GPEs as appropriate and requested by the user. */ acpi_wake_prep_walk(state); slp_state = ACPI_SS_GPE_SET; /* * Inform all devices that we are going to sleep. If at least one * device fails, DEVICE_SUSPEND() automatically resumes the tree. * * XXX Note that a better two-pass approach with a 'veto' pass * followed by a "real thing" pass would be better, but the current * bus interface does not provide for this. */ if (DEVICE_SUSPEND(root_bus) != 0) { device_printf(sc->acpi_dev, "device_suspend failed\n"); goto backout; } slp_state = ACPI_SS_DEV_SUSPEND; status = AcpiEnterSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); goto backout; } slp_state = ACPI_SS_SLP_PREP; if (sc->acpi_sleep_delay > 0) DELAY(sc->acpi_sleep_delay * 1000000); suspendclock(); intr = intr_disable(); if (state != ACPI_STATE_S1) { sleep_result = acpi_sleep_machdep(sc, state); acpi_wakeup_machdep(sc, state, sleep_result, 0); /* * XXX According to ACPI specification SCI_EN bit should be restored * by ACPI platform (BIOS, firmware) to its pre-sleep state. * Unfortunately some BIOSes fail to do that and that leads to * unexpected and serious consequences during wake up like a system * getting stuck in SMI handlers. * This hack is picked up from Linux, which claims that it follows * Windows behavior. */ if (sleep_result == 1 && state != ACPI_STATE_S4) AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT); if (sleep_result == 1 && state == ACPI_STATE_S3) { /* * Prevent mis-interpretation of the wakeup by power button * as a request for power off. * Ideally we should post an appropriate wakeup event, * perhaps using acpi_event_power_button_wake or alike. * * Clearing of power button status after wakeup is mandated * by ACPI specification in section "Fixed Power Button". * * XXX As of ACPICA 20121114 AcpiGetEventStatus provides * status as 0/1 corressponding to inactive/active despite * its type being ACPI_EVENT_STATUS. In other words, * we should not test for ACPI_EVENT_FLAG_SET for time being. */ if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON, &power_button_status)) && power_button_status != 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); device_printf(sc->acpi_dev, "cleared fixed power button status\n"); } } intr_restore(intr); /* call acpi_wakeup_machdep() again with interrupt enabled */ acpi_wakeup_machdep(sc, state, sleep_result, 1); AcpiLeaveSleepStatePrep(state); if (sleep_result == -1) goto backout; /* Re-enable ACPI hardware on wakeup from sleep state 4. */ if (state == ACPI_STATE_S4) AcpiEnable(); } else { status = AcpiEnterSleepState(state); intr_restore(intr); AcpiLeaveSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", AcpiFormatException(status)); goto backout; } } slp_state = ACPI_SS_SLEPT; /* * Back out state according to how far along we got in the suspend * process. 
This handles both the error and success cases. */ backout: if (slp_state >= ACPI_SS_SLP_PREP) resumeclock(); if (slp_state >= ACPI_SS_GPE_SET) { acpi_wake_prep_walk(state); sc->acpi_sstate = ACPI_STATE_S0; } if (slp_state >= ACPI_SS_DEV_SUSPEND) DEVICE_RESUME(root_bus); if (slp_state >= ACPI_SS_SLP_PREP) AcpiLeaveSleepState(state); if (slp_state >= ACPI_SS_SLEPT) { #if defined(__i386__) || defined(__amd64__) /* NB: we are still using ACPI timecounter at this point. */ resume_TSC(); #endif acpi_resync_clock(sc); acpi_enable_fixed_events(sc); } sc->acpi_next_sstate = 0; bus_topo_unlock(); #ifdef EARLY_AP_STARTUP thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } #endif resume_all_fs(); resume_all_proc(); EVENTHANDLER_INVOKE(power_resume); /* Allow another sleep request after a while. */ callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); /* Run /etc/rc.resume after we are back. */ if (devctl_process_running()) acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state); return_ACPI_STATUS (status); } static void acpi_resync_clock(struct acpi_softc *sc) { /* * Warm up timecounter again and reset system clock. */ (void)timecounter->tc_get_timecount(timecounter); inittodr(time_second + sc->acpi_sleep_delay); } /* Enable or disable the device's wake GPE. */ int acpi_wake_set_enable(device_t dev, int enable) { struct acpi_prw_data prw; ACPI_STATUS status; int flags; /* Make sure the device supports waking the system and get the GPE. */ if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0) return (ENXIO); flags = acpi_get_flags(dev); if (enable) { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "enable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED); } else { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "disable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED); } return (0); } static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* Check that this is a wake-capable device and get its GPE. */ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); /* * The destination sleep state must be less than (i.e., higher power) * or equal to the value specified by _PRW. If this GPE cannot be * enabled for the next sleep state, then disable it. If it can and * the user requested it be enabled, turn on any required power resources * and set _PSW. */ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (bootverbose) device_printf(dev, "wake_prep disabled wake for %s (S%d)\n", acpi_name(handle), sstate); } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) { acpi_pwr_wake_enable(handle, 1); acpi_SetInteger(handle, "_PSW", 1); if (bootverbose) device_printf(dev, "wake_prep enabled for %s (S%d)\n", acpi_name(handle), sstate); } return (0); } static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* * Check that this is a wake-capable device and get its GPE. Return * now if the user didn't enable this device for wake. 
*/ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0) return (0); /* * If this GPE couldn't be enabled for the previous sleep state, it was * disabled before going to sleep so re-enable it. If it was enabled, * clear _PSW and turn off any power resources it used. */ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (bootverbose) device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle)); } else { acpi_SetInteger(handle, "_PSW", 0); acpi_pwr_wake_enable(handle, 0); if (bootverbose) device_printf(dev, "run_prep cleaned up for %s\n", acpi_name(handle)); } return (0); } static ACPI_STATUS acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { int sstate; /* If suspending, run the sleep prep function, otherwise wake. */ sstate = *(int *)context; if (AcpiGbl_SystemAwakeAndRunning) acpi_wake_sleep_prep(handle, sstate); else acpi_wake_run_prep(handle, sstate); return (AE_OK); } /* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */ static int acpi_wake_prep_walk(int sstate) { ACPI_HANDLE sb_handle; if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100, acpi_wake_prep, NULL, &sstate, NULL); return (0); } /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */ static int acpi_wake_sysctl_walk(device_t dev) { int error, i, numdevs; device_t *devlist; device_t child; ACPI_STATUS status; error = device_get_children(dev, &devlist, &numdevs); if (error != 0 || numdevs == 0) { if (numdevs == 0) free(devlist, M_TEMP); return (error); } for (i = 0; i < numdevs; i++) { child = devlist[i]; acpi_wake_sysctl_walk(child); if (!device_is_attached(child)) continue; status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL); if (ACPI_SUCCESS(status)) { SYSCTL_ADD_PROC(device_get_sysctl_ctx(child), SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO, "wake", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, child, 0, acpi_wake_set_sysctl, "I", "Device set to wake the system"); } } free(devlist, M_TEMP); return (0); } /* Enable or disable wake from userland. */ static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS) { int enable, error; device_t dev; dev = (device_t)arg1; enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0; error = sysctl_handle_int(oidp, &enable, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (enable != 0 && enable != 1) return (EINVAL); return (acpi_wake_set_enable(dev, enable)); } /* Parse a device's _PRW into a structure. */ int acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw) { ACPI_STATUS status; ACPI_BUFFER prw_buffer; ACPI_OBJECT *res, *res2; int error, i, power_count; if (h == NULL || prw == NULL) return (EINVAL); /* * The _PRW object (7.2.9) is only required for devices that have the * ability to wake the system from a sleeping state. */ error = EINVAL; prw_buffer.Pointer = NULL; prw_buffer.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer); if (ACPI_FAILURE(status)) return (ENOENT); res = (ACPI_OBJECT *)prw_buffer.Pointer; if (res == NULL) return (ENOENT); if (!ACPI_PKG_VALID(res, 2)) goto out; /* * Element 1 of the _PRW object: * The lowest power system sleeping state that can be entered while still * providing wake functionality. 
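 *
 * In ASL the simple (integer) form of _PRW commonly looks like this
 * (values illustrative), where the second element, 0x03, is this lowest
 * wake state and 0x0B is the GPE bit:
 *
 *    Name (_PRW, Package () { 0x0B, 0x03 })
 *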
The sleeping state being entered must * be less than (i.e., higher power) or equal to this value. */ if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0) goto out; /* * Element 0 of the _PRW object: */ switch (res->Package.Elements[0].Type) { case ACPI_TYPE_INTEGER: /* * If the data type of this package element is numeric, then this * _PRW package element is the bit index in the GPEx_EN, in the * GPE blocks described in the FADT, of the enable bit that is * enabled for the wake event. */ prw->gpe_handle = NULL; prw->gpe_bit = res->Package.Elements[0].Integer.Value; error = 0; break; case ACPI_TYPE_PACKAGE: /* * If the data type of this package element is a package, then this * _PRW package element is itself a package containing two * elements. The first is an object reference to the GPE Block * device that contains the GPE that will be triggered by the wake * event. The second element is numeric and it contains the bit * index in the GPEx_EN, in the GPE Block referenced by the * first element in the package, of the enable bit that is enabled for * the wake event. * * For example, if this field is a package then it is of the form: * Package() {\_SB.PCI0.ISA.GPE, 2} */ res2 = &res->Package.Elements[0]; if (!ACPI_PKG_VALID(res2, 2)) goto out; prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]); if (prw->gpe_handle == NULL) goto out; if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0) goto out; error = 0; break; default: goto out; } /* Elements 2 to N of the _PRW object are power resources. */ power_count = res->Package.Count - 2; if (power_count > ACPI_PRW_MAX_POWERRES) { printf("ACPI device %s has too many power resources\n", acpi_name(h)); power_count = 0; } prw->power_res_count = power_count; for (i = 0; i < power_count; i++) prw->power_res[i] = res->Package.Elements[i]; out: if (prw_buffer.Pointer != NULL) AcpiOsFree(prw_buffer.Pointer); return (error); } /* * ACPI Event Handlers */ /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */ static void acpi_system_eventhandler_sleep(void *arg, int state) { struct acpi_softc *sc = (struct acpi_softc *)arg; int ret; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Check if button action is disabled or unknown. */ if (state == ACPI_STATE_UNKNOWN) return; /* Request that the system prepare to enter the given suspend state. */ ret = acpi_ReqSleepState(sc, state); if (ret != 0) device_printf(sc->acpi_dev, "request to enter state S%d failed (err %d)\n", state, ret); return_VOID; } static void acpi_system_eventhandler_wakeup(void *arg, int state) { ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Currently, nothing to do for wakeup. 
*/ return_VOID; } /* * ACPICA Event Handlers (FixedEvent, also called from button notify handler) */ static void acpi_invoke_sleep_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context); } static void acpi_invoke_wake_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context); } UINT32 acpi_event_power_button_sleep(void *context) { #if defined(__amd64__) || defined(__i386__) struct acpi_softc *sc = (struct acpi_softc *)context; #else (void)context; #endif ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); #if defined(__amd64__) || defined(__i386__) if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); #else shutdown_nice(RB_POWEROFF); #endif return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_power_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } /* * XXX This static buffer is suboptimal. There is no locking so only * use this for single-threaded callers. */ char * acpi_name(ACPI_HANDLE handle) { ACPI_BUFFER buf; static char data[256]; buf.Length = sizeof(data); buf.Pointer = data; if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf))) return (data); return ("(unknown)"); } /* * Debugging/bug-avoidance. Avoid trying to fetch info on various * parts of the namespace. */ int acpi_avoid(ACPI_HANDLE handle) { char *cp, *env, *np; int len; np = acpi_name(handle); if (*np == '\\') np++; if ((env = kern_getenv("debug.acpi.avoid")) == NULL) return (0); /* Scan the avoid list checking for a match */ cp = env; for (;;) { while (*cp != 0 && isspace(*cp)) cp++; if (*cp == 0) break; len = 0; while (cp[len] != 0 && !isspace(cp[len])) len++; if (!strncmp(cp, np, len)) { freeenv(env); return(1); } cp += len; } freeenv(env); return (0); } /* * Debugging/bug-avoidance. Disable ACPI subsystem components. */ int acpi_disabled(char *subsys) { char *cp, *env; int len; if ((env = kern_getenv("debug.acpi.disabled")) == NULL) return (0); if (strcmp(env, "all") == 0) { freeenv(env); return (1); } /* Scan the disable list, checking for a match. 
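 *
 * The list comes from the debug.acpi.disabled loader tunable, e.g. in
 * loader.conf (subsystem names are those the callers test for):
 *
 *    debug.acpi.disabled="children sysresource"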
*/ cp = env; for (;;) { while (*cp != '\0' && isspace(*cp)) cp++; if (*cp == '\0') break; len = 0; while (cp[len] != '\0' && !isspace(cp[len])) len++; if (strncmp(cp, subsys, len) == 0) { freeenv(env); return (1); } cp += len; } freeenv(env); return (0); } static void acpi_lookup(void *arg, const char *name, device_t *dev) { ACPI_HANDLE handle; if (*dev != NULL) return; /* * Allow any handle name that is specified as an absolute path and * starts with '\'. We could restrict this to \_SB and friends, * but see acpi_probe_children() for notes on why we scan the entire * namespace for devices. * * XXX: The pathname argument to AcpiGetHandle() should be fixed to * be const. */ if (name[0] != '\\') return; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, __DECONST(char *, name), &handle))) return; *dev = acpi_get_device(handle); } /* * Control interface. * * We multiplex ioctls for all participating ACPI devices here. Individual * drivers wanting to be accessible via /dev/acpi should use the * register/deregister interface to make their handlers visible. */ struct acpi_ioctl_hook { TAILQ_ENTRY(acpi_ioctl_hook) link; u_long cmd; acpi_ioctl_fn fn; void *arg; }; static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks; static int acpi_ioctl_hooks_initted; int acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg) { struct acpi_ioctl_hook *hp; if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL) return (ENOMEM); hp->cmd = cmd; hp->fn = fn; hp->arg = arg; ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted == 0) { TAILQ_INIT(&acpi_ioctl_hooks); acpi_ioctl_hooks_initted = 1; } TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link); ACPI_UNLOCK(acpi); return (0); } void acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn) { struct acpi_ioctl_hook *hp; ACPI_LOCK(acpi); TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) if (hp->cmd == cmd && hp->fn == fn) break; if (hp != NULL) { TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link); free(hp, M_ACPIDEV); } ACPI_UNLOCK(acpi); } static int acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct acpi_softc *sc; struct acpi_ioctl_hook *hp; int error, state; error = 0; hp = NULL; sc = dev->si_drv1; /* * Scan the list of registered ioctls, looking for handlers. */ ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted) TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) { if (hp->cmd == cmd) break; } ACPI_UNLOCK(acpi); if (hp) return (hp->fn(cmd, addr, hp->arg)); /* * Core ioctls are not permitted for non-writable user. * Currently, other ioctls just fetch information. * Not changing system behavior. */ if ((flag & FWRITE) == 0) return (EPERM); /* Core system ioctls. 
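 *
 * Reached from userland via /dev/acpi; a hedged sketch of a caller
 * requesting S3 (error handling omitted):
 *
 *    int fd, state = 3;
 *
 *    fd = open("/dev/acpi", O_WRONLY);
 *    ioctl(fd, ACPIIO_REQSLPSTATE, &state);
 *    close(fd);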
*/ switch (cmd) { case ACPIIO_REQSLPSTATE: state = *(int *)addr; if (state != ACPI_STATE_S5) return (acpi_ReqSleepState(sc, state)); device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n"); error = EOPNOTSUPP; break; case ACPIIO_ACKSLPSTATE: error = *(int *)addr; error = acpi_AckSleepState(sc->acpi_clone, error); break; case ACPIIO_SETSLPSTATE: /* DEPRECATED */ state = *(int *)addr; if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); if (ACPI_FAILURE(acpi_SetSleepState(sc, state))) error = ENXIO; break; default: error = ENXIO; break; } return (error); } static int acpi_sname2sstate(const char *sname) { int sstate; if (toupper(sname[0]) == 'S') { sstate = sname[1] - '0'; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 && sname[2] == '\0') return (sstate); } else if (strcasecmp(sname, "NONE") == 0) return (ACPI_STATE_UNKNOWN); return (-1); } static const char * acpi_sstate2sname(int sstate) { static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" }; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5) return (snames[sstate]); else if (sstate == ACPI_STATE_UNKNOWN) return ("NONE"); return (NULL); } static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { int error; struct sbuf sb; UINT8 state; sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (acpi_sleep_states[state]) sbuf_printf(&sb, "%s ", acpi_sstate2sname(state)); sbuf_trim(&sb); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); sbuf_delete(&sb); return (error); } static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { char sleep_state[10]; int error, new_state, old_state; old_state = *(int *)oidp->oid_arg1; strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state)); error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req); if (error == 0 && req->newptr != NULL) { new_state = acpi_sname2sstate(sleep_state); if (new_state < ACPI_STATE_S1) return (EINVAL); if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state]) return (EOPNOTSUPP); if (new_state != old_state) *(int *)oidp->oid_arg1 = new_state; } return (error); } /* Inform devctl(4) when we receive a Notify. */ void acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify) { char notify_buf[16]; ACPI_BUFFER handle_buf; ACPI_STATUS status; if (subsystem == NULL) return; handle_buf.Pointer = NULL; handle_buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiNsHandleToPathname(h, &handle_buf, FALSE); if (ACPI_FAILURE(status)) return; snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify); devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf); AcpiOsFree(handle_buf.Pointer); } #ifdef ACPI_DEBUG /* * Support for parsing debug options from the kernel environment. * * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers * by specifying the names of the bits in the debug.acpi.layer and * debug.acpi.level environment variables. Bits may be unset by * prefixing the bit name with !. 
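 *
 * For example, in loader.conf (names come from the tables below; '!'
 * clears a bit):
 *
 *    debug.acpi.layer="ACPI_EVENTS ACPI_BUS"
 *    debug.acpi.level="ACPI_LV_INFO !ACPI_LV_REPAIR"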
*/ struct debugtag { char *name; UINT32 value; }; static struct debugtag dbg_layer[] = { {"ACPI_UTILITIES", ACPI_UTILITIES}, {"ACPI_HARDWARE", ACPI_HARDWARE}, {"ACPI_EVENTS", ACPI_EVENTS}, {"ACPI_TABLES", ACPI_TABLES}, {"ACPI_NAMESPACE", ACPI_NAMESPACE}, {"ACPI_PARSER", ACPI_PARSER}, {"ACPI_DISPATCHER", ACPI_DISPATCHER}, {"ACPI_EXECUTER", ACPI_EXECUTER}, {"ACPI_RESOURCES", ACPI_RESOURCES}, {"ACPI_CA_DEBUGGER", ACPI_CA_DEBUGGER}, {"ACPI_OS_SERVICES", ACPI_OS_SERVICES}, {"ACPI_CA_DISASSEMBLER", ACPI_CA_DISASSEMBLER}, {"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS}, {"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER}, {"ACPI_BATTERY", ACPI_BATTERY}, {"ACPI_BUS", ACPI_BUS}, {"ACPI_BUTTON", ACPI_BUTTON}, {"ACPI_EC", ACPI_EC}, {"ACPI_FAN", ACPI_FAN}, {"ACPI_POWERRES", ACPI_POWERRES}, {"ACPI_PROCESSOR", ACPI_PROCESSOR}, {"ACPI_THERMAL", ACPI_THERMAL}, {"ACPI_TIMER", ACPI_TIMER}, {"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS}, {NULL, 0} }; static struct debugtag dbg_level[] = { {"ACPI_LV_INIT", ACPI_LV_INIT}, {"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT}, {"ACPI_LV_INFO", ACPI_LV_INFO}, {"ACPI_LV_REPAIR", ACPI_LV_REPAIR}, {"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS}, /* Trace verbosity level 1 [Standard Trace Level] */ {"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES}, {"ACPI_LV_PARSE", ACPI_LV_PARSE}, {"ACPI_LV_LOAD", ACPI_LV_LOAD}, {"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH}, {"ACPI_LV_EXEC", ACPI_LV_EXEC}, {"ACPI_LV_NAMES", ACPI_LV_NAMES}, {"ACPI_LV_OPREGION", ACPI_LV_OPREGION}, {"ACPI_LV_BFIELD", ACPI_LV_BFIELD}, {"ACPI_LV_TABLES", ACPI_LV_TABLES}, {"ACPI_LV_VALUES", ACPI_LV_VALUES}, {"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS}, {"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES}, {"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS}, {"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE}, {"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1}, /* Trace verbosity level 2 [Function tracing and memory allocation] */ {"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS}, {"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS}, {"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS}, {"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2}, {"ACPI_LV_ALL", ACPI_LV_ALL}, /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ {"ACPI_LV_MUTEX", ACPI_LV_MUTEX}, {"ACPI_LV_THREADS", ACPI_LV_THREADS}, {"ACPI_LV_IO", ACPI_LV_IO}, {"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS}, {"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3}, /* Exceptionally verbose output -- also used in the global "DebugLevel" */ {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE}, {"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO}, {"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES}, {"ACPI_LV_EVENTS", ACPI_LV_EVENTS}, {"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE}, {NULL, 0} }; static void acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag) { char *ep; int i, l; int set; while (*cp) { if (isspace(*cp)) { cp++; continue; } ep = cp; while (*ep && !isspace(*ep)) ep++; if (*cp == '!') { set = 0; cp++; if (cp == ep) continue; } else { set = 1; } l = ep - cp; for (i = 0; tag[i].name != NULL; i++) { if (!strncmp(cp, tag[i].name, l)) { if (set) *flag |= tag[i].value; else *flag &= ~tag[i].value; } } cp = ep; } } static void acpi_set_debugging(void *junk) { char *layer, *level; if (cold) { AcpiDbgLayer = 0; AcpiDbgLevel = 0; } layer = kern_getenv("debug.acpi.layer"); level = kern_getenv("debug.acpi.level"); if (layer == NULL && level == NULL) return; printf("ACPI set debug"); if (layer != NULL) { if (strcmp("NONE", layer) != 0) printf(" layer '%s'", layer); acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer); freeenv(layer); } if (level != NULL) { if (strcmp("NONE", level) != 0) 
printf(" level '%s'", level); acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel); freeenv(level); } printf("\n"); } SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging, NULL); static int acpi_debug_sysctl(SYSCTL_HANDLER_ARGS) { int error, *dbg; struct debugtag *tag; struct sbuf sb; char temp[128]; if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL) return (ENOMEM); if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) { tag = &dbg_layer[0]; dbg = &AcpiDbgLayer; } else { tag = &dbg_level[0]; dbg = &AcpiDbgLevel; } /* Get old values if this is a get request. */ ACPI_SERIAL_BEGIN(acpi); if (*dbg == 0) { sbuf_cpy(&sb, "NONE"); } else if (req->newptr == NULL) { for (; tag->name != NULL; tag++) { if ((*dbg & tag->value) == tag->value) sbuf_printf(&sb, "%s ", tag->name); } } sbuf_trim(&sb); sbuf_finish(&sb); strlcpy(temp, sbuf_data(&sb), sizeof(temp)); sbuf_delete(&sb); error = sysctl_handle_string(oidp, temp, sizeof(temp), req); /* Check for error or no change */ if (error == 0 && req->newptr != NULL) { *dbg = 0; kern_setenv((char *)oidp->oid_arg1, temp); acpi_set_debugging(NULL); } ACPI_SERIAL_END(acpi); return (error); } SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.layer", 0, acpi_debug_sysctl, "A", ""); SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.level", 0, acpi_debug_sysctl, "A", ""); #endif /* ACPI_DEBUG */ static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS) { int error; int old; old = acpi_debug_objects; error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (old == acpi_debug_objects || (old && acpi_debug_objects)) return (0); ACPI_SERIAL_BEGIN(acpi); AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? 
TRUE : FALSE; ACPI_SERIAL_END(acpi); return (0); } static int acpi_parse_interfaces(char *str, struct acpi_interface *iface) { char *p; size_t len; int i, j; p = str; while (isspace(*p) || *p == ',') p++; len = strlen(p); if (len == 0) return (0); p = strdup(p, M_TEMP); for (i = 0; i < len; i++) if (p[i] == ',') p[i] = '\0'; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { i += strlen(p + i) + 1; j++; } if (j == 0) { free(p, M_TEMP); return (0); } iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK); iface->num = j; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { iface->data[j] = p + i; i += strlen(p + i) + 1; j++; } return (j); } static void acpi_free_interfaces(struct acpi_interface *iface) { free(iface->data[0], M_TEMP); free(iface->data, M_TEMP); } static void acpi_reset_interfaces(device_t dev) { struct acpi_interface list; ACPI_STATUS status; int i; if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiInstallInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to install _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "installed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiRemoveInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to remove _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "removed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } } static int acpi_pm_func(u_long cmd, void *arg, ...) { int state, acpi_state; int error; struct acpi_softc *sc; va_list ap; error = 0; switch (cmd) { case POWER_CMD_SUSPEND: sc = (struct acpi_softc *)arg; if (sc == NULL) { error = EINVAL; goto out; } va_start(ap, arg); state = va_arg(ap, int); va_end(ap); switch (state) { case POWER_SLEEP_STATE_STANDBY: acpi_state = sc->acpi_standby_sx; break; case POWER_SLEEP_STATE_SUSPEND: acpi_state = sc->acpi_suspend_sx; break; case POWER_SLEEP_STATE_HIBERNATE: acpi_state = ACPI_STATE_S4; break; default: error = EINVAL; goto out; } if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state))) error = ENXIO; break; default: error = EINVAL; goto out; } out: return (error); } static void acpi_pm_register(void *arg) { if (!cold || resource_disabled("acpi", 0)) return; power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL); } SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL); diff --git a/sys/dev/acpica/acpi_pci.c b/sys/dev/acpica/acpi_pci.c index 646295f9eecc..1912350bbc48 100644 --- a/sys/dev/acpica/acpi_pci.c +++ b/sys/dev/acpica/acpi_pci.c @@ -1,522 +1,520 @@ /*- * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include "opt_acpi.h" #include "opt_iommu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include "pcib_if.h" -#include "pci_if.h" - /* Hooks for the ACPI CA debugging infrastructure. */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("PCI") struct acpi_pci_devinfo { struct pci_devinfo ap_dinfo; ACPI_HANDLE ap_handle; int ap_flags; }; ACPI_SERIAL_DECL(pci_powerstate, "ACPI PCI power methods"); /* Be sure that ACPI and PCI power states are equivalent. */ CTASSERT(ACPI_STATE_D0 == PCI_POWERSTATE_D0); CTASSERT(ACPI_STATE_D1 == PCI_POWERSTATE_D1); CTASSERT(ACPI_STATE_D2 == PCI_POWERSTATE_D2); CTASSERT(ACPI_STATE_D3 == PCI_POWERSTATE_D3); static struct pci_devinfo *acpi_pci_alloc_devinfo(device_t dev); static int acpi_pci_attach(device_t dev); static void acpi_pci_child_deleted(device_t dev, device_t child); static int acpi_pci_child_location_method(device_t cbdev, device_t child, struct sbuf *sb); static int acpi_pci_get_device_path(device_t cbdev, device_t child, const char *locator, struct sbuf *sb); static int acpi_pci_detach(device_t dev); static int acpi_pci_probe(device_t dev); static int acpi_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); static int acpi_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value); static ACPI_STATUS acpi_pci_save_handle(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static int acpi_pci_set_powerstate_method(device_t dev, device_t child, int state); static void acpi_pci_update_device(ACPI_HANDLE handle, device_t pci_child); static bus_dma_tag_t acpi_pci_get_dma_tag(device_t bus, device_t child); static int acpi_pci_get_domain(device_t dev, device_t child, int *domain); static device_method_t acpi_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_pci_probe), DEVMETHOD(device_attach, acpi_pci_attach), DEVMETHOD(device_detach, acpi_pci_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, acpi_pci_read_ivar), DEVMETHOD(bus_write_ivar, acpi_pci_write_ivar), DEVMETHOD(bus_child_deleted, acpi_pci_child_deleted), DEVMETHOD(bus_child_location, acpi_pci_child_location_method), DEVMETHOD(bus_get_device_path, acpi_pci_get_device_path), DEVMETHOD(bus_get_cpus, acpi_get_cpus), DEVMETHOD(bus_get_dma_tag, acpi_pci_get_dma_tag), DEVMETHOD(bus_get_domain, acpi_pci_get_domain), /* PCI interface */ DEVMETHOD(pci_alloc_devinfo, acpi_pci_alloc_devinfo), DEVMETHOD(pci_child_added, acpi_pci_child_added), DEVMETHOD(pci_set_powerstate, acpi_pci_set_powerstate_method), DEVMETHOD_END }; DEFINE_CLASS_1(pci, acpi_pci_driver, acpi_pci_methods, sizeof(struct pci_softc), pci_driver); DRIVER_MODULE(acpi_pci, pcib, acpi_pci_driver, 0, 0); MODULE_DEPEND(acpi_pci, acpi, 1, 
1, 1); MODULE_DEPEND(acpi_pci, pci, 1, 1, 1); MODULE_VERSION(acpi_pci, 1); static struct pci_devinfo * acpi_pci_alloc_devinfo(device_t dev) { struct acpi_pci_devinfo *dinfo; dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO); return (&dinfo->ap_dinfo); } static int acpi_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct acpi_pci_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case ACPI_IVAR_HANDLE: *result = (uintptr_t)dinfo->ap_handle; return (0); case ACPI_IVAR_FLAGS: *result = (uintptr_t)dinfo->ap_flags; return (0); } return (pci_read_ivar(dev, child, which, result)); } static int acpi_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct acpi_pci_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case ACPI_IVAR_HANDLE: dinfo->ap_handle = (ACPI_HANDLE)value; return (0); case ACPI_IVAR_FLAGS: dinfo->ap_flags = (int)value; return (0); } return (pci_write_ivar(dev, child, which, value)); } static void acpi_pci_child_deleted(device_t dev, device_t child) { struct acpi_pci_devinfo *dinfo = device_get_ivars(child); if (acpi_get_device(dinfo->ap_handle) == child) AcpiDetachData(dinfo->ap_handle, acpi_fake_objhandler); pci_child_deleted(dev, child); } static int acpi_pci_child_location_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_pci_devinfo *dinfo = device_get_ivars(child); int pxm; pci_child_location_method(cbdev, child, sb); if (dinfo->ap_handle) { sbuf_printf(sb, " handle=%s", acpi_name(dinfo->ap_handle)); if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ap_handle, "_PXM", &pxm))) { sbuf_printf(sb, " _PXM=%d", pxm); } } return (0); } static int acpi_pci_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) return (acpi_get_acpi_device_path(bus, child, locator, sb)); /* Otherwise follow base class' actions */ return (pci_get_device_path_method(bus, child, locator, sb)); } /* * Fetch the NUMA domain for the given device 'dev'. * * If a device has a _PXM method, map that to a NUMA domain. * Otherwise, pass the request up to the parent. * If there's no matching domain or the domain cannot be * determined, return ENOENT. */ static int acpi_pci_get_domain(device_t dev, device_t child, int *domain) { int d; d = acpi_pxm_parse(child); if (d >= 0) { *domain = d; return (0); } if (d == -1) return (ENOENT); /* No _PXM node; go up a level */ return (bus_generic_get_domain(dev, child, domain)); } /* * PCI power management */ static int acpi_pci_set_powerstate_method(device_t dev, device_t child, int state) { ACPI_HANDLE h; ACPI_STATUS status; int old_state, error; error = 0; if (state < ACPI_STATE_D0 || state > ACPI_STATE_D3) return (EINVAL); /* * We set the state using PCI Power Management outside of setting * the ACPI state. This means that when powering down a device, we * first shut it down using PCI, and then using ACPI, which lets ACPI * try to power down any Power Resources that are now no longer used. * When powering up a device, we let ACPI set the state first so that * it can enable any needed Power Resources before changing the PCI * power state.
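 * In short: on the way down, pci_set_powerstate_method() is called before * acpi_pwr_switch_consumer(); on the way up, the two calls happen in the * reverse order.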
*/ ACPI_SERIAL_BEGIN(pci_powerstate); old_state = pci_get_powerstate(child); if (old_state < state && pci_do_power_suspend) { error = pci_set_powerstate_method(dev, child, state); if (error) goto out; } h = acpi_get_handle(child); status = acpi_pwr_switch_consumer(h, state); if (ACPI_SUCCESS(status)) { if (bootverbose) - device_printf(dev, "set ACPI power state D%d on %s\n", - state, acpi_name(h)); + device_printf(dev, "set ACPI power state %s on %s\n", + acpi_d_state_to_str(state), acpi_name(h)); } else if (status != AE_NOT_FOUND) device_printf(dev, - "failed to set ACPI power state D%d on %s: %s\n", - state, acpi_name(h), AcpiFormatException(status)); + "failed to set ACPI power state %s on %s: %s\n", + acpi_d_state_to_str(state), acpi_name(h), + AcpiFormatException(status)); if (old_state > state && pci_do_power_resume) error = pci_set_powerstate_method(dev, child, state); out: ACPI_SERIAL_END(pci_powerstate); return (error); } static void acpi_pci_update_device(ACPI_HANDLE handle, device_t pci_child) { ACPI_STATUS status; device_t child; /* * Occasionally a PCI device may show up as an ACPI device * with a _HID. (For example, the TabletPC TC1000 has a * second PCI-ISA bridge that has a _HID for an * acpi_sysresource device.) In that case, leave ACPI-CA's * device data pointing at the ACPI-enumerated device. */ child = acpi_get_device(handle); if (child != NULL) { KASSERT(device_get_parent(child) == devclass_get_device(devclass_find("acpi"), 0), ("%s: child (%s)'s parent is not acpi0", __func__, acpi_name(handle))); return; } /* * Update ACPI-CA to use the PCI enumerated device_t for this handle. */ status = AcpiAttachData(handle, acpi_fake_objhandler, pci_child); if (ACPI_FAILURE(status)) printf("WARNING: Unable to attach object data to %s - %s\n", acpi_name(handle), AcpiFormatException(status)); } static ACPI_STATUS acpi_pci_save_handle(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { struct acpi_pci_devinfo *dinfo; device_t child; int func, slot; UINT32 address; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); child = context; if (ACPI_FAILURE(acpi_GetInteger(handle, "_ADR", &address))) return_ACPI_STATUS (AE_OK); slot = ACPI_ADR_PCI_SLOT(address); func = ACPI_ADR_PCI_FUNC(address); dinfo = device_get_ivars(child); if (dinfo->ap_dinfo.cfg.func == func && dinfo->ap_dinfo.cfg.slot == slot) { dinfo->ap_handle = handle; acpi_pci_update_device(handle, child); return_ACPI_STATUS (AE_CTRL_TERMINATE); } return_ACPI_STATUS (AE_OK); } void acpi_pci_child_added(device_t dev, device_t child) { /* * PCI devices are added via the bus scan in the normal PCI * bus driver. As each device is added, the * acpi_pci_child_added() callback walks the ACPI namespace * under the bridge driver to save ACPI handles to all the * devices that appear in the ACPI namespace as immediate * descendants of the bridge. * * XXX: Sometimes PCI devices show up in the ACPI namespace that * pci_add_children() doesn't find. We currently just ignore * these devices. 
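 * * Note that the walk below is limited to a depth of 1, so only immediate * children of the bridge's handle are examined; each one is matched to the * new device by the slot/function encoded in its _ADR (see * acpi_pci_save_handle() above).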
*/ AcpiWalkNamespace(ACPI_TYPE_DEVICE, acpi_get_handle(dev), 1, acpi_pci_save_handle, NULL, child, NULL); } static int acpi_pci_probe(device_t dev) { if (acpi_get_handle(dev) == NULL) return (ENXIO); device_set_desc(dev, "ACPI PCI bus"); return (BUS_PROBE_DEFAULT); } static void acpi_pci_bus_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context) { device_t dev; dev = context; switch (notify) { case ACPI_NOTIFY_BUS_CHECK: bus_topo_lock(); BUS_RESCAN(dev); bus_topo_unlock(); break; default: device_printf(dev, "unknown notify %#x\n", notify); break; } } static void acpi_pci_device_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context) { device_t child, dev; ACPI_STATUS status; int error; dev = context; switch (notify) { case ACPI_NOTIFY_DEVICE_CHECK: bus_topo_lock(); BUS_RESCAN(dev); bus_topo_unlock(); break; case ACPI_NOTIFY_EJECT_REQUEST: child = acpi_get_device(h); if (child == NULL) { device_printf(dev, "no device to eject for %s\n", acpi_name(h)); return; } bus_topo_lock(); error = device_detach(child); if (error) { bus_topo_unlock(); device_printf(dev, "failed to detach %s: %d\n", device_get_nameunit(child), error); return; } if ((acpi_quirks & ACPI_Q_CLEAR_PME_ON_DETACH) && pci_has_pm(child)) pci_clear_pme(child); status = acpi_SetInteger(h, "_EJ0", 1); if (ACPI_FAILURE(status)) { bus_topo_unlock(); device_printf(dev, "failed to eject %s: %s\n", acpi_name(h), AcpiFormatException(status)); return; } if (acpi_quirks & ACPI_Q_DELAY_BEFORE_EJECT_RESCAN) DELAY(10 * 1000); BUS_RESCAN(dev); bus_topo_unlock(); break; default: device_printf(dev, "unknown notify %#x for %s\n", notify, acpi_name(h)); break; } } static ACPI_STATUS acpi_pci_install_device_notify_handler(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_HANDLE h; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiGetHandle(handle, "_EJ0", &h))) return_ACPI_STATUS (AE_OK); AcpiInstallNotifyHandler(handle, ACPI_SYSTEM_NOTIFY, acpi_pci_device_notify_handler, context); return_ACPI_STATUS (AE_OK); } static int acpi_pci_attach(device_t dev) { int error; error = pci_attach(dev); if (error) return (error); AcpiInstallNotifyHandler(acpi_get_handle(dev), ACPI_SYSTEM_NOTIFY, acpi_pci_bus_notify_handler, dev); AcpiWalkNamespace(ACPI_TYPE_DEVICE, acpi_get_handle(dev), 1, acpi_pci_install_device_notify_handler, NULL, dev, NULL); return (0); } static ACPI_STATUS acpi_pci_remove_notify_handler(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_HANDLE h; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiGetHandle(handle, "_EJ0", &h))) return_ACPI_STATUS (AE_OK); AcpiRemoveNotifyHandler(handle, ACPI_SYSTEM_NOTIFY, acpi_pci_device_notify_handler); return_ACPI_STATUS (AE_OK); } static int acpi_pci_detach(device_t dev) { AcpiWalkNamespace(ACPI_TYPE_DEVICE, acpi_get_handle(dev), 1, acpi_pci_remove_notify_handler, NULL, dev, NULL); AcpiRemoveNotifyHandler(acpi_get_handle(dev), ACPI_SYSTEM_NOTIFY, acpi_pci_bus_notify_handler); return (pci_detach(dev)); } #ifdef IOMMU static bus_dma_tag_t acpi_pci_get_dma_tag(device_t bus, device_t child) { bus_dma_tag_t tag; if (device_get_parent(child) == bus) { /* try iommu and return if it works */ tag = iommu_get_dma_tag(bus, child); } else tag = NULL; if (tag == NULL) tag = pci_get_dma_tag(bus, child); return (tag); } #else static bus_dma_tag_t acpi_pci_get_dma_tag(device_t bus, device_t child) { return (pci_get_dma_tag(bus, child)); } #endif diff --git a/sys/dev/acpica/acpi_powerres.c b/sys/dev/acpica/acpi_powerres.c index 
0f2a25b1d02b..29d1690f1bdd 100644 --- a/sys/dev/acpica/acpi_powerres.c +++ b/sys/dev/acpica/acpi_powerres.c @@ -1,736 +1,764 @@ /*- * Copyright (c) 2001 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_acpi.h" #include #include #include #include #include #include #include /* * ACPI power resource management. * * Power resource behaviour is slightly complicated by the fact that * a single power resource may provide power for more than one device. * Thus, we must track the device(s) being powered by a given power * resource, and only deactivate it when there are no powered devices. * * Note that this only manages resources for known devices. There is an * ugly case where we may turn off power to a device which is in use because * we don't know that it depends on a given resource. We should perhaps * try to be smarter about this, but a more complete solution would involve * scanning all of the ACPI namespace to find devices we're not currently * aware of, and this raises questions about whether they should be left * on, turned off, etc. */ static MALLOC_DEFINE(M_ACPIPWR, "acpipwr", "ACPI power resources"); /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_POWERRES ACPI_MODULE_NAME("POWERRES") /* Return values from _STA on a power resource */ #define ACPI_PWR_OFF 0 #define ACPI_PWR_ON 1 /* A relationship between a power resource and a consumer. */ struct acpi_powerreference { struct acpi_powerconsumer *ar_consumer; struct acpi_powerresource *ar_resource; TAILQ_ENTRY(acpi_powerreference) ar_rlink; /* link on resource list */ TAILQ_ENTRY(acpi_powerreference) ar_clink; /* link on consumer */ }; /* A power-managed device. */ struct acpi_powerconsumer { /* Device which is powered */ ACPI_HANDLE ac_consumer; int ac_state; TAILQ_ENTRY(acpi_powerconsumer) ac_link; TAILQ_HEAD(,acpi_powerreference) ac_references; }; /* A power resource. 
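 * Resources live on a single global list kept sorted by ascending * ResourceOrder (ap_order); see acpi_pwr_register_resource() and * acpi_pwr_switch_power() below.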
*/ struct acpi_powerresource { TAILQ_ENTRY(acpi_powerresource) ap_link; TAILQ_HEAD(,acpi_powerreference) ap_references; ACPI_HANDLE ap_resource; UINT64 ap_systemlevel; UINT64 ap_order; }; static TAILQ_HEAD(acpi_powerresource_list, acpi_powerresource) acpi_powerresources = TAILQ_HEAD_INITIALIZER(acpi_powerresources); static TAILQ_HEAD(acpi_powerconsumer_list, acpi_powerconsumer) acpi_powerconsumers = TAILQ_HEAD_INITIALIZER(acpi_powerconsumers); ACPI_SERIAL_DECL(powerres, "ACPI power resources"); static ACPI_STATUS acpi_pwr_register_consumer(ACPI_HANDLE consumer); #ifdef notyet static ACPI_STATUS acpi_pwr_deregister_consumer(ACPI_HANDLE consumer); #endif /* notyet */ static ACPI_STATUS acpi_pwr_register_resource(ACPI_HANDLE res); #ifdef notyet static ACPI_STATUS acpi_pwr_deregister_resource(ACPI_HANDLE res); #endif /* notyet */ static void acpi_pwr_reference_resource(ACPI_OBJECT *obj, void *arg); static int acpi_pwr_dereference_resource(struct acpi_powerconsumer *pc); static ACPI_STATUS acpi_pwr_switch_power(void); static struct acpi_powerresource *acpi_pwr_find_resource(ACPI_HANDLE res); static struct acpi_powerconsumer *acpi_pwr_find_consumer(ACPI_HANDLE consumer); /* * Register a power resource. * * It's OK to call this if we already know about the resource. */ static ACPI_STATUS acpi_pwr_register_resource(ACPI_HANDLE res) { ACPI_STATUS status; ACPI_BUFFER buf; ACPI_OBJECT *obj; struct acpi_powerresource *rp, *srp; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_SERIAL_ASSERT(powerres); rp = NULL; buf.Pointer = NULL; /* Look to see if we know about this resource */ if (acpi_pwr_find_resource(res) != NULL) return_ACPI_STATUS (AE_OK); /* already know about it */ /* Allocate a new resource */ if ((rp = malloc(sizeof(*rp), M_ACPIPWR, M_NOWAIT | M_ZERO)) == NULL) { status = AE_NO_MEMORY; goto out; } TAILQ_INIT(&rp->ap_references); rp->ap_resource = res; /* Get the Power Resource object */ buf.Length = ACPI_ALLOCATE_BUFFER; if (ACPI_FAILURE(status = AcpiEvaluateObject(res, NULL, NULL, &buf))) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "no power resource object\n")); goto out; } obj = buf.Pointer; if (obj->Type != ACPI_TYPE_POWER) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "questionable power resource object %s\n", acpi_name(res))); status = AE_TYPE; goto out; } rp->ap_systemlevel = obj->PowerResource.SystemLevel; rp->ap_order = obj->PowerResource.ResourceOrder; /* Sort the resource into the list */ status = AE_OK; srp = TAILQ_FIRST(&acpi_powerresources); if (srp == NULL || rp->ap_order < srp->ap_order) { TAILQ_INSERT_HEAD(&acpi_powerresources, rp, ap_link); goto done; } TAILQ_FOREACH(srp, &acpi_powerresources, ap_link) { if (rp->ap_order < srp->ap_order) { TAILQ_INSERT_BEFORE(srp, rp, ap_link); goto done; } } TAILQ_INSERT_TAIL(&acpi_powerresources, rp, ap_link); done: ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "registered power resource %s\n", acpi_name(res))); out: if (buf.Pointer != NULL) AcpiOsFree(buf.Pointer); if (ACPI_FAILURE(status) && rp != NULL) free(rp, M_ACPIPWR); return_ACPI_STATUS (status); } #ifdef notyet /* * Deregister a power resource. 
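 * * Fails with AE_BAD_PARAMETER if the resource is unknown or if any * consumer still holds a reference to it.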
*/ static ACPI_STATUS acpi_pwr_deregister_resource(ACPI_HANDLE res) { struct acpi_powerresource *rp; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_SERIAL_ASSERT(powerres); rp = NULL; /* Find the resource */ if ((rp = acpi_pwr_find_resource(res)) == NULL) return_ACPI_STATUS (AE_BAD_PARAMETER); /* Check that there are no consumers referencing this resource */ if (TAILQ_FIRST(&rp->ap_references) != NULL) return_ACPI_STATUS (AE_BAD_PARAMETER); /* Pull it off the list and free it */ TAILQ_REMOVE(&acpi_powerresources, rp, ap_link); free(rp, M_ACPIPWR); ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "deregistered power resource %s\n", acpi_name(res))); return_ACPI_STATUS (AE_OK); } #endif /* notyet */ /* * Register a power consumer. * * It's OK to call this if we already know about the consumer. */ static ACPI_STATUS acpi_pwr_register_consumer(ACPI_HANDLE consumer) { struct acpi_powerconsumer *pc; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_SERIAL_ASSERT(powerres); /* Check to see whether we know about this consumer already */ if (acpi_pwr_find_consumer(consumer) != NULL) return_ACPI_STATUS (AE_OK); /* Allocate a new power consumer */ if ((pc = malloc(sizeof(*pc), M_ACPIPWR, M_NOWAIT)) == NULL) return_ACPI_STATUS (AE_NO_MEMORY); TAILQ_INSERT_HEAD(&acpi_powerconsumers, pc, ac_link); TAILQ_INIT(&pc->ac_references); pc->ac_consumer = consumer; /* XXX we should try to find its current state */ pc->ac_state = ACPI_STATE_UNKNOWN; ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "registered power consumer %s\n", acpi_name(consumer))); return_ACPI_STATUS (AE_OK); } #ifdef notyet /* * Deregister a power consumer. * * This should only be done once the consumer has been powered off. * (XXX is this correct? Check once implemented) */ static ACPI_STATUS acpi_pwr_deregister_consumer(ACPI_HANDLE consumer) { struct acpi_powerconsumer *pc; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_SERIAL_ASSERT(powerres); /* Find the consumer */ if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) return_ACPI_STATUS (AE_BAD_PARAMETER); /* Make sure the consumer's not referencing anything right now */ if (TAILQ_FIRST(&pc->ac_references) != NULL) return_ACPI_STATUS (AE_BAD_PARAMETER); /* Pull the consumer off the list and free it */ TAILQ_REMOVE(&acpi_powerconsumers, pc, ac_link); free(pc, M_ACPIPWR); ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "deregistered power consumer %s\n", acpi_name(consumer))); return_ACPI_STATUS (AE_OK); } #endif /* notyet */ /* * Set a power consumer to a particular power state. */ ACPI_STATUS acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state) { struct acpi_powerconsumer *pc; ACPI_HANDLE method_handle, reslist_handle, pr0_handle; ACPI_BUFFER reslist_buffer; ACPI_OBJECT *reslist_object; ACPI_STATUS status; - char *method_name, *reslist_name; + char *method_name, *reslist_name = NULL; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* It's never ok to switch a non-existent consumer. */ if (consumer == NULL) return_ACPI_STATUS (AE_NOT_FOUND); reslist_buffer.Pointer = NULL; reslist_object = NULL; ACPI_SERIAL_BEGIN(powerres); /* Find the consumer */ if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) { if (ACPI_FAILURE(status = acpi_pwr_register_consumer(consumer))) goto out; if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) panic("acpi added power consumer but can't find it"); } - /* Check for valid transitions. We can only go to D0 from D3. */ + /* Stop here if we're already at the target D-state. 
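+ * (A repeated request for the current state is thus a no-op that + * returns AE_OK.)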
*/ + if (pc->ac_state == state) { + status = AE_OK; + goto out; + } + + /* + * Check for valid transitions. From D3hot or D3cold, we can only go to D0. + * The exception to this is going from D3hot to D3cold or the other way + * around. This is because they both use _PS3, so the only difference when + * doing these transitions is whether or not the power resources for _PR3 + * are on for devices which support D3cold, and turning these power + * resources on/off is always perfectly fine (ACPI 7.3.11). + */ status = AE_BAD_PARAMETER; - if (pc->ac_state == ACPI_STATE_D3 && state != ACPI_STATE_D0) + if (pc->ac_state == ACPI_STATE_D3_HOT && state != ACPI_STATE_D0 && + state != ACPI_STATE_D3_COLD) + goto out; + if (pc->ac_state == ACPI_STATE_D3_COLD && state != ACPI_STATE_D0 && + state != ACPI_STATE_D3_HOT) goto out; /* Find transition mechanism(s) */ switch (state) { case ACPI_STATE_D0: method_name = "_PS0"; reslist_name = "_PR0"; break; case ACPI_STATE_D1: method_name = "_PS1"; reslist_name = "_PR1"; break; case ACPI_STATE_D2: method_name = "_PS2"; reslist_name = "_PR2"; break; - case ACPI_STATE_D3: + case ACPI_STATE_D3_HOT: method_name = "_PS3"; reslist_name = "_PR3"; break; + case ACPI_STATE_D3_COLD: + method_name = "_PS3"; + reslist_name = NULL; + break; default: goto out; } - ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "setup to switch %s D%d -> D%d\n", - acpi_name(consumer), pc->ac_state, state)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "setup to switch %s %s -> %s\n", + acpi_name(consumer), acpi_d_state_to_str(pc->ac_state), + acpi_d_state_to_str(state))); /* * Verify that this state is supported, ie. one of method or * reslist must be present. We need to do this before we go * dereferencing resources (since we might be trying to go to * a state we don't support). * * Note that if any states are supported, the device has to * support D0 and D3. It's never an error to try to go to * D0. */ if (ACPI_FAILURE(AcpiGetHandle(consumer, method_name, &method_handle))) method_handle = NULL; - if (ACPI_FAILURE(AcpiGetHandle(consumer, reslist_name, &reslist_handle))) + if (reslist_name == NULL || + ACPI_FAILURE(AcpiGetHandle(consumer, reslist_name, &reslist_handle))) reslist_handle = NULL; if (reslist_handle == NULL && method_handle == NULL) { if (state == ACPI_STATE_D0) { pc->ac_state = ACPI_STATE_D0; status = AE_OK; goto out; } - if (state != ACPI_STATE_D3) { + if (state == ACPI_STATE_D3_COLD) + state = ACPI_STATE_D3_HOT; + if (state != ACPI_STATE_D3_HOT) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, - "attempt to set unsupported state D%d\n", state)); + "attempt to set unsupported state %s\n", + acpi_d_state_to_str(state))); goto out; } /* * Turn off the resources listed in _PR0 to go to D3. If there is * no _PR0 method, this object doesn't support ACPI power states. 
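 * _PR0 is evaluated here only to confirm that the device supports * D-states at all; the resources themselves are released by the * dereference and switch pass further down.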
*/ if (ACPI_FAILURE(AcpiGetHandle(consumer, "_PR0", &pr0_handle))) { status = AE_NOT_FOUND; ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, - "device missing _PR0 (desired state was D%d)\n", state)); + "device missing _PR0 (desired state was %s)\n", + acpi_d_state_to_str(state))); goto out; } reslist_buffer.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(pr0_handle, NULL, NULL, &reslist_buffer); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, - "can't evaluate _PR0 for device %s, state D%d\n", - acpi_name(consumer), state)); + "can't evaluate _PR0 for device %s, state %s\n", + acpi_name(consumer), acpi_d_state_to_str(state))); goto out; } reslist_object = (ACPI_OBJECT *)reslist_buffer.Pointer; if (!ACPI_PKG_VALID(reslist_object, 1)) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, - "invalid package object for state D%d\n", state)); + "invalid package object for state %s\n", + acpi_d_state_to_str(state))); status = AE_TYPE; goto out; } AcpiOsFree(reslist_buffer.Pointer); reslist_buffer.Pointer = NULL; reslist_object = NULL; } /* * Check that we can actually fetch the list of power resources */ if (reslist_handle != NULL) { reslist_buffer.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(reslist_handle, NULL, NULL, &reslist_buffer); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "can't evaluate resource list %s\n", acpi_name(reslist_handle))); goto out; } reslist_object = (ACPI_OBJECT *)reslist_buffer.Pointer; if (reslist_object->Type != ACPI_TYPE_PACKAGE) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "resource list is not ACPI_TYPE_PACKAGE (%d)\n", reslist_object->Type)); status = AE_TYPE; goto out; } } /* * Now we are ready to switch, so kill off any current power * resource references. */ acpi_pwr_dereference_resource(pc); /* * Add new power resource references, if we have any. Traverse the * package that we got from evaluating reslist_handle, and look up each * of the resources that are referenced. */ if (reslist_object != NULL) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "referencing %d new resources\n", reslist_object->Package.Count)); acpi_ForeachPackageObject(reslist_object, acpi_pwr_reference_resource, pc); } /* * If we changed anything in the resource list, we need to run a switch * pass now. */ if (ACPI_FAILURE(status = acpi_pwr_switch_power())) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, - "failed to switch resources from %s to D%d\n", - acpi_name(consumer), state)); + "failed to switch resources from %s to %s\n", + acpi_name(consumer), acpi_d_state_to_str(state))); /* XXX is this appropriate? Should we return to previous state? */ goto out; } /* Invoke power state switch method (if present) */ if (method_handle != NULL) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "invoking state transition method %s\n", acpi_name(method_handle))); status = AcpiEvaluateObject(method_handle, NULL, NULL, NULL); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "failed to set state - %s\n", AcpiFormatException(status))); pc->ac_state = ACPI_STATE_UNKNOWN; /* XXX Should we return to previous state? 
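 * For now the consumer is left in ACPI_STATE_UNKNOWN, which the * transition check above accepts, so a later attempt can still be made.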
*/ goto out; } } /* Transition was successful */ pc->ac_state = state; status = AE_OK; out: ACPI_SERIAL_END(powerres); if (reslist_buffer.Pointer != NULL) AcpiOsFree(reslist_buffer.Pointer); return_ACPI_STATUS (status); } /* Enable or disable a power resource for wake */ ACPI_STATUS acpi_pwr_wake_enable(ACPI_HANDLE consumer, int enable) { ACPI_STATUS status; struct acpi_powerconsumer *pc; struct acpi_prw_data prw; int i; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (consumer == NULL) return (AE_BAD_PARAMETER); ACPI_SERIAL_BEGIN(powerres); if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) { if (ACPI_FAILURE(status = acpi_pwr_register_consumer(consumer))) goto out; if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) panic("acpi wake added power consumer but can't find it"); } status = AE_OK; if (acpi_parse_prw(consumer, &prw) != 0) goto out; for (i = 0; i < prw.power_res_count; i++) if (enable) acpi_pwr_reference_resource(&prw.power_res[i], pc); else acpi_pwr_dereference_resource(pc); if (prw.power_res_count > 0) acpi_pwr_switch_power(); out: ACPI_SERIAL_END(powerres); return (status); } /* * Called to create a reference between a power consumer and a power resource * identified in the object. */ static void acpi_pwr_reference_resource(ACPI_OBJECT *obj, void *arg) { struct acpi_powerconsumer *pc = (struct acpi_powerconsumer *)arg; struct acpi_powerreference *pr; struct acpi_powerresource *rp; ACPI_HANDLE res; ACPI_STATUS status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_SERIAL_ASSERT(powerres); res = acpi_GetReference(NULL, obj); if (res == NULL) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "can't create a power reference for object type %d\n", obj->Type)); return_VOID; } /* Create/look up the resource */ if (ACPI_FAILURE(status = acpi_pwr_register_resource(res))) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "couldn't register power resource %s - %s\n", obj->String.Pointer, AcpiFormatException(status))); return_VOID; } if ((rp = acpi_pwr_find_resource(res)) == NULL) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "power resource list corrupted\n")); return_VOID; } ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "found power resource %s\n", acpi_name(rp->ap_resource))); /* Create a reference between the consumer and resource */ if ((pr = malloc(sizeof(*pr), M_ACPIPWR, M_NOWAIT | M_ZERO)) == NULL) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "allocation failed for a power consumer reference\n")); return_VOID; } pr->ar_consumer = pc; pr->ar_resource = rp; TAILQ_INSERT_TAIL(&pc->ac_references, pr, ar_clink); TAILQ_INSERT_TAIL(&rp->ap_references, pr, ar_rlink); return_VOID; } static int acpi_pwr_dereference_resource(struct acpi_powerconsumer *pc) { struct acpi_powerreference *pr; int changed; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_SERIAL_ASSERT(powerres); changed = 0; while ((pr = TAILQ_FIRST(&pc->ac_references)) != NULL) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "removing reference to %s\n", acpi_name(pr->ar_resource->ap_resource))); TAILQ_REMOVE(&pr->ar_resource->ap_references, pr, ar_rlink); TAILQ_REMOVE(&pc->ac_references, pr, ar_clink); free(pr, M_ACPIPWR); changed = 1; } return (changed); } /* * Switch power resources to conform to the desired state. * * Consumers may have modified the power resource list in an arbitrary * fashion; we sweep it in sequence order. */ static ACPI_STATUS acpi_pwr_switch_power(void) { struct acpi_powerresource *rp; ACPI_STATUS status; int cur; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_SERIAL_ASSERT(powerres); /* * Sweep the list forwards turning things on. 
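 * Since registration keeps the list sorted by ResourceOrder, the on pass * honors the firmware-specified ordering; the off pass below walks the * same list in reverse.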
*/ TAILQ_FOREACH(rp, &acpi_powerresources, ap_link) { if (TAILQ_FIRST(&rp->ap_references) == NULL) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "%s has no references, not turning on\n", acpi_name(rp->ap_resource))); continue; } status = acpi_GetInteger(rp->ap_resource, "_STA", &cur); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "can't get status of %s - %d\n", acpi_name(rp->ap_resource), status)); /* XXX is this correct? Always switch if in doubt? */ continue; } /* * Switch if required. Note that we ignore the result of the switch * effort; we don't know what to do if it fails, so checking wouldn't * help much. */ if (cur != ACPI_PWR_ON) { status = AcpiEvaluateObject(rp->ap_resource, "_ON", NULL, NULL); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "failed to switch %s on - %s\n", acpi_name(rp->ap_resource), AcpiFormatException(status))); } else { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "switched %s on\n", acpi_name(rp->ap_resource))); } } else { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "%s is already on\n", acpi_name(rp->ap_resource))); } } /* Sweep the list backwards turning things off. */ TAILQ_FOREACH_REVERSE(rp, &acpi_powerresources, acpi_powerresource_list, ap_link) { if (TAILQ_FIRST(&rp->ap_references) != NULL) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "%s has references, not turning off\n", acpi_name(rp->ap_resource))); continue; } status = acpi_GetInteger(rp->ap_resource, "_STA", &cur); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "can't get status of %s - %d\n", acpi_name(rp->ap_resource), status)); /* XXX is this correct? Always switch if in doubt? */ continue; } /* * Switch if required. Note that we ignore the result of the switch * effort; we don't know what to do if it fails, so checking wouldn't * help much. */ if (cur != ACPI_PWR_OFF) { status = AcpiEvaluateObject(rp->ap_resource, "_OFF", NULL, NULL); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "failed to switch %s off - %s\n", acpi_name(rp->ap_resource), AcpiFormatException(status))); } else { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "switched %s off\n", acpi_name(rp->ap_resource))); } } else { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "%s is already off\n", acpi_name(rp->ap_resource))); } } return_ACPI_STATUS (AE_OK); } /* * Find a power resource's control structure. */ static struct acpi_powerresource * acpi_pwr_find_resource(ACPI_HANDLE res) { struct acpi_powerresource *rp; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_SERIAL_ASSERT(powerres); TAILQ_FOREACH(rp, &acpi_powerresources, ap_link) { if (rp->ap_resource == res) break; } return_PTR (rp); } /* * Find a power consumer's control structure. */ static struct acpi_powerconsumer * acpi_pwr_find_consumer(ACPI_HANDLE consumer) { struct acpi_powerconsumer *pc; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_SERIAL_ASSERT(powerres); TAILQ_FOREACH(pc, &acpi_powerconsumers, ac_link) { if (pc->ac_consumer == consumer) break; } return_PTR (pc); } diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h index 106ec9038820..39432731cbcc 100644 --- a/sys/dev/acpica/acpivar.h +++ b/sys/dev/acpica/acpivar.h @@ -1,626 +1,635 @@ /*- * Copyright (c) 2000 Mitsuru IWASAKI * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _ACPIVAR_H_ #define _ACPIVAR_H_ #ifdef _KERNEL #include "acpi_if.h" #include "bus_if.h" #include #ifdef INTRNG #include #endif #include #include #include #include #include #include #include #include struct apm_clone_data; struct acpi_softc { device_t acpi_dev; struct cdev *acpi_dev_t; int acpi_enabled; int acpi_sstate; int acpi_sleep_disabled; struct sysctl_ctx_list acpi_sysctl_ctx; struct sysctl_oid *acpi_sysctl_tree; int acpi_power_button_sx; int acpi_sleep_button_sx; int acpi_lid_switch_sx; int acpi_standby_sx; int acpi_suspend_sx; int acpi_sleep_delay; int acpi_s4bios; int acpi_do_disable; int acpi_verbose; int acpi_handle_reboot; vm_offset_t acpi_wakeaddr; vm_paddr_t acpi_wakephys; int acpi_next_sstate; /* Next suspend Sx state. */ struct apm_clone_data *acpi_clone; /* Pseudo-dev for devd(8). */ STAILQ_HEAD(,apm_clone_data) apm_cdevs; /* All apm/apmctl/acpi cdevs. */ struct callout susp_force_to; /* Force suspend if no acks. */ /* System Resources */ struct resource_list sysres_rl; }; struct acpi_device { /* ACPI ivars */ ACPI_HANDLE ad_handle; void *ad_private; int ad_flags; int ad_cls_class; int ad_domain; ACPI_BUFFER dsd; /* Device Specific Data */ const ACPI_OBJECT *dsd_pkg; /* Resources */ struct resource_list ad_rl; }; #ifdef INTRNG struct intr_map_data_acpi { struct intr_map_data hdr; u_int irq; u_int pol; u_int trig; }; #endif /* Track device (/dev/{apm,apmctl} and /dev/acpi) notification status. */ struct apm_clone_data { STAILQ_ENTRY(apm_clone_data) entries; struct cdev *cdev; int flags; #define ACPI_EVF_NONE 0 /* /dev/apm semantics */ #define ACPI_EVF_DEVD 1 /* /dev/acpi is handled via devd(8) */ #define ACPI_EVF_WRITE 2 /* Device instance is opened writable. */ int notify_status; #define APM_EV_NONE 0 /* Device not yet aware of pending sleep. */ #define APM_EV_NOTIFIED 1 /* Device saw next sleep state. */ #define APM_EV_ACKED 2 /* Device agreed sleep can occur. */ struct acpi_softc *acpi_sc; struct selinfo sel_read; }; #define ACPI_PRW_MAX_POWERRES 8 struct acpi_prw_data { ACPI_HANDLE gpe_handle; int gpe_bit; int lowest_wake; ACPI_OBJECT power_res[ACPI_PRW_MAX_POWERRES]; int power_res_count; }; /* Flags for each device defined in the AML namespace. */ #define ACPI_FLAG_WAKE_ENABLED 0x1 /* Macros for extracting parts of a PCI address from an _ADR value. 
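 * The high word of _ADR holds the device (slot) number and the low word * the function number; e.g. a (hypothetical) _ADR of 0x001c0002 decodes * to slot 0x1c, function 2.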
*/ #define ACPI_ADR_PCI_SLOT(adr) (((adr) & 0xffff0000) >> 16) #define ACPI_ADR_PCI_FUNC(adr) ((adr) & 0xffff) /* * Entry points to ACPI from above are global functions defined in this * file, sysctls, and I/O on the control device. Entry points from below * are interrupts (the SCI), notifies, task queue threads, and the thermal * zone polling thread. * * ACPI tables and global shared data are protected by a global lock * (acpi_mutex). * * Each ACPI device can have its own driver-specific mutex for protecting * shared access to local data. The ACPI_LOCK macros handle mutexes. * * Drivers that need to serialize access to functions (e.g., to route * interrupts, get/set control paths, etc.) should use the sx lock macros * (ACPI_SERIAL). * * ACPI-CA handles its own locking and should not be called with locks held. * * The most complicated path is: * GPE -> EC runs _Qxx -> _Qxx reads EC space -> GPE */ extern struct mtx acpi_mutex; #define ACPI_LOCK(sys) mtx_lock(&sys##_mutex) #define ACPI_UNLOCK(sys) mtx_unlock(&sys##_mutex) #define ACPI_LOCK_ASSERT(sys) mtx_assert(&sys##_mutex, MA_OWNED); #define ACPI_LOCK_DECL(sys, name) \ static struct mtx sys##_mutex; \ MTX_SYSINIT(sys##_mutex, &sys##_mutex, name, MTX_DEF) #define ACPI_SERIAL_BEGIN(sys) sx_xlock(&sys##_sxlock) #define ACPI_SERIAL_END(sys) sx_xunlock(&sys##_sxlock) #define ACPI_SERIAL_ASSERT(sys) sx_assert(&sys##_sxlock, SX_XLOCKED); #define ACPI_SERIAL_DECL(sys, name) \ static struct sx sys##_sxlock; \ SX_SYSINIT(sys##_sxlock, &sys##_sxlock, name) /* * ACPI CA does not define layers for non-ACPI CA drivers. * We define some here within the range provided. */ #define ACPI_AC_ADAPTER 0x00010000 #define ACPI_BATTERY 0x00020000 #define ACPI_BUS 0x00040000 #define ACPI_BUTTON 0x00080000 #define ACPI_EC 0x00100000 #define ACPI_FAN 0x00200000 #define ACPI_POWERRES 0x00400000 #define ACPI_PROCESSOR 0x00800000 #define ACPI_THERMAL 0x01000000 #define ACPI_TIMER 0x02000000 #define ACPI_OEM 0x04000000 /* * Constants for different interrupt models used with acpi_SetIntrModel(). */ #define ACPI_INTR_PIC 0 #define ACPI_INTR_APIC 1 #define ACPI_INTR_SAPIC 2 /* * Various features and capabilities for the acpi_get_features() method. * In particular, these are used for the ACPI 3.0 _PDC and _OSC methods. * See the Intel document titled "Intel Processor Vendor-Specific ACPI", * number 302223-007. */ #define ACPI_CAP_PERF_MSRS (1 << 0) /* Intel SpeedStep PERF_CTL MSRs */ #define ACPI_CAP_C1_IO_HALT (1 << 1) /* Intel C1 "IO then halt" sequence */ #define ACPI_CAP_THR_MSRS (1 << 2) /* Intel OnDemand throttling MSRs */ #define ACPI_CAP_SMP_SAME (1 << 3) /* MP C1, Px, and Tx (all the same) */ #define ACPI_CAP_SMP_SAME_C3 (1 << 4) /* MP C2 and C3 (all the same) */ #define ACPI_CAP_SMP_DIFF_PX (1 << 5) /* MP Px (different, using _PSD) */ #define ACPI_CAP_SMP_DIFF_CX (1 << 6) /* MP Cx (different, using _CSD) */ #define ACPI_CAP_SMP_DIFF_TX (1 << 7) /* MP Tx (different, using _TSD) */ #define ACPI_CAP_SMP_C1_NATIVE (1 << 8) /* MP C1 support other than halt */ #define ACPI_CAP_SMP_C3_NATIVE (1 << 9) /* MP C2 and C3 support */ #define ACPI_CAP_PX_HW_COORD (1 << 11) /* Intel P-state HW coordination */ #define ACPI_CAP_INTR_CPPC (1 << 12) /* Native Interrupt Handling for Collaborative Processor Performance Control notifications */ #define ACPI_CAP_HW_DUTY_C (1 << 13) /* Hardware Duty Cycling */ /* * Quirk flags. * * ACPI_Q_BROKEN: Disables all ACPI support. * ACPI_Q_TIMER: Disables support for the ACPI timer. 
* ACPI_Q_MADT_IRQ0: Specifies that ISA IRQ 0 is wired up to pin 0 of the * first APIC and that the MADT should force that by ignoring the PC-AT * compatible flag and ignoring overrides that redirect IRQ 0 to pin 2. * ACPI_Q_AEI_NOPULL: Specifies that _AEI objects incorrectly designate pins * as "PullUp" and they should be treated as "NoPull" instead. * ACPI_Q_CLEAR_PME_ON_DETACH: Specifies that PCIM_PSTAT_(PME & ~PMEENABLE) * should be written to the power status register as part of ACPI Eject. * ACPI_Q_DELAY_BEFORE_EJECT_RESCAN: Specifies that we need a short (10ms) * delay after _EJ0 returns before rescanning the PCI bus. */ extern int acpi_quirks; #define ACPI_Q_OK 0 #define ACPI_Q_BROKEN (1 << 0) #define ACPI_Q_TIMER (1 << 1) #define ACPI_Q_MADT_IRQ0 (1 << 2) #define ACPI_Q_AEI_NOPULL (1 << 3) #define ACPI_Q_CLEAR_PME_ON_DETACH (1 << 4) #define ACPI_Q_DELAY_BEFORE_EJECT_RESCAN (1 << 5) #if defined(__amd64__) || defined(__i386__) /* * Certain Intel BIOSes have buggy AML that specify an IRQ that is * edge-sensitive and active-lo. Normally, edge-sensitive IRQs should * be active-hi. If this value is non-zero, edge-sensitive ISA IRQs * are forced to be active-hi instead. At least some AMD systems use * active-lo edge-sensitive ISA IRQs, so this setting is only enabled * by default on systems with Intel CPUs. */ extern int acpi_override_isa_irq_polarity; #endif /* * Plug and play information for device matching. Matching table format * is compatible with ids parameter of ACPI_ID_PROBE bus method. * * XXX: While ACPI_ID_PROBE matches against _HID and all _CIDs, current * acpi_pnpinfo() exports only _HID and first _CID. That means second * and further _CIDs should be added to both acpi_pnpinfo() and * ACPICOMPAT_PNP_INFO if device matching against them is required. */ #define ACPICOMPAT_PNP_INFO(t, busname) \ MODULE_PNP_INFO("Z:_HID", busname, t##hid, t, nitems(t)-1); \ MODULE_PNP_INFO("Z:_CID", busname, t##cid, t, nitems(t)-1); #define ACPI_PNP_INFO(t) ACPICOMPAT_PNP_INFO(t, acpi) /* * Note that the low ivar values are reserved to provide * interface compatibility with ISA drivers which can also * attach to ACPI. */ #define ACPI_IVAR_HANDLE 0x100 #define ACPI_IVAR_UNUSED 0x101 /* Unused/reserved. */ #define ACPI_IVAR_PRIVATE 0x102 #define ACPI_IVAR_FLAGS 0x103 #define ACPI_IVAR_DOMAIN 0x104 /* * ad_domain NUMA domain special value. */ #define ACPI_DEV_DOMAIN_UNKNOWN (-1) /* * Accessor functions for our ivars. Default value for BUS_READ_IVAR is * (type) 0. The accessor functions don't check return values. 
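 * For example, the (acpi, handle) instantiation below expands to * acpi_get_handle(dev) and acpi_set_handle(dev, h), thin wrappers around * BUS_READ_IVAR()/BUS_WRITE_IVAR() with ACPI_IVAR_HANDLE.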
*/ #define __ACPI_BUS_ACCESSOR(varp, var, ivarp, ivar, type) \ \ static __inline type varp ## _get_ ## var(device_t dev) \ { \ uintptr_t v = 0; \ BUS_READ_IVAR(device_get_parent(dev), dev, \ ivarp ## _IVAR_ ## ivar, &v); \ return ((type) v); \ } \ \ static __inline void varp ## _set_ ## var(device_t dev, type t) \ { \ uintptr_t v = (uintptr_t) t; \ BUS_WRITE_IVAR(device_get_parent(dev), dev, \ ivarp ## _IVAR_ ## ivar, v); \ } __ACPI_BUS_ACCESSOR(acpi, handle, ACPI, HANDLE, ACPI_HANDLE) __ACPI_BUS_ACCESSOR(acpi, private, ACPI, PRIVATE, void *) __ACPI_BUS_ACCESSOR(acpi, flags, ACPI, FLAGS, int) __ACPI_BUS_ACCESSOR(acpi, domain, ACPI, DOMAIN, int) void acpi_fake_objhandler(ACPI_HANDLE h, void *data); static __inline device_t acpi_get_device(ACPI_HANDLE handle) { void *dev = NULL; AcpiGetData(handle, acpi_fake_objhandler, &dev); return ((device_t)dev); } static __inline ACPI_OBJECT_TYPE acpi_get_type(device_t dev) { ACPI_HANDLE h; ACPI_OBJECT_TYPE t; if ((h = acpi_get_handle(dev)) == NULL) return (ACPI_TYPE_NOT_FOUND); if (ACPI_FAILURE(AcpiGetType(h, &t))) return (ACPI_TYPE_NOT_FOUND); return (t); } /* Find the difference between two PM tick counts. */ static __inline uint32_t acpi_TimerDelta(uint32_t end, uint32_t start) { if (end < start && (AcpiGbl_FADT.Flags & ACPI_FADT_32BIT_TIMER) == 0) end |= 0x01000000; return (end - start); } #ifdef ACPI_DEBUGGER void acpi_EnterDebugger(void); #endif #ifdef ACPI_DEBUG #include #define STEP(x) do {printf x, printf("\n"); cngetc();} while (0) #else #define STEP(x) #endif #define ACPI_VPRINT(dev, acpi_sc, x...) do { \ if (acpi_get_verbose(acpi_sc)) \ device_printf(dev, x); \ } while (0) /* Values for the first status word returned by _OSC. */ #define ACPI_OSC_FAILURE (1 << 1) #define ACPI_OSC_BAD_UUID (1 << 2) #define ACPI_OSC_BAD_REVISION (1 << 3) #define ACPI_OSC_CAPS_MASKED (1 << 4) #define ACPI_DEVINFO_PRESENT(x, flags) \ (((x) & (flags)) == (flags)) #define ACPI_DEVICE_PRESENT(x) \ ACPI_DEVINFO_PRESENT(x, ACPI_STA_DEVICE_PRESENT | \ ACPI_STA_DEVICE_FUNCTIONING) #define ACPI_BATTERY_PRESENT(x) \ ACPI_DEVINFO_PRESENT(x, ACPI_STA_DEVICE_PRESENT | \ ACPI_STA_DEVICE_FUNCTIONING | ACPI_STA_BATTERY_PRESENT) /* Callback function type for walking subtables within a table. 
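 * A handler of this type is passed to acpi_walk_subtables() together * with an opaque argument and is invoked once for each subtable found * between 'first' and 'end'.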
*/ typedef void acpi_subtable_handler(ACPI_SUBTABLE_HEADER *, void *); BOOLEAN acpi_DeviceIsPresent(device_t dev); BOOLEAN acpi_BatteryIsPresent(device_t dev); ACPI_STATUS acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result); ACPI_STATUS acpi_GetProperty(device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value); ACPI_BUFFER *acpi_AllocBuffer(int size); ACPI_STATUS acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number); ACPI_STATUS acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number); ACPI_STATUS acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number); ACPI_STATUS acpi_ForeachPackageObject(ACPI_OBJECT *obj, void (*func)(ACPI_OBJECT *comp, void *arg), void *arg); ACPI_STATUS acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp); ACPI_STATUS acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res); UINT64 acpi_DSMQuery(ACPI_HANDLE h, const uint8_t *uuid, int revision); ACPI_STATUS acpi_EvaluateDSM(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf); ACPI_STATUS acpi_EvaluateDSMTyped(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf, ACPI_OBJECT_TYPE type); ACPI_STATUS acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count, uint32_t *caps_in, uint32_t *caps_out, bool query); ACPI_STATUS acpi_OverrideInterruptLevel(UINT32 InterruptNumber); ACPI_STATUS acpi_SetIntrModel(int model); int acpi_ReqSleepState(struct acpi_softc *sc, int state); int acpi_AckSleepState(struct apm_clone_data *clone, int error); ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state); int acpi_wake_set_enable(device_t dev, int enable); int acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw); ACPI_STATUS acpi_Startup(void); void acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify); int acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas, struct resource **res, u_int flags); void acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler, void *arg); BOOLEAN acpi_has_hid(ACPI_HANDLE handle); int acpi_MatchHid(ACPI_HANDLE h, const char *hid); #define ACPI_MATCHHID_NOMATCH 0 #define ACPI_MATCHHID_HID 1 #define ACPI_MATCHHID_CID 2 static __inline bool acpi_HasProperty(device_t dev, ACPI_STRING propname) { return ACPI_SUCCESS(acpi_GetProperty(dev, propname, NULL)); } struct acpi_parse_resource_set { void (*set_init)(device_t dev, void *arg, void **context); void (*set_done)(device_t dev, void *context); void (*set_ioport)(device_t dev, void *context, uint64_t base, uint64_t length); void (*set_iorange)(device_t dev, void *context, uint64_t low, uint64_t high, uint64_t length, uint64_t align); void (*set_memory)(device_t dev, void *context, uint64_t base, uint64_t length); void (*set_memoryrange)(device_t dev, void *context, uint64_t low, uint64_t high, uint64_t length, uint64_t align); void (*set_irq)(device_t dev, void *context, uint8_t *irq, int count, int trig, int pol); void (*set_ext_irq)(device_t dev, void *context, uint32_t *irq, int count, int trig, int pol); void (*set_drq)(device_t dev, void *context, uint8_t *drq, int count); void (*set_start_dependent)(device_t dev, void *context, int preference); void (*set_end_dependent)(device_t dev, void *context); }; extern struct acpi_parse_resource_set acpi_res_parse_set; int acpi_identify(void); void acpi_config_intr(device_t dev, ACPI_RESOURCE *res); #ifdef INTRNG int 
acpi_map_intr(device_t dev, u_int irq, ACPI_HANDLE handle); #endif ACPI_STATUS acpi_lookup_irq_resource(device_t dev, int rid, struct resource *res, ACPI_RESOURCE *acpi_res); ACPI_STATUS acpi_parse_resources(device_t dev, ACPI_HANDLE handle, struct acpi_parse_resource_set *set, void *arg); /* ACPI event handling */ UINT32 acpi_event_power_button_sleep(void *context); UINT32 acpi_event_power_button_wake(void *context); UINT32 acpi_event_sleep_button_sleep(void *context); UINT32 acpi_event_sleep_button_wake(void *context); #define ACPI_EVENT_PRI_FIRST 0 #define ACPI_EVENT_PRI_DEFAULT 10000 #define ACPI_EVENT_PRI_LAST 20000 typedef void (*acpi_event_handler_t)(void *, int); EVENTHANDLER_DECLARE(acpi_sleep_event, acpi_event_handler_t); EVENTHANDLER_DECLARE(acpi_wakeup_event, acpi_event_handler_t); EVENTHANDLER_DECLARE(acpi_acad_event, acpi_event_handler_t); EVENTHANDLER_DECLARE(acpi_video_event, acpi_event_handler_t); /* Device power control. */ ACPI_STATUS acpi_pwr_wake_enable(ACPI_HANDLE consumer, int enable); ACPI_STATUS acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state); acpi_pwr_for_sleep_t acpi_device_pwr_for_sleep; int acpi_set_powerstate(device_t child, int state); /* APM emulation */ void acpi_apm_init(struct acpi_softc *); /* Misc. */ static __inline struct acpi_softc * acpi_device_get_parent_softc(device_t child) { device_t parent; parent = device_get_parent(child); if (parent == NULL) return (NULL); return (device_get_softc(parent)); } static __inline int acpi_get_verbose(struct acpi_softc *sc) { if (sc) return (sc->acpi_verbose); return (0); } +static __inline const char * +acpi_d_state_to_str(int state) +{ + const char *strs[ACPI_D_STATE_COUNT] = {"D0", "D1", "D2", "D3hot", "D3cold"}; + + MPASS(state >= ACPI_STATE_D0 && state <= ACPI_D_STATES_MAX); + return (strs[state]); +} + char *acpi_name(ACPI_HANDLE handle); int acpi_avoid(ACPI_HANDLE handle); int acpi_disabled(char *subsys); int acpi_get_acpi_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb); int acpi_machdep_init(device_t dev); void acpi_install_wakeup_handler(struct acpi_softc *sc); int acpi_sleep_machdep(struct acpi_softc *sc, int state); int acpi_wakeup_machdep(struct acpi_softc *sc, int state, int sleep_result, int intr_enabled); int acpi_table_quirks(int *quirks); int acpi_machdep_quirks(int *quirks); int acpi_pnpinfo(ACPI_HANDLE handle, struct sbuf *sb); uint32_t hpet_get_uid(device_t dev); /* Battery Abstraction. */ struct acpi_battinfo; int acpi_battery_register(device_t dev); int acpi_battery_remove(device_t dev); int acpi_battery_get_units(void); int acpi_battery_get_info_expire(void); int acpi_battery_bst_valid(struct acpi_bst *bst); int acpi_battery_bix_valid(struct acpi_bix *bix); int acpi_battery_get_battinfo(device_t dev, struct acpi_battinfo *info); /* Embedded controller. */ void acpi_ec_ecdt_probe(device_t); /* AC adapter interface. */ int acpi_acad_get_acline(int *); /* Package manipulation convenience functions. 
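 * A typical (hypothetical) usage pattern: after evaluating a method into * an ACPI_OBJECT *pkg, check ACPI_PKG_VALID(pkg, 2) and then extract the * elements with acpi_PkgInt32(pkg, 0, &val) and * acpi_PkgStr(pkg, 1, buf, sizeof(buf)).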
*/ #define ACPI_PKG_VALID(pkg, size) \ ((pkg) != NULL && (pkg)->Type == ACPI_TYPE_PACKAGE && \ (pkg)->Package.Count >= (size)) #define ACPI_PKG_VALID_EQ(pkg, size) \ ((pkg) != NULL && (pkg)->Type == ACPI_TYPE_PACKAGE && \ (pkg)->Package.Count == (size)) int acpi_PkgInt(ACPI_OBJECT *res, int idx, UINT64 *dst); int acpi_PkgInt32(ACPI_OBJECT *res, int idx, uint32_t *dst); int acpi_PkgInt16(ACPI_OBJECT *res, int idx, uint16_t *dst); int acpi_PkgStr(ACPI_OBJECT *res, int idx, void *dst, size_t size); int acpi_PkgGas(device_t dev, ACPI_OBJECT *res, int idx, int *type, int *rid, struct resource **dst, u_int flags); int acpi_PkgFFH_IntelCpu(ACPI_OBJECT *res, int idx, int *vendor, int *class, uint64_t *address, int *accsize); ACPI_HANDLE acpi_GetReference(ACPI_HANDLE scope, ACPI_OBJECT *obj); /* * Base level for BUS_ADD_CHILD. Special devices are added at orders less * than this, and normal devices at or above this level. This keeps the * probe order sorted so that things like sysresource are available before * their children need them. */ #define ACPI_DEV_BASE_ORDER 100 /* Default maximum number of tasks to enqueue. */ #ifndef ACPI_MAX_TASKS #define ACPI_MAX_TASKS MAX(32, MAXCPU * 4) #endif /* Default number of task queue threads to start. */ #ifndef ACPI_MAX_THREADS #define ACPI_MAX_THREADS 3 #endif /* Use the device logging level for ktr(4). */ #define KTR_ACPI KTR_DEV SYSCTL_DECL(_debug_acpi); /* * Parse and use proximity information in SRAT and SLIT. */ int acpi_pxm_init(int ncpus, vm_paddr_t maxphys); void acpi_pxm_parse_tables(void); void acpi_pxm_set_mem_locality(void); void acpi_pxm_set_cpu_locality(void); int acpi_pxm_get_cpu_locality(int apic_id); int acpi_pxm_parse(device_t dev); /* * Map a PXM to a VM domain. * * Returns the VM domain ID if found, or -1 if not found / invalid. */ int acpi_map_pxm_to_vm_domainid(int pxm); bus_get_cpus_t acpi_get_cpus; #ifdef __aarch64__ /* * ARM specific ACPI interfaces, relating to IORT table. */ int acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid); int acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, uint64_t *xref, u_int *devid); int acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm); int acpi_iort_map_named_msi(const char *devname, u_int rid, u_int *xref, u_int *devid); int acpi_iort_map_named_smmuv3(const char *devname, u_int rid, uint64_t *xref, u_int *devid); #endif #endif /* _KERNEL */ #endif /* !_ACPIVAR_H_ */ diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c index f94438cda041..4629165f34b2 100644 --- a/sys/dev/pci/pci.c +++ b/sys/dev/pci/pci.c @@ -1,7033 +1,7036 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include "opt_acpi.h" #include "opt_iommu.h" #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) #include #endif #include #include #include #include #ifdef PCI_IOV #include #include #endif #include #include #include #include #include +#include +#include + #include "pcib_if.h" #include "pci_if.h" #define PCIR_IS_BIOS(cfg, reg) \ (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \ ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1)) static device_probe_t pci_probe; static bus_reset_post_t pci_reset_post; static bus_reset_prepare_t pci_reset_prepare; static bus_reset_child_t pci_reset_child; static bus_hint_device_unit_t pci_hint_device_unit; static bus_remap_intr_t pci_remap_intr_method; static pci_get_id_t pci_get_id_method; static int pci_has_quirk(uint32_t devid, int quirk); static pci_addr_t pci_mapbase(uint64_t mapreg); static const char *pci_maptype(uint64_t mapreg); static int pci_maprange(uint64_t mapreg); static pci_addr_t pci_rombase(uint64_t mapreg); static int pci_romsize(uint64_t testval); static void pci_fixancient(pcicfgregs *cfg); static int pci_printf(pcicfgregs *cfg, const char *fmt, ...); static int pci_porten(device_t dev); static int pci_memen(device_t dev); static void pci_assign_interrupt(device_t bus, device_t dev, int force_route); static int pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, int force, int prefetch); static void pci_load_vendor_data(void); static int pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc); static char *pci_describe_device(device_t dev); static int pci_modevent(module_t mod, int what, void *arg); static void pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg); static void pci_read_cap(device_t pcib, pcicfgregs *cfg); static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data); #if 0 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data); #endif static void pci_read_vpd(device_t pcib, pcicfgregs *cfg); static void pci_mask_msix(device_t dev, u_int index); static void pci_unmask_msix(device_t dev, u_int index); static int pci_msi_blacklisted(void); static int pci_msix_blacklisted(void); static void pci_resume_msi(device_t dev); static void pci_resume_msix(device_t dev); static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f, uint16_t vid, uint16_t did); static device_method_t pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pci_probe), DEVMETHOD(device_attach, pci_attach), DEVMETHOD(device_detach, pci_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, pci_resume), /* Bus interface */ 
DEVMETHOD(bus_print_child, pci_print_child), DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch), DEVMETHOD(bus_read_ivar, pci_read_ivar), DEVMETHOD(bus_write_ivar, pci_write_ivar), DEVMETHOD(bus_driver_added, pci_driver_added), DEVMETHOD(bus_setup_intr, pci_setup_intr), DEVMETHOD(bus_teardown_intr, pci_teardown_intr), DEVMETHOD(bus_reset_prepare, pci_reset_prepare), DEVMETHOD(bus_reset_post, pci_reset_post), DEVMETHOD(bus_reset_child, pci_reset_child), DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag), DEVMETHOD(bus_get_resource_list,pci_get_resource_list), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, pci_delete_resource), DEVMETHOD(bus_alloc_resource, pci_alloc_resource), DEVMETHOD(bus_adjust_resource, pci_adjust_resource), DEVMETHOD(bus_release_resource, pci_release_resource), DEVMETHOD(bus_activate_resource, pci_activate_resource), DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource), DEVMETHOD(bus_map_resource, pci_map_resource), DEVMETHOD(bus_unmap_resource, pci_unmap_resource), DEVMETHOD(bus_child_deleted, pci_child_deleted), DEVMETHOD(bus_child_detached, pci_child_detached), DEVMETHOD(bus_child_pnpinfo, pci_child_pnpinfo_method), DEVMETHOD(bus_child_location, pci_child_location_method), DEVMETHOD(bus_get_device_path, pci_get_device_path_method), DEVMETHOD(bus_hint_device_unit, pci_hint_device_unit), DEVMETHOD(bus_remap_intr, pci_remap_intr_method), DEVMETHOD(bus_suspend_child, pci_suspend_child), DEVMETHOD(bus_resume_child, pci_resume_child), DEVMETHOD(bus_rescan, pci_rescan_method), /* PCI interface */ DEVMETHOD(pci_read_config, pci_read_config_method), DEVMETHOD(pci_write_config, pci_write_config_method), DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method), DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method), DEVMETHOD(pci_enable_io, pci_enable_io_method), DEVMETHOD(pci_disable_io, pci_disable_io_method), DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method), DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method), DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method), DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method), DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method), DEVMETHOD(pci_find_cap, pci_find_cap_method), DEVMETHOD(pci_find_next_cap, pci_find_next_cap_method), DEVMETHOD(pci_find_extcap, pci_find_extcap_method), DEVMETHOD(pci_find_next_extcap, pci_find_next_extcap_method), DEVMETHOD(pci_find_htcap, pci_find_htcap_method), DEVMETHOD(pci_find_next_htcap, pci_find_next_htcap_method), DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method), DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method), DEVMETHOD(pci_enable_msi, pci_enable_msi_method), DEVMETHOD(pci_enable_msix, pci_enable_msix_method), DEVMETHOD(pci_disable_msi, pci_disable_msi_method), DEVMETHOD(pci_remap_msix, pci_remap_msix_method), DEVMETHOD(pci_release_msi, pci_release_msi_method), DEVMETHOD(pci_msi_count, pci_msi_count_method), DEVMETHOD(pci_msix_count, pci_msix_count_method), DEVMETHOD(pci_msix_pba_bar, pci_msix_pba_bar_method), DEVMETHOD(pci_msix_table_bar, pci_msix_table_bar_method), DEVMETHOD(pci_get_id, pci_get_id_method), DEVMETHOD(pci_alloc_devinfo, pci_alloc_devinfo_method), DEVMETHOD(pci_child_added, pci_child_added_method), #ifdef PCI_IOV DEVMETHOD(pci_iov_attach, pci_iov_attach_method), DEVMETHOD(pci_iov_detach, pci_iov_detach_method), DEVMETHOD(pci_create_iov_child, pci_create_iov_child_method), #endif DEVMETHOD_END }; DEFINE_CLASS_0(pci, pci_driver, pci_methods, 
sizeof(struct pci_softc)); EARLY_DRIVER_MODULE(pci, pcib, pci_driver, pci_modevent, NULL, BUS_PASS_BUS); MODULE_VERSION(pci, 1); static char *pci_vendordata; static size_t pci_vendordata_size; struct pci_quirk { uint32_t devid; /* Vendor/device of the card */ int type; #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */ #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */ #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */ #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */ #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */ #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */ #define PCI_QUIRK_REALLOC_BAR 7 /* Can't allocate memory at the default address */ int arg1; int arg2; }; static const struct pci_quirk pci_quirks[] = { /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */ { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 }, { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 }, /* As does the Serverworks OSB4 (the SMBus mapping register) */ { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 }, /* * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge * or the CMIC-SL (AKA ServerWorks GC_LE). */ { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI doesn't work on earlier Intel chipsets including * E7500, E7501, E7505, 845, 865, 875/E7210, and 855. */ { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI doesn't work with devices behind the AMD 8131 HT-PCIX * bridge. */ { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * Some virtualization environments emulate an older chipset * but support MSI just fine. QEMU uses the Intel 82440. */ { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 }, /* * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus * controller depending on SoftPciRst register (PM_IO 0x55 [7]). * It prevents us from attaching hpet(4) when the bit is unset. * Note this quirk only affects SB600 revision A13 and earlier. * For SB600 A21 and later, firmware must set the bit to hide it. * For SB700 and later, it is unused and hardcoded to zero. */ { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 }, /* * Atheros AR8161/AR8162/E2200/E2400/E2500 Ethernet controllers have * a bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit * of the command register is set. */ { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0B11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't * issue MSI interrupts with PCIM_CMD_INTxDIS set either. */ { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */ { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */ { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */ { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */ { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */ { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */ /* * HPE Gen 10 VGA has a memory range that can't be allocated in the * expected place. 
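 * (As with the other entries in this table, the 32-bit devid packs the
 * device ID into the upper 16 bits and the vendor ID into the lower 16
 * bits, so 0x98741002 is device 0x9874 from vendor 0x1002 (AMD).)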
*/ { 0x98741002, PCI_QUIRK_REALLOC_BAR, 0, 0 }, { 0 } }; /* map register information */ #define PCI_MAPMEM 0x01 /* memory map */ #define PCI_MAPMEMP 0x02 /* prefetchable memory map */ #define PCI_MAPPORT 0x04 /* port map */ struct devlist pci_devq; uint32_t pci_generation; uint32_t pci_numdevs = 0; static int pcie_chipset, pcix_chipset; /* sysctl vars */ SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "PCI bus tuning parameters"); static int pci_enable_io_modes = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN, &pci_enable_io_modes, 1, "Enable I/O and memory bits in the config register. Some BIOSes do not" " enable these bits correctly. We'd like to do this all the time, but" " there are some peripherals that this causes problems with."); static int pci_do_realloc_bars = 1; SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN, &pci_do_realloc_bars, 0, "Attempt to allocate a new range for any BARs whose original " "firmware-assigned ranges fail to allocate during the initial device scan."); static int pci_do_power_nodriver = 0; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN, &pci_do_power_nodriver, 0, "Place a function into D3 state when no driver attaches to it. 0 means" " disable. 1 means conservatively place function into D3 state. 2 means" " aggressively place function into D3 state. 3 means put absolutely" " everything in D3 state."); int pci_do_power_resume = 1; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN, &pci_do_power_resume, 1, "Transition from D3 -> D0 on resume."); int pci_do_power_suspend = 1; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN, &pci_do_power_suspend, 1, "Transition from D0 -> D3 on suspend."); static int pci_do_msi = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1, "Enable support for MSI interrupts"); static int pci_do_msix = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1, "Enable support for MSI-X interrupts"); static int pci_msix_rewrite_table = 0; SYSCTL_INT(_hw_pci, OID_AUTO, msix_rewrite_table, CTLFLAG_RWTUN, &pci_msix_rewrite_table, 0, "Rewrite entire MSI-X table when updating MSI-X entries"); static int pci_honor_msi_blacklist = 1; SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN, &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X"); #if defined(__i386__) || defined(__amd64__) static int pci_usb_takeover = 1; #else static int pci_usb_takeover = 0; #endif SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN, &pci_usb_takeover, 1, "Enable early takeover of USB controllers. Disable this if you depend on" " BIOS emulation of USB devices, that is you use USB devices (like" " keyboard or mouse) but do not load USB drivers"); static int pci_clear_bars; SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0, "Ignore firmware-assigned resources for BARs."); static int pci_clear_buses; SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0, "Ignore firmware-assigned bus numbers."); static int pci_enable_ari = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari, 0, "Enable support for PCIe Alternative RID Interpretation"); /* * Some x86 firmware only enables PCIe hotplug if we claim to support aspm, * however enabling it breaks some arm64 firmware as it powers off devices. 
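 *
 * The default below is a loader(8) tunable; it can be overridden with,
 * e.g., hw.pci.enable_aspm="0" in loader.conf(5).
 */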
*/ #if defined(__i386__) || defined(__amd64__) int pci_enable_aspm = 1; #else int pci_enable_aspm = 0; #endif SYSCTL_INT(_hw_pci, OID_AUTO, enable_aspm, CTLFLAG_RDTUN, &pci_enable_aspm, 0, "Enable support for PCIe Active State Power Management"); static int pci_clear_aer_on_attach = 0; SYSCTL_INT(_hw_pci, OID_AUTO, clear_aer_on_attach, CTLFLAG_RWTUN, &pci_clear_aer_on_attach, 0, "Clear port and device AER state on driver attach"); static bool pci_enable_mps_tune = true; SYSCTL_BOOL(_hw_pci, OID_AUTO, enable_mps_tune, CTLFLAG_RWTUN, &pci_enable_mps_tune, 1, "Enable tuning of MPS(maximum payload size)." ); static bool pci_intx_reroute = true; SYSCTL_BOOL(_hw_pci, OID_AUTO, intx_reroute, CTLFLAG_RWTUN, &pci_intx_reroute, 0, "Re-route INTx interrupts when scanning devices"); static int pci_has_quirk(uint32_t devid, int quirk) { const struct pci_quirk *q; for (q = &pci_quirks[0]; q->devid; q++) { if (q->devid == devid && q->type == quirk) return (1); } return (0); } /* Find a device_t by bus/slot/function in domain 0 */ device_t pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func) { return (pci_find_dbsf(0, bus, slot, func)); } /* Find a device_t by domain/bus/slot/function */ device_t pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func) { struct pci_devinfo *dinfo = NULL; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if ((dinfo->cfg.domain == domain) && (dinfo->cfg.bus == bus) && (dinfo->cfg.slot == slot) && (dinfo->cfg.func == func)) { break; } } return (dinfo != NULL ? dinfo->cfg.dev : NULL); } /* Find a device_t by vendor/device ID */ device_t pci_find_device(uint16_t vendor, uint16_t device) { struct pci_devinfo *dinfo; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if ((dinfo->cfg.vendor == vendor) && (dinfo->cfg.device == device)) { return (dinfo->cfg.dev); } } return (NULL); } device_t pci_find_class(uint8_t class, uint8_t subclass) { struct pci_devinfo *dinfo; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if (dinfo->cfg.baseclass == class && dinfo->cfg.subclass == subclass) { return (dinfo->cfg.dev); } } return (NULL); } device_t pci_find_class_from(uint8_t class, uint8_t subclass, device_t from) { struct pci_devinfo *dinfo; bool found = false; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if (from != NULL && found == false) { if (from != dinfo->cfg.dev) continue; found = true; continue; } if (dinfo->cfg.baseclass == class && dinfo->cfg.subclass == subclass) { return (dinfo->cfg.dev); } } return (NULL); } device_t pci_find_base_class_from(uint8_t class, device_t from) { struct pci_devinfo *dinfo; bool found = false; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if (from != NULL && found == false) { if (from != dinfo->cfg.dev) continue; found = true; continue; } if (dinfo->cfg.baseclass == class) { return (dinfo->cfg.dev); } } return (NULL); } static int pci_printf(pcicfgregs *cfg, const char *fmt, ...) 
{ va_list ap; int retval; retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot, cfg->func); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } /* return base address of memory or port map */ static pci_addr_t pci_mapbase(uint64_t mapreg) { if (PCI_BAR_MEM(mapreg)) return (mapreg & PCIM_BAR_MEM_BASE); else return (mapreg & PCIM_BAR_IO_BASE); } /* return map type of memory or port map */ static const char * pci_maptype(uint64_t mapreg) { if (PCI_BAR_IO(mapreg)) return ("I/O Port"); if (mapreg & PCIM_BAR_MEM_PREFETCH) return ("Prefetchable Memory"); return ("Memory"); } /* return log2 of map size decoded for memory or port map */ int pci_mapsize(uint64_t testval) { int ln2size; testval = pci_mapbase(testval); ln2size = 0; if (testval != 0) { while ((testval & 1) == 0) { ln2size++; testval >>= 1; } } return (ln2size); } /* return base address of device ROM */ static pci_addr_t pci_rombase(uint64_t mapreg) { return (mapreg & PCIM_BIOS_ADDR_MASK); } /* return log2 of map size decoded for device ROM */ static int pci_romsize(uint64_t testval) { int ln2size; testval = pci_rombase(testval); ln2size = 0; if (testval != 0) { while ((testval & 1) == 0) { ln2size++; testval >>= 1; } } return (ln2size); } /* return log2 of address range supported by map register */ static int pci_maprange(uint64_t mapreg) { int ln2range = 0; if (PCI_BAR_IO(mapreg)) ln2range = 32; else switch (mapreg & PCIM_BAR_MEM_TYPE) { case PCIM_BAR_MEM_32: ln2range = 32; break; case PCIM_BAR_MEM_1MB: ln2range = 20; break; case PCIM_BAR_MEM_64: ln2range = 64; break; } return (ln2range); } /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */ static void pci_fixancient(pcicfgregs *cfg) { if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL) return; /* PCI to PCI bridges use header type 1 */ if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI) cfg->hdrtype = PCIM_HDRTYPE_BRIDGE; } /* extract header type specific config data */ static void pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg) { #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: cfg->subvendor = REG(PCIR_SUBVEND_0, 2); cfg->subdevice = REG(PCIR_SUBDEV_0, 2); cfg->mingnt = REG(PCIR_MINGNT, 1); cfg->maxlat = REG(PCIR_MAXLAT, 1); cfg->nummaps = PCI_MAXMAPS_0; break; case PCIM_HDRTYPE_BRIDGE: cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1); cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1); cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1); cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1); cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2); cfg->nummaps = PCI_MAXMAPS_1; break; case PCIM_HDRTYPE_CARDBUS: cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1); cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1); cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1); cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1); cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2); cfg->subvendor = REG(PCIR_SUBVEND_2, 2); cfg->subdevice = REG(PCIR_SUBDEV_2, 2); cfg->nummaps = PCI_MAXMAPS_2; break; } #undef REG } /* read configuration header into pcicfgregs structure */ struct pci_devinfo * pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f) { #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) uint16_t vid, did; vid = REG(PCIR_VENDOR, 2); if (vid == PCIV_INVALID) return (NULL); did = REG(PCIR_DEVICE, 2); return (pci_fill_devinfo(pcib, bus, d, b, s, f, vid, did)); } struct pci_devinfo * pci_alloc_devinfo_method(device_t dev) { return
(malloc(sizeof(struct pci_devinfo), M_DEVBUF, M_WAITOK | M_ZERO)); } static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f, uint16_t vid, uint16_t did) { struct pci_devinfo *devlist_entry; pcicfgregs *cfg; devlist_entry = PCI_ALLOC_DEVINFO(bus); cfg = &devlist_entry->cfg; cfg->domain = d; cfg->bus = b; cfg->slot = s; cfg->func = f; cfg->vendor = vid; cfg->device = did; cfg->cmdreg = REG(PCIR_COMMAND, 2); cfg->statreg = REG(PCIR_STATUS, 2); cfg->baseclass = REG(PCIR_CLASS, 1); cfg->subclass = REG(PCIR_SUBCLASS, 1); cfg->progif = REG(PCIR_PROGIF, 1); cfg->revid = REG(PCIR_REVID, 1); cfg->hdrtype = REG(PCIR_HDRTYPE, 1); cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1); cfg->lattimer = REG(PCIR_LATTIMER, 1); cfg->intpin = REG(PCIR_INTPIN, 1); cfg->intline = REG(PCIR_INTLINE, 1); cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0; cfg->hdrtype &= ~PCIM_MFDEV; STAILQ_INIT(&cfg->maps); cfg->iov = NULL; pci_fixancient(cfg); pci_hdrtypedata(pcib, b, s, f, cfg); if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) pci_read_cap(pcib, cfg); STAILQ_INSERT_TAIL(&pci_devq, devlist_entry, pci_links); devlist_entry->conf.pc_sel.pc_domain = cfg->domain; devlist_entry->conf.pc_sel.pc_bus = cfg->bus; devlist_entry->conf.pc_sel.pc_dev = cfg->slot; devlist_entry->conf.pc_sel.pc_func = cfg->func; devlist_entry->conf.pc_hdr = cfg->hdrtype; devlist_entry->conf.pc_subvendor = cfg->subvendor; devlist_entry->conf.pc_subdevice = cfg->subdevice; devlist_entry->conf.pc_vendor = cfg->vendor; devlist_entry->conf.pc_device = cfg->device; devlist_entry->conf.pc_class = cfg->baseclass; devlist_entry->conf.pc_subclass = cfg->subclass; devlist_entry->conf.pc_progif = cfg->progif; devlist_entry->conf.pc_revid = cfg->revid; pci_numdevs++; pci_generation++; return (devlist_entry); } #undef REG static void pci_ea_fill_info(device_t pcib, pcicfgregs *cfg) { #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, \ cfg->ea.ea_location + (n), w) int num_ent; int ptr; int a, b; uint32_t val; int ent_size; uint32_t dw[4]; uint64_t base, max_offset; struct pci_ea_entry *eae; if (cfg->ea.ea_location == 0) return; STAILQ_INIT(&cfg->ea.ea_entries); /* Determine the number of entries */ num_ent = REG(PCIR_EA_NUM_ENT, 2); num_ent &= PCIM_EA_NUM_ENT_MASK; /* Find the first entry to take care of */ ptr = PCIR_EA_FIRST_ENT; /* Skip DWORD 2 for type 1 functions */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) ptr += 4; for (a = 0; a < num_ent; a++) { eae = malloc(sizeof(*eae), M_DEVBUF, M_WAITOK | M_ZERO); eae->eae_cfg_offset = cfg->ea.ea_location + ptr; /* Read a number of dwords in the entry */ val = REG(ptr, 4); ptr += 4; ent_size = (val & PCIM_EA_ES); for (b = 0; b < ent_size; b++) { dw[b] = REG(ptr, 4); ptr += 4; } eae->eae_flags = val; eae->eae_bei = (PCIM_EA_BEI & val) >> PCIM_EA_BEI_OFFSET; base = dw[0] & PCIM_EA_FIELD_MASK; max_offset = dw[1] | ~PCIM_EA_FIELD_MASK; b = 2; if (((dw[0] & PCIM_EA_IS_64) != 0) && (b < ent_size)) { base |= (uint64_t)dw[b] << 32UL; b++; } if (((dw[1] & PCIM_EA_IS_64) != 0) && (b < ent_size)) { max_offset |= (uint64_t)dw[b] << 32UL; b++; } eae->eae_base = base; eae->eae_max_offset = max_offset; STAILQ_INSERT_TAIL(&cfg->ea.ea_entries, eae, eae_link); if (bootverbose) { printf("PCI(EA) dev %04x:%04x, bei %d, flags #%x, base #%jx, max_offset #%jx\n", cfg->vendor, cfg->device, eae->eae_bei, eae->eae_flags, (uintmax_t)eae->eae_base, (uintmax_t)eae->eae_max_offset); } } } #undef REG static void pci_read_cap(device_t pcib, pcicfgregs *cfg) { #define REG(n, w)
PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w) #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w) #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) uint64_t addr; #endif uint32_t val; int ptr, nextptr, ptrptr; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: case PCIM_HDRTYPE_BRIDGE: ptrptr = PCIR_CAP_PTR; break; case PCIM_HDRTYPE_CARDBUS: ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */ break; default: return; /* no extended capabilities support */ } nextptr = REG(ptrptr, 1); /* sanity check? */ /* * Read capability entries. */ while (nextptr != 0) { /* Sanity check */ if (nextptr > 255) { printf("illegal PCI extended capability offset %d\n", nextptr); return; } /* Find the next entry */ ptr = nextptr; nextptr = REG(ptr + PCICAP_NEXTPTR, 1); /* Process this entry */ switch (REG(ptr + PCICAP_ID, 1)) { case PCIY_PMG: /* PCI power management */ cfg->pp.pp_location = ptr; cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2); break; case PCIY_HT: /* HyperTransport */ /* Determine HT-specific capability type. */ val = REG(ptr + PCIR_HT_COMMAND, 2); if ((val & 0xe000) == PCIM_HTCAP_SLAVE) cfg->ht.ht_slave = ptr; #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) switch (val & PCIM_HTCMD_CAP_MASK) { case PCIM_HTCAP_MSI_MAPPING: if (!(val & PCIM_HTCMD_MSI_FIXED)) { /* Sanity check the mapping window. */ addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4); addr <<= 32; addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4); if (addr != MSI_INTEL_ADDR_BASE) device_printf(pcib, "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n", cfg->domain, cfg->bus, cfg->slot, cfg->func, (long long)addr); } else addr = MSI_INTEL_ADDR_BASE; cfg->ht.ht_msimap = ptr; cfg->ht.ht_msictrl = val; cfg->ht.ht_msiaddr = addr; break; } #endif break; case PCIY_MSI: /* PCI MSI */ cfg->msi.msi_location = ptr; cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2); break; case PCIY_MSIX: /* PCI MSI-X */ cfg->msix.msix_location = ptr; cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2); val = REG(ptr + PCIR_MSIX_TABLE, 4); cfg->msix.msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK; val = REG(ptr + PCIR_MSIX_PBA, 4); cfg->msix.msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK; break; case PCIY_VPD: /* PCI Vital Product Data */ cfg->vpd.vpd_reg = ptr; break; case PCIY_SUBVENDOR: /* Should always be true. */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) { val = REG(ptr + PCIR_SUBVENDCAP_ID, 4); cfg->subvendor = val & 0xffff; cfg->subdevice = val >> 16; } break; case PCIY_PCIX: /* PCI-X */ /* * Assume we have a PCI-X chipset if we have * at least one PCI-PCI bridge with a PCI-X * capability. Note that some systems with * PCI-express or HT chipsets might match on * this check as well. */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) pcix_chipset = 1; cfg->pcix.pcix_location = ptr; break; case PCIY_EXPRESS: /* PCI-express */ /* * Assume we have a PCI-express chipset if we have * at least one PCI-express device. */ pcie_chipset = 1; cfg->pcie.pcie_location = ptr; val = REG(ptr + PCIER_FLAGS, 2); cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE; break; case PCIY_EA: /* Enhanced Allocation */ cfg->ea.ea_location = ptr; pci_ea_fill_info(pcib, cfg); break; default: break; } } #if defined(__powerpc__) /* * Enable the MSI mapping window for all HyperTransport * slaves. 
PCI-PCI bridges have their windows enabled via * PCIB_MAP_MSI(). */ if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 && !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) { device_printf(pcib, "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n", cfg->domain, cfg->bus, cfg->slot, cfg->func); cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl, 2); } #endif /* REG and WREG use carry through to next functions */ } /* * PCI Vital Product Data */ #define PCI_VPD_TIMEOUT 1000000 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data) { int count = PCI_VPD_TIMEOUT; KASSERT((reg & 3) == 0, ("VPD register must be 4 byte aligned")); WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2); while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) { if (--count < 0) return (ENXIO); DELAY(1); /* limit looping */ } *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4)); return (0); } #if 0 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data) { int count = PCI_VPD_TIMEOUT; KASSERT((reg & 3) == 0, ("VPD register must be 4 byte aligned")); WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4); WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2); while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) { if (--count < 0) return (ENXIO); DELAY(1); /* limit looping */ } return (0); } #endif #undef PCI_VPD_TIMEOUT struct vpd_readstate { device_t pcib; pcicfgregs *cfg; uint32_t val; int bytesinval; int off; uint8_t cksum; }; /* return 0 and one byte in *data if no read error, -1 else */ static int vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data) { uint32_t reg; uint8_t byte; if (vrs->bytesinval == 0) { if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg)) return (-1); vrs->val = le32toh(reg); vrs->off += 4; byte = vrs->val & 0xff; vrs->bytesinval = 3; } else { vrs->val = vrs->val >> 8; byte = vrs->val & 0xff; vrs->bytesinval--; } vrs->cksum += byte; *data = byte; return (0); } /* return 0 on match, -1 and "unget" byte on no match */ static int vpd_expectbyte(struct vpd_readstate *vrs, uint8_t expected) { uint8_t data; if (vpd_nextbyte(vrs, &data) != 0) return (-1); if (data == expected) return (0); vrs->cksum -= data; vrs->val = (vrs->val << 8) + data; vrs->bytesinval++; return (-1); } /* return size if tag matches, -1 on no match, -2 on read error */ static int vpd_read_tag_size(struct vpd_readstate *vrs, uint8_t vpd_tag) { uint8_t byte1, byte2; if (vpd_expectbyte(vrs, vpd_tag) != 0) return (-1); if ((vpd_tag & 0x80) == 0) return (vpd_tag & 0x07); if (vpd_nextbyte(vrs, &byte1) != 0) return (-2); if (vpd_nextbyte(vrs, &byte2) != 0) return (-2); return ((byte2 << 8) + byte1); } /* (re)allocate buffer in multiples of 8 elements */ static void* alloc_buffer(void* buffer, size_t element_size, int needed) { int alloc, new_alloc; alloc = roundup2(needed, 8); new_alloc = roundup2(needed + 1, 8); if (alloc != new_alloc) { buffer = reallocf(buffer, new_alloc * element_size, M_DEVBUF, M_WAITOK | M_ZERO); } return (buffer); } /* read VPD keyword and return element size, return -1 on read error */ static int vpd_read_elem_head(struct vpd_readstate *vrs, char keyword[2]) { uint8_t data; if (vpd_nextbyte(vrs, &keyword[0]) != 0) return (-1); if (vpd_nextbyte(vrs, &keyword[1]) != 0) return (-1); if (vpd_nextbyte(vrs, &data) != 0) return (-1); return (data); } /* read VPD data element of given size into allocated buffer */ static char * vpd_read_value(struct vpd_readstate *vrs, int
size) { int i; char char1; char *value; value = malloc(size + 1, M_DEVBUF, M_WAITOK); for (i = 0; i < size; i++) { if (vpd_nextbyte(vrs, &char1) != 0) { free(value, M_DEVBUF); return (NULL); } value[i] = char1; } value[size] = '\0'; return (value); } /* read VPD into *keyword and *value, return length of data element */ static int vpd_read_elem_data(struct vpd_readstate *vrs, char keyword[2], char **value, int maxlen) { int len; len = vpd_read_elem_head(vrs, keyword); if (len < 0 || len > maxlen) return (-1); *value = vpd_read_value(vrs, len); return (len); } /* subtract all data following first byte from checksum of RV element */ static void vpd_fixup_cksum(struct vpd_readstate *vrs, char *rvstring, int len) { int i; uint8_t fixup; fixup = 0; for (i = 1; i < len; i++) fixup += rvstring[i]; vrs->cksum -= fixup; } /* fetch one read-only element and return size of heading + data */ static int next_vpd_ro_elem(struct vpd_readstate *vrs, int maxsize) { struct pcicfg_vpd *vpd; pcicfgregs *cfg; struct vpd_readonly *vpd_ros; int len; cfg = vrs->cfg; vpd = &cfg->vpd; if (maxsize < 3) return (-1); vpd->vpd_ros = alloc_buffer(vpd->vpd_ros, sizeof(*vpd->vpd_ros), vpd->vpd_rocnt); vpd_ros = &vpd->vpd_ros[vpd->vpd_rocnt]; maxsize -= 3; len = vpd_read_elem_data(vrs, vpd_ros->keyword, &vpd_ros->value, maxsize); if (vpd_ros->value == NULL) return (-1); vpd_ros->len = len; if (vpd_ros->keyword[0] == 'R' && vpd_ros->keyword[1] == 'V') { vpd_fixup_cksum(vrs, vpd_ros->value, len); if (vrs->cksum != 0) { pci_printf(cfg, "invalid VPD checksum %#hhx\n", vrs->cksum); return (-1); } } vpd->vpd_rocnt++; return (len + 3); } /* fetch one writable element and return size of heading + data */ static int next_vpd_rw_elem(struct vpd_readstate *vrs, int maxsize) { struct pcicfg_vpd *vpd; pcicfgregs *cfg; struct vpd_write *vpd_w; int len; cfg = vrs->cfg; vpd = &cfg->vpd; if (maxsize < 3) return (-1); vpd->vpd_w = alloc_buffer(vpd->vpd_w, sizeof(*vpd->vpd_w), vpd->vpd_wcnt); if (vpd->vpd_w == NULL) { pci_printf(cfg, "out of memory"); return (-1); } vpd_w = &vpd->vpd_w[vpd->vpd_wcnt]; maxsize -= 3; vpd_w->start = vrs->off + 3 - vrs->bytesinval; len = vpd_read_elem_data(vrs, vpd_w->keyword, &vpd_w->value, maxsize); if (vpd_w->value == NULL) return (-1); vpd_w->len = len; vpd->vpd_wcnt++; return (len + 3); } /* free all memory allocated for VPD data */ static void vpd_free(struct pcicfg_vpd *vpd) { int i; free(vpd->vpd_ident, M_DEVBUF); for (i = 0; i < vpd->vpd_rocnt; i++) free(vpd->vpd_ros[i].value, M_DEVBUF); free(vpd->vpd_ros, M_DEVBUF); vpd->vpd_rocnt = 0; for (i = 0; i < vpd->vpd_wcnt; i++) free(vpd->vpd_w[i].value, M_DEVBUF); free(vpd->vpd_w, M_DEVBUF); vpd->vpd_wcnt = 0; } #define VPD_TAG_END ((0x0f << 3) | 0) /* small tag, len == 0 */ #define VPD_TAG_IDENT (0x02 | 0x80) /* large tag */ #define VPD_TAG_RO (0x10 | 0x80) /* large tag */ #define VPD_TAG_RW (0x11 | 0x80) /* large tag */ static int pci_parse_vpd(device_t pcib, pcicfgregs *cfg) { struct vpd_readstate vrs; int cksumvalid; int size, elem_size; /* init vpd reader */ vrs.bytesinval = 0; vrs.off = 0; vrs.pcib = pcib; vrs.cfg = cfg; vrs.cksum = 0; /* read VPD ident element - mandatory */ size = vpd_read_tag_size(&vrs, VPD_TAG_IDENT); if (size <= 0) { pci_printf(cfg, "no VPD ident found\n"); return (0); } cfg->vpd.vpd_ident = vpd_read_value(&vrs, size); if (cfg->vpd.vpd_ident == NULL) { pci_printf(cfg, "error accessing VPD ident data\n"); return (0); } /* read VPD RO elements - mandatory */ size = vpd_read_tag_size(&vrs, VPD_TAG_RO); if (size <= 0) { pci_printf(cfg, 
"no read-only VPD data found\n"); return (0); } while (size > 0) { elem_size = next_vpd_ro_elem(&vrs, size); if (elem_size < 0) { pci_printf(cfg, "error accessing read-only VPD data\n"); return (-1); } size -= elem_size; } cksumvalid = (vrs.cksum == 0); if (!cksumvalid) return (-1); /* read VPD RW elements - optional */ size = vpd_read_tag_size(&vrs, VPD_TAG_RW); if (size == -2) return (-1); while (size > 0) { elem_size = next_vpd_rw_elem(&vrs, size); if (elem_size < 0) { pci_printf(cfg, "error accessing writeable VPD data\n"); return (-1); } size -= elem_size; } /* read empty END tag - mandatory */ size = vpd_read_tag_size(&vrs, VPD_TAG_END); if (size != 0) { pci_printf(cfg, "No valid VPD end tag found\n"); } return (0); } static void pci_read_vpd(device_t pcib, pcicfgregs *cfg) { int status; status = pci_parse_vpd(pcib, cfg); if (status < 0) vpd_free(&cfg->vpd); cfg->vpd.vpd_cached = 1; #undef REG #undef WREG } int pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(dev), cfg); *identptr = cfg->vpd.vpd_ident; if (*identptr == NULL) return (ENXIO); return (0); } int pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw, const char **vptr) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; int i; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(dev), cfg); for (i = 0; i < cfg->vpd.vpd_rocnt; i++) if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword, sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) { *vptr = cfg->vpd.vpd_ros[i].value; return (0); } *vptr = NULL; return (ENXIO); } struct pcicfg_vpd * pci_fetch_vpd_list(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg); return (&cfg->vpd); } /* * Find the requested HyperTransport capability and return the offset * in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. */ int pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg) { int ptr, error; uint16_t val; error = pci_find_cap(child, PCIY_HT, &ptr); if (error) return (error); /* * Traverse the capabilities list checking each HT capability * to see if it matches the requested HT capability. */ for (;;) { val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2); if (capability == PCIM_HTCAP_SLAVE || capability == PCIM_HTCAP_HOST) val &= 0xe000; else val &= PCIM_HTCMD_CAP_MASK; if (val == capability) { if (capreg != NULL) *capreg = ptr; return (0); } /* Skip to the next HT capability. */ if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0) break; } return (ENOENT); } /* * Find the next requested HyperTransport capability after start and return * the offset in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. */ int pci_find_next_htcap_method(device_t dev, device_t child, int capability, int start, int *capreg) { int ptr; uint16_t val; KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == PCIY_HT, ("start capability is not HyperTransport capability")); ptr = start; /* * Traverse the capabilities list checking each HT capability * to see if it matches the requested HT capability. */ for (;;) { /* Skip to the next HT capability. 
*/ if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0) break; val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2); if (capability == PCIM_HTCAP_SLAVE || capability == PCIM_HTCAP_HOST) val &= 0xe000; else val &= PCIM_HTCMD_CAP_MASK; if (val == capability) { if (capreg != NULL) *capreg = ptr; return (0); } } return (ENOENT); } /* * Find the requested capability and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_cap_method(device_t dev, device_t child, int capability, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t status; uint8_t ptr; int cnt; /* * Check the CAP_LIST bit of the PCI status register first. */ status = pci_read_config(child, PCIR_STATUS, 2); if (!(status & PCIM_STATUS_CAPPRESENT)) return (ENXIO); /* * Determine the start pointer of the capabilities list. */ switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: case PCIM_HDRTYPE_BRIDGE: ptr = PCIR_CAP_PTR; break; case PCIM_HDRTYPE_CARDBUS: ptr = PCIR_CAP_PTR_2; break; default: /* XXX: panic? */ return (ENXIO); /* no extended capabilities support */ } ptr = pci_read_config(child, ptr, 1); /* * Traverse the capabilities list. Limit by total theoretical * maximum number of caps: capability needs at least id and * next registers, and any type X header cannot contain caps. */ for (cnt = 0; ptr != 0 && cnt < (PCIE_REGMAX - 0x40) / 2; cnt++) { if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1); } return (ENOENT); } /* * Find the next requested capability after start and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_next_cap_method(device_t dev, device_t child, int capability, int start, int *capreg) { uint8_t ptr; KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == capability, ("start capability is not expected capability")); ptr = pci_read_config(child, start + PCICAP_NEXTPTR, 1); while (ptr != 0) { if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1); } return (ENOENT); } /* * Find the requested extended capability and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_extcap_method(device_t dev, device_t child, int capability, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t ecap; uint16_t ptr; /* Only supported for PCI-express devices. */ if (cfg->pcie.pcie_location == 0) return (ENXIO); ptr = PCIR_EXTCAP; ecap = pci_read_config(child, ptr, 4); if (ecap == 0xffffffff || ecap == 0) return (ENOENT); for (;;) { if (PCI_EXTCAP_ID(ecap) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = PCI_EXTCAP_NEXTPTR(ecap); if (ptr == 0) break; ecap = pci_read_config(child, ptr, 4); } return (ENOENT); } /* * Find the next requested extended capability after start and return the * offset in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. 
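 *
 * A minimal iteration sketch (hypothetical caller code; PCIZ_VENDOR
 * is used only as an example of a capability that can occur more
 * than once per device):
 *
 *	int ptr;
 *
 *	if (pci_find_extcap(dev, PCIZ_VENDOR, &ptr) == 0) {
 *		do {
 *			... examine the extended capability at 'ptr' ...
 *		} while (pci_find_next_extcap(dev, PCIZ_VENDOR,
 *		    ptr, &ptr) == 0);
 *	}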
*/ int pci_find_next_extcap_method(device_t dev, device_t child, int capability, int start, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t ecap; uint16_t ptr; /* Only supported for PCI-express devices. */ if (cfg->pcie.pcie_location == 0) return (ENXIO); ecap = pci_read_config(child, start, 4); KASSERT(PCI_EXTCAP_ID(ecap) == capability, ("start extended capability is not expected capability")); ptr = PCI_EXTCAP_NEXTPTR(ecap); while (ptr != 0) { ecap = pci_read_config(child, ptr, 4); if (PCI_EXTCAP_ID(ecap) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = PCI_EXTCAP_NEXTPTR(ecap); } return (ENOENT); } /* * Support for MSI-X message interrupts. */ static void pci_write_msix_entry(device_t dev, u_int index, uint64_t address, uint32_t data) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_table_offset + index * 16; bus_write_4(msix->msix_table_res, offset, address & 0xffffffff); bus_write_4(msix->msix_table_res, offset + 4, address >> 32); bus_write_4(msix->msix_table_res, offset + 8, data); } void pci_enable_msix_method(device_t dev, device_t child, u_int index, uint64_t address, uint32_t data) { if (pci_msix_rewrite_table) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; /* * Some VM hosts require MSIX to be disabled in the * control register before updates to the MSIX table * entries are allowed. It is not enough to only * disable MSIX while updating a single entry. MSIX * must be disabled while updating all entries in the * table. */ pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl & ~PCIM_MSIXCTRL_MSIX_ENABLE, 2); pci_resume_msix(child); } else pci_write_msix_entry(child, index, address, data); /* Enable MSI -> HT mapping. */ pci_ht_map_msi(child, address); } void pci_mask_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, val; KASSERT(PCI_MSIX_MSGNUM(msix->msix_ctrl) > index, ("bogus index")); offset = msix->msix_table_offset + index * 16 + 12; val = bus_read_4(msix->msix_table_res, offset); val |= PCIM_MSIX_VCTRL_MASK; /* * Some devices (e.g. Samsung PM961) do not support reads of this * register, so always write the new value. */ bus_write_4(msix->msix_table_res, offset, val); } void pci_unmask_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, val; KASSERT(PCI_MSIX_MSGNUM(msix->msix_ctrl) > index, ("bogus index")); offset = msix->msix_table_offset + index * 16 + 12; val = bus_read_4(msix->msix_table_res, offset); val &= ~PCIM_MSIX_VCTRL_MASK; /* * Some devices (e.g. Samsung PM961) do not support reads of this * register, so always write the new value. */ bus_write_4(msix->msix_table_res, offset, val); } int pci_pending_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, bit; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_pba_offset + (index / 32) * 4; bit = 1 << index % 32; return (bus_read_4(msix->msix_pba_res, offset) & bit); } /* * Restore MSI-X registers and table during resume. If MSI-X is * enabled then walk the virtual table to restore the actual MSI-X * table.
*/ static void pci_resume_msix(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct msix_table_entry *mte; struct msix_vector *mv; u_int i, msgnum; if (msix->msix_alloc > 0) { msgnum = PCI_MSIX_MSGNUM(msix->msix_ctrl); /* First, mask all vectors. */ for (i = 0; i < msgnum; i++) pci_mask_msix(dev, i); /* Second, program any messages with at least one handler. */ for (i = 0; i < msix->msix_table_len; i++) { mte = &msix->msix_table[i]; if (mte->mte_vector == 0 || mte->mte_handlers == 0) continue; mv = &msix->msix_vectors[mte->mte_vector - 1]; pci_write_msix_entry(dev, i, mv->mv_address, mv->mv_data); pci_unmask_msix(dev, i); } } pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl, 2); } /* * Attempt to allocate *count MSI-X messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at rid 1. */ int pci_alloc_msix_method(device_t dev, device_t child, int *count) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; u_int actual, i, max; int error, irq; uint16_t ctrl, msgnum; /* Don't let count == 0 get us into trouble. */ if (*count < 1) return (EINVAL); /* If rid 0 is allocated, then fail. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) return (ENXIO); /* Already have allocated messages? */ if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) return (ENXIO); /* If MSI-X is blacklisted for this system, fail. */ if (pci_msix_blacklisted()) return (ENXIO); /* MSI-X capability present? */ if (cfg->msix.msix_location == 0 || !pci_do_msix) return (ENODEV); /* Make sure the appropriate BARs are mapped. */ rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, cfg->msix.msix_table_bar); if (rle == NULL || rle->res == NULL || !(rman_get_flags(rle->res) & RF_ACTIVE)) return (ENXIO); cfg->msix.msix_table_res = rle->res; if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) { rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, cfg->msix.msix_pba_bar); if (rle == NULL || rle->res == NULL || !(rman_get_flags(rle->res) & RF_ACTIVE)) return (ENXIO); } cfg->msix.msix_pba_res = rle->res; ctrl = pci_read_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL, 2); msgnum = PCI_MSIX_MSGNUM(ctrl); if (bootverbose) device_printf(child, "attempting to allocate %d MSI-X vectors (%d supported)\n", *count, msgnum); max = min(*count, msgnum); for (i = 0; i < max; i++) { /* Allocate a message. */ error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq); if (error) { if (i == 0) return (error); break; } resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, irq, 1); } actual = i; if (bootverbose) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1); if (actual == 1) device_printf(child, "using IRQ %ju for MSI-X\n", rle->start); else { bool run; /* * Be fancy and try to print contiguous runs of * IRQ values as ranges. 'irq' is the previous IRQ. * 'run' is true if we are in a range. */ device_printf(child, "using IRQs %ju", rle->start); irq = rle->start; run = false; for (i = 1; i < actual; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); /* Still in a run? */ if (rle->start == irq + 1) { run = true; irq++; continue; } /* Finish previous range. */ if (run) { printf("-%d", irq); run = false; } /* Start new range. 
*/ printf(",%ju", rle->start); irq = rle->start; } /* Unfinished range? */ if (run) printf("-%d", irq); printf(" for MSI-X\n"); } } /* * Mask all vectors. Note that the message index assertion in * pci_mask_msix requires msix_ctrl to be set. */ cfg->msix.msix_ctrl = ctrl; for (i = 0; i < msgnum; i++) pci_mask_msix(child, i); /* Allocate and initialize vector data and virtual table. */ cfg->msix.msix_vectors = mallocarray(actual, sizeof(struct msix_vector), M_DEVBUF, M_WAITOK | M_ZERO); cfg->msix.msix_table = mallocarray(actual, sizeof(struct msix_table_entry), M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < actual; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); cfg->msix.msix_vectors[i].mv_irq = rle->start; cfg->msix.msix_table[i].mte_vector = i + 1; } /* Update control register to enable MSI-X. */ ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL, ctrl, 2); cfg->msix.msix_ctrl = ctrl; /* Update counts of alloc'd messages. */ cfg->msix.msix_alloc = actual; cfg->msix.msix_table_len = actual; *count = actual; return (0); } /* * By default, pci_alloc_msix() will assign the allocated IRQ * resources consecutively to the first N messages in the MSI-X table. * However, device drivers may want to use different layouts if they * either receive fewer messages than they asked for, or they wish to * populate the MSI-X table sparsely. This method allows the driver * to specify what layout it wants. It must be called after a * successful pci_alloc_msix() but before any of the associated * SYS_RES_IRQ resources are allocated via bus_alloc_resource(). * * The 'vectors' array contains 'count' message vectors. The array * maps directly to the MSI-X table in that index 0 in the array * specifies the vector for the first message in the MSI-X table, etc. * The vector value in each array index can either be 0 to indicate * that no vector should be assigned to a message slot, or it can be a * number from 1 to N (where N is the count returned from a * successful call to pci_alloc_msix()) to indicate which message * vector (IRQ) is to be used for the corresponding message. * * On successful return, each message with a non-zero vector will have * an associated SYS_RES_IRQ whose rid is equal to the array index + * 1. Additionally, if any of the IRQs allocated via the previous * call to pci_alloc_msix() are not used in the mapping, those IRQs * will be freed back to the system automatically. * * For example, suppose a driver has an MSI-X table with 6 messages and * asks for 6 messages, but pci_alloc_msix() only returns a count of * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and * C. After the call to pci_alloc_msix(), the device will be set up to * have an MSI-X table of ABC--- (where - means no vector assigned). * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 }, * then the MSI-X table will look like A-AB-B, and the 'C' vector will * be freed back to the system. This device will also have valid * SYS_RES_IRQ rids of 1, 3, 4, and 6. * * In any case, the SYS_RES_IRQ rid X will always map to the message * at MSI-X table index X - 1 and will only be valid if a vector is * assigned to that table entry.
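 *
 * In code, the example above corresponds roughly to the following
 * (hypothetical driver fragment; error handling omitted):
 *
 *	int count = 6, error;
 *	const u_int map[6] = { 1, 0, 1, 2, 0, 2 };
 *
 *	if (pci_alloc_msix(dev, &count) == 0 && count == 3)
 *		error = pci_remap_msix(dev, 6, map);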
*/ int pci_remap_msix_method(device_t dev, device_t child, int count, const u_int *vectors) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct resource_list_entry *rle; u_int i, irq, j; bool *used; /* * Have to have at least one message in the table but the * table can't be bigger than the actual MSI-X table in the * device. */ if (count < 1 || count > PCI_MSIX_MSGNUM(msix->msix_ctrl)) return (EINVAL); /* Sanity check the vectors. */ for (i = 0; i < count; i++) if (vectors[i] > msix->msix_alloc) return (EINVAL); /* * Make sure there aren't any holes in the vectors to be used. * It's a big pain to support it, and it doesn't really make * sense anyway. Also, at least one vector must be used. */ used = mallocarray(msix->msix_alloc, sizeof(*used), M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) if (vectors[i] != 0) used[vectors[i] - 1] = true; for (i = 0; i < msix->msix_alloc - 1; i++) if (!used[i] && used[i + 1]) { free(used, M_DEVBUF); return (EINVAL); } if (!used[0]) { free(used, M_DEVBUF); return (EINVAL); } /* Make sure none of the resources are allocated. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; if (msix->msix_table[i].mte_handlers > 0) { free(used, M_DEVBUF); return (EBUSY); } rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing resource")); if (rle->res != NULL) { free(used, M_DEVBUF); return (EBUSY); } } /* Free the existing resource list entries. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); } /* * Build the new virtual table keeping track of which vectors are * used. */ free(msix->msix_table, M_DEVBUF); msix->msix_table = mallocarray(count, sizeof(struct msix_table_entry), M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) msix->msix_table[i].mte_vector = vectors[i]; msix->msix_table_len = count; /* Free any unused IRQs and resize the vectors array if necessary. */ j = msix->msix_alloc - 1; if (!used[j]) { struct msix_vector *vec; while (!used[j]) { PCIB_RELEASE_MSIX(device_get_parent(dev), child, msix->msix_vectors[j].mv_irq); j--; } vec = mallocarray(j + 1, sizeof(struct msix_vector), M_DEVBUF, M_WAITOK); bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) * (j + 1)); free(msix->msix_vectors, M_DEVBUF); msix->msix_vectors = vec; msix->msix_alloc = j + 1; } free(used, M_DEVBUF); /* Map the IRQs onto the rids. */ for (i = 0; i < count; i++) { if (vectors[i] == 0) continue; irq = msix->msix_vectors[vectors[i] - 1].mv_irq; resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, irq, 1); } if (bootverbose) { device_printf(child, "Remapped MSI-X IRQs as: "); for (i = 0; i < count; i++) { if (i != 0) printf(", "); if (vectors[i] == 0) printf("---"); else printf("%d", msix->msix_vectors[vectors[i] - 1].mv_irq); } printf("\n"); } return (0); } static int pci_release_msix(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct resource_list_entry *rle; u_int i; /* Do we have any messages to release? */ if (msix->msix_alloc == 0) return (ENODEV); /* Make sure none of the resources are allocated. 
*/ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; if (msix->msix_table[i].mte_handlers > 0) return (EBUSY); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing resource")); if (rle->res != NULL) return (EBUSY); } /* Update control register to disable MSI-X. */ msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl, 2); /* Free the resource list entries. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); } free(msix->msix_table, M_DEVBUF); msix->msix_table_len = 0; /* Release the IRQs. */ for (i = 0; i < msix->msix_alloc; i++) PCIB_RELEASE_MSIX(device_get_parent(dev), child, msix->msix_vectors[i].mv_irq); free(msix->msix_vectors, M_DEVBUF); msix->msix_alloc = 0; return (0); } /* * Return the maximum number of MSI-X messages this device supports. * Basically, assuming the MD code can alloc messages, this function * should return the maximum value that pci_alloc_msix() can return. * Thus, it is subject to the tunables, etc. */ int pci_msix_count_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint16_t ctrl; if (pci_do_msix && msix->msix_location != 0) { ctrl = pci_read_config(child, msix->msix_location + PCIR_MSIX_CTRL, 2); return (PCI_MSIX_MSGNUM(ctrl)); } return (0); } int pci_msix_pba_bar_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_pba_bar); return (-1); } int pci_msix_table_bar_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_table_bar); return (-1); } /* * HyperTransport MSI mapping control */ void pci_ht_map_msi(device_t dev, uint64_t addr) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_ht *ht = &dinfo->cfg.ht; if (!ht->ht_msimap) return; if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) && ht->ht_msiaddr >> 20 == addr >> 20) { /* Enable MSI -> HT mapping. */ ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, ht->ht_msictrl, 2); } if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) { /* Disable MSI -> HT mapping.
*/ ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE; pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, ht->ht_msictrl, 2); } } int pci_get_relaxed_ordering_enabled(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_RELAXED_ORD_ENABLE; return (val != 0); } int pci_get_max_payload(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_MAX_PAYLOAD; val >>= 5; return (1 << (val + 7)); } int pci_get_max_read_req(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_MAX_READ_REQUEST; val >>= 12; return (1 << (val + 7)); } int pci_set_max_read_req(device_t dev, int size) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); if (size < 128) size = 128; if (size > 4096) size = 4096; size = (1 << (fls(size) - 1)); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= ~PCIEM_CTL_MAX_READ_REQUEST; val |= (fls(size) - 8) << 12; pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2); return (size); } uint32_t pcie_read_config(device_t dev, int reg, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) { if (width == 2) return (0xffff); return (0xffffffff); } return (pci_read_config(dev, cap + reg, width)); } void pcie_write_config(device_t dev, int reg, uint32_t value, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return; pci_write_config(dev, cap + reg, value, width); } /* * Adjusts a PCI-e capability register by clearing the bits in mask * and setting the bits in (value & mask). Bits not set in mask are * not adjusted. * * Returns the old value on success or all ones on failure. */ uint32_t pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint32_t old, new; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) { if (width == 2) return (0xffff); return (0xffffffff); } old = pci_read_config(dev, cap + reg, width); new = old & ~mask; new |= (value & mask); pci_write_config(dev, cap + reg, new, width); return (old); } /* * Support for MSI message signalled interrupts. */ void pci_enable_msi_method(device_t dev, device_t child, uint64_t address, uint16_t data) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; /* Write data and address values. */ pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR, address & 0xffffffff, 4); if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH, address >> 32, 4); pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT, data, 2); } else pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data, 2); /* Enable MSI in the control register. */ msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); /* Enable MSI -> HT mapping. 
*/ pci_ht_map_msi(child, address); } void pci_disable_msi_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; /* Disable MSI -> HT mapping. */ pci_ht_map_msi(child, 0); /* Disable MSI in the control register. */ msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); } /* * Restore MSI registers during resume. If MSI is enabled then * restore the data and address registers in addition to the control * register. */ static void pci_resume_msi(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msi *msi = &dinfo->cfg.msi; uint64_t address; uint16_t data; if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) { address = msi->msi_addr; data = msi->msi_data; pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR, address & 0xffffffff, 4); if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH, address >> 32, 4); pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT, data, 2); } else pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data, 2); } pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); } static int pci_remap_intr_method(device_t bus, device_t dev, u_int irq) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; struct msix_table_entry *mte; struct msix_vector *mv; uint64_t addr; uint32_t data; u_int i, j; int error; /* * Handle MSI first. We try to find this IRQ among our list * of MSI IRQs. If we find it, we request updated address and * data registers and apply the results. */ if (cfg->msi.msi_alloc > 0) { /* If we don't have any active handlers, nothing to do. */ if (cfg->msi.msi_handlers == 0) return (0); for (i = 0; i < cfg->msi.msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); if (rle->start == irq) { error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, &addr, &data); if (error) return (error); pci_disable_msi(dev); dinfo->cfg.msi.msi_addr = addr; dinfo->cfg.msi.msi_data = data; pci_enable_msi(dev, addr, data); return (0); } } return (ENOENT); } /* * For MSI-X, we check to see if we have this IRQ. If we do, * we request the updated mapping info. If that works, we go * through all the slots that use this IRQ and update them. */ if (cfg->msix.msix_alloc > 0) { bool found = false; for (i = 0; i < cfg->msix.msix_alloc; i++) { mv = &cfg->msix.msix_vectors[i]; if (mv->mv_irq == irq) { error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, &addr, &data); if (error) return (error); mv->mv_address = addr; mv->mv_data = data; for (j = 0; j < cfg->msix.msix_table_len; j++) { mte = &cfg->msix.msix_table[j]; if (mte->mte_vector != i + 1) continue; if (mte->mte_handlers == 0) continue; pci_mask_msix(dev, j); pci_enable_msix(dev, j, addr, data); pci_unmask_msix(dev, j); } found = true; } } return (found ? 0 : ENOENT); } return (ENOENT); } /* * Returns true if the specified device is blacklisted because MSI * doesn't work. */ int pci_msi_device_blacklisted(device_t dev) { if (!pci_honor_msi_blacklist) return (0); return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI)); } /* * Determine if MSI is blacklisted globally on this system. Currently, * we just check for blacklisted chipsets as represented by the * host-PCI bridge at device 0:0:0. 
In the future, it may become * necessary to check other system attributes, such as the kenv values * that give the motherboard manufacturer and model number. */ static int pci_msi_blacklisted(void) { device_t dev; if (!pci_honor_msi_blacklist) return (0); /* Blacklist all non-PCI-express and non-PCI-X chipsets. */ if (!(pcie_chipset || pcix_chipset)) { if (vm_guest != VM_GUEST_NO) { /* * Whitelist older chipsets in virtual * machines known to support MSI. */ dev = pci_find_bsf(0, 0, 0); if (dev != NULL) return (!pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_ENABLE_MSI_VM)); } return (1); } dev = pci_find_bsf(0, 0, 0); if (dev != NULL) return (pci_msi_device_blacklisted(dev)); return (0); } /* * Returns true if the specified device is blacklisted because MSI-X * doesn't work. Note that this assumes that if MSI doesn't work, * MSI-X doesn't either. */ int pci_msix_device_blacklisted(device_t dev) { if (!pci_honor_msi_blacklist) return (0); if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX)) return (1); return (pci_msi_device_blacklisted(dev)); } /* * Determine if MSI-X is blacklisted globally on this system. If MSI * is blacklisted, assume that MSI-X is as well. Check for additional * chipsets where MSI works but MSI-X does not. */ static int pci_msix_blacklisted(void) { device_t dev; if (!pci_honor_msi_blacklist) return (0); dev = pci_find_bsf(0, 0, 0); if (dev != NULL && pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX)) return (1); return (pci_msi_blacklisted()); } /* * Attempt to allocate *count MSI messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at rid 1. */ int pci_alloc_msi_method(device_t dev, device_t child, int *count) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; u_int actual, i; int error, irqs[32]; uint16_t ctrl, msgnum; /* Don't let count == 0 get us into trouble. */ if (*count < 1) return (EINVAL); /* If rid 0 is allocated, then fail. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) return (ENXIO); /* Already have allocated messages? */ if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) return (ENXIO); /* If MSI is blacklisted for this system, fail. */ if (pci_msi_blacklisted()) return (ENXIO); /* MSI capability present? */ if (cfg->msi.msi_location == 0 || !pci_do_msi) return (ENODEV); ctrl = pci_read_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, 2); msgnum = PCI_MSI_MSGNUM(ctrl); if (bootverbose) device_printf(child, "attempting to allocate %d MSI vectors (%u supported)\n", *count, msgnum); /* Don't ask for more than the device supports. */ actual = min(*count, msgnum); /* Don't ask for more than 32 messages. */ actual = min(actual, 32); /* MSI requires a power-of-2 number of messages. */ if (!powerof2(actual)) return (EINVAL); for (;;) { /* Try to allocate N messages. */ error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual, actual, irqs); if (error == 0) break; if (actual == 1) return (error); /* Try N / 2. */ actual >>= 1; } /* * We now have N actual messages mapped onto SYS_RES_IRQ * resources in the irqs[] array, so add new resources * starting at rid 1.
*/ for (i = 0; i < actual; i++) resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irqs[i], irqs[i], 1); if (bootverbose) { if (actual == 1) device_printf(child, "using IRQ %d for MSI\n", irqs[0]); else { bool run; /* * Be fancy and try to print contiguous runs * of IRQ values as ranges. 'run' is true if * we are in a range. */ device_printf(child, "using IRQs %d", irqs[0]); run = false; for (i = 1; i < actual; i++) { /* Still in a run? */ if (irqs[i] == irqs[i - 1] + 1) { run = true; continue; } /* Finish previous range. */ if (run) { printf("-%d", irqs[i - 1]); run = false; } /* Start new range. */ printf(",%d", irqs[i]); } /* Unfinished range? */ if (run) printf("-%d", irqs[actual - 1]); printf(" for MSI\n"); } } /* Update control register with actual count. */ ctrl &= ~PCIM_MSICTRL_MME_MASK; ctrl |= (ffs(actual) - 1) << 4; cfg->msi.msi_ctrl = ctrl; pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2); /* Update counts of alloc'd messages. */ cfg->msi.msi_alloc = actual; cfg->msi.msi_handlers = 0; *count = actual; return (0); } /* Release the MSI messages associated with this device. */ int pci_release_msi_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; struct resource_list_entry *rle; u_int i, irqs[32]; int error; /* Try MSI-X first. */ error = pci_release_msix(dev, child); if (error != ENODEV) return (error); /* Do we have any messages to release? */ if (msi->msi_alloc == 0) return (ENODEV); KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages")); /* Make sure none of the resources are allocated. */ if (msi->msi_handlers > 0) return (EBUSY); for (i = 0; i < msi->msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing MSI resource")); if (rle->res != NULL) return (EBUSY); irqs[i] = rle->start; } /* Update control register with 0 count. */ KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE), ("%s: MSI still enabled", __func__)); msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); /* Release the messages. */ PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs); for (i = 0; i < msi->msi_alloc; i++) resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); /* Update alloc count. */ msi->msi_alloc = 0; msi->msi_addr = 0; msi->msi_data = 0; return (0); } /* * Return the maximum number of MSI messages this device supports. * Basically, assuming the MD code can alloc messages, this function * should return the maximum value that pci_alloc_msi() can return. * Thus, it is subject to the tunables, etc.
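 * * As an illustrative sketch (the cap of 4 messages is an assumed * example value, and pci_alloc_msi() insists on a power-of-2 count): * *	count = min(pci_msi_count(dev), 4); *	if (count > 0 && pci_alloc_msi(dev, &count) == 0) *		... SYS_RES_IRQ rids 1 through count are now usable ...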
*/ int pci_msi_count_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; uint16_t ctrl; if (pci_do_msi && msi->msi_location != 0) { ctrl = pci_read_config(child, msi->msi_location + PCIR_MSI_CTRL, 2); return (PCI_MSI_MSGNUM(ctrl)); } return (0); } /* free pcicfgregs structure and all dependent data structures */ int pci_freecfg(struct pci_devinfo *dinfo) { struct devlist *devlist_head; struct pci_map *pm, *next; devlist_head = &pci_devq; if (dinfo->cfg.vpd.vpd_reg) vpd_free(&dinfo->cfg.vpd); STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) { free(pm, M_DEVBUF); } STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links); free(dinfo, M_DEVBUF); /* increment the generation count */ pci_generation++; /* we're losing one device */ pci_numdevs--; return (0); } /* * PCI power management */ int pci_set_powerstate_method(device_t dev, device_t child, int state) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint16_t status; int oldstate, highest, delay; if (cfg->pp.pp_location == 0) return (EOPNOTSUPP); /* * Optimize away a no-op state change request. While it would be OK * to write to the hardware in theory, some devices have shown odd * behavior when going from D3 -> D3. */ oldstate = pci_get_powerstate(child); if (oldstate == state) return (0); /* * The PCI power management specification states that after a state * transition between PCI power states, system software must * guarantee a minimal delay before the function accesses the device. * Compute the worst case delay that we need to guarantee before we * access the device. Many devices will be responsive much more * quickly than this delay, but there are some that don't respond * instantly to state changes. Transitions to/from D3 state require * 10ms, while D2 requires 200us, and D0/1 require none. The delay * is done below with DELAY rather than a sleeper function because * this function can be called from contexts where we cannot sleep. */ highest = (oldstate > state) ?
oldstate : state; if (highest == PCI_POWERSTATE_D3) delay = 10000; else if (highest == PCI_POWERSTATE_D2) delay = 200; else delay = 0; status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_location + PCIR_POWER_STATUS, 2) & ~PCIM_PSTAT_DMASK; switch (state) { case PCI_POWERSTATE_D0: status |= PCIM_PSTAT_D0; break; case PCI_POWERSTATE_D1: if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0) return (EOPNOTSUPP); status |= PCIM_PSTAT_D1; break; case PCI_POWERSTATE_D2: if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0) return (EOPNOTSUPP); status |= PCIM_PSTAT_D2; break; case PCI_POWERSTATE_D3: status |= PCIM_PSTAT_D3; break; default: return (EINVAL); } if (bootverbose) - pci_printf(cfg, "Transition from D%d to D%d\n", oldstate, - state); + pci_printf(cfg, "Transition from %s to %s\n", + acpi_d_state_to_str(oldstate), acpi_d_state_to_str(state)); PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_location + PCIR_POWER_STATUS, status, 2); if (delay) DELAY(delay); return (0); } int pci_get_powerstate_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint16_t status; int result; if (cfg->pp.pp_location != 0) { status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_location + PCIR_POWER_STATUS, 2); switch (status & PCIM_PSTAT_DMASK) { case PCIM_PSTAT_D0: result = PCI_POWERSTATE_D0; break; case PCIM_PSTAT_D1: result = PCI_POWERSTATE_D1; break; case PCIM_PSTAT_D2: result = PCI_POWERSTATE_D2; break; case PCIM_PSTAT_D3: result = PCI_POWERSTATE_D3; break; default: result = PCI_POWERSTATE_UNKNOWN; break; } } else { /* No support, device is always at D0 */ result = PCI_POWERSTATE_D0; } return (result); } /* Clear any active PME# and disable PME# generation. */ void pci_clear_pme(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; uint16_t status; if (cfg->pp.pp_location != 0) { status = pci_read_config(dev, dinfo->cfg.pp.pp_location + PCIR_POWER_STATUS, 2); status &= ~PCIM_PSTAT_PMEENABLE; status |= PCIM_PSTAT_PME; pci_write_config(dev, dinfo->cfg.pp.pp_location + PCIR_POWER_STATUS, status, 2); } } /* Clear any active PME# and enable PME# generation. */ void pci_enable_pme(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; uint16_t status; if (cfg->pp.pp_location != 0) { status = pci_read_config(dev, dinfo->cfg.pp.pp_location + PCIR_POWER_STATUS, 2); status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(dev, dinfo->cfg.pp.pp_location + PCIR_POWER_STATUS, status, 2); } } bool pci_has_pm(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; return (cfg->pp.pp_location != 0); } /* * Some convenience functions for PCI device drivers. 
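 * For illustration (a typical usage pattern, not a requirement of the * interface): a driver for a bus-mastering device would normally call * pci_enable_busmaster(dev) from its attach routine so that * PCIM_CMD_BUSMASTEREN is set before the device is programmed to * perform DMA.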
*/ static __inline void pci_set_command_bit(device_t dev, device_t child, uint16_t bit) { uint16_t command; command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); command |= bit; PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); } static __inline void pci_clear_command_bit(device_t dev, device_t child, uint16_t bit) { uint16_t command; command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); command &= ~bit; PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); } int pci_enable_busmaster_method(device_t dev, device_t child) { pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); return (0); } int pci_disable_busmaster_method(device_t dev, device_t child) { pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); return (0); } int pci_enable_io_method(device_t dev, device_t child, int space) { uint16_t bit; switch(space) { case SYS_RES_IOPORT: bit = PCIM_CMD_PORTEN; break; case SYS_RES_MEMORY: bit = PCIM_CMD_MEMEN; break; default: return (EINVAL); } pci_set_command_bit(dev, child, bit); return (0); } int pci_disable_io_method(device_t dev, device_t child, int space) { uint16_t bit; switch(space) { case SYS_RES_IOPORT: bit = PCIM_CMD_PORTEN; break; case SYS_RES_MEMORY: bit = PCIM_CMD_MEMEN; break; default: return (EINVAL); } pci_clear_command_bit(dev, child, bit); return (0); } /* * New style pci driver. Parent device is either a pci-host-bridge or a * pci-pci-bridge. Both kinds are represented by instances of pcib. */ void pci_print_verbose(struct pci_devinfo *dinfo) { if (bootverbose) { pcicfgregs *cfg = &dinfo->cfg; printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n", cfg->vendor, cfg->device, cfg->revid); printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n", cfg->domain, cfg->bus, cfg->slot, cfg->func); printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n", cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype, cfg->mfdev); printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n", cfg->cmdreg, cfg->statreg, cfg->cachelnsz); printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n", cfg->lattimer, cfg->lattimer * 30, cfg->mingnt, cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250); if (cfg->intpin > 0) printf("\tintpin=%c, irq=%d\n", cfg->intpin +'a' -1, cfg->intline); if (cfg->pp.pp_location) { uint16_t status; status = pci_read_config(cfg->dev, cfg->pp.pp_location + PCIR_POWER_STATUS, 2); printf("\tpowerspec %d supports D0%s%s D3 current D%d\n", cfg->pp.pp_cap & PCIM_PCAP_SPEC, cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "", cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "", status & PCIM_PSTAT_DMASK); } if (cfg->msi.msi_location) { uint16_t ctrl, msgnum; ctrl = cfg->msi.msi_ctrl; msgnum = PCI_MSI_MSGNUM(ctrl); printf("\tMSI supports %d message%s%s%s\n", msgnum, (msgnum == 1) ? "" : "s", (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "", (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":""); } if (cfg->msix.msix_location) { uint16_t msgnum; msgnum = PCI_MSIX_MSGNUM(cfg->msix.msix_ctrl); printf("\tMSI-X supports %d message%s ", msgnum, (msgnum == 1) ? 
"" : "s"); if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar) printf("in map 0x%x\n", cfg->msix.msix_table_bar); else printf("in maps 0x%x and 0x%x\n", cfg->msix.msix_table_bar, cfg->msix.msix_pba_bar); } } } static int pci_porten(device_t dev) { return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0; } static int pci_memen(device_t dev) { return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0; } void pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp, int *bar64) { struct pci_devinfo *dinfo; pci_addr_t map, testval; int ln2range; uint16_t cmd; /* * The device ROM BAR is special. It is always a 32-bit * memory BAR. Bit 0 is special and should not be set when * sizing the BAR. */ dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, reg)) { map = pci_read_config(dev, reg, 4); pci_write_config(dev, reg, 0xfffffffe, 4); testval = pci_read_config(dev, reg, 4); pci_write_config(dev, reg, map, 4); *mapp = map; *testvalp = testval; if (bar64 != NULL) *bar64 = 0; return; } map = pci_read_config(dev, reg, 4); ln2range = pci_maprange(map); if (ln2range == 64) map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; /* * Disable decoding via the command register before * determining the BAR's length since we will be placing it in * a weird state. */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); /* * Determine the BAR's length by writing all 1's. The bottom * log_2(size) bits of the BAR will stick as 0 when we read * the value back. * * NB: according to the PCI Local Bus Specification, rev. 3.0: * "Software writes 0FFFFFFFFh to both registers, reads them back, * and combines the result into a 64-bit value." (section 6.2.5.1) * * Writes to both registers must be performed before attempting to * read back the size value. */ testval = 0; pci_write_config(dev, reg, 0xffffffff, 4); if (ln2range == 64) { pci_write_config(dev, reg + 4, 0xffffffff, 4); testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; } testval |= pci_read_config(dev, reg, 4); /* * Restore the original value of the BAR. We may have reprogrammed * the BAR of the low-level console device and when booting verbose, * we need the console device addressable. */ pci_write_config(dev, reg, map, 4); if (ln2range == 64) pci_write_config(dev, reg + 4, map >> 32, 4); pci_write_config(dev, PCIR_COMMAND, cmd, 2); *mapp = map; *testvalp = testval; if (bar64 != NULL) *bar64 = (ln2range == 64); } static void pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base) { struct pci_devinfo *dinfo; int ln2range; /* The device ROM BAR is always a 32-bit memory BAR. 
*/ dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) ln2range = 32; else ln2range = pci_maprange(pm->pm_value); pci_write_config(dev, pm->pm_reg, base, 4); if (ln2range == 64) pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4); pm->pm_value = pci_read_config(dev, pm->pm_reg, 4); if (ln2range == 64) pm->pm_value |= (pci_addr_t)pci_read_config(dev, pm->pm_reg + 4, 4) << 32; } struct pci_map * pci_find_bar(device_t dev, int reg) { struct pci_devinfo *dinfo; struct pci_map *pm; dinfo = device_get_ivars(dev); STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { if (pm->pm_reg == reg) return (pm); } return (NULL); } struct pci_map * pci_first_bar(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); return (STAILQ_FIRST(&dinfo->cfg.maps)); } struct pci_map * pci_next_bar(struct pci_map *pm) { return (STAILQ_NEXT(pm, pm_link)); } int pci_bar_enabled(device_t dev, struct pci_map *pm) { struct pci_devinfo *dinfo; uint16_t cmd; dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) && !(pm->pm_value & PCIM_BIOS_ENABLE)) return (0); #ifdef PCI_IOV if ((dinfo->cfg.flags & PCICFG_VF) != 0) { struct pcicfg_iov *iov; iov = dinfo->cfg.iov; cmd = pci_read_config(iov->iov_pf, iov->iov_pos + PCIR_SRIOV_CTL, 2); return ((cmd & PCIM_SRIOV_VF_MSE) != 0); } #endif cmd = pci_read_config(dev, PCIR_COMMAND, 2); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value)) return ((cmd & PCIM_CMD_MEMEN) != 0); else return ((cmd & PCIM_CMD_PORTEN) != 0); } struct pci_map * pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size) { struct pci_devinfo *dinfo; struct pci_map *pm, *prev; dinfo = device_get_ivars(dev); pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO); pm->pm_reg = reg; pm->pm_value = value; pm->pm_size = size; STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) { KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x", reg)); if (STAILQ_NEXT(prev, pm_link) == NULL || STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg) break; } if (prev != NULL) STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link); else STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link); return (pm); } static void pci_restore_bars(device_t dev) { struct pci_devinfo *dinfo; struct pci_map *pm; int ln2range; dinfo = device_get_ivars(dev); STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) ln2range = 32; else ln2range = pci_maprange(pm->pm_value); pci_write_config(dev, pm->pm_reg, pm->pm_value, 4); if (ln2range == 64) pci_write_config(dev, pm->pm_reg + 4, pm->pm_value >> 32, 4); } } /* * Add a resource based on a pci map register. Return 1 if the map * register is a 32bit map register or 2 if it is a 64bit register. */ static int pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, int force, int prefetch) { struct pci_map *pm; pci_addr_t base, map, testval; pci_addr_t start, end, count; int barlen, basezero, flags, maprange, mapsize, type; uint16_t cmd; struct resource *res; /* * The BAR may already exist if the device is a CardBus card * whose CIS is stored in this BAR. */ pm = pci_find_bar(dev, reg); if (pm != NULL) { maprange = pci_maprange(pm->pm_value); barlen = maprange == 64 ? 
2 : 1; return (barlen); } pci_read_bar(dev, reg, &map, &testval, NULL); if (PCI_BAR_MEM(map)) { type = SYS_RES_MEMORY; if (map & PCIM_BAR_MEM_PREFETCH) prefetch = 1; } else type = SYS_RES_IOPORT; mapsize = pci_mapsize(testval); base = pci_mapbase(map); #ifdef __PCI_BAR_ZERO_VALID basezero = 0; #else basezero = base == 0; #endif maprange = pci_maprange(map); barlen = maprange == 64 ? 2 : 1; /* * For I/O registers, if bottom bit is set, and the next bit up * isn't clear, we know we have a BAR that doesn't conform to the * spec, so ignore it. Also, sanity check the size of the data * areas against the type of resource involved. Memory must be at * least 16 bytes in size, while I/O ranges must be at least 4. */ if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0) return (barlen); if ((type == SYS_RES_MEMORY && mapsize < 4) || (type == SYS_RES_IOPORT && mapsize < 2)) return (barlen); /* Save a record of this BAR. */ pm = pci_add_bar(dev, reg, map, mapsize); if (bootverbose) { printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d", reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize); if (type == SYS_RES_IOPORT && !pci_porten(dev)) printf(", port disabled\n"); else if (type == SYS_RES_MEMORY && !pci_memen(dev)) printf(", memory disabled\n"); else printf(", enabled\n"); } /* * If base is 0, then we have problems if this architecture does * not allow that. It is best to ignore such entries for the * moment. These will be allocated later if the driver specifically * requests them. However, some removable buses look better when * all resources are allocated, so allow '0' to be overridden. * * Similarly treat maps whose value is the same as the test value * read back. These maps have had all f's written to them by the * BIOS in an attempt to disable the resources. */ if (!force && (basezero || map == testval)) return (barlen); if ((u_long)base != base) { device_printf(bus, "pci%d:%d:%d:%d bar %#x too many address bits\n", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), reg); return (barlen); } /* * This code theoretically does the right thing, but has * undesirable side effects in some cases where peripherals * respond oddly to having these bits enabled. Let the user * be able to turn them off (since pci_enable_io_modes is 1 by * default). */ if (pci_enable_io_modes) { /* Turn on resources that have been left off by a lazy BIOS */ if (type == SYS_RES_IOPORT && !pci_porten(dev)) { cmd = pci_read_config(dev, PCIR_COMMAND, 2); cmd |= PCIM_CMD_PORTEN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); } if (type == SYS_RES_MEMORY && !pci_memen(dev)) { cmd = pci_read_config(dev, PCIR_COMMAND, 2); cmd |= PCIM_CMD_MEMEN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); } } else { if (type == SYS_RES_IOPORT && !pci_porten(dev)) return (barlen); if (type == SYS_RES_MEMORY && !pci_memen(dev)) return (barlen); } count = (pci_addr_t)1 << mapsize; flags = RF_ALIGNMENT_LOG2(mapsize); if (prefetch) flags |= RF_PREFETCHABLE; if (basezero || base == pci_mapbase(testval) || pci_clear_bars) { start = 0; /* Let the parent decide. */ end = ~0; } else { start = base; end = base + count - 1; } resource_list_add(rl, type, reg, start, end, count); /* * Try to allocate the resource for this BAR from our parent * so that this resource range is already reserved. The * driver for this device will later inherit this resource in * pci_alloc_resource().
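 * * (For illustration, a driver would later pick up this reservation * with something like * *	rid = PCIR_BAR(0); *	res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); * * where BAR 0 and the memory resource type are example values only.)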
*/ res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count, flags); if ((pci_do_realloc_bars || pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_REALLOC_BAR)) && res == NULL && (start != 0 || end != ~0)) { /* * If the allocation fails, try to allocate a resource for * this BAR using any available range. The firmware felt * it was important enough to assign a resource, so don't * disable decoding if we can help it. */ resource_list_delete(rl, type, reg); resource_list_add(rl, type, reg, 0, ~0, count); res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0, count, flags); } if (res == NULL) { /* * If the allocation fails, delete the resource list entry * and disable decoding for this device. * * If the driver requests this resource in the future, * pci_reserve_map() will try to allocate a fresh * resource range. */ resource_list_delete(rl, type, reg); pci_disable_io(dev, type); if (bootverbose) device_printf(bus, "pci%d:%d:%d:%d bar %#x failed to allocate\n", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), reg); } else { start = rman_get_start(res); pci_write_bar(dev, pm, start); } return (barlen); } /* * For ATA devices we need to decide early what addressing mode to use. * Legacy demands that the primary and secondary ATA ports sit on the * same addresses that old ISA hardware did. This dictates that we use * those addresses and ignore the BARs if we cannot set PCI native * addressing mode. */ static void pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force, uint32_t prefetchmask) { int rid, type, progif; #if 0 /* if this device supports PCI native addressing use it */ progif = pci_read_config(dev, PCIR_PROGIF, 1); if ((progif & 0x8a) == 0x8a) { if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) && pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) { printf("Trying ATA native PCI addressing mode\n"); pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1); } } #endif progif = pci_read_config(dev, PCIR_PROGIF, 1); type = SYS_RES_IOPORT; if (progif & PCIP_STORAGE_IDE_MODEPRIM) { pci_add_map(bus, dev, PCIR_BAR(0), rl, force, prefetchmask & (1 << 0)); pci_add_map(bus, dev, PCIR_BAR(1), rl, force, prefetchmask & (1 << 1)); } else { rid = PCIR_BAR(0); resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8, 0); rid = PCIR_BAR(1); resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1, 0); } if (progif & PCIP_STORAGE_IDE_MODESEC) { pci_add_map(bus, dev, PCIR_BAR(2), rl, force, prefetchmask & (1 << 2)); pci_add_map(bus, dev, PCIR_BAR(3), rl, force, prefetchmask & (1 << 3)); } else { rid = PCIR_BAR(2); resource_list_add(rl, type, rid, 0x170, 0x177, 8); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170, 0x177, 8, 0); rid = PCIR_BAR(3); resource_list_add(rl, type, rid, 0x376, 0x376, 1); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376, 0x376, 1, 0); } pci_add_map(bus, dev, PCIR_BAR(4), rl, force, prefetchmask & (1 << 4)); pci_add_map(bus, dev, PCIR_BAR(5), rl, force, prefetchmask & (1 << 5)); } static void pci_assign_interrupt(device_t bus, device_t dev, int force_route) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; char tunable_name[64]; int irq; /* Has to have an intpin to have an interrupt. */ if (cfg->intpin == 0) return; /* Let the user override the IRQ with a tunable.
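 * The tunable name encodes the device location; for example (domain 0, * bus 0, slot 31, pin INTA, and IRQ 16 are illustrative values), the * loader.conf(5) line * *	hw.pci0.0.31.INTA.irq="16" * * would force INTA of that slot to IRQ 16.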
*/ irq = PCI_INVALID_IRQ; snprintf(tunable_name, sizeof(tunable_name), "hw.pci%d.%d.%d.INT%c.irq", cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1); if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0)) irq = PCI_INVALID_IRQ; /* * If we didn't get an IRQ via the tunable, then we either use the * IRQ value in the intline register or we ask the bus to route an * interrupt for us. If force_route is true, then we only use the * value in the intline register if the bus was unable to assign an * IRQ. */ if (!PCI_INTERRUPT_VALID(irq)) { if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route) irq = PCI_ASSIGN_INTERRUPT(bus, dev); if (!PCI_INTERRUPT_VALID(irq)) irq = cfg->intline; } /* If after all that we don't have an IRQ, just bail. */ if (!PCI_INTERRUPT_VALID(irq)) return; /* Update the config register if it changed. */ if (irq != cfg->intline) { cfg->intline = irq; pci_write_config(dev, PCIR_INTLINE, irq, 1); } /* Add this IRQ as rid 0 interrupt resource. */ resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1); } /* Perform early OHCI takeover from SMM. */ static void ohci_early_takeover(device_t self) { struct resource *res; uint32_t ctl; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; ctl = bus_read_4(res, OHCI_CONTROL); if (ctl & OHCI_IR) { if (bootverbose) printf("ohci early: " "SMM active, request owner change\n"); bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR); for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) { DELAY(1000); ctl = bus_read_4(res, OHCI_CONTROL); } if (ctl & OHCI_IR) { if (bootverbose) printf("ohci early: " "SMM does not respond, resetting\n"); bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET); } /* Disable interrupts */ bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } /* Perform early UHCI takeover from SMM. */ static void uhci_early_takeover(device_t self) { struct resource *res; int rid; /* * Set the PIRQD enable bit and switch off all the others. We don't * want legacy support to interfere with us XXX Does this also mean * that the BIOS won't touch the keyboard anymore if it is connected * to the ports of the root hub? */ pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2); /* Disable interrupts */ rid = PCI_UHCI_BASE_REG; res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (res != NULL) { bus_write_2(res, UHCI_INTR, 0); bus_release_resource(self, SYS_RES_IOPORT, rid, res); } } /* Perform early EHCI takeover from SMM. */ static void ehci_early_takeover(device_t self) { struct resource *res; uint32_t cparams; uint32_t eec; uint8_t eecp; uint8_t bios_sem; uint8_t offs; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; cparams = bus_read_4(res, EHCI_HCCPARAMS); /* Synchronise with the BIOS if it owns the controller. 
*/ for (eecp = EHCI_HCC_EECP(cparams); eecp != 0; eecp = EHCI_EECP_NEXT(eec)) { eec = pci_read_config(self, eecp, 4); if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) { continue; } bios_sem = pci_read_config(self, eecp + EHCI_LEGSUP_BIOS_SEM, 1); if (bios_sem == 0) { continue; } if (bootverbose) printf("ehci early: " "SMM active, request owner change\n"); pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1); for (i = 0; (i < 100) && (bios_sem != 0); i++) { DELAY(1000); bios_sem = pci_read_config(self, eecp + EHCI_LEGSUP_BIOS_SEM, 1); } if (bios_sem != 0) { if (bootverbose) printf("ehci early: " "SMM does not respond\n"); } /* Disable interrupts */ offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION)); bus_write_4(res, offs + EHCI_USBINTR, 0); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } /* Perform early XHCI takeover from SMM. */ static void xhci_early_takeover(device_t self) { struct resource *res; uint32_t cparams; uint32_t eec; uint8_t eecp; uint8_t bios_sem; uint8_t offs; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; cparams = bus_read_4(res, XHCI_HCSPARAMS0); eec = -1; /* Synchronise with the BIOS if it owns the controller. */ for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec); eecp += XHCI_XECP_NEXT(eec) << 2) { eec = bus_read_4(res, eecp); if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY) continue; bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM); if (bios_sem == 0) continue; if (bootverbose) printf("xhci early: " "SMM active, request owner change\n"); bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1); /* wait a maximum of 5 seconds */ for (i = 0; (i < 5000) && (bios_sem != 0); i++) { DELAY(1000); bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM); } if (bios_sem != 0) { if (bootverbose) printf("xhci early: " "SMM does not respond\n"); } /* Disable interrupts */ offs = bus_read_1(res, XHCI_CAPLENGTH); bus_write_4(res, offs + XHCI_USBCMD, 0); bus_read_4(res, offs + XHCI_USBSTS); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } static void pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg, struct resource_list *rl) { struct resource *res; char *cp; rman_res_t start, end, count; int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; sub_reg = PCIR_SUBBUS_2; break; default: return; } /* * If the existing bus range is valid, attempt to reserve it * from our parent. If this fails for any reason, clear the * secbus and subbus registers. * * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus? * This would at least preserve the existing sec_bus if it is * valid. */ sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1); sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1); /* Quirk handling. */ switch (pci_get_devid(dev)) { case 0x12258086: /* Intel 82454KX/GX (Orion) */ sup_bus = pci_read_config(dev, 0x41, 1); if (sup_bus != 0xff) { sec_bus = sup_bus + 1; sub_bus = sup_bus + 1; PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1); PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1); } break; case 0x00dd10de: /* Compaq R3000 BIOS sets wrong subordinate bus number.
*/ if ((cp = kern_getenv("smbios.planar.maker")) == NULL) break; if (strncmp(cp, "Compal", 6) != 0) { freeenv(cp); break; } freeenv(cp); if ((cp = kern_getenv("smbios.planar.product")) == NULL) break; if (strncmp(cp, "08A0", 4) != 0) { freeenv(cp); break; } freeenv(cp); if (sub_bus < 0xa) { sub_bus = 0xa; PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1); } break; } if (bootverbose) printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus); if (sec_bus > 0 && sub_bus >= sec_bus) { start = sec_bus; end = sub_bus; count = end - start + 1; resource_list_add(rl, PCI_RES_BUS, 0, 0, ~0, count); /* * If requested, clear secondary bus registers in * bridge devices to force a complete renumbering * rather than reserving the existing range. However, * preserve the existing size. */ if (pci_clear_buses) goto clear; rid = 0; res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid, start, end, count, 0); if (res != NULL) return; if (bootverbose) device_printf(bus, "pci%d:%d:%d:%d secbus failed to allocate\n", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); } clear: PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1); PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1); } static struct resource * pci_alloc_secbus(device_t dev, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pci_devinfo *dinfo; pcicfgregs *cfg; struct resource_list *rl; struct resource *res; int sec_reg, sub_reg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; rl = &dinfo->resources; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; sub_reg = PCIR_SUBBUS_2; break; default: return (NULL); } if (*rid != 0) return (NULL); if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL) resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count); if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) { res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { resource_list_delete(rl, PCI_RES_BUS, *rid); device_printf(child, "allocating %ju bus%s failed\n", count, count == 1 ? "" : "es"); return (NULL); } if (bootverbose) device_printf(child, "Lazy allocation of %ju bus%s at %ju\n", count, count == 1 ?
"" : "es", rman_get_start(res)); PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1); PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1); } return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start, end, count, flags)); } static int pci_ea_bei_to_rid(device_t dev, int bei) { #ifdef PCI_IOV struct pci_devinfo *dinfo; int iov_pos; struct pcicfg_iov *iov; dinfo = device_get_ivars(dev); iov = dinfo->cfg.iov; if (iov != NULL) iov_pos = iov->iov_pos; else iov_pos = 0; #endif /* Check if matches BAR */ if ((bei >= PCIM_EA_BEI_BAR_0) && (bei <= PCIM_EA_BEI_BAR_5)) return (PCIR_BAR(bei)); /* Check ROM */ if (bei == PCIM_EA_BEI_ROM) return (PCIR_BIOS); #ifdef PCI_IOV /* Check if matches VF_BAR */ if ((iov != NULL) && (bei >= PCIM_EA_BEI_VF_BAR_0) && (bei <= PCIM_EA_BEI_VF_BAR_5)) return (PCIR_SRIOV_BAR(bei - PCIM_EA_BEI_VF_BAR_0) + iov_pos); #endif return (-1); } int pci_ea_is_enabled(device_t dev, int rid) { struct pci_ea_entry *ea; struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { if (pci_ea_bei_to_rid(dev, ea->eae_bei) == rid) return ((ea->eae_flags & PCIM_EA_ENABLE) > 0); } return (0); } void pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov) { struct pci_ea_entry *ea; struct pci_devinfo *dinfo; pci_addr_t start, end, count; struct resource_list *rl; int type, flags, rid; struct resource *res; uint32_t tmp; #ifdef PCI_IOV struct pcicfg_iov *iov; #endif dinfo = device_get_ivars(dev); rl = &dinfo->resources; flags = 0; #ifdef PCI_IOV iov = dinfo->cfg.iov; #endif if (dinfo->cfg.ea.ea_location == 0) return; STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { /* * TODO: Ignore an EA BAR if it is not enabled. * Currently the EA implementation supports * only the situation where the EA structure * contains predefined entries. If they are not * enabled, leave them unallocated and proceed * with the legacy-BAR mechanism. */ if ((ea->eae_flags & PCIM_EA_ENABLE) == 0) continue; switch ((ea->eae_flags & PCIM_EA_PP) >> PCIM_EA_PP_OFFSET) { case PCIM_EA_P_MEM_PREFETCH: case PCIM_EA_P_VF_MEM_PREFETCH: flags = RF_PREFETCHABLE; /* FALLTHROUGH */ case PCIM_EA_P_VF_MEM: case PCIM_EA_P_MEM: type = SYS_RES_MEMORY; break; case PCIM_EA_P_IO: type = SYS_RES_IOPORT; break; default: continue; } if (alloc_iov != 0) { #ifdef PCI_IOV /* Allocating IOV, confirm BEI matches */ if ((ea->eae_bei < PCIM_EA_BEI_VF_BAR_0) || (ea->eae_bei > PCIM_EA_BEI_VF_BAR_5)) continue; #else continue; #endif } else { /* Allocating BAR, confirm BEI matches */ if (((ea->eae_bei < PCIM_EA_BEI_BAR_0) || (ea->eae_bei > PCIM_EA_BEI_BAR_5)) && (ea->eae_bei != PCIM_EA_BEI_ROM)) continue; } rid = pci_ea_bei_to_rid(dev, ea->eae_bei); if (rid < 0) continue; /* Skip resources already allocated by EA */ if ((resource_list_find(rl, SYS_RES_MEMORY, rid) != NULL) || (resource_list_find(rl, SYS_RES_IOPORT, rid) != NULL)) continue; start = ea->eae_base; count = ea->eae_max_offset + 1; #ifdef PCI_IOV if (iov != NULL) count = count * iov->iov_num_vfs; #endif end = start + count - 1; if (count == 0) continue; resource_list_add(rl, type, rid, start, end, count); res = resource_list_reserve(rl, bus, dev, type, &rid, start, end, count, flags); if (res == NULL) { resource_list_delete(rl, type, rid); /* * Failed to allocate using EA, disable entry.
* Another attempt at allocation will be made * later, this time using the legacy BAR registers */ tmp = pci_read_config(dev, ea->eae_cfg_offset, 4); tmp &= ~PCIM_EA_ENABLE; pci_write_config(dev, ea->eae_cfg_offset, tmp, 4); /* * Disabling the entry might fail if it is hardwired. * Read flags again to match current status. */ ea->eae_flags = pci_read_config(dev, ea->eae_cfg_offset, 4); continue; } /* As per specification, fill BAR with zeros */ pci_write_config(dev, rid, 0, 4); } } void pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask) { struct pci_devinfo *dinfo; pcicfgregs *cfg; struct resource_list *rl; const struct pci_quirk *q; uint32_t devid; int i; dinfo = device_get_ivars(dev); cfg = &dinfo->cfg; rl = &dinfo->resources; devid = (cfg->device << 16) | cfg->vendor; /* Allocate resources using Enhanced Allocation */ pci_add_resources_ea(bus, dev, 0); /* ATA devices need special map treatment */ if ((pci_get_class(dev) == PCIC_STORAGE) && (pci_get_subclass(dev) == PCIS_STORAGE_IDE) && ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) || (!pci_read_config(dev, PCIR_BAR(0), 4) && !pci_read_config(dev, PCIR_BAR(2), 4))) ) pci_ata_maps(bus, dev, rl, force, prefetchmask); else for (i = 0; i < cfg->nummaps;) { /* Skip resources already managed by EA */ if ((resource_list_find(rl, SYS_RES_MEMORY, PCIR_BAR(i)) != NULL) || (resource_list_find(rl, SYS_RES_IOPORT, PCIR_BAR(i)) != NULL) || pci_ea_is_enabled(dev, PCIR_BAR(i))) { i++; continue; } /* * Skip quirked resources. */ for (q = &pci_quirks[0]; q->devid != 0; q++) if (q->devid == devid && q->type == PCI_QUIRK_UNMAP_REG && q->arg1 == PCIR_BAR(i)) break; if (q->devid != 0) { i++; continue; } i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force, prefetchmask & (1 << i)); } /* * Add additional, quirked resources. */ for (q = &pci_quirks[0]; q->devid != 0; q++) if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG) pci_add_map(bus, dev, q->arg1, rl, force, 0); if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline) && pci_intx_reroute) { /* * Try to re-route interrupts. Sometimes the BIOS or * firmware may leave bogus values in these registers. * If the re-route fails, then just stick with what we * have. */ pci_assign_interrupt(bus, dev, 1); } if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS && pci_get_subclass(dev) == PCIS_SERIALBUS_USB) { if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI) xhci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI) ehci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI) ohci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI) uhci_early_takeover(dev); } /* * Reserve resources for secondary bus ranges behind bridge * devices. */ pci_reserve_secbus(bus, dev, cfg, rl); } static struct pci_devinfo * pci_identify_function(device_t pcib, device_t dev, int domain, int busno, int slot, int func) { struct pci_devinfo *dinfo; dinfo = pci_read_device(pcib, dev, domain, busno, slot, func); if (dinfo != NULL) pci_add_child(dev, dinfo); return (dinfo); } void pci_add_children(device_t dev, int domain, int busno) { #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) device_t pcib = device_get_parent(dev); struct pci_devinfo *dinfo; int maxslots; int s, f, pcifunchigh; uint8_t hdrtype; int first_func; /* * Try to detect a device at slot 0, function 0. If it exists, try to * enable ARI.
We must enable ARI before detecting the rest of the * functions on this bus as ARI changes the set of slots and functions * that are legal on this bus. */ dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0); if (dinfo != NULL && pci_enable_ari) PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev); /* * Start looking for new devices on slot 0 at function 1 because we * just identified the device at slot 0, function 0. */ first_func = 1; maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++, first_func = 0) { pcifunchigh = 0; f = 0; DELAY(1); /* If function 0 is not present, skip to the next slot. */ if (REG(PCIR_VENDOR, 2) == PCIV_INVALID) continue; hdrtype = REG(PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCIB_MAXFUNCS(pcib); for (f = first_func; f <= pcifunchigh; f++) pci_identify_function(pcib, dev, domain, busno, s, f); } #undef REG } int pci_rescan_method(device_t dev) { #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) device_t pcib = device_get_parent(dev); device_t child, *devlist, *unchanged; int devcount, error, i, j, maxslots, oldcount; int busno, domain, s, f, pcifunchigh; uint8_t hdrtype; /* No need to check for ARI on a rescan. */ error = device_get_children(dev, &devlist, &devcount); if (error) return (error); if (devcount != 0) { unchanged = malloc(devcount * sizeof(device_t), M_TEMP, M_NOWAIT | M_ZERO); if (unchanged == NULL) { free(devlist, M_TEMP); return (ENOMEM); } } else unchanged = NULL; domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++) { /* If function 0 is not present, skip to the next slot. */ f = 0; if (REG(PCIR_VENDOR, 2) == PCIV_INVALID) continue; pcifunchigh = 0; hdrtype = REG(PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCIB_MAXFUNCS(pcib); for (f = 0; f <= pcifunchigh; f++) { if (REG(PCIR_VENDOR, 2) == PCIV_INVALID) continue; /* * Found a valid function. Check if a * device_t for this device already exists. */ for (i = 0; i < devcount; i++) { child = devlist[i]; if (child == NULL) continue; if (pci_get_slot(child) == s && pci_get_function(child) == f) { unchanged[i] = child; goto next_func; } } pci_identify_function(pcib, dev, domain, busno, s, f); next_func:; } } /* Remove devices that are no longer present. */ for (i = 0; i < devcount; i++) { if (unchanged[i] != NULL) continue; device_delete_child(dev, devlist[i]); } free(devlist, M_TEMP); oldcount = devcount; /* Try to attach the devices just added. 
*/ error = device_get_children(dev, &devlist, &devcount); if (error) { free(unchanged, M_TEMP); return (error); } for (i = 0; i < devcount; i++) { for (j = 0; j < oldcount; j++) { if (devlist[i] == unchanged[j]) goto next_device; } device_probe_and_attach(devlist[i]); next_device:; } free(unchanged, M_TEMP); free(devlist, M_TEMP); return (0); #undef REG } #ifdef PCI_IOV device_t pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did) { struct pci_devinfo *vf_dinfo; device_t pcib; int busno, slot, func; pcib = device_get_parent(bus); PCIB_DECODE_RID(pcib, rid, &busno, &slot, &func); vf_dinfo = pci_fill_devinfo(pcib, bus, pci_get_domain(pcib), busno, slot, func, vid, did); vf_dinfo->cfg.flags |= PCICFG_VF; pci_add_child(bus, vf_dinfo); return (vf_dinfo->cfg.dev); } device_t pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did) { return (pci_add_iov_child(bus, pf, rid, vid, did)); } #endif /* * For a PCIe device, set Max_Payload_Size to match the PCIe root port's. */ static void pcie_setup_mps(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); device_t root; uint16_t rmps, mmps, mps; if (dinfo->cfg.pcie.pcie_location == 0) return; root = pci_find_pcie_root_port(dev); if (root == NULL) return; /* Check whether the MPS is already configured. */ rmps = pcie_read_config(root, PCIER_DEVICE_CTL, 2) & PCIEM_CTL_MAX_PAYLOAD; mps = pcie_read_config(dev, PCIER_DEVICE_CTL, 2) & PCIEM_CTL_MAX_PAYLOAD; if (mps == rmps) return; /* Check whether the device is capable of the root's MPS. */ mmps = (pcie_read_config(dev, PCIER_DEVICE_CAP, 2) & PCIEM_CAP_MAX_PAYLOAD) << 5; if (rmps > mmps) { /* * The device is unable to handle the root's MPS. Limit the * root. * XXX: We should traverse the whole tree, applying * it to all the devices.
*/ pcie_adjust_config(root, PCIER_DEVICE_CTL, PCIEM_CTL_MAX_PAYLOAD, mmps, 2); } else { pcie_adjust_config(dev, PCIER_DEVICE_CTL, PCIEM_CTL_MAX_PAYLOAD, rmps, 2); } } static void pci_add_child_clear_aer(device_t dev, struct pci_devinfo *dinfo) { int aer; uint32_t r; uint16_t r2; if (dinfo->cfg.pcie.pcie_location != 0 && dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) { r2 = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_ROOT_CTL, 2); r2 &= ~(PCIEM_ROOT_CTL_SERR_CORR | PCIEM_ROOT_CTL_SERR_NONFATAL | PCIEM_ROOT_CTL_SERR_FATAL); pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_ROOT_CTL, r2, 2); } if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) { r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4); if (r != 0 && bootverbose) { pci_printf(&dinfo->cfg, "clearing AER UC 0x%08x -> 0x%08x\n", r, pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4)); } r = pci_read_config(dev, aer + PCIR_AER_UC_MASK, 4); r &= ~(PCIM_AER_UC_TRAINING_ERROR | PCIM_AER_UC_DL_PROTOCOL_ERROR | PCIM_AER_UC_SURPRISE_LINK_DOWN | PCIM_AER_UC_POISONED_TLP | PCIM_AER_UC_FC_PROTOCOL_ERROR | PCIM_AER_UC_COMPLETION_TIMEOUT | PCIM_AER_UC_COMPLETER_ABORT | PCIM_AER_UC_UNEXPECTED_COMPLETION | PCIM_AER_UC_RECEIVER_OVERFLOW | PCIM_AER_UC_MALFORMED_TLP | PCIM_AER_UC_ECRC_ERROR | PCIM_AER_UC_UNSUPPORTED_REQUEST | PCIM_AER_UC_ACS_VIOLATION | PCIM_AER_UC_INTERNAL_ERROR | PCIM_AER_UC_MC_BLOCKED_TLP | PCIM_AER_UC_ATOMIC_EGRESS_BLK | PCIM_AER_UC_TLP_PREFIX_BLOCKED); pci_write_config(dev, aer + PCIR_AER_UC_MASK, r, 4); r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4); if (r != 0 && bootverbose) { pci_printf(&dinfo->cfg, "clearing AER COR 0x%08x -> 0x%08x\n", r, pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4)); } r = pci_read_config(dev, aer + PCIR_AER_COR_MASK, 4); r &= ~(PCIM_AER_COR_RECEIVER_ERROR | PCIM_AER_COR_BAD_TLP | PCIM_AER_COR_BAD_DLLP | PCIM_AER_COR_REPLAY_ROLLOVER | PCIM_AER_COR_REPLAY_TIMEOUT | PCIM_AER_COR_ADVISORY_NF_ERROR | PCIM_AER_COR_INTERNAL_ERROR | PCIM_AER_COR_HEADER_LOG_OVFLOW); pci_write_config(dev, aer + PCIR_AER_COR_MASK, r, 4); r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, 2); r |= PCIEM_CTL_COR_ENABLE | PCIEM_CTL_NFER_ENABLE | PCIEM_CTL_FER_ENABLE | PCIEM_CTL_URR_ENABLE; pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, r, 2); } } void pci_add_child(device_t bus, struct pci_devinfo *dinfo) { device_t dev; dinfo->cfg.dev = dev = device_add_child(bus, NULL, DEVICE_UNIT_ANY); device_set_ivars(dev, dinfo); resource_list_init(&dinfo->resources); pci_cfg_save(dev, dinfo, 0); pci_cfg_restore(dev, dinfo); pci_clear_pme(dev); pci_print_verbose(dinfo); pci_add_resources(bus, dev, 0, 0); if (pci_enable_mps_tune) pcie_setup_mps(dev); pci_child_added(dinfo->cfg.dev); if (pci_clear_aer_on_attach) pci_add_child_clear_aer(dev, dinfo); EVENTHANDLER_INVOKE(pci_add_device, dinfo->cfg.dev); } void pci_child_added_method(device_t dev, device_t child) { } static int pci_probe(device_t dev) { device_set_desc(dev, "PCI bus"); /* Allow other subclasses to override this driver. 
*/ return (BUS_PROBE_GENERIC); } int pci_attach_common(device_t dev) { struct pci_softc *sc; int busno, domain; int rid; sc = device_get_softc(dev); domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); rid = 0; sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno, 1, 0); if (sc->sc_bus == NULL) { device_printf(dev, "failed to allocate bus number\n"); return (ENXIO); } if (bootverbose) device_printf(dev, "domain=%d, physical bus=%d\n", domain, busno); sc->sc_dma_tag = bus_get_dma_tag(dev); return (0); } int pci_attach(device_t dev) { int busno, domain, error; error = pci_attach_common(dev); if (error) return (error); /* * Since there can be multiple independently numbered PCI * buses on systems with multiple PCI domains, we can't use * the unit number to decide which bus we are probing. We ask * the parent pcib what our domain and bus numbers are. */ domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); pci_add_children(dev, domain, busno); bus_attach_children(dev); return (0); } int pci_detach(device_t dev) { struct pci_softc *sc; int error; error = bus_generic_detach(dev); if (error) return (error); sc = device_get_softc(dev); error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus); return (error); } static void pci_hint_device_unit(device_t dev, device_t child, const char *name, int *unitp) { int line, unit; const char *at; char me1[24], me2[32]; uint8_t b, s, f; uint32_t d; device_location_cache_t *cache; d = pci_get_domain(child); b = pci_get_bus(child); s = pci_get_slot(child); f = pci_get_function(child); snprintf(me1, sizeof(me1), "pci%u:%u:%u", b, s, f); snprintf(me2, sizeof(me2), "pci%u:%u:%u:%u", d, b, s, f); line = 0; cache = dev_wired_cache_init(); while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) { resource_string_value(name, unit, "at", &at); if (strcmp(at, me1) == 0 || strcmp(at, me2) == 0) { *unitp = unit; break; } if (dev_wired_cache_match(cache, child, at)) { *unitp = unit; break; } } dev_wired_cache_fini(cache); } static void pci_set_power_child(device_t dev, device_t child, int state) { device_t pcib; int dstate; /* * Set the device to the given state. If the firmware suggests * a different power state, use it instead. If power management * is not present, the firmware is responsible for managing * device power. Skip children who aren't attached since they * are handled separately. */ pcib = device_get_parent(dev); dstate = state; if (device_is_attached(child) && PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0) pci_set_powerstate(child, dstate); } int pci_suspend_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list_entry *rle; int error; dinfo = device_get_ivars(child); /* * Save the PCI configuration space for the child and set the * device in the appropriate power state for this sleep state. */ pci_cfg_save(child, dinfo, 0); /* Suspend devices before potentially powering them down. */ error = bus_generic_suspend_child(dev, child); if (error) return (error); if (pci_do_power_suspend) { /* * Make sure this device's interrupt handler is not invoked * in the case the device uses a shared interrupt that can * be raised by some other device. * This is applicable only to regular (legacy) PCI interrupts * as MSI/MSI-X interrupts are never shared. 
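pci_hint_device_unit() above wires hinted unit numbers by formatting the child's location both with and without the domain and comparing each spelling against the hint's "at" value. A self-contained sketch of that comparison, with invented location values:

#include <stdio.h>
#include <string.h>

int
main(void)
{
        unsigned int d = 0, b = 2, s = 3, f = 1;
        const char *at = "pci2:3:1";            /* hypothetical hint value */
        char me1[24], me2[32];

        /* Both selector spellings for the same device. */
        snprintf(me1, sizeof(me1), "pci%u:%u:%u", b, s, f);
        snprintf(me2, sizeof(me2), "pci%u:%u:%u:%u", d, b, s, f);
        if (strcmp(at, me1) == 0 || strcmp(at, me2) == 0)
                printf("hint matches device at %s\n", me2);
        return (0);
}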
*/ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) (void)bus_suspend_intr(child, rle->res); pci_set_power_child(dev, child, PCI_POWERSTATE_D3); } return (0); } int pci_resume_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list_entry *rle; if (pci_do_power_resume) pci_set_power_child(dev, child, PCI_POWERSTATE_D0); dinfo = device_get_ivars(child); pci_cfg_restore(child, dinfo); pci_clear_pme(child); if (!device_is_attached(child)) pci_cfg_save(child, dinfo, 1); bus_generic_resume_child(dev, child); /* * Allow interrupts only after fully resuming the driver and hardware. */ if (pci_do_power_suspend) { /* See pci_suspend_child for details. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) (void)bus_resume_intr(child, rle->res); } return (0); } int pci_resume(device_t dev) { device_t child, *devlist; int error, i, numdevs; if ((error = device_get_children(dev, &devlist, &numdevs)) != 0) return (error); /* * Resume critical devices first, then everything else later. */ for (i = 0; i < numdevs; i++) { child = devlist[i]; switch (pci_get_class(child)) { case PCIC_DISPLAY: case PCIC_MEMORY: case PCIC_BRIDGE: case PCIC_BASEPERIPH: BUS_RESUME_CHILD(dev, child); break; } } for (i = 0; i < numdevs; i++) { child = devlist[i]; switch (pci_get_class(child)) { case PCIC_DISPLAY: case PCIC_MEMORY: case PCIC_BRIDGE: case PCIC_BASEPERIPH: break; default: BUS_RESUME_CHILD(dev, child); } } free(devlist, M_TEMP); return (0); } static void pci_load_vendor_data(void) { caddr_t data; void *ptr; size_t sz; data = preload_search_by_type("pci_vendor_data"); if (data != NULL) { ptr = preload_fetch_addr(data); sz = preload_fetch_size(data); if (ptr != NULL && sz != 0) { pci_vendordata = ptr; pci_vendordata_size = sz; /* terminate the database */ pci_vendordata[pci_vendordata_size] = '\n'; } } } void pci_driver_added(device_t dev, driver_t *driver) { int numdevs; device_t *devlist; device_t child; struct pci_devinfo *dinfo; int i; if (bootverbose) device_printf(dev, "driver added\n"); DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs) != 0) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) != DS_NOTPRESENT) continue; dinfo = device_get_ivars(child); pci_print_verbose(dinfo); if (bootverbose) pci_printf(&dinfo->cfg, "reprobing on driver added\n"); pci_cfg_restore(child, dinfo); if (device_probe_and_attach(child) != 0) pci_child_detached(dev, child); } free(devlist, M_TEMP); } int pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { struct pci_devinfo *dinfo; struct msix_table_entry *mte; struct msix_vector *mv; uint64_t addr; uint32_t data; void *cookie; int error, rid; error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr, arg, &cookie); if (error) return (error); /* If this is not a direct child, just bail out. */ if (device_get_parent(child) != dev) { *cookiep = cookie; return(0); } rid = rman_get_rid(irq); if (rid == 0) { /* Make sure that INTx is enabled */ pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); } else { /* * Check to see if the interrupt is MSI or MSI-X. * Ask our parent to map the MSI and give * us the address and data register values. * If we fail for some reason, teardown the * interrupt handler. 
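The two loops in pci_resume() above implement a simple two-pass ordering: classes the rest of the system may depend on (display, memory, bridges, base peripherals) come up first, everything else second. A compact sketch of the same ordering (the device list is invented; the class constants match the PCI class codes used above, but only two are shown):

#include <stdio.h>

#define PCIC_NETWORK    0x02
#define PCIC_DISPLAY    0x03
#define PCIC_BRIDGE     0x06

struct dev { const char *name; int class; };

static int
is_critical(int class)
{
        return (class == PCIC_DISPLAY || class == PCIC_BRIDGE);
}

int
main(void)
{
        struct dev devs[] = {
                { "em0", PCIC_NETWORK },
                { "vgapci0", PCIC_DISPLAY },
                { "pcib1", PCIC_BRIDGE },
        };
        int i, pass;

        /* Pass 0 resumes critical classes, pass 1 resumes the rest. */
        for (pass = 0; pass < 2; pass++)
                for (i = 0; i < 3; i++)
                        if (is_critical(devs[i].class) == (pass == 0))
                                printf("pass %d: resume %s\n", pass,
                                    devs[i].name);
        return (0);
}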
*/ dinfo = device_get_ivars(child); if (dinfo->cfg.msi.msi_alloc > 0) { if (dinfo->cfg.msi.msi_addr == 0) { KASSERT(dinfo->cfg.msi.msi_handlers == 0, ("MSI has handlers, but vectors not mapped")); error = PCIB_MAP_MSI(device_get_parent(dev), child, rman_get_start(irq), &addr, &data); if (error) goto bad; dinfo->cfg.msi.msi_addr = addr; dinfo->cfg.msi.msi_data = data; } if (dinfo->cfg.msi.msi_handlers == 0) pci_enable_msi(child, dinfo->cfg.msi.msi_addr, dinfo->cfg.msi.msi_data); dinfo->cfg.msi.msi_handlers++; } else { KASSERT(dinfo->cfg.msix.msix_alloc > 0, ("No MSI or MSI-X interrupts allocated")); KASSERT(rid <= dinfo->cfg.msix.msix_table_len, ("MSI-X index too high")); mte = &dinfo->cfg.msix.msix_table[rid - 1]; KASSERT(mte->mte_vector != 0, ("no message vector")); mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1]; KASSERT(mv->mv_irq == rman_get_start(irq), ("IRQ mismatch")); if (mv->mv_address == 0) { KASSERT(mte->mte_handlers == 0, ("MSI-X table entry has handlers, but vector not mapped")); error = PCIB_MAP_MSI(device_get_parent(dev), child, rman_get_start(irq), &addr, &data); if (error) goto bad; mv->mv_address = addr; mv->mv_data = data; } /* * The MSIX table entry must be made valid by * incrementing the mte_handlers before * calling pci_enable_msix() and * pci_resume_msix(). Else the MSIX rewrite * table quirk will not work as expected. */ mte->mte_handlers++; if (mte->mte_handlers == 1) { pci_enable_msix(child, rid - 1, mv->mv_address, mv->mv_data); pci_unmask_msix(child, rid - 1); } } /* * Make sure that INTx is disabled if we are using MSI/MSI-X, * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG, * in which case we "enable" INTx so MSI/MSI-X actually works. */ if (!pci_has_quirk(pci_get_devid(child), PCI_QUIRK_MSI_INTX_BUG)) pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); else pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); bad: if (error) { (void)bus_generic_teardown_intr(dev, child, irq, cookie); return (error); } } *cookiep = cookie; return (0); } int pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct msix_table_entry *mte; struct resource_list_entry *rle; struct pci_devinfo *dinfo; int error, rid; if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE)) return (EINVAL); /* If this isn't a direct child, just bail out */ if (device_get_parent(child) != dev) return(bus_generic_teardown_intr(dev, child, irq, cookie)); rid = rman_get_rid(irq); if (rid == 0) { /* Mask INTx */ pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); } else { /* * Check to see if the interrupt is MSI or MSI-X. If so, * decrement the appropriate handlers count and mask the * MSI-X message, or disable MSI messages if the count * drops to 0. 
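Both the MSI and MSI-X paths above keep a per-vector handler count so the message is programmed and unmasked only when the first handler attaches, and masked again only when the last one is removed (the teardown side follows below). The refcount pattern in isolation, with hypothetical enable/disable actions standing in for the register writes:

#include <stdio.h>

struct vec { int handlers; };

static void
handler_add(struct vec *v)
{
        /* Count first, then enable, mirroring the mte_handlers ordering. */
        if (++v->handlers == 1)
                printf("first handler: write address/data, unmask vector\n");
}

static void
handler_remove(struct vec *v)
{
        if (--v->handlers == 0)
                printf("last handler gone: mask vector\n");
}

int
main(void)
{
        struct vec v = { 0 };

        handler_add(&v);
        handler_add(&v);        /* already enabled, only counted */
        handler_remove(&v);
        handler_remove(&v);     /* drops to zero, masks the vector */
        return (0);
}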
*/ dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid); if (rle->res != irq) return (EINVAL); if (dinfo->cfg.msi.msi_alloc > 0) { KASSERT(rid <= dinfo->cfg.msi.msi_alloc, ("MSI-X index too high")); if (dinfo->cfg.msi.msi_handlers == 0) return (EINVAL); dinfo->cfg.msi.msi_handlers--; if (dinfo->cfg.msi.msi_handlers == 0) pci_disable_msi(child); } else { KASSERT(dinfo->cfg.msix.msix_alloc > 0, ("No MSI or MSI-X interrupts allocated")); KASSERT(rid <= dinfo->cfg.msix.msix_table_len, ("MSI-X index too high")); mte = &dinfo->cfg.msix.msix_table[rid - 1]; if (mte->mte_handlers == 0) return (EINVAL); mte->mte_handlers--; if (mte->mte_handlers == 0) pci_mask_msix(child, rid - 1); } } error = bus_generic_teardown_intr(dev, child, irq, cookie); if (rid > 0) KASSERT(error == 0, ("%s: generic teardown failed for MSI/MSI-X", __func__)); return (error); } int pci_print_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); if (device_get_flags(dev)) retval += printf(" flags %#x", device_get_flags(dev)); retval += printf(" at device %d.%d", pci_get_slot(child), pci_get_function(child)); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static const struct { int class; int subclass; int report; /* 0 = bootverbose, 1 = always */ const char *desc; } pci_nomatch_tab[] = { {PCIC_OLD, -1, 1, "old"}, {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"}, {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"}, {PCIC_STORAGE, -1, 1, "mass storage"}, {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"}, {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"}, {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"}, {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"}, {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"}, {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"}, {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"}, {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"}, {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"}, {PCIC_NETWORK, -1, 1, "network"}, {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"}, {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"}, {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"}, {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"}, {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"}, {PCIC_DISPLAY, -1, 1, "display"}, {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"}, {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"}, {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"}, {PCIC_MULTIMEDIA, -1, 1, "multimedia"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"}, {PCIC_MEMORY, -1, 1, "memory"}, {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"}, {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"}, {PCIC_BRIDGE, -1, 1, "bridge"}, {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"}, {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"}, {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"}, {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"}, {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"}, {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"}, {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"}, {PCIC_BRIDGE, 
PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"}, {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"}, {PCIC_SIMPLECOMM, -1, 1, "simple comms"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */ {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"}, {PCIC_BASEPERIPH, -1, 0, "base peripheral"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"}, {PCIC_INPUTDEV, -1, 1, "input device"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"}, {PCIC_DOCKING, -1, 1, "docking station"}, {PCIC_PROCESSOR, -1, 1, "processor"}, {PCIC_SERIALBUS, -1, 1, "serial bus"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"}, {PCIC_WIRELESS, -1, 1, "wireless controller"}, {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"}, {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"}, {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"}, {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"}, {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"}, {PCIC_SATCOM, -1, 1, "satellite communication"}, {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"}, {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"}, {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"}, {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"}, {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"}, {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"}, {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"}, {PCIC_DASP, -1, 0, "dasp"}, {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"}, {PCIC_DASP, PCIS_DASP_PERFCNTRS, 1, "performance counters"}, {PCIC_DASP, PCIS_DASP_COMM_SYNC, 1, "communication synchronizer"}, {PCIC_DASP, PCIS_DASP_MGMT_CARD, 1, "signal processing management"}, {PCIC_INSTRUMENT, -1, 0, "non-essential instrumentation"}, {0, 0, 0, NULL} }; void pci_probe_nomatch(device_t dev, device_t child) { int i, report; const char *cp, *scp; char *device; /* * Look for a listing for this device in a loaded device database. */ report = 1; if ((device = pci_describe_device(child)) != NULL) { device_printf(dev, "<%s>", device); free(device, M_DEVBUF); } else { /* * Scan the class/subclass descriptions for a general * description. */ cp = "unknown"; scp = NULL; for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) { if (pci_nomatch_tab[i].class == pci_get_class(child)) { if (pci_nomatch_tab[i].subclass == -1) { cp = pci_nomatch_tab[i].desc; report = pci_nomatch_tab[i].report; } else if (pci_nomatch_tab[i].subclass == pci_get_subclass(child)) { scp = pci_nomatch_tab[i].desc; report = pci_nomatch_tab[i].report; } } } if (report || bootverbose) { device_printf(dev, "<%s%s%s>", cp ? 
cp : "", ((cp != NULL) && (scp != NULL)) ? ", " : "", scp ? scp : ""); } } if (report || bootverbose) { printf(" at device %d.%d (no driver attached)\n", pci_get_slot(child), pci_get_function(child)); } pci_cfg_save(child, device_get_ivars(child), 1); } void pci_child_detached(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list *rl; dinfo = device_get_ivars(child); rl = &dinfo->resources; /* * Have to deallocate IRQs before releasing any MSI messages and * have to release MSI messages before deallocating any memory * BARs. */ if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0) pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n"); if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) { if (dinfo->cfg.msi.msi_alloc != 0) pci_printf(&dinfo->cfg, "Device leaked %d MSI " "vectors\n", dinfo->cfg.msi.msi_alloc); else pci_printf(&dinfo->cfg, "Device leaked %d MSI-X " "vectors\n", dinfo->cfg.msix.msix_alloc); (void)pci_release_msi(child); } if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0) pci_printf(&dinfo->cfg, "Device leaked memory resources\n"); if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0) pci_printf(&dinfo->cfg, "Device leaked I/O resources\n"); if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0) pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n"); pci_cfg_save(child, dinfo, 1); } /* * Parse the PCI device database, if loaded, and return a pointer to a * description of the device. * * The database is flat text formatted as follows: * * Any line not in a valid format is ignored. * Lines are terminated with newline '\n' characters. * * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then * the vendor name. * * A DEVICE line is entered immediately below the corresponding VENDOR ID. * - devices cannot be listed without a corresponding VENDOR line. * A DEVICE line consists of a TAB, the 4 digit (hex) device code, * another TAB, then the device name. */ /* * Assuming (ptr) points to the beginning of a line in the database, * return the vendor or device and description of the next entry. * The value of (vendor) or (device) inappropriate for the entry type * is set to -1. Returns nonzero at the end of the database. * * Note that this is slightly unrobust in the face of corrupt data; * we attempt to safeguard against this by spamming the end of the * database with a newline when we initialise. */ static int pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc) { char *cp = *ptr; int left; *device = -1; *vendor = -1; **desc = '\0'; for (;;) { left = pci_vendordata_size - (cp - pci_vendordata); if (left <= 0) { *ptr = cp; return(1); } /* vendor entry? */ if (*cp != '\t' && sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2) break; /* device entry? */ if (*cp == '\t' && sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2) break; /* skip to next line */ while (*cp != '\n' && left > 0) { cp++; left--; } if (*cp == '\n') { cp++; left--; } } /* skip to next line */ while (*cp != '\n' && left > 0) { cp++; left--; } if (*cp == '\n' && left > 0) cp++; *ptr = cp; return(0); } static char * pci_describe_device(device_t dev) { int vendor, device; char *desc, *vp, *dp, *line; desc = vp = dp = NULL; /* * If we have no vendor data, we can't do anything. 
*/ if (pci_vendordata == NULL) goto out; /* * Scan the vendor data looking for this device */ line = pci_vendordata; if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) goto out; for (;;) { if (pci_describe_parse_line(&line, &vendor, &device, &vp)) goto out; if (vendor == pci_get_vendor(dev)) break; } if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) goto out; for (;;) { if (pci_describe_parse_line(&line, &vendor, &device, &dp)) { *dp = 0; break; } if (vendor != -1) { *dp = 0; break; } if (device == pci_get_device(dev)) break; } if (dp[0] == '\0') snprintf(dp, 80, "0x%x", pci_get_device(dev)); if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) != NULL) sprintf(desc, "%s, %s", vp, dp); out: if (vp != NULL) free(vp, M_DEVBUF); if (dp != NULL) free(dp, M_DEVBUF); return(desc); } int pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct pci_devinfo *dinfo; pcicfgregs *cfg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; switch (which) { case PCI_IVAR_ETHADDR: /* * The generic accessor doesn't deal with failure, so * we set the return value, then return an error. */ *((uint8_t **) result) = NULL; return (EINVAL); case PCI_IVAR_SUBVENDOR: *result = cfg->subvendor; break; case PCI_IVAR_SUBDEVICE: *result = cfg->subdevice; break; case PCI_IVAR_VENDOR: *result = cfg->vendor; break; case PCI_IVAR_DEVICE: *result = cfg->device; break; case PCI_IVAR_DEVID: *result = (cfg->device << 16) | cfg->vendor; break; case PCI_IVAR_CLASS: *result = cfg->baseclass; break; case PCI_IVAR_SUBCLASS: *result = cfg->subclass; break; case PCI_IVAR_PROGIF: *result = cfg->progif; break; case PCI_IVAR_REVID: *result = cfg->revid; break; case PCI_IVAR_INTPIN: *result = cfg->intpin; break; case PCI_IVAR_IRQ: *result = cfg->intline; break; case PCI_IVAR_DOMAIN: *result = cfg->domain; break; case PCI_IVAR_BUS: *result = cfg->bus; break; case PCI_IVAR_SLOT: *result = cfg->slot; break; case PCI_IVAR_FUNCTION: *result = cfg->func; break; case PCI_IVAR_CMDREG: *result = cfg->cmdreg; break; case PCI_IVAR_CACHELNSZ: *result = cfg->cachelnsz; break; case PCI_IVAR_MINGNT: if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { *result = -1; return (EINVAL); } *result = cfg->mingnt; break; case PCI_IVAR_MAXLAT: if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { *result = -1; return (EINVAL); } *result = cfg->maxlat; break; case PCI_IVAR_LATTIMER: *result = cfg->lattimer; break; default: return (ENOENT); } return (0); } int pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case PCI_IVAR_INTPIN: dinfo->cfg.intpin = value; return (0); case PCI_IVAR_ETHADDR: case PCI_IVAR_SUBVENDOR: case PCI_IVAR_SUBDEVICE: case PCI_IVAR_VENDOR: case PCI_IVAR_DEVICE: case PCI_IVAR_DEVID: case PCI_IVAR_CLASS: case PCI_IVAR_SUBCLASS: case PCI_IVAR_PROGIF: case PCI_IVAR_REVID: case PCI_IVAR_IRQ: case PCI_IVAR_DOMAIN: case PCI_IVAR_BUS: case PCI_IVAR_SLOT: case PCI_IVAR_FUNCTION: return (EINVAL); /* disallow for now */ default: return (ENOENT); } } #include "opt_ddb.h" #ifdef DDB #include #include /* * List resources based on pci map registers, used for within ddb */ DB_SHOW_COMMAND_FLAGS(pciregs, db_pci_dump, DB_CMD_MEMSAFE) { struct pci_devinfo *dinfo; struct devlist *devlist_head; struct pci_conf *p; const char *name; int i, error, none_count; none_count = 0; /* get the head of the device queue */ devlist_head = &pci_devq; /* * Go through the list of devices and print out devices */ for (error = 0, i = 0, dinfo = 
STAILQ_FIRST(devlist_head); (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit; dinfo = STAILQ_NEXT(dinfo, pci_links), i++) { /* Populate pd_name and pd_unit */ name = NULL; if (dinfo->cfg.dev) name = device_get_name(dinfo->cfg.dev); p = &dinfo->conf; db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x " "chip=0x%08x rev=0x%02x hdr=0x%02x\n", (name && *name) ? name : "none", (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) : none_count++, p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev, p->pc_sel.pc_func, (p->pc_class << 16) | (p->pc_subclass << 8) | p->pc_progif, (p->pc_subdevice << 16) | p->pc_subvendor, (p->pc_device << 16) | p->pc_vendor, p->pc_revid, p->pc_hdr); } } #endif /* DDB */ struct resource * pci_reserve_map(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int num, u_int flags) { struct pci_devinfo *dinfo = device_get_ivars(child); struct resource_list *rl = &dinfo->resources; struct resource *res; struct pci_map *pm; uint16_t cmd; pci_addr_t map, testval; int mapsize; res = NULL; /* If rid is managed by EA, ignore it */ if (pci_ea_is_enabled(child, *rid)) goto out; pm = pci_find_bar(child, *rid); if (pm != NULL) { /* This is a BAR that we failed to allocate earlier. */ mapsize = pm->pm_size; map = pm->pm_value; } else { /* * Weed out the bogons, and figure out how large the * BAR/map is. BARs that read back 0 here are bogus * and unimplemented. Note: atapci in legacy mode are * special and handled elsewhere in the code. If you * have a atapci device in legacy mode and it fails * here, that other code is broken. */ pci_read_bar(child, *rid, &map, &testval, NULL); /* * Determine the size of the BAR and ignore BARs with a size * of 0. Device ROM BARs use a different mask value. */ if (PCIR_IS_BIOS(&dinfo->cfg, *rid)) mapsize = pci_romsize(testval); else mapsize = pci_mapsize(testval); if (mapsize == 0) goto out; pm = pci_add_bar(child, *rid, map, mapsize); } if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) { if (type != SYS_RES_MEMORY) { if (bootverbose) device_printf(dev, "child %s requested type %d for rid %#x," " but the BAR says it is an memio\n", device_get_nameunit(child), type, *rid); goto out; } } else { if (type != SYS_RES_IOPORT) { if (bootverbose) device_printf(dev, "child %s requested type %d for rid %#x," " but the BAR says it is an ioport\n", device_get_nameunit(child), type, *rid); goto out; } } /* * For real BARs, we need to override the size that * the driver requests, because that's what the BAR * actually uses and we would otherwise have a * situation where we might allocate the excess to * another driver, which won't work. */ count = ((pci_addr_t)1 << mapsize) * num; if (RF_ALIGNMENT(flags) < mapsize) flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize); if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH)) flags |= RF_PREFETCHABLE; /* * Allocate enough resource, and then write back the * appropriate BAR for that resource. 
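In pci_reserve_map() above, mapsize is the log2 of the BAR's size as decoded by pci_mapsize(), so the reservation length and the minimum alignment both fall out of a single shift: BARs are naturally aligned, so the alignment can never be smaller than the size itself. A trivial sketch of that arithmetic (the 16 KB BAR is hypothetical):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        int mapsize = 14;                       /* hypothetical 16 KB BAR */
        uint64_t count = (uint64_t)1 << mapsize;

        /*
         * A BAR decodes a naturally aligned power-of-two window, so the
         * allocation must be at least 2^mapsize aligned as well.
         */
        printf("BAR size %ju bytes, required alignment 2^%d\n",
            (uintmax_t)count, mapsize);
        return (0);
}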
*/ resource_list_add(rl, type, *rid, start, end, count); res = resource_list_reserve(rl, dev, child, type, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { resource_list_delete(rl, type, *rid); device_printf(child, "%#jx bytes of rid %#x res %d failed (%#jx, %#jx).\n", count, *rid, type, start, end); goto out; } if (bootverbose) device_printf(child, "Lazy allocation of %#jx bytes rid %#x type %d at %#jx\n", count, *rid, type, rman_get_start(res)); /* Disable decoding via the CMD register before updating the BAR */ cmd = pci_read_config(child, PCIR_COMMAND, 2); pci_write_config(child, PCIR_COMMAND, cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); map = rman_get_start(res); pci_write_bar(child, pm, map); /* Restore the original value of the CMD register */ pci_write_config(child, PCIR_COMMAND, cmd, 2); out: return (res); } struct resource * pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_long num, u_int flags) { struct pci_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; struct resource *res; pcicfgregs *cfg; /* * Perform lazy resource allocation */ dinfo = device_get_ivars(child); rl = &dinfo->resources; cfg = &dinfo->cfg; switch (type) { case PCI_RES_BUS: return (pci_alloc_secbus(dev, child, rid, start, end, count, flags)); case SYS_RES_IRQ: /* * Can't alloc legacy interrupt once MSI messages have * been allocated. */ if (*rid == 0 && (cfg->msi.msi_alloc > 0 || cfg->msix.msix_alloc > 0)) return (NULL); /* * If the child device doesn't have an interrupt * routed and is deserving of an interrupt, try to * assign it one. */ if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) && (cfg->intpin != 0)) pci_assign_interrupt(dev, child, 0); break; case SYS_RES_IOPORT: case SYS_RES_MEMORY: /* * PCI-PCI bridge I/O window resources are not BARs. * For those allocations just pass the request up the * tree. */ if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) { switch (*rid) { case PCIR_IOBASEL_1: case PCIR_MEMBASE_1: case PCIR_PMBASEL_1: /* * XXX: Should we bother creating a resource * list entry? */ return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } } /* Reserve resources for this BAR if needed. */ rle = resource_list_find(rl, type, *rid); if (rle == NULL) { res = pci_reserve_map(dev, child, type, rid, start, end, count, num, flags); if (res == NULL) return (NULL); } } return (resource_list_alloc(rl, dev, child, type, rid, start, end, count, flags)); } struct resource * pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifdef PCI_IOV struct pci_devinfo *dinfo; #endif if (device_get_parent(child) != dev) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); #ifdef PCI_IOV dinfo = device_get_ivars(child); if (dinfo->cfg.flags & PCICFG_VF) { switch (type) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: return (NULL); case SYS_RES_MEMORY: return (pci_vf_alloc_mem_resource(dev, child, rid, start, end, count, flags)); } /* Fall through for other types of resource allocations. 
*/ } #endif return (pci_alloc_multi_resource(dev, child, type, rid, start, end, count, 1, flags)); } int pci_release_resource(device_t dev, device_t child, struct resource *r) { struct pci_devinfo *dinfo; struct resource_list *rl; pcicfgregs *cfg __unused; if (device_get_parent(child) != dev) return (bus_generic_release_resource(dev, child, r)); dinfo = device_get_ivars(child); cfg = &dinfo->cfg; #ifdef PCI_IOV if (cfg->flags & PCICFG_VF) { switch (rman_get_type(r)) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: return (EDOOFUS); case SYS_RES_MEMORY: return (pci_vf_release_mem_resource(dev, child, r)); } /* Fall through for other types of resource allocations. */ } #endif /* * PCI-PCI bridge I/O window resources are not BARs. For * those allocations just pass the request up the tree. */ if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE && (rman_get_type(r) == SYS_RES_IOPORT || rman_get_type(r) == SYS_RES_MEMORY)) { switch (rman_get_rid(r)) { case PCIR_IOBASEL_1: case PCIR_MEMBASE_1: case PCIR_PMBASEL_1: return (bus_generic_release_resource(dev, child, r)); } } rl = &dinfo->resources; return (resource_list_release(rl, dev, child, r)); } int pci_activate_resource(device_t dev, device_t child, struct resource *r) { struct pci_devinfo *dinfo; int error, rid, type; if (device_get_parent(child) != dev) return (bus_generic_activate_resource(dev, child, r)); dinfo = device_get_ivars(child); #ifdef PCI_IOV if (dinfo->cfg.flags & PCICFG_VF) { switch (rman_get_type(r)) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: error = EINVAL; break; case SYS_RES_MEMORY: error = pci_vf_activate_mem_resource(dev, child, r); break; default: error = bus_generic_activate_resource(dev, child, r); break; } } else #endif error = bus_generic_activate_resource(dev, child, r); if (error) return (error); rid = rman_get_rid(r); type = rman_get_type(r); /* Device ROMs need their decoding explicitly enabled. */ if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) pci_write_bar(child, pci_find_bar(child, rid), rman_get_start(r) | PCIM_BIOS_ENABLE); /* Enable decoding in the command register when activating BARs. */ switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: error = PCI_ENABLE_IO(dev, child, type); break; } return (error); } int pci_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct pci_devinfo *dinfo; int error, rid, type; if (device_get_parent(child) != dev) return (bus_generic_deactivate_resource(dev, child, r)); dinfo = device_get_ivars(child); #ifdef PCI_IOV if (dinfo->cfg.flags & PCICFG_VF) { switch (rman_get_type(r)) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: error = EINVAL; break; case SYS_RES_MEMORY: error = pci_vf_deactivate_mem_resource(dev, child, r); break; default: error = bus_generic_deactivate_resource(dev, child, r); break; } } else #endif error = bus_generic_deactivate_resource(dev, child, r); if (error) return (error); /* Disable decoding for device ROMs. */ rid = rman_get_rid(r); type = rman_get_type(r); if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) pci_write_bar(child, pci_find_bar(child, rid), rman_get_start(r)); return (0); } int pci_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { #ifdef PCI_IOV struct pci_devinfo *dinfo; if (device_get_parent(child) != dev) return (bus_generic_adjust_resource(dev, child, r, start, end)); dinfo = device_get_ivars(child); if (dinfo->cfg.flags & PCICFG_VF) { switch (rman_get_type(r)) { /* VFs can't have I/O BARs. 
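The PCICFG_VF branches in the allocation, release, activation, and mapping paths above all apply one dispatch rule: reject I/O port requests outright (SR-IOV VFs have no I/O BARs), route memory to the VF-specific handlers, and fall through to the generic bus code for every other type. A sketch of the rule on its own (the numeric values mirror the SYS_RES_* definitions; the strings are illustrative):

#include <stdio.h>

#define SYS_RES_MEMORY  3
#define SYS_RES_IOPORT  4

static const char *
vf_dispatch(int type)
{
        switch (type) {
        case SYS_RES_IOPORT:
                return ("reject: VFs have no I/O BARs");
        case SYS_RES_MEMORY:
                return ("VF-specific memory path");
        default:
                return ("generic bus path");
        }
}

int
main(void)
{
        printf("%s\n", vf_dispatch(SYS_RES_IOPORT));
        printf("%s\n", vf_dispatch(SYS_RES_MEMORY));
        return (0);
}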
*/ case SYS_RES_IOPORT: return (EINVAL); case SYS_RES_MEMORY: return (pci_vf_adjust_mem_resource(dev, child, r, start, end)); } /* Fall through for other types of resource allocations. */ } #endif return (bus_generic_adjust_resource(dev, child, r, start, end)); } int pci_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { #ifdef PCI_IOV struct pci_devinfo *dinfo; if (device_get_parent(child) != dev) return (bus_generic_map_resource(dev, child, r, argsp, map)); dinfo = device_get_ivars(child); if (dinfo->cfg.flags & PCICFG_VF) { switch (rman_get_type(r)) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: return (EINVAL); case SYS_RES_MEMORY: return (pci_vf_map_mem_resource(dev, child, r, argsp, map)); } /* Fall through for other types of resource allocations. */ } #endif return (bus_generic_map_resource(dev, child, r, argsp, map)); } int pci_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { #ifdef PCI_IOV struct pci_devinfo *dinfo; if (device_get_parent(child) != dev) return (bus_generic_unmap_resource(dev, child, r, map)); dinfo = device_get_ivars(child); if (dinfo->cfg.flags & PCICFG_VF) { switch (rman_get_type(r)) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: return (EINVAL); case SYS_RES_MEMORY: return (pci_vf_unmap_mem_resource(dev, child, r, map)); } /* Fall through for other types of resource allocations. */ } #endif return (bus_generic_unmap_resource(dev, child, r, map)); } void pci_child_deleted(device_t dev, device_t child) { struct resource_list_entry *rle; struct resource_list *rl; struct pci_devinfo *dinfo; dinfo = device_get_ivars(child); rl = &dinfo->resources; EVENTHANDLER_INVOKE(pci_delete_device, child); /* Turn off access to resources we're about to free */ if (bus_child_present(child) != 0) { pci_write_config(child, PCIR_COMMAND, pci_read_config(child, PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2); pci_disable_busmaster(child); } /* Free all allocated resources */ STAILQ_FOREACH(rle, rl, link) { if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, rle->type, rle->rid)) { pci_printf(&dinfo->cfg, "Resource still owned, oops. " "(type=%d, rid=%d, addr=%lx)\n", rle->type, rle->rid, rman_get_start(rle->res)); bus_release_resource(child, rle->type, rle->rid, rle->res); } resource_list_unreserve(rl, dev, child, rle->type, rle->rid); } } resource_list_free(rl); pci_freecfg(dinfo); } void pci_delete_resource(device_t dev, device_t child, int type, int rid) { struct pci_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return; dinfo = device_get_ivars(child); rl = &dinfo->resources; rle = resource_list_find(rl, type, rid); if (rle == NULL) return; if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, type, rid)) { device_printf(dev, "delete_resource: " "Resource still owned by child, oops. 
" "(type=%d, rid=%d, addr=%jx)\n", type, rid, rman_get_start(rle->res)); return; } resource_list_unreserve(rl, dev, child, type, rid); } resource_list_delete(rl, type, rid); } struct resource_list * pci_get_resource_list (device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } #ifdef IOMMU bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev) { bus_dma_tag_t tag; struct pci_softc *sc; if (device_get_parent(dev) == bus) { /* try iommu and return if it works */ tag = iommu_get_dma_tag(bus, dev); } else tag = NULL; if (tag == NULL) { sc = device_get_softc(bus); tag = sc->sc_dma_tag; } return (tag); } #else bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev) { struct pci_softc *sc = device_get_softc(bus); return (sc->sc_dma_tag); } #endif uint32_t pci_read_config_method(device_t dev, device_t child, int reg, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; #ifdef PCI_IOV /* * SR-IOV VFs don't implement the VID or DID registers, so we have to * emulate them here. */ if (cfg->flags & PCICFG_VF) { if (reg == PCIR_VENDOR) { switch (width) { case 4: return (cfg->device << 16 | cfg->vendor); case 2: return (cfg->vendor); case 1: return (cfg->vendor & 0xff); default: return (0xffffffff); } } else if (reg == PCIR_DEVICE) { switch (width) { /* Note that an unaligned 4-byte read is an error. */ case 2: return (cfg->device); case 1: return (cfg->device & 0xff); default: return (0xffffffff); } } } #endif return (PCIB_READ_CONFIG(device_get_parent(dev), cfg->bus, cfg->slot, cfg->func, reg, width)); } void pci_write_config_method(device_t dev, device_t child, int reg, uint32_t val, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; PCIB_WRITE_CONFIG(device_get_parent(dev), cfg->bus, cfg->slot, cfg->func, reg, val, width); } int pci_child_location_method(device_t dev, device_t child, struct sbuf *sb) { sbuf_printf(sb, "slot=%d function=%d dbsf=pci%d:%d:%d:%d", pci_get_slot(child), pci_get_function(child), pci_get_domain(child), pci_get_bus(child), pci_get_slot(child), pci_get_function(child)); return (0); } int pci_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb) { struct pci_devinfo *dinfo; pcicfgregs *cfg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; sbuf_printf(sb, "vendor=0x%04x device=0x%04x subvendor=0x%04x " "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device, cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass, cfg->progif); return (0); } int pci_get_device_path_method(device_t bus, device_t child, const char *locator, struct sbuf *sb) { device_t parent = device_get_parent(bus); int rv; if (strcmp(locator, BUS_LOCATOR_UEFI) == 0) { rv = bus_generic_get_device_path(parent, bus, locator, sb); if (rv == 0) { sbuf_printf(sb, "/Pci(0x%x,0x%x)", pci_get_slot(child), pci_get_function(child)); } return (0); } return (bus_generic_get_device_path(bus, child, locator, sb)); } int pci_assign_interrupt_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child, cfg->intpin)); } static void pci_lookup(void *arg, const char *name, device_t *dev) { long val; char *end; int domain, bus, slot, func; if (*dev != NULL) return; /* * Accept pciconf-style selectors of either pciD:B:S:F or * pciB:S:F. In the latter case, the domain is assumed to * be zero. 
*/ if (strncmp(name, "pci", 3) != 0) return; val = strtol(name + 3, &end, 10); if (val < 0 || val > INT_MAX || *end != ':') return; domain = val; val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX || *end != ':') return; bus = val; val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX) return; slot = val; if (*end == ':') { val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX || *end != '\0') return; func = val; } else if (*end == '\0') { func = slot; slot = bus; bus = domain; domain = 0; } else return; if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX || func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX)) return; *dev = pci_find_dbsf(domain, bus, slot, func); } static int pci_modevent(module_t mod, int what, void *arg) { static struct cdev *pci_cdev; static eventhandler_tag tag; switch (what) { case MOD_LOAD: STAILQ_INIT(&pci_devq); pci_generation = 0; pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644, "pci"); pci_load_vendor_data(); tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL, 1000); break; case MOD_UNLOAD: if (tag != NULL) EVENTHANDLER_DEREGISTER(dev_lookup, tag); destroy_dev(pci_cdev); break; } return (0); } static void pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo) { #define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2) struct pcicfg_pcie *cfg; int version, pos; cfg = &dinfo->cfg.pcie; pos = cfg->pcie_location; version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ENDPOINT || cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl); if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ROOT_EC) WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl); if (version > 1) { WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2); WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2); WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2); } #undef WREG } static void pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo) { pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, dinfo->cfg.pcix.pcix_command, 2); } void pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo) { /* * Restore the device to full power mode. We must do this * before we restore the registers because moving from D3 to * D0 will cause the chip's BARs and some other registers to * be reset to some unknown power on reset values. Cut down * the noise on boot by doing nothing if we are already in * state D0. 
*/ if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) pci_set_powerstate(dev, PCI_POWERSTATE_D0); pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1); pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1); pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1); pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1); pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1); pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1); switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1); pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1); break; case PCIM_HDRTYPE_BRIDGE: pci_write_config(dev, PCIR_SECLAT_1, dinfo->cfg.bridge.br_seclat, 1); pci_write_config(dev, PCIR_SUBBUS_1, dinfo->cfg.bridge.br_subbus, 1); pci_write_config(dev, PCIR_SECBUS_1, dinfo->cfg.bridge.br_secbus, 1); pci_write_config(dev, PCIR_PRIBUS_1, dinfo->cfg.bridge.br_pribus, 1); pci_write_config(dev, PCIR_BRIDGECTL_1, dinfo->cfg.bridge.br_control, 2); break; case PCIM_HDRTYPE_CARDBUS: pci_write_config(dev, PCIR_SECLAT_2, dinfo->cfg.bridge.br_seclat, 1); pci_write_config(dev, PCIR_SUBBUS_2, dinfo->cfg.bridge.br_subbus, 1); pci_write_config(dev, PCIR_SECBUS_2, dinfo->cfg.bridge.br_secbus, 1); pci_write_config(dev, PCIR_PRIBUS_2, dinfo->cfg.bridge.br_pribus, 1); pci_write_config(dev, PCIR_BRIDGECTL_2, dinfo->cfg.bridge.br_control, 2); break; } pci_restore_bars(dev); if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_BRIDGE) pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2); /* * Restore extended capabilities for PCI-Express and PCI-X */ if (dinfo->cfg.pcie.pcie_location != 0) pci_cfg_restore_pcie(dev, dinfo); if (dinfo->cfg.pcix.pcix_location != 0) pci_cfg_restore_pcix(dev, dinfo); /* Restore MSI and MSI-X configurations if they are present. */ if (dinfo->cfg.msi.msi_location != 0) pci_resume_msi(dev); if (dinfo->cfg.msix.msix_location != 0) pci_resume_msix(dev); #ifdef PCI_IOV if (dinfo->cfg.iov != NULL) pci_iov_cfg_restore(dev, dinfo); #endif } static void pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo) { #define RREG(n) pci_read_config(dev, pos + (n), 2) struct pcicfg_pcie *cfg; int version, pos; cfg = &dinfo->cfg.pcie; pos = cfg->pcie_location; cfg->pcie_flags = RREG(PCIER_FLAGS); version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ENDPOINT || cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL); if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ROOT_EC) cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL); if (version > 1) { cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2); cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2); cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2); } #undef RREG } static void pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo) { dinfo->cfg.pcix.pcix_command = pci_read_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2); } void pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate) { uint32_t cls; int ps; /* * Some drivers apparently write to these registers w/o updating our * cached copy. No harm happens if we update the copy, so do so here * so we can restore them. 
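pci_cfg_save_pcie() and pci_cfg_restore_pcie() above are a matched pair over the same shadow fields, one reading through its RREG macro and the other writing through WREG, so anything a reset clobbers can be put back exactly as saved. The shape of that pattern in miniature (fake register storage, hypothetical field set):

#include <stdio.h>
#include <stdint.h>

struct shadow { uint16_t device_ctl, link_ctl; };

static uint16_t regs[2] = { 0x2810, 0x0040 };   /* stand-in config space */

static void
save(struct shadow *s)
{
        s->device_ctl = regs[0];
        s->link_ctl = regs[1];
}

static void
restore(const struct shadow *s)
{
        regs[0] = s->device_ctl;
        regs[1] = s->link_ctl;
}

int
main(void)
{
        struct shadow s;

        save(&s);
        regs[0] = 0;            /* pretend a reset clobbered the register */
        restore(&s);
        printf("DEVICE_CTL restored to 0x%04x\n", regs[0]);
        return (0);
}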
The COMMAND register is modified by the * bus w/o updating the cache. This should represent the normally * writable portion of the 'defined' part of type 0/1/2 headers. */ dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2); dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2); dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2); dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1); dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1); dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1); dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1); dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1); dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1); switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2); dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2); dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1); dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1); break; case PCIM_HDRTYPE_BRIDGE: dinfo->cfg.bridge.br_seclat = pci_read_config(dev, PCIR_SECLAT_1, 1); dinfo->cfg.bridge.br_subbus = pci_read_config(dev, PCIR_SUBBUS_1, 1); dinfo->cfg.bridge.br_secbus = pci_read_config(dev, PCIR_SECBUS_1, 1); dinfo->cfg.bridge.br_pribus = pci_read_config(dev, PCIR_PRIBUS_1, 1); dinfo->cfg.bridge.br_control = pci_read_config(dev, PCIR_BRIDGECTL_1, 2); break; case PCIM_HDRTYPE_CARDBUS: dinfo->cfg.bridge.br_seclat = pci_read_config(dev, PCIR_SECLAT_2, 1); dinfo->cfg.bridge.br_subbus = pci_read_config(dev, PCIR_SUBBUS_2, 1); dinfo->cfg.bridge.br_secbus = pci_read_config(dev, PCIR_SECBUS_2, 1); dinfo->cfg.bridge.br_pribus = pci_read_config(dev, PCIR_PRIBUS_2, 1); dinfo->cfg.bridge.br_control = pci_read_config(dev, PCIR_BRIDGECTL_2, 2); dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2); dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2); break; } if (dinfo->cfg.pcie.pcie_location != 0) pci_cfg_save_pcie(dev, dinfo); if (dinfo->cfg.pcix.pcix_location != 0) pci_cfg_save_pcix(dev, dinfo); #ifdef PCI_IOV if (dinfo->cfg.iov != NULL) pci_iov_cfg_save(dev, dinfo); #endif /* * don't set the state for display devices, base peripherals and * memory devices since bad things happen when they are powered down. * We should (a) have drivers that can easily detach and (b) use * generic drivers for these devices so that some device actually * attaches. We need to make sure that when we implement (a) we don't * power the device down on a reattach. */ cls = pci_get_class(dev); if (!setstate) return; switch (pci_do_power_nodriver) { case 0: /* NO powerdown at all */ return; case 1: /* Conservative about what to power down */ if (cls == PCIC_STORAGE) return; /*FALLTHROUGH*/ case 2: /* Aggressive about what to power down */ if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY || cls == PCIC_BASEPERIPH) return; /*FALLTHROUGH*/ case 3: /* Power down everything */ break; } /* * PCI spec says we can only go into D3 state from D0 state. * Transition from D[12] into D0 before going to D3 state. */ ps = pci_get_powerstate(dev); if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D0); if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D3); } /* Wrapper APIs suitable for device driver use. 
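The pci_do_power_nodriver switch above is a tiered policy in which each level inherits the exclusions of the stricter levels through deliberate fallthrough. The same tiers restated as a predicate (a sketch: may_power_down() is a made-up name and only two of the protected classes are shown):

#include <stdio.h>

#define PCIC_STORAGE    0x01
#define PCIC_DISPLAY    0x03

static int
may_power_down(int policy, int class)
{
        switch (policy) {
        case 0:                         /* no powerdown at all */
                return (0);
        case 1:                         /* conservative */
                if (class == PCIC_STORAGE)
                        return (0);
                /* FALLTHROUGH */
        case 2:                         /* aggressive */
                if (class == PCIC_DISPLAY)
                        return (0);
                /* FALLTHROUGH */
        default:                        /* 3: power down everything */
                return (1);
        }
}

int
main(void)
{
        printf("policy 1, storage: %d\n", may_power_down(1, PCIC_STORAGE));
        printf("policy 2, display: %d\n", may_power_down(2, PCIC_DISPLAY));
        printf("policy 3, display: %d\n", may_power_down(3, PCIC_DISPLAY));
        return (0);
}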
*/ void pci_save_state(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); pci_cfg_save(dev, dinfo, 0); } void pci_restore_state(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); pci_cfg_restore(dev, dinfo); } static int pci_get_id_method(device_t dev, device_t child, enum pci_id_type type, uintptr_t *id) { return (PCIB_GET_ID(device_get_parent(dev), child, type, id)); } /* Find the upstream port of a given PCI device in a root complex. */ device_t pci_find_pcie_root_port(device_t dev) { struct pci_devinfo *dinfo; devclass_t pci_class; device_t pcib, bus; pci_class = devclass_find("pci"); KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class, ("%s: non-pci device %s", __func__, device_get_nameunit(dev))); /* * Walk the bridge hierarchy until we find a PCI-e root * port or a non-PCI device. */ for (;;) { bus = device_get_parent(dev); KASSERT(bus != NULL, ("%s: null parent of %s", __func__, device_get_nameunit(dev))); pcib = device_get_parent(bus); KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__, device_get_nameunit(bus))); /* * pcib's parent must be a PCI bus for this to be a * PCI-PCI bridge. */ if (device_get_devclass(device_get_parent(pcib)) != pci_class) return (NULL); dinfo = device_get_ivars(pcib); if (dinfo->cfg.pcie.pcie_location != 0 && dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) return (pcib); dev = pcib; } } /* * Wait for pending transactions to complete on a PCI-express function. * * The maximum delay is specified in milliseconds in max_delay. Note * that this function may sleep. * * Returns true if the function is idle and false if the timeout is * exceeded. If dev is not a PCI-express function, this returns true. */ bool pcie_wait_for_pending_transactions(device_t dev, u_int max_delay) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint16_t sta; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (true); sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2); while (sta & PCIEM_STA_TRANSACTION_PND) { if (max_delay == 0) return (false); /* Poll once every 100 milliseconds up to the timeout. */ if (max_delay > 100) { pause_sbt("pcietp", 100 * SBT_1MS, 0, C_HARDCLOCK); max_delay -= 100; } else { pause_sbt("pcietp", max_delay * SBT_1MS, 0, C_HARDCLOCK); max_delay = 0; } sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2); } return (true); } /* * Determine the maximum Completion Timeout in microseconds. * * For non-PCI-express functions this returns 0. */ int pcie_get_max_completion_timeout(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); /* * Functions using the 1.x spec use the default timeout range of * 50 microseconds to 50 milliseconds. Functions that do not * support programmable timeouts also use this range. 
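The wait loop in pcie_wait_for_pending_transactions() above spends its millisecond budget in 100 ms slices, re-reading the status between sleeps, so a permanently busy function costs at most max_delay. The same structure with the config-space read replaced by a stub that simulates the device going idle after 250 ms:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for reading PCIER_DEVICE_STA and testing TRANSACTION_PND. */
static bool
still_pending(unsigned int elapsed)
{
        return (elapsed < 250);
}

static bool
wait_idle(unsigned int max_delay)
{
        unsigned int elapsed = 0, step;

        while (still_pending(elapsed)) {
                if (max_delay == 0)
                        return (false);         /* budget exhausted */
                step = max_delay > 100 ? 100 : max_delay;
                /* A real implementation would sleep 'step' ms here. */
                elapsed += step;
                max_delay -= step;
        }
        return (true);
}

int
main(void)
{
        printf("1s budget: %s\n", wait_idle(1000) ? "idle" : "timeout");
        printf("200ms budget: %s\n", wait_idle(200) ? "idle" : "timeout");
        return (0);
}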
*/ if ((dinfo->cfg.pcie.pcie_flags & PCIEM_FLAGS_VERSION) < 2 || (pci_read_config(dev, cap + PCIER_DEVICE_CAP2, 4) & PCIEM_CAP2_COMP_TIMO_RANGES) == 0) return (50 * 1000); switch (pci_read_config(dev, cap + PCIER_DEVICE_CTL2, 2) & PCIEM_CTL2_COMP_TIMO_VAL) { case PCIEM_CTL2_COMP_TIMO_100US: return (100); case PCIEM_CTL2_COMP_TIMO_10MS: return (10 * 1000); case PCIEM_CTL2_COMP_TIMO_55MS: return (55 * 1000); case PCIEM_CTL2_COMP_TIMO_210MS: return (210 * 1000); case PCIEM_CTL2_COMP_TIMO_900MS: return (900 * 1000); case PCIEM_CTL2_COMP_TIMO_3500MS: return (3500 * 1000); case PCIEM_CTL2_COMP_TIMO_13S: return (13 * 1000 * 1000); case PCIEM_CTL2_COMP_TIMO_64S: return (64 * 1000 * 1000); default: return (50 * 1000); } } void pcie_apei_error(device_t dev, int sev, uint8_t *aerp) { struct pci_devinfo *dinfo = device_get_ivars(dev); const char *s; int aer; uint32_t r, r1; uint16_t rs; if (sev == PCIEM_STA_CORRECTABLE_ERROR) s = "Correctable"; else if (sev == PCIEM_STA_NON_FATAL_ERROR) s = "Uncorrectable (Non-Fatal)"; else s = "Uncorrectable (Fatal)"; device_printf(dev, "%s PCIe error reported by APEI\n", s); if (aerp) { if (sev == PCIEM_STA_CORRECTABLE_ERROR) { r = le32dec(aerp + PCIR_AER_COR_STATUS); r1 = le32dec(aerp + PCIR_AER_COR_MASK); } else { r = le32dec(aerp + PCIR_AER_UC_STATUS); r1 = le32dec(aerp + PCIR_AER_UC_MASK); } device_printf(dev, "status 0x%08x mask 0x%08x", r, r1); if (sev != PCIEM_STA_CORRECTABLE_ERROR) { r = le32dec(aerp + PCIR_AER_UC_SEVERITY); rs = le16dec(aerp + PCIR_AER_CAP_CONTROL); printf(" severity 0x%08x first %d\n", r, rs & 0x1f); } else printf("\n"); } /* As kind of recovery just report and clear the error statuses. */ if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) { r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); if (r != 0) { pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4); device_printf(dev, "Clearing UC AER errors 0x%08x\n", r); } r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); if (r != 0) { pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4); device_printf(dev, "Clearing COR AER errors 0x%08x\n", r); } } if (dinfo->cfg.pcie.pcie_location != 0) { rs = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, 2); if ((rs & (PCIEM_STA_CORRECTABLE_ERROR | PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR | PCIEM_STA_UNSUPPORTED_REQ)) != 0) { pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, rs, 2); device_printf(dev, "Clearing PCIe errors 0x%04x\n", rs); } } } /* * Perform a Function Level Reset (FLR) on a device. * * This function first waits for any pending transactions to complete * within the timeout specified by max_delay. If transactions are * still pending, the function will return false without attempting a * reset. * * If dev is not a PCI-express function or does not support FLR, this * function returns false. * * Note that no registers are saved or restored. The caller is * responsible for saving and restoring any registers including * PCI-standard registers via pci_save_state() and * pci_restore_state(). */ bool pcie_flr(device_t dev, u_int max_delay, bool force) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint16_t cmd, ctl; int compl_delay; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (false); if (!(pci_read_config(dev, cap + PCIER_DEVICE_CAP, 4) & PCIEM_CAP_FLR)) return (false); /* * Disable busmastering to prevent generation of new * transactions while waiting for the device to go idle. 
If * the idle timeout fails, the command register is restored * which will re-enable busmastering. */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCIM_CMD_BUSMASTEREN), 2); if (!pcie_wait_for_pending_transactions(dev, max_delay)) { if (!force) { pci_write_config(dev, PCIR_COMMAND, cmd, 2); return (false); } pci_printf(&dinfo->cfg, "Resetting with transactions pending after %d ms\n", max_delay); /* * Extend the post-FLR delay to cover the maximum * Completion Timeout delay of anything in flight * during the FLR delay. Enforce a minimum delay of * at least 10ms. */ compl_delay = pcie_get_max_completion_timeout(dev) / 1000; if (compl_delay < 10) compl_delay = 10; } else compl_delay = 0; /* Initiate the reset. */ ctl = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); pci_write_config(dev, cap + PCIER_DEVICE_CTL, ctl | PCIEM_CTL_INITIATE_FLR, 2); /* Wait for 100ms. */ pause_sbt("pcieflr", (100 + compl_delay) * SBT_1MS, 0, C_HARDCLOCK); if (pci_read_config(dev, cap + PCIER_DEVICE_STA, 2) & PCIEM_STA_TRANSACTION_PND) pci_printf(&dinfo->cfg, "Transactions pending after FLR!\n"); return (true); } /* * Attempt a power-management reset by cycling the device in/out of D3 * state. PCI spec says we can only go into D3 state from D0 state. * Transition from D[12] into D0 before going to D3 state. */ int pci_power_reset(device_t dev) { int ps; ps = pci_get_powerstate(dev); if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D0); pci_set_powerstate(dev, PCI_POWERSTATE_D3); pci_set_powerstate(dev, ps); return (0); } /* * Try link drop and retrain of the downstream port of upstream * switch, for PCIe. According to the PCIe 3.0 spec 6.6.1, this must * cause Conventional Hot reset of the device in the slot. * Alternative, for PCIe, could be the secondary bus reset initiatied * on the upstream switch PCIR_BRIDGECTL_1, bit 6. */ int pcie_link_reset(device_t port, int pcie_location) { uint16_t v; v = pci_read_config(port, pcie_location + PCIER_LINK_CTL, 2); v |= PCIEM_LINK_CTL_LINK_DIS; pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2); pause_sbt("pcier1", mstosbt(20), 0, 0); v &= ~PCIEM_LINK_CTL_LINK_DIS; v |= PCIEM_LINK_CTL_RETRAIN_LINK; pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2); pause_sbt("pcier2", mstosbt(100), 0, 0); /* 100 ms */ v = pci_read_config(port, pcie_location + PCIER_LINK_STA, 2); return ((v & PCIEM_LINK_STA_TRAINING) != 0 ? ETIMEDOUT : 0); } static int pci_reset_post(device_t dev, device_t child) { if (dev == device_get_parent(child)) pci_restore_state(child); return (0); } static int pci_reset_prepare(device_t dev, device_t child) { if (dev == device_get_parent(child)) pci_save_state(child); return (0); } static int pci_reset_child(device_t dev, device_t child, int flags) { int error; if (dev == NULL || device_get_parent(child) != dev) return (0); if ((flags & DEVF_RESET_DETACH) != 0) { error = device_get_state(child) == DS_ATTACHED ? 

/*
 * Attempt a power-management reset by cycling the device in/out of D3
 * state.  The PCI spec says we can only go into D3 state from D0 state.
 * Transition from D1 or D2 into D0 before going to D3 state.
 */
int
pci_power_reset(device_t dev)
{
	int ps;

	ps = pci_get_powerstate(dev);
	if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	pci_set_powerstate(dev, PCI_POWERSTATE_D3);
	pci_set_powerstate(dev, ps);
	return (0);
}

/*
 * Try a link drop and retrain of the downstream port of the upstream
 * switch, for PCIe.  According to the PCIe 3.0 spec 6.6.1, this must
 * cause a Conventional Hot reset of the device in the slot.
 * An alternative, for PCIe, could be a secondary bus reset initiated
 * via PCIR_BRIDGECTL_1, bit 6, on the upstream switch.
 */
int
pcie_link_reset(device_t port, int pcie_location)
{
	uint16_t v;

	v = pci_read_config(port, pcie_location + PCIER_LINK_CTL, 2);
	v |= PCIEM_LINK_CTL_LINK_DIS;
	pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2);
	pause_sbt("pcier1", mstosbt(20), 0, 0);
	v &= ~PCIEM_LINK_CTL_LINK_DIS;
	v |= PCIEM_LINK_CTL_RETRAIN_LINK;
	pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2);
	pause_sbt("pcier2", mstosbt(100), 0, 0);	/* 100 ms */
	v = pci_read_config(port, pcie_location + PCIER_LINK_STA, 2);
	return ((v & PCIEM_LINK_STA_TRAINING) != 0 ? ETIMEDOUT : 0);
}

static int
pci_reset_post(device_t dev, device_t child)
{

	if (dev == device_get_parent(child))
		pci_restore_state(child);
	return (0);
}

static int
pci_reset_prepare(device_t dev, device_t child)
{

	if (dev == device_get_parent(child))
		pci_save_state(child);
	return (0);
}

static int
pci_reset_child(device_t dev, device_t child, int flags)
{
	int error;

	if (dev == NULL || device_get_parent(child) != dev)
		return (0);
	if ((flags & DEVF_RESET_DETACH) != 0) {
		error = device_get_state(child) == DS_ATTACHED ?
		    device_detach(child) : 0;
	} else {
		error = BUS_SUSPEND_CHILD(dev, child);
	}
	if (error == 0) {
		if (!pcie_flr(child, 1000, false)) {
			error = BUS_RESET_PREPARE(dev, child);
			if (error == 0)
				pci_power_reset(child);
			BUS_RESET_POST(dev, child);
		}
		if ((flags & DEVF_RESET_DETACH) != 0)
			device_probe_and_attach(child);
		else
			BUS_RESUME_CHILD(dev, child);
	}
	return (error);
}

const struct pci_device_table *
pci_match_device(device_t child, const struct pci_device_table *id,
    size_t nelt)
{
	bool match;
	uint16_t vendor, device, subvendor, subdevice, class, subclass, revid;

	vendor = pci_get_vendor(child);
	device = pci_get_device(child);
	subvendor = pci_get_subvendor(child);
	subdevice = pci_get_subdevice(child);
	class = pci_get_class(child);
	subclass = pci_get_subclass(child);
	revid = pci_get_revid(child);
	while (nelt-- > 0) {
		match = true;
		if (id->match_flag_vendor)
			match &= vendor == id->vendor;
		if (id->match_flag_device)
			match &= device == id->device;
		if (id->match_flag_subvendor)
			match &= subvendor == id->subvendor;
		if (id->match_flag_subdevice)
			match &= subdevice == id->subdevice;
		if (id->match_flag_class)
			match &= class == id->class_id;
		if (id->match_flag_subclass)
			match &= subclass == id->subclass;
		if (id->match_flag_revid)
			match &= revid == id->revid;
		if (match)
			return (id);
		id++;
	}
	return (NULL);
}

static void
pci_print_faulted_dev_name(const struct pci_devinfo *dinfo)
{
	const char *dev_name;
	device_t dev;

	dev = dinfo->cfg.dev;
	printf("pci%d:%d:%d:%d", dinfo->cfg.domain, dinfo->cfg.bus,
	    dinfo->cfg.slot, dinfo->cfg.func);
	dev_name = device_get_name(dev);
	if (dev_name != NULL)
		printf(" (%s%d)", dev_name, device_get_unit(dev));
}

void
pci_print_faulted_dev(void)
{
	struct pci_devinfo *dinfo;
	device_t dev;
	int aer, i;
	uint32_t r1, r2;
	uint16_t status;

	STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
		dev = dinfo->cfg.dev;
		status = pci_read_config(dev, PCIR_STATUS, 2);
		status &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT |
		    PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT |
		    PCIM_STATUS_SERR | PCIM_STATUS_PERR;
		if (status != 0) {
			pci_print_faulted_dev_name(dinfo);
			printf(" error 0x%04x\n", status);
		}
		if (dinfo->cfg.pcie.pcie_location != 0) {
			status = pci_read_config(dev,
			    dinfo->cfg.pcie.pcie_location +
			    PCIER_DEVICE_STA, 2);
			if ((status & (PCIEM_STA_CORRECTABLE_ERROR |
			    PCIEM_STA_NON_FATAL_ERROR |
			    PCIEM_STA_FATAL_ERROR |
			    PCIEM_STA_UNSUPPORTED_REQ)) != 0) {
				pci_print_faulted_dev_name(dinfo);
				printf(" PCIe DEVCTL 0x%04x DEVSTA 0x%04x\n",
				    pci_read_config(dev,
				    dinfo->cfg.pcie.pcie_location +
				    PCIER_DEVICE_CTL, 2), status);
			}
		}
		if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) {
			r1 = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4);
			r2 = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4);
			if (r1 != 0 || r2 != 0) {
				pci_print_faulted_dev_name(dinfo);
				printf(" AER UC 0x%08x Mask 0x%08x Svr 0x%08x\n"
				    "     COR 0x%08x Mask 0x%08x Ctl 0x%08x\n",
				    r1, pci_read_config(dev, aer +
				    PCIR_AER_UC_MASK, 4),
				    pci_read_config(dev, aer +
				    PCIR_AER_UC_SEVERITY, 4),
				    r2, pci_read_config(dev, aer +
				    PCIR_AER_COR_MASK, 4),
				    pci_read_config(dev, aer +
				    PCIR_AER_CAP_CONTROL, 4));
				for (i = 0; i < 4; i++) {
					r1 = pci_read_config(dev, aer +
					    PCIR_AER_HEADER_LOG + i * 4, 4);
					printf("    HL%d: 0x%08x\n", i, r1);
				}
			}
		}
	}
}
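
/*
 * The DDB commands defined below can be used interactively to inspect
 * and clear the error state reported above, e.g.:
 *
 *	db> show pcierr
 *	db> pci_clearerr
 */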
#ifdef DDB
DB_SHOW_COMMAND_FLAGS(pcierr, pci_print_faulted_dev_db, DB_CMD_MEMSAFE)
{

	pci_print_faulted_dev();
}

static void
db_clear_pcie_errors(const struct pci_devinfo *dinfo)
{
	device_t dev;
	int aer;
	uint32_t r;

	dev = dinfo->cfg.dev;
	r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location +
	    PCIER_DEVICE_STA, 2);
	pci_write_config(dev, dinfo->cfg.pcie.pcie_location +
	    PCIER_DEVICE_STA, r, 2);

	if (pci_find_extcap(dev, PCIZ_AER, &aer) != 0)
		return;
	r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4);
	if (r != 0)
		pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4);
	r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4);
	if (r != 0)
		pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4);
}

DB_COMMAND_FLAGS(pci_clearerr, db_pci_clearerr, DB_CMD_MEMSAFE)
{
	struct pci_devinfo *dinfo;
	device_t dev;
	uint16_t status, status1;

	STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
		dev = dinfo->cfg.dev;
		status1 = status = pci_read_config(dev, PCIR_STATUS, 2);
		status1 &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT |
		    PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT |
		    PCIM_STATUS_SERR | PCIM_STATUS_PERR;
		if (status1 != 0) {
			status &= ~status1;
			pci_write_config(dev, PCIR_STATUS, status, 2);
		}
		if (dinfo->cfg.pcie.pcie_location != 0)
			db_clear_pcie_errors(dinfo);
	}
}
#endif
diff --git a/sys/dev/pci/pcivar.h b/sys/dev/pci/pcivar.h
index d1b7d28eae91..832305b9adee 100644
--- a/sys/dev/pci/pcivar.h
+++ b/sys/dev/pci/pcivar.h
@@ -1,739 +1,742 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 1997, Stefan Esser <se@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef _PCIVAR_H_
#define	_PCIVAR_H_

#include <sys/queue.h>
#include <sys/eventhandler.h>

/* some PCI bus constants */
#define	PCI_MAXMAPS_0	6	/* max. no. of memory/port maps */
#define	PCI_MAXMAPS_1	2	/* max. no. of maps for PCI to PCI bridge */
#define	PCI_MAXMAPS_2	1	/* max. no. of maps for CardBus bridge */

typedef uint64_t pci_addr_t;

/* Config registers for PCI-PCI and PCI-Cardbus bridges. */
struct pcicfg_bridge {
	uint8_t	br_seclat;
	uint8_t	br_subbus;
	uint8_t	br_secbus;
	uint8_t	br_pribus;
	uint16_t br_control;
};

/* Interesting values for PCI power management */
struct pcicfg_pp {
	uint16_t pp_cap;	/* PCI power management capabilities */
	uint8_t	pp_location;	/* Offset of power management registers */
};

struct pci_map {
	pci_addr_t	pm_value;	/* Raw BAR value */
	pci_addr_t	pm_size;
	uint16_t	pm_reg;
	STAILQ_ENTRY(pci_map) pm_link;
};

struct vpd_readonly {
	char	keyword[2];
	char	*value;
	int	len;
};

struct vpd_write {
	char	keyword[2];
	char	*value;
	int	start;
	int	len;
};

struct pcicfg_vpd {
	uint8_t	vpd_reg;	/* base register, + 2 for addr, + 4 data */
	char	vpd_cached;
	char	*vpd_ident;	/* string identifier */
	int	vpd_rocnt;
	struct vpd_readonly *vpd_ros;
	int	vpd_wcnt;
	struct vpd_write *vpd_w;
};

/* Interesting values for PCI MSI */
struct pcicfg_msi {
	uint16_t msi_ctrl;	/* Message Control */
	uint8_t	msi_location;	/* Offset of MSI capability registers. */
	int	msi_alloc;	/* Number of allocated messages. */
	uint64_t msi_addr;	/* Contents of address register. */
	uint16_t msi_data;	/* Contents of data register. */
	u_int	msi_handlers;
};

/* Interesting values for PCI MSI-X */
struct msix_vector {
	uint64_t mv_address;	/* Contents of address register. */
	uint32_t mv_data;	/* Contents of data register. */
	int	mv_irq;
};

struct msix_table_entry {
	u_int	mte_vector;	/* 1-based index into msix_vectors array. */
	u_int	mte_handlers;
};

struct pcicfg_msix {
	uint16_t msix_ctrl;	/* Message Control */
	uint8_t	msix_location;	/* Offset of MSI-X capability registers. */
	uint8_t	msix_table_bar;	/* BAR containing vector table. */
	uint8_t	msix_pba_bar;	/* BAR containing PBA. */
	uint32_t msix_table_offset;
	uint32_t msix_pba_offset;
	u_int	msix_alloc;	/* Number of allocated vectors. */
	u_int	msix_table_len;	/* Length of virtual table. */
	struct msix_table_entry *msix_table; /* Virtual table. */
	struct msix_vector *msix_vectors;    /* Array of allocated vectors. */
	struct resource *msix_table_res;     /* Resource containing vector table. */
	struct resource *msix_pba_res;	     /* Resource containing PBA. */
};

struct pci_id_ofw_iommu {
	uintptr_t xref;
	uint32_t id;
};

/* Interesting values for HyperTransport */
struct pcicfg_ht {
	uint8_t	ht_slave;	/* Non-zero if device is an HT slave. */
	uint8_t	ht_msimap;	/* Offset of MSI mapping cap registers. */
	uint16_t ht_msictrl;	/* MSI mapping control */
	uint64_t ht_msiaddr;	/* MSI mapping base address */
};

/* Interesting values for PCI-express */
struct pcicfg_pcie {
	uint8_t	pcie_location;	/* Offset of PCI-e capability registers. */
	uint8_t	pcie_type;	/* Device type. */
	uint16_t pcie_flags;	/* Device capabilities register. */
	uint16_t pcie_device_ctl; /* Device control register. */
	uint16_t pcie_link_ctl;	/* Link control register. */
	uint16_t pcie_slot_ctl;	/* Slot control register. */
	uint16_t pcie_root_ctl;	/* Root control register. */
	uint16_t pcie_device_ctl2; /* Second device control register. */
	uint16_t pcie_link_ctl2; /* Second link control register. */
	uint16_t pcie_slot_ctl2; /* Second slot control register. */
};

struct pcicfg_pcix {
	uint16_t pcix_command;
	uint8_t	pcix_location;	/* Offset of PCI-X capability registers. */
};

struct pcicfg_vf {
	int index;
};

struct pci_ea_entry {
	int		eae_bei;
	uint32_t	eae_flags;
	uint64_t	eae_base;
	uint64_t	eae_max_offset;
	uint32_t	eae_cfg_offset;
	STAILQ_ENTRY(pci_ea_entry) eae_link;
};

struct pcicfg_ea {
	int ea_location;	/* Structure offset in Configuration Header */
	STAILQ_HEAD(, pci_ea_entry) ea_entries;	/* EA entries */
};

#define	PCICFG_VF	0x0001 /* Device is an SR-IOV Virtual Function */

/* config header information common to all header types */
typedef struct pcicfg {
	device_t	dev;		/* device which owns this */

	STAILQ_HEAD(, pci_map) maps;	/* BARs */

	uint16_t	subvendor;	/* card vendor ID */
	uint16_t	subdevice;	/* card device ID, assigned by card vendor */
	uint16_t	vendor;		/* chip vendor ID */
	uint16_t	device;		/* chip device ID, assigned by chip vendor */

	uint16_t	cmdreg;		/* disable/enable chip and PCI options */
	uint16_t	statreg;	/* supported PCI features and error state */

	uint8_t		baseclass;	/* chip PCI class */
	uint8_t		subclass;	/* chip PCI subclass */
	uint8_t		progif;		/* chip PCI programming interface */
	uint8_t		revid;		/* chip revision ID */

	uint8_t		hdrtype;	/* chip config header type */
	uint8_t		cachelnsz;	/* cache line size in 4byte units */
	uint8_t		intpin;		/* PCI interrupt pin */
	uint8_t		intline;	/* interrupt line (IRQ for PC arch) */

	uint8_t		mingnt;		/* min. useful bus grant time in 250ns units */
	uint8_t		maxlat;		/* max. tolerated bus grant latency in 250ns */
	uint8_t		lattimer;	/* latency timer in units of 30ns bus cycles */

	uint8_t		mfdev;		/* multi-function device (from hdrtype reg) */
	uint8_t		nummaps;	/* actual number of PCI maps used */

	uint32_t	domain;		/* PCI domain */
	uint8_t		bus;		/* config space bus address */
	uint8_t		slot;		/* config space slot address */
	uint8_t		func;		/* config space function number */

	uint32_t	flags;		/* flags defined above */

	struct pcicfg_bridge bridge;	/* Bridges */
	struct pcicfg_pp pp;		/* Power management */
	struct pcicfg_vpd vpd;		/* Vital product data */
	struct pcicfg_msi msi;		/* PCI MSI */
	struct pcicfg_msix msix;	/* PCI MSI-X */
	struct pcicfg_ht ht;		/* HyperTransport */
	struct pcicfg_pcie pcie;	/* PCI Express */
	struct pcicfg_pcix pcix;	/* PCI-X */
	struct pcicfg_iov *iov;		/* SR-IOV */
	struct pcicfg_vf vf;		/* SR-IOV Virtual Function */
	struct pcicfg_ea ea;		/* Enhanced Allocation */
} pcicfgregs;

/* additional type 1 device config header information (PCI to PCI bridge) */
typedef struct {
	pci_addr_t	pmembase;	/* base address of prefetchable memory */
	pci_addr_t	pmemlimit;	/* topmost address of prefetchable memory */
	uint32_t	membase;	/* base address of memory window */
	uint32_t	memlimit;	/* topmost address of memory window */
	uint32_t	iobase;		/* base address of port window */
	uint32_t	iolimit;	/* topmost address of port window */
	uint16_t	secstat;	/* secondary bus status register */
	uint16_t	bridgectl;	/* bridge control register */
	uint8_t		seclat;		/* secondary bus latency timer */
} pcih1cfgregs;

/* additional type 2 device config header information (CardBus bridge) */
typedef struct {
	uint32_t	membase0;	/* base address of memory window */
	uint32_t	memlimit0;	/* topmost address of memory window */
	uint32_t	membase1;	/* base address of memory window */
	uint32_t	memlimit1;	/* topmost address of memory window */
	uint32_t	iobase0;	/* base address of port window */
	uint32_t	iolimit0;	/* topmost address of port window */
	uint32_t	iobase1;	/* base address of port window */
	uint32_t	iolimit1;	/* topmost address of port window */
	uint32_t	pccardif;	/* PC Card 16bit IF legacy mode base addr. */
	uint16_t	secstat;	/* secondary bus status register */
	uint16_t	bridgectl;	/* bridge control register */
	uint8_t		seclat;		/* CardBus latency timer */
} pcih2cfgregs;

extern uint32_t pci_numdevs;

extern int pci_enable_aspm;

/*
 * The bitfield has to be stable and match the fields below (so that
 * match_flag_vendor must be bit 0) so we have to do the endian dance.  We
 * can't use enums or #define constants because then the macros for
 * subsetting matches wouldn't work.  These tables are parsed by devmatch
 * and others to connect modules with devices on the PCI bus.
 */
struct pci_device_table {
#if BYTE_ORDER == LITTLE_ENDIAN
	uint16_t
	match_flag_vendor:1,
	match_flag_device:1,
	match_flag_subvendor:1,
	match_flag_subdevice:1,
	match_flag_class:1,
	match_flag_subclass:1,
	match_flag_revid:1,
	match_flag_unused:9;
#else
	uint16_t
	match_flag_unused:9,
	match_flag_revid:1,
	match_flag_subclass:1,
	match_flag_class:1,
	match_flag_subdevice:1,
	match_flag_subvendor:1,
	match_flag_device:1,
	match_flag_vendor:1;
#endif
	uint16_t	vendor;
	uint16_t	device;
	uint16_t	subvendor;
	uint16_t	subdevice;
	uint16_t	class_id;
	uint16_t	subclass;
	uint16_t	revid;
	uint16_t	unused;
	uintptr_t	driver_data;
	char		*descr;
};

#define	PCI_DEV(v, d)						\
	.match_flag_vendor = 1, .vendor = (v),			\
	.match_flag_device = 1, .device = (d)
#define	PCI_SUBDEV(sv, sd)					\
	.match_flag_subvendor = 1, .subvendor = (sv),		\
	.match_flag_subdevice = 1, .subdevice = (sd)
#define	PCI_CLASS(x)						\
	.match_flag_class = 1, .class_id = (x)
#define	PCI_SUBCLASS(x)						\
	.match_flag_subclass = 1, .subclass = (x)
#define	PCI_REVID(x)						\
	.match_flag_revid = 1, .revid = (x)
#define	PCI_DESCR(x)						\
	.descr = (x)
#define	PCI_PNP_STR						\
	"M16:mask;U16:vendor;U16:device;U16:subvendor;U16:subdevice;" \
	"U16:class;U16:subclass;U16:revid;"
#define	PCI_PNP_INFO(table)					\
	MODULE_PNP_INFO(PCI_PNP_STR, pci, table, table,		\
	    sizeof(table) / sizeof(table[0]))

const struct pci_device_table *pci_match_device(device_t child,
    const struct pci_device_table *id, size_t nelt);
#define	PCI_MATCH(child, table)					\
	pci_match_device(child, (table), nitems(table));
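
/*
 * Usage sketch: a driver declares a match table with the macros above,
 * exports it for devmatch via PCI_PNP_INFO(), and probes with
 * PCI_MATCH().  "mydrv" and the IDs below are hypothetical.
 */
#if 0
static const struct pci_device_table mydrv_devs[] = {
	{ PCI_DEV(0x1234, 0x5678), PCI_DESCR("Example device") },
};
PCI_PNP_INFO(mydrv_devs);

static int
mydrv_probe(device_t dev)
{
	const struct pci_device_table *id;

	id = PCI_MATCH(dev, mydrv_devs);
	if (id == NULL)
		return (ENXIO);
	device_set_desc(dev, id->descr);
	return (BUS_PROBE_DEFAULT);
}
#endif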

/* Only if the prerequisites are present */
#if defined(_SYS_BUS_H_) && defined(_SYS_PCIIO_H_)
struct pci_devinfo {
	STAILQ_ENTRY(pci_devinfo) pci_links;
	struct resource_list resources;
	pcicfgregs	cfg;
	struct pci_conf	conf;
};
#endif

#ifdef _SYS_BUS_H_

#include "pci_if.h"

enum pci_device_ivars {
	PCI_IVAR_SUBVENDOR,
	PCI_IVAR_SUBDEVICE,
	PCI_IVAR_VENDOR,
	PCI_IVAR_DEVICE,
	PCI_IVAR_DEVID,
	PCI_IVAR_CLASS,
	PCI_IVAR_SUBCLASS,
	PCI_IVAR_PROGIF,
	PCI_IVAR_REVID,
	PCI_IVAR_INTPIN,
	PCI_IVAR_IRQ,
	PCI_IVAR_DOMAIN,
	PCI_IVAR_BUS,
	PCI_IVAR_SLOT,
	PCI_IVAR_FUNCTION,
	PCI_IVAR_ETHADDR,
	PCI_IVAR_CMDREG,
	PCI_IVAR_CACHELNSZ,
	PCI_IVAR_MINGNT,
	PCI_IVAR_MAXLAT,
	PCI_IVAR_LATTIMER
};

/*
 * Simplified accessors for pci devices
 */
#define	PCI_ACCESSOR(var, ivar, type)				\
	__BUS_ACCESSOR(pci, var, PCI, ivar, type)

PCI_ACCESSOR(subvendor,	SUBVENDOR,	uint16_t)
PCI_ACCESSOR(subdevice,	SUBDEVICE,	uint16_t)
PCI_ACCESSOR(vendor,	VENDOR,		uint16_t)
PCI_ACCESSOR(device,	DEVICE,		uint16_t)
PCI_ACCESSOR(devid,	DEVID,		uint32_t)
PCI_ACCESSOR(class,	CLASS,		uint8_t)
PCI_ACCESSOR(subclass,	SUBCLASS,	uint8_t)
PCI_ACCESSOR(progif,	PROGIF,		uint8_t)
PCI_ACCESSOR(revid,	REVID,		uint8_t)
PCI_ACCESSOR(intpin,	INTPIN,		uint8_t)
PCI_ACCESSOR(irq,	IRQ,		uint8_t)
PCI_ACCESSOR(domain,	DOMAIN,		uint32_t)
PCI_ACCESSOR(bus,	BUS,		uint8_t)
PCI_ACCESSOR(slot,	SLOT,		uint8_t)
PCI_ACCESSOR(function,	FUNCTION,	uint8_t)
PCI_ACCESSOR(ether,	ETHADDR,	uint8_t *)
PCI_ACCESSOR(cmdreg,	CMDREG,		uint8_t)
PCI_ACCESSOR(cachelnsz,	CACHELNSZ,	uint8_t)
PCI_ACCESSOR(mingnt,	MINGNT,		uint8_t)
PCI_ACCESSOR(maxlat,	MAXLAT,		uint8_t)
PCI_ACCESSOR(lattimer,	LATTIMER,	uint8_t)

#undef PCI_ACCESSOR

/*
 * Operations on configuration space.
 */
static __inline uint32_t
pci_read_config(device_t dev, int reg, int width)
{
	return PCI_READ_CONFIG(device_get_parent(dev), dev, reg, width);
}

static __inline void
pci_write_config(device_t dev, int reg, uint32_t val, int width)
{
	PCI_WRITE_CONFIG(device_get_parent(dev), dev, reg, val, width);
}

/*
 * Ivars for pci bridges.
 */
/*typedef enum pci_device_ivars pcib_device_ivars;*/
enum pcib_device_ivars {
	PCIB_IVAR_DOMAIN,
	PCIB_IVAR_BUS
};

#define	PCIB_ACCESSOR(var, ivar, type)				\
	__BUS_ACCESSOR(pcib, var, PCIB, ivar, type)

PCIB_ACCESSOR(domain,	DOMAIN,		uint32_t)
PCIB_ACCESSOR(bus,	BUS,		uint32_t)

#undef PCIB_ACCESSOR

/*
 * PCI interrupt validation.  Invalid interrupt values such as 0 or 128
 * on i386 or other platforms should be mapped out in the MD pcireadconf
 * code and not here, since the only MI invalid IRQ is 255.
 */
#define	PCI_INVALID_IRQ		255
#define	PCI_INTERRUPT_VALID(x)	((x) != PCI_INVALID_IRQ)

/*
 * Convenience functions.
 *
 * These should be used in preference to manually manipulating
 * configuration space.
 */
static __inline int
pci_enable_busmaster(device_t dev)
{
	return(PCI_ENABLE_BUSMASTER(device_get_parent(dev), dev));
}

static __inline int
pci_disable_busmaster(device_t dev)
{
	return(PCI_DISABLE_BUSMASTER(device_get_parent(dev), dev));
}

static __inline int
pci_enable_io(device_t dev, int space)
{
	return(PCI_ENABLE_IO(device_get_parent(dev), dev, space));
}

static __inline int
pci_disable_io(device_t dev, int space)
{
	return(PCI_DISABLE_IO(device_get_parent(dev), dev, space));
}

static __inline int
pci_get_vpd_ident(device_t dev, const char **identptr)
{
	return(PCI_GET_VPD_IDENT(device_get_parent(dev), dev, identptr));
}

static __inline int
pci_get_vpd_readonly(device_t dev, const char *kw, const char **vptr)
{
	return(PCI_GET_VPD_READONLY(device_get_parent(dev), dev, kw, vptr));
}

/*
 * Check if the address range falls within the VGA defined address range(s)
 */
static __inline int
pci_is_vga_ioport_range(rman_res_t start, rman_res_t end)
{

	return (((start >= 0x3b0 && end <= 0x3bb) ||
	    (start >= 0x3c0 && end <= 0x3df)) ? 1 : 0);
}

static __inline int
pci_is_vga_memory_range(rman_res_t start, rman_res_t end)
{

	return ((start >= 0xa0000 && end <= 0xbffff) ? 1 : 0);
}
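
/*
 * Usage sketch: pci_read_config() and pci_write_config() allow
 * read-modify-write of standard registers, although the convenience
 * functions above are preferred where one exists.
 */
#if 0
	uint16_t cmd;

	/* Enable memory decoding by hand; pci_enable_io() is preferred. */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	cmd |= PCIM_CMD_MEMEN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);
#endif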

/*
 * PCI power states are as defined by ACPI:
 *
- * D0	State in which device is on and running.  It is receiving full
- *	power from the system and delivering full functionality to the user.
- * D1	Class-specific low-power state in which device context may or may not
- *	be lost.  Buses in D1 cannot do anything to the bus that would force
- *	devices on that bus to lose context.
- * D2	Class-specific low-power state in which device context may or may
- *	not be lost.  Attains greater power savings than D1.  Buses in D2
- *	can cause devices on that bus to lose some context.  Devices in D2
- *	must be prepared for the bus to be in D2 or higher.
- * D3	State in which the device is off and not running.  Device context is
- *	lost.  Power can be removed from the device.
+ * D0	State in which device is on and running.  It is receiving full
+ *	power from the system and delivering full functionality to the user.
+ * D1	Class-specific low-power state in which device context may or may not
+ *	be lost.  Buses in D1 cannot do anything to the bus that would force
+ *	devices on that bus to lose context.
+ * D2	Class-specific low-power state in which device context may or may
+ *	not be lost.  Attains greater power savings than D1.  Buses in D2
+ *	can cause devices on that bus to lose some context.  Devices in D2
+ *	must be prepared for the bus to be in D2 or higher.
+ * D3hot  State in which the device is off and not running.  Device context is
+ *	lost.  Power can be removed from the device.
+ * D3cold Same as D3hot, but power has been removed from the device.
 */
#define	PCI_POWERSTATE_D0	0
#define	PCI_POWERSTATE_D1	1
#define	PCI_POWERSTATE_D2	2
-#define	PCI_POWERSTATE_D3	3
+#define	PCI_POWERSTATE_D3_HOT	3
+#define	PCI_POWERSTATE_D3_COLD	4
+#define	PCI_POWERSTATE_D3	PCI_POWERSTATE_D3_COLD
#define	PCI_POWERSTATE_UNKNOWN	-1

static __inline int
pci_set_powerstate(device_t dev, int state)
{
	return PCI_SET_POWERSTATE(device_get_parent(dev), dev, state);
}

static __inline int
pci_get_powerstate(device_t dev)
{
	return PCI_GET_POWERSTATE(device_get_parent(dev), dev);
}

static __inline int
pci_find_cap(device_t dev, int capability, int *capreg)
{
	return (PCI_FIND_CAP(device_get_parent(dev), dev, capability, capreg));
}

static __inline int
pci_find_next_cap(device_t dev, int capability, int start, int *capreg)
{
	return (PCI_FIND_NEXT_CAP(device_get_parent(dev), dev, capability,
	    start, capreg));
}

static __inline int
pci_find_extcap(device_t dev, int capability, int *capreg)
{
	return (PCI_FIND_EXTCAP(device_get_parent(dev), dev, capability,
	    capreg));
}

static __inline int
pci_find_next_extcap(device_t dev, int capability, int start, int *capreg)
{
	return (PCI_FIND_NEXT_EXTCAP(device_get_parent(dev), dev, capability,
	    start, capreg));
}

static __inline int
pci_find_htcap(device_t dev, int capability, int *capreg)
{
	return (PCI_FIND_HTCAP(device_get_parent(dev), dev, capability,
	    capreg));
}

static __inline int
pci_find_next_htcap(device_t dev, int capability, int start, int *capreg)
{
	return (PCI_FIND_NEXT_HTCAP(device_get_parent(dev), dev, capability,
	    start, capreg));
}

static __inline int
pci_alloc_msi(device_t dev, int *count)
{
	return (PCI_ALLOC_MSI(device_get_parent(dev), dev, count));
}

static __inline int
pci_alloc_msix(device_t dev, int *count)
{
	return (PCI_ALLOC_MSIX(device_get_parent(dev), dev, count));
}

static __inline void
pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
{
	PCI_ENABLE_MSI(device_get_parent(dev), dev, address, data);
}

static __inline void
pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
{
	PCI_ENABLE_MSIX(device_get_parent(dev), dev, index, address, data);
}

static __inline void
pci_disable_msi(device_t dev)
{
	PCI_DISABLE_MSI(device_get_parent(dev), dev);
}

static __inline int
pci_remap_msix(device_t dev, int count, const u_int *vectors)
{
	return (PCI_REMAP_MSIX(device_get_parent(dev), dev, count, vectors));
}

static __inline int
pci_release_msi(device_t dev)
{
	return (PCI_RELEASE_MSI(device_get_parent(dev), dev));
}

static __inline int
pci_msi_count(device_t dev)
{
	return (PCI_MSI_COUNT(device_get_parent(dev), dev));
}

static __inline int
pci_msix_count(device_t dev)
{
	return (PCI_MSIX_COUNT(device_get_parent(dev), dev));
}

static __inline int
pci_msix_pba_bar(device_t dev)
{
	return (PCI_MSIX_PBA_BAR(device_get_parent(dev), dev));
}

static __inline int
pci_msix_table_bar(device_t dev)
{
	return (PCI_MSIX_TABLE_BAR(device_get_parent(dev), dev));
}

static __inline int
pci_get_id(device_t dev, enum pci_id_type type, uintptr_t *id)
{
	return (PCI_GET_ID(device_get_parent(dev), dev, type, id));
}
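
/*
 * Usage sketch: a driver typically sizes and allocates MSI-X vectors as
 * below; "sc->nvecs" is hypothetical and error handling is elided.
 */
#if 0
	int count;

	count = pci_msix_count(dev);	/* vectors the device supports */
	if (count > 0 && pci_alloc_msix(dev, &count) == 0)
		sc->nvecs = count;	/* count is in/out */
	/* ... pci_release_msi(dev) later frees MSI or MSI-X vectors. */
#endif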

/*
 * This is the deprecated interface; there is no way to tell the difference
 * between a failure and a valid value that happens to be the same as the
 * failure value.
 */
static __inline uint16_t
pci_get_rid(device_t dev)
{
	uintptr_t rid;

	if (pci_get_id(dev, PCI_ID_RID, &rid) != 0)
		return (0);

	return (rid);
}

static __inline void
pci_child_added(device_t dev)
{

	return (PCI_CHILD_ADDED(device_get_parent(dev), dev));
}

device_t pci_find_bsf(uint8_t, uint8_t, uint8_t);
device_t pci_find_dbsf(uint32_t, uint8_t, uint8_t, uint8_t);
device_t pci_find_device(uint16_t, uint16_t);
device_t pci_find_class(uint8_t class, uint8_t subclass);
device_t pci_find_class_from(uint8_t class, uint8_t subclass,
    device_t devfrom);
device_t pci_find_base_class_from(uint8_t class, device_t devfrom);

/* Can be used by drivers to manage the MSI-X table. */
int	pci_pending_msix(device_t dev, u_int index);

int	pci_msi_device_blacklisted(device_t dev);
int	pci_msix_device_blacklisted(device_t dev);

void	pci_ht_map_msi(device_t dev, uint64_t addr);

device_t pci_find_pcie_root_port(device_t dev);
int	pci_get_relaxed_ordering_enabled(device_t dev);
int	pci_get_max_payload(device_t dev);
int	pci_get_max_read_req(device_t dev);
void	pci_restore_state(device_t dev);
void	pci_save_state(device_t dev);
int	pci_set_max_read_req(device_t dev, int size);
int	pci_power_reset(device_t dev);
void	pci_clear_pme(device_t dev);
void	pci_enable_pme(device_t dev);
bool	pci_has_pm(device_t dev);
uint32_t pcie_read_config(device_t dev, int reg, int width);
void	pcie_write_config(device_t dev, int reg, uint32_t value, int width);
uint32_t pcie_adjust_config(device_t dev, int reg, uint32_t mask,
    uint32_t value, int width);
void	pcie_apei_error(device_t dev, int sev, uint8_t *aer);
bool	pcie_flr(device_t dev, u_int max_delay, bool force);
int	pcie_get_max_completion_timeout(device_t dev);
bool	pcie_wait_for_pending_transactions(device_t dev, u_int max_delay);
int	pcie_link_reset(device_t port, int pcie_location);
void	pci_print_faulted_dev(void);

#endif	/* _SYS_BUS_H_ */

/*
 * cdev switch for control device, initialised in generic PCI code
 */
extern struct cdevsw pcicdev;

/*
 * List of all PCI devices, generation count for the list.
 */
STAILQ_HEAD(devlist, pci_devinfo);

extern struct devlist	pci_devq;
extern uint32_t	pci_generation;

struct pci_map *pci_find_bar(device_t dev, int reg);
struct pci_map *pci_first_bar(device_t dev);
struct pci_map *pci_next_bar(struct pci_map *pm);
int	pci_bar_enabled(device_t dev, struct pci_map *pm);
struct pcicfg_vpd *pci_fetch_vpd_list(device_t dev);

#define	VGA_PCI_BIOS_SHADOW_ADDR	0xC0000
#define	VGA_PCI_BIOS_SHADOW_SIZE	131072

int	vga_pci_is_boot_display(device_t dev);
void *	vga_pci_map_bios(device_t dev, size_t *size);
void	vga_pci_unmap_bios(device_t dev, void *bios);
int	vga_pci_repost(device_t dev);

/**
 * Global eventhandlers invoked when PCI devices are added or removed
 * from the system.
 */
typedef void (*pci_event_fn)(void *arg, device_t dev);
EVENTHANDLER_DECLARE(pci_add_device, pci_event_fn);
EVENTHANDLER_DECLARE(pci_delete_device, pci_event_fn);

#endif	/* _PCIVAR_H_ */