Index: head/sys/arm64/acpica/acpi_iort.c
===================================================================
--- head/sys/arm64/acpica/acpi_iort.c	(revision 363273)
+++ head/sys/arm64/acpica/acpi_iort.c	(revision 363274)
@@ -1,566 +1,585 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * Author: Jayachandran C Nair
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <machine/intr.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

/*
 * Track next XREF available for ITS groups.
 */
static u_int acpi_its_xref = ACPI_MSI_XREF;

/*
 * Some types of IORT nodes have a set of mappings. Each of them map
 * a range of device IDs [base..end] from the current node to another
 * node. The corresponding device IDs on the destination node start at
 * outbase.
 */
struct iort_map_entry {
	u_int			base;
	u_int			end;
	u_int			outbase;
	u_int			flags;
	u_int			out_node_offset;
	struct iort_node	*out_node;
};

/*
 * The ITS group node does not have any outgoing mappings. It has a
 * list of GIC ITS blocks which can handle the device ID. We will
 * store the PIC XREF used by the block and the block's proximity
 * data here, so that it can be retrieved together.
 */
struct iort_its_entry {
	u_int			its_id;
	u_int			xref;
	int			pxm;
};

/*
 * IORT node. Each node has some device specific data depending on the
 * type of the node. The node can also have a set of mappings, OR in
 * case of ITS group nodes a set of ITS entries.
 * The nodes are kept in a TAILQ by type.
 */
struct iort_node {
	TAILQ_ENTRY(iort_node)	next;		/* next entry with same type */
	enum AcpiIortNodeType	type;		/* ACPI type */
	u_int			node_offset;	/* offset in IORT - node ID */
	u_int			nentries;	/* items in array below */
	u_int			usecount;	/* for bookkeeping */
	u_int			revision;	/* node revision */
	union {
		ACPI_IORT_ROOT_COMPLEX	pci_rc;	/* PCI root complex */
		ACPI_IORT_SMMU		smmu;
		ACPI_IORT_SMMU_V3	smmu_v3;
	} data;
	union {
		struct iort_map_entry	*mappings;	/* node mappings */
		struct iort_its_entry	*its;		/* ITS IDs array */
	} entries;
};

/* Lists for each of the types. */
static TAILQ_HEAD(, iort_node) pci_nodes = TAILQ_HEAD_INITIALIZER(pci_nodes);
static TAILQ_HEAD(, iort_node) smmu_nodes = TAILQ_HEAD_INITIALIZER(smmu_nodes);
static TAILQ_HEAD(, iort_node) its_groups = TAILQ_HEAD_INITIALIZER(its_groups);
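/*
 * A typical IORT thus describes a chain of the form
 *
 *	PCI root complex --[RIDs]--> SMMU --[stream IDs]--> ITS group
 *
 * iort_entry_lookup() below resolves one hop of this chain, and
 * iort_pci_rc_map() composes up to two hops to reach a requested
 * node type.
 */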
static int
iort_entry_get_id_mapping_index(struct iort_node *node)
{

	switch(node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		/* The ID mapping field was added in version 1 */
		if (node->revision < 1)
			return (-1);

		/*
		 * If all the control interrupts are GSIV based the ID
		 * mapping field is ignored.
		 */
		if (node->data.smmu_v3.EventGsiv != 0 &&
		    node->data.smmu_v3.PriGsiv != 0 &&
		    node->data.smmu_v3.GerrGsiv != 0 &&
		    node->data.smmu_v3.SyncGsiv != 0)
			return (-1);

		if (node->data.smmu_v3.IdMappingIndex >= node->nentries)
			return (-1);

		return (node->data.smmu_v3.IdMappingIndex);
	case ACPI_IORT_NODE_PMCG:
		return (0);
	default:
		break;
	}

	return (-1);
}

/*
 * Look up an ID in the mappings array. If successful, map the input ID
 * to the output ID and return the output node found.
 */
static struct iort_node *
iort_entry_lookup(struct iort_node *node, u_int id, u_int *outid)
{
	struct iort_map_entry *entry;
	int i, id_map;

	id_map = iort_entry_get_id_mapping_index(node);
	entry = node->entries.mappings;
	for (i = 0; i < node->nentries; i++, entry++) {
		if (i == id_map)
			continue;
		if (entry->base <= id && id <= entry->end)
			break;
	}
	if (i == node->nentries)
		return (NULL);
	if ((entry->flags & ACPI_IORT_ID_SINGLE_MAPPING) == 0)
-		*outid = entry->outbase + (id - entry->base);
+		*outid = entry->outbase + (id - entry->base);
	else
		*outid = entry->outbase;
	return (entry->out_node);
}

/*
 * Map a PCI RID to a SMMU node or an ITS node, based on outtype.
 */
static struct iort_node *
iort_pci_rc_map(u_int seg, u_int rid, u_int outtype, u_int *outid)
{
	struct iort_node *node, *out_node;
	u_int nxtid;

	out_node = NULL;
	TAILQ_FOREACH(node, &pci_nodes, next) {
		if (node->data.pci_rc.PciSegmentNumber != seg)
			continue;
		out_node = iort_entry_lookup(node, rid, &nxtid);
		if (out_node != NULL)
			break;
	}

	/* Could not find a PCI RC node with segment and device ID. */
	if (out_node == NULL)
		return (NULL);

	/* Node can be SMMU or ITS. If SMMU, we need another lookup. */
	if (outtype == ACPI_IORT_NODE_ITS_GROUP &&
	    (out_node->type == ACPI_IORT_NODE_SMMU_V3 ||
	    out_node->type == ACPI_IORT_NODE_SMMU)) {
		out_node = iort_entry_lookup(out_node, nxtid, &nxtid);
		if (out_node == NULL)
			return (NULL);
	}

	KASSERT(out_node->type == outtype, ("mapping fail"));
	*outid = nxtid;
	return (out_node);
}

#ifdef notyet
/*
 * Not implemented, map a PCIe device to the SMMU it is associated with.
 */
int
acpi_iort_map_smmu(u_int seg, u_int devid, void **smmu, u_int *sid)
{
	/* XXX: convert oref to SMMU device */
	return (ENXIO);
}
#endif
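/*
 * Worked example of the lookup arithmetic above (illustrative values
 * only): given a mapping entry with base 0x100, end 0x1ff and outbase
 * 0x8000, input ID 0x105 maps to output ID 0x8005, i.e.
 * 0x8000 + (0x105 - 0x100). If ACPI_IORT_ID_SINGLE_MAPPING is set,
 * every input ID in the range maps to outbase itself.
 */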
/*
 * Allocate memory for a node, initialize and copy mappings. 'start'
 * argument provides the table start used to calculate the node offset.
 */
static void
iort_copy_data(struct iort_node *node, ACPI_IORT_NODE *node_entry)
{
	ACPI_IORT_ID_MAPPING *map_entry;
	struct iort_map_entry *mapping;
	int i;

	map_entry = ACPI_ADD_PTR(ACPI_IORT_ID_MAPPING, node_entry,
	    node_entry->MappingOffset);
	node->nentries = node_entry->MappingCount;
	node->usecount = 0;
	mapping = malloc(sizeof(*mapping) * node->nentries, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	node->entries.mappings = mapping;
	for (i = 0; i < node->nentries; i++, mapping++, map_entry++) {
		mapping->base = map_entry->InputBase;
		mapping->end = map_entry->InputBase + map_entry->IdCount - 1;
		mapping->outbase = map_entry->OutputBase;
		mapping->out_node_offset = map_entry->OutputReference;
		mapping->flags = map_entry->Flags;
		mapping->out_node = NULL;
	}
}

/*
 * Allocate and copy an ITS group.
 */
static void
iort_copy_its(struct iort_node *node, ACPI_IORT_NODE *node_entry)
{
	struct iort_its_entry *its;
	ACPI_IORT_ITS_GROUP *itsg_entry;
	UINT32 *id;
	int i;

	itsg_entry = (ACPI_IORT_ITS_GROUP *)node_entry->NodeData;
	node->nentries = itsg_entry->ItsCount;
	node->usecount = 0;
	its = malloc(sizeof(*its) * node->nentries, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	node->entries.its = its;
	id = &itsg_entry->Identifiers[0];
	for (i = 0; i < node->nentries; i++, its++, id++) {
		its->its_id = *id;
		its->pxm = -1;
		its->xref = 0;
	}
}

/*
 * Walk the IORT table and add nodes to the corresponding list.
 */
static void
iort_add_nodes(ACPI_IORT_NODE *node_entry, u_int node_offset)
{
	ACPI_IORT_ROOT_COMPLEX *pci_rc;
	ACPI_IORT_SMMU *smmu;
	ACPI_IORT_SMMU_V3 *smmu_v3;
	struct iort_node *node;

	node = malloc(sizeof(*node), M_DEVBUF, M_WAITOK | M_ZERO);
	node->type = node_entry->Type;
	node->node_offset = node_offset;
	node->revision = node_entry->Revision;

	/* copy nodes depending on type */
	switch(node_entry->Type) {
	case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
		pci_rc = (ACPI_IORT_ROOT_COMPLEX *)node_entry->NodeData;
		memcpy(&node->data.pci_rc, pci_rc, sizeof(*pci_rc));
		iort_copy_data(node, node_entry);
		TAILQ_INSERT_TAIL(&pci_nodes, node, next);
		break;
	case ACPI_IORT_NODE_SMMU:
		smmu = (ACPI_IORT_SMMU *)node_entry->NodeData;
		memcpy(&node->data.smmu, smmu, sizeof(*smmu));
		iort_copy_data(node, node_entry);
		TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
		break;
	case ACPI_IORT_NODE_SMMU_V3:
		smmu_v3 = (ACPI_IORT_SMMU_V3 *)node_entry->NodeData;
		memcpy(&node->data.smmu_v3, smmu_v3, sizeof(*smmu_v3));
		iort_copy_data(node, node_entry);
		TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
		break;
	case ACPI_IORT_NODE_ITS_GROUP:
		iort_copy_its(node, node_entry);
		TAILQ_INSERT_TAIL(&its_groups, node, next);
		break;
	default:
		printf("ACPI: IORT: Dropping unhandled type %u\n",
		    node_entry->Type);
		free(node, M_DEVBUF);
		break;
	}
}
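/*
 * For example (hypothetical offsets): a root complex mapping whose
 * OutputReference is 0x30 is resolved by iort_resolve_node() below to
 * the node that was parsed at IORT offset 0x30, typically an SMMU or
 * an ITS group.
 */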
/*
 * For the mapping entry given, walk through all the possible destination
 * nodes and resolve the output reference.
 */
static void
iort_resolve_node(struct iort_map_entry *entry, int check_smmu)
{
	struct iort_node *node, *np;

	node = NULL;
	if (check_smmu) {
		TAILQ_FOREACH(np, &smmu_nodes, next) {
			if (entry->out_node_offset == np->node_offset) {
				node = np;
				break;
			}
		}
	}
	if (node == NULL) {
		TAILQ_FOREACH(np, &its_groups, next) {
			if (entry->out_node_offset == np->node_offset) {
				node = np;
				break;
			}
		}
	}
	if (node != NULL) {
		node->usecount++;
		entry->out_node = node;
	} else {
		printf("ACPI: IORT: Firmware Bug: no mapping for node %u\n",
		    entry->out_node_offset);
	}
}

/*
 * Resolve all output node references to node pointers.
 */
static void
iort_post_process_mappings(void)
{
	struct iort_node *node;
	int i;

	TAILQ_FOREACH(node, &pci_nodes, next)
		for (i = 0; i < node->nentries; i++)
			iort_resolve_node(&node->entries.mappings[i], TRUE);
	TAILQ_FOREACH(node, &smmu_nodes, next)
		for (i = 0; i < node->nentries; i++)
			iort_resolve_node(&node->entries.mappings[i], FALSE);
	/* TODO: named nodes */
}

/*
 * Walk the MADT table, assign PIC xrefs to all ITS entries.
 */
static void
madt_resolve_its_xref(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_TRANSLATOR *gict;
	struct iort_node *its_node;
	struct iort_its_entry *its_entry;
	u_int xref;
	int i, matches;

	if (entry->Type != ACPI_MADT_TYPE_GENERIC_TRANSLATOR)
		return;

	gict = (ACPI_MADT_GENERIC_TRANSLATOR *)entry;
	matches = 0;
	xref = acpi_its_xref++;
	TAILQ_FOREACH(its_node, &its_groups, next) {
		its_entry = its_node->entries.its;
		for (i = 0; i < its_node->nentries; i++, its_entry++) {
			if (its_entry->its_id == gict->TranslationId) {
				its_entry->xref = xref;
				matches++;
			}
		}
	}
	if (matches == 0)
		printf("ACPI: IORT: Unused ITS block, ID %u\n",
		    gict->TranslationId);
}

/*
 * Walk SRAT, assign proximity to all ITS entries.
 */
static void
srat_resolve_its_pxm(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_SRAT_GIC_ITS_AFFINITY *gicits;
	struct iort_node *its_node;
	struct iort_its_entry *its_entry;
	int *map_counts;
	int i, matches, dom;

	if (entry->Type != ACPI_SRAT_TYPE_GIC_ITS_AFFINITY)
		return;

	matches = 0;
	map_counts = arg;
	gicits = (ACPI_SRAT_GIC_ITS_AFFINITY *)entry;
	dom = acpi_map_pxm_to_vm_domainid(gicits->ProximityDomain);

	/*
	 * Catch firmware and config errors. map_counts keeps a
	 * count of ProximityDomain values mapping to a domain ID.
	 */
#if MAXMEMDOM > 1
	if (dom == -1)
		printf("Firmware Error: Proximity Domain %d could not be"
		    " mapped for GIC ITS ID %d!\n",
		    gicits->ProximityDomain, gicits->ItsId);
#endif
	/* use dom + 1 as index to handle the case where dom == -1 */
	i = ++map_counts[dom + 1];
	if (i > 1) {
#ifdef NUMA
		if (dom != -1)
			printf("ERROR: Multiple Proximity Domains map to the"
			    " same NUMA domain %d!\n", dom);
#else
		printf("WARNING: multiple Proximity Domains in SRAT but NUMA"
		    " NOT enabled!\n");
#endif
	}
	TAILQ_FOREACH(its_node, &its_groups, next) {
		its_entry = its_node->entries.its;
		for (i = 0; i < its_node->nentries; i++, its_entry++) {
			if (its_entry->its_id == gicits->ItsId) {
				its_entry->pxm = dom;
				matches++;
			}
		}
	}
	if (matches == 0)
		printf("ACPI: IORT: ITS block %u in SRAT not found in IORT!\n",
		    gicits->ItsId);
}

/*
 * Cross-check the ITS ID with the MADT and (if available) the SRAT.
 */
static int
iort_post_process_its(void)
{
	ACPI_TABLE_MADT *madt;
	ACPI_TABLE_SRAT *srat;
	vm_paddr_t madt_pa, srat_pa;
	int map_counts[MAXMEMDOM + 1] = { 0 };

	/* Check ITS block in MADT */
	madt_pa = acpi_find_table(ACPI_SIG_MADT);
	KASSERT(madt_pa != 0, ("no MADT!"));
	madt = acpi_map_table(madt_pa, ACPI_SIG_MADT);
	KASSERT(madt != NULL, ("can't map MADT!"));
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_resolve_its_xref, NULL);
	acpi_unmap_table(madt);

	/* Get proximity if available */
	srat_pa = acpi_find_table(ACPI_SIG_SRAT);
	if (srat_pa != 0) {
		srat = acpi_map_table(srat_pa, ACPI_SIG_SRAT);
		KASSERT(srat != NULL, ("can't map SRAT!"));
		acpi_walk_subtables(srat + 1,
		    (char *)srat + srat->Header.Length,
		    srat_resolve_its_pxm, map_counts);
		acpi_unmap_table(srat);
	}
	return (0);
}
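/*
 * Note on the map_counts indexing above: domain IDs start at -1 (the
 * "unmapped" value), so slot 0 of the array counts unmapped proximity
 * domains and slot dom + 1 counts domain dom. Two SRAT entries whose
 * proximity domains both map to domain 0 would both increment
 * map_counts[1], which triggers the duplicate-domain diagnostic.
 */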
/*
 * Find, parse, and save the IO Remapping Table ("IORT").
 */
static int
acpi_parse_iort(void *dummy __unused)
{
	ACPI_TABLE_IORT *iort;
	ACPI_IORT_NODE *node_entry;
	vm_paddr_t iort_pa;
	u_int node_offset;

	iort_pa = acpi_find_table(ACPI_SIG_IORT);
	if (iort_pa == 0)
		return (ENXIO);

	iort = acpi_map_table(iort_pa, ACPI_SIG_IORT);
	if (iort == NULL) {
		printf("ACPI: Unable to map the IORT table!\n");
		return (ENXIO);
	}
	for (node_offset = iort->NodeOffset;
	    node_offset < iort->Header.Length;
	    node_offset += node_entry->Length) {
		node_entry = ACPI_ADD_PTR(ACPI_IORT_NODE, iort, node_offset);
		iort_add_nodes(node_entry, node_offset);
	}
	acpi_unmap_table(iort);
	iort_post_process_mappings();
	iort_post_process_its();
	return (0);
}
SYSINIT(acpi_parse_iort, SI_SUB_DRIVERS, SI_ORDER_FIRST, acpi_parse_iort, NULL);

/*
 * Provide ITS ID to PIC xref mapping.
 */
int
acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm)
{
	struct iort_node *its_node;
	struct iort_its_entry *its_entry;
	int i;

	TAILQ_FOREACH(its_node, &its_groups, next) {
		its_entry = its_node->entries.its;
		for (i = 0; i < its_node->nentries; i++, its_entry++) {
			if (its_entry->its_id == its_id) {
				*xref = its_entry->xref;
				*pxm = its_entry->pxm;
				return (0);
			}
		}
	}
	return (ENOENT);
}

/*
 * Find the mapping for a PCIe device, given its segment and device ID;
 * returns the XREF for MSI interrupt setup and the device ID to use
 * for the interrupt setup.
 */
int
acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid)
{
	struct iort_node *node;

	node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_ITS_GROUP, devid);
	if (node == NULL)
		return (ENOENT);

	/* This should be an ITS node */
	KASSERT(node->type == ACPI_IORT_NODE_ITS_GROUP, ("bad group"));

	/* return first node, we don't handle more than that now. */
	*xref = node->entries.its[0].xref;
+	return (0);
+}
+
+int
+acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, u_int *xref, u_int *sid)
+{
+	ACPI_IORT_SMMU_V3 *smmu;
+	struct iort_node *node;
+
+	node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_SMMU_V3, sid);
+	if (node == NULL)
+		return (ENOENT);
+
+	/* This should be an SMMU node. */
+	KASSERT(node->type == ACPI_IORT_NODE_SMMU_V3, ("bad node"));
+
+	smmu = (ACPI_IORT_SMMU_V3 *)&node->data.smmu_v3;
+	*xref = smmu->BaseAddress;
+
	return (0);
}
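/*
 * A minimal usage sketch (not part of the commit): how an arm64 driver
 * might consume the two IORT lookups above. The function name and the
 * segment/RID values are hypothetical.
 */
static int
example_map_device(void)
{
	u_int devid, sid, xref;
	int error;

	/* RID 0x0008 == bus 0, device 1, function 0 on PCI segment 0. */
	error = acpi_iort_map_pci_msi(0, 0x0008, &xref, &devid);
	if (error != 0)
		return (error);
	/* xref now identifies the GIC ITS; devid is the ID it sees. */

	/*
	 * Resolve the SMMUv3 in front of the same device; on success,
	 * xref holds the SMMUv3 base address and sid the stream ID.
	 */
	error = acpi_iort_map_pci_smmuv3(0, 0x0008, &xref, &sid);
	if (error == ENOENT)
		sid = 0;	/* No SMMUv3 ahead of this device. */

	return (0);
}

Index: head/sys/dev/acpica/acpivar.h
===================================================================
--- head/sys/dev/acpica/acpivar.h	(revision 363273)
+++ head/sys/dev/acpica/acpivar.h	(revision 363274)
@@ -1,562 +1,563 @@
/*-
 * Copyright (c) 2000 Mitsuru IWASAKI
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.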
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _ACPIVAR_H_
#define _ACPIVAR_H_

#ifdef _KERNEL

#include "acpi_if.h"
#include "bus_if.h"
#include <sys/_eventhandler.h>
#ifdef INTRNG
#include <sys/intr.h>
#endif
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/selinfo.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <machine/bus.h>
#include <machine/resource.h>

struct apm_clone_data;
struct acpi_softc {
	device_t	acpi_dev;
	struct cdev	*acpi_dev_t;

	int		acpi_enabled;
	int		acpi_sstate;
	int		acpi_sleep_disabled;
	int		acpi_resources_reserved;

	struct sysctl_ctx_list acpi_sysctl_ctx;
	struct sysctl_oid *acpi_sysctl_tree;
	int		acpi_power_button_sx;
	int		acpi_sleep_button_sx;
	int		acpi_lid_switch_sx;

	int		acpi_standby_sx;
	int		acpi_suspend_sx;

	int		acpi_sleep_delay;
	int		acpi_s4bios;
	int		acpi_do_disable;
	int		acpi_verbose;
	int		acpi_handle_reboot;

	vm_offset_t	acpi_wakeaddr;
	vm_paddr_t	acpi_wakephys;

	int		acpi_next_sstate;	/* Next suspend Sx state. */
	struct apm_clone_data *acpi_clone;	/* Pseudo-dev for devd(8). */
	STAILQ_HEAD(,apm_clone_data) apm_cdevs; /* All apm/apmctl/acpi cdevs. */
	struct callout	susp_force_to;		/* Force suspend if no acks. */
};

struct acpi_device {
	/* ACPI ivars */
	ACPI_HANDLE	ad_handle;
	void		*ad_private;
	int		ad_flags;
	int		ad_cls_class;

	/* Resources */
	struct resource_list ad_rl;
};

#ifdef INTRNG
struct intr_map_data_acpi {
	struct intr_map_data	hdr;
	u_int			irq;
	u_int			pol;
	u_int			trig;
};
#endif

/* Track device (/dev/{apm,apmctl} and /dev/acpi) notification status. */
struct apm_clone_data {
	STAILQ_ENTRY(apm_clone_data) entries;
	struct cdev	*cdev;
	int		flags;
#define	ACPI_EVF_NONE	0	/* /dev/apm semantics */
#define	ACPI_EVF_DEVD	1	/* /dev/acpi is handled via devd(8) */
#define	ACPI_EVF_WRITE	2	/* Device instance is opened writable. */
	int		notify_status;
#define	APM_EV_NONE	0	/* Device not yet aware of pending sleep. */
#define	APM_EV_NOTIFIED	1	/* Device saw next sleep state. */
#define	APM_EV_ACKED	2	/* Device agreed sleep can occur. */
	struct acpi_softc *acpi_sc;
	struct selinfo	sel_read;
};

#define	ACPI_PRW_MAX_POWERRES	8

struct acpi_prw_data {
	ACPI_HANDLE	gpe_handle;
	int		gpe_bit;
	int		lowest_wake;
	ACPI_OBJECT	power_res[ACPI_PRW_MAX_POWERRES];
	int		power_res_count;
};

/* Flags for each device defined in the AML namespace. */
#define	ACPI_FLAG_WAKE_ENABLED	0x1

/* Macros for extracting parts of a PCI address from an _ADR value. */
#define	ACPI_ADR_PCI_SLOT(adr)	(((adr) & 0xffff0000) >> 16)
#define	ACPI_ADR_PCI_FUNC(adr)	((adr) & 0xffff)

/*
 * Entry points to ACPI from above are global functions defined in this
 * file, sysctls, and I/O on the control device. Entry points from below
 * are interrupts (the SCI), notifies, task queue threads, and the thermal
 * zone polling thread.
 *
 * ACPI tables and global shared data are protected by a global lock
 * (acpi_mutex).
 *
 * Each ACPI device can have its own driver-specific mutex for protecting
 * shared access to local data. The ACPI_LOCK macros handle mutexes.
 *
 * Drivers that need to serialize access to functions (e.g., to route
 * interrupts, get/set control paths, etc.) should use the sx lock macros
 * (ACPI_SERIAL).
 *
 * ACPI-CA handles its own locking and should not be called with locks held.
 *
 * The most complicated path is:
 *     GPE -> EC runs _Qxx -> _Qxx reads EC space -> GPE
 */
extern struct mtx acpi_mutex;
#define	ACPI_LOCK(sys)			mtx_lock(&sys##_mutex)
#define	ACPI_UNLOCK(sys)		mtx_unlock(&sys##_mutex)
#define	ACPI_LOCK_ASSERT(sys)		mtx_assert(&sys##_mutex, MA_OWNED);
#define	ACPI_LOCK_DECL(sys, name)				\
	static struct mtx sys##_mutex;				\
	MTX_SYSINIT(sys##_mutex, &sys##_mutex, name, MTX_DEF)
#define	ACPI_SERIAL_BEGIN(sys)		sx_xlock(&sys##_sxlock)
#define	ACPI_SERIAL_END(sys)		sx_xunlock(&sys##_sxlock)
#define	ACPI_SERIAL_ASSERT(sys)		sx_assert(&sys##_sxlock, SX_XLOCKED);
#define	ACPI_SERIAL_DECL(sys, name)				\
	static struct sx sys##_sxlock;				\
	SX_SYSINIT(sys##_sxlock, &sys##_sxlock, name)

/*
 * ACPI CA does not define layers for non-ACPI CA drivers.
 * We define some here within the range provided.
 */
#define	ACPI_AC_ADAPTER		0x00010000
#define	ACPI_BATTERY		0x00020000
#define	ACPI_BUS		0x00040000
#define	ACPI_BUTTON		0x00080000
#define	ACPI_EC			0x00100000
#define	ACPI_FAN		0x00200000
#define	ACPI_POWERRES		0x00400000
#define	ACPI_PROCESSOR		0x00800000
#define	ACPI_THERMAL		0x01000000
#define	ACPI_TIMER		0x02000000
#define	ACPI_OEM		0x04000000

/*
 * Constants for different interrupt models used with acpi_SetIntrModel().
 */
#define	ACPI_INTR_PIC		0
#define	ACPI_INTR_APIC		1
#define	ACPI_INTR_SAPIC		2

/*
 * Various features and capabilities for the acpi_get_features() method.
 * In particular, these are used for the ACPI 3.0 _PDC and _OSC methods.
 * See the Intel document titled "Intel Processor Vendor-Specific ACPI",
 * number 302223-007.
 */
#define	ACPI_CAP_PERF_MSRS	(1 << 0)  /* Intel SpeedStep PERF_CTL MSRs */
#define	ACPI_CAP_C1_IO_HALT	(1 << 1)  /* Intel C1 "IO then halt" sequence */
#define	ACPI_CAP_THR_MSRS	(1 << 2)  /* Intel OnDemand throttling MSRs */
#define	ACPI_CAP_SMP_SAME	(1 << 3)  /* MP C1, Px, and Tx (all the same) */
#define	ACPI_CAP_SMP_SAME_C3	(1 << 4)  /* MP C2 and C3 (all the same) */
#define	ACPI_CAP_SMP_DIFF_PX	(1 << 5)  /* MP Px (different, using _PSD) */
#define	ACPI_CAP_SMP_DIFF_CX	(1 << 6)  /* MP Cx (different, using _CSD) */
#define	ACPI_CAP_SMP_DIFF_TX	(1 << 7)  /* MP Tx (different, using _TSD) */
#define	ACPI_CAP_SMP_C1_NATIVE	(1 << 8)  /* MP C1 support other than halt */
#define	ACPI_CAP_SMP_C3_NATIVE	(1 << 9)  /* MP C2 and C3 support */
#define	ACPI_CAP_PX_HW_COORD	(1 << 11) /* Intel P-state HW coordination */
#define	ACPI_CAP_INTR_CPPC	(1 << 12) /* Native Interrupt Handling for
	Collaborative Processor Performance Control notifications */
#define	ACPI_CAP_HW_DUTY_C	(1 << 13) /* Hardware Duty Cycling */

/*
 * Quirk flags.
 *
 * ACPI_Q_BROKEN: Disables all ACPI support.
 * ACPI_Q_TIMER: Disables support for the ACPI timer.
 * ACPI_Q_MADT_IRQ0: Specifies that ISA IRQ 0 is wired up to pin 0 of the
 *	first APIC and that the MADT should force that by ignoring the PC-AT
 *	compatible flag and ignoring overrides that redirect IRQ 0 to pin 2.
 */
extern int	acpi_quirks;
#define	ACPI_Q_OK		0
#define	ACPI_Q_BROKEN		(1 << 0)
#define	ACPI_Q_TIMER		(1 << 1)
#define	ACPI_Q_MADT_IRQ0	(1 << 2)

/*
 * Note that the low ivar values are reserved to provide
 * interface compatibility with ISA drivers which can also
 * attach to ACPI.
 */
#define	ACPI_IVAR_HANDLE	0x100
#define	ACPI_IVAR_UNUSED	0x101	/* Unused/reserved. */
#define	ACPI_IVAR_PRIVATE	0x102
#define	ACPI_IVAR_FLAGS		0x103

/*
 * Accessor functions for our ivars. Default value for BUS_READ_IVAR is
 * (type) 0. The accessor functions don't check return values.
 */
#define __ACPI_BUS_ACCESSOR(varp, var, ivarp, ivar, type)	\
								\
static __inline type varp ## _get_ ## var(device_t dev)		\
{								\
	uintptr_t v = 0;					\
	BUS_READ_IVAR(device_get_parent(dev), dev,		\
	    ivarp ## _IVAR_ ## ivar, &v);			\
	return ((type) v);					\
}								\
								\
static __inline void varp ## _set_ ## var(device_t dev, type t)	\
{								\
	uintptr_t v = (uintptr_t) t;				\
	BUS_WRITE_IVAR(device_get_parent(dev), dev,		\
	    ivarp ## _IVAR_ ## ivar, v);			\
}

__ACPI_BUS_ACCESSOR(acpi, handle, ACPI, HANDLE, ACPI_HANDLE)
__ACPI_BUS_ACCESSOR(acpi, private, ACPI, PRIVATE, void *)
__ACPI_BUS_ACCESSOR(acpi, flags, ACPI, FLAGS, int)
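/*
 * For instance, the first invocation above expands to the inline pair
 * acpi_get_handle(dev) and acpi_set_handle(dev, h), which read and
 * write the ACPI_IVAR_HANDLE ivar through the parent bus.
 */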
void		acpi_fake_objhandler(ACPI_HANDLE h, void *data);

static __inline device_t
acpi_get_device(ACPI_HANDLE handle)
{
	void *dev = NULL;

	AcpiGetData(handle, acpi_fake_objhandler, &dev);
	return ((device_t)dev);
}

static __inline ACPI_OBJECT_TYPE
acpi_get_type(device_t dev)
{
	ACPI_HANDLE h;
	ACPI_OBJECT_TYPE t;

	if ((h = acpi_get_handle(dev)) == NULL)
		return (ACPI_TYPE_NOT_FOUND);
	if (ACPI_FAILURE(AcpiGetType(h, &t)))
		return (ACPI_TYPE_NOT_FOUND);
	return (t);
}

/* Find the difference between two PM tick counts. */
static __inline uint32_t
acpi_TimerDelta(uint32_t end, uint32_t start)
{

	if (end < start && (AcpiGbl_FADT.Flags & ACPI_FADT_32BIT_TIMER) == 0)
		end |= 0x01000000;
	return (end - start);
}

#ifdef ACPI_DEBUGGER
void		acpi_EnterDebugger(void);
#endif

#ifdef ACPI_DEBUG
#include <sys/cons.h>
#define STEP(x)		do {printf x, printf("\n"); cngetc();} while (0)
#else
#define STEP(x)
#endif

#define ACPI_VPRINT(dev, acpi_sc, x...) do {			\
	if (acpi_get_verbose(acpi_sc))				\
		device_printf(dev, x);				\
} while (0)

/* Values for the first status word returned by _OSC. */
#define	ACPI_OSC_FAILURE	(1 << 1)
#define	ACPI_OSC_BAD_UUID	(1 << 2)
#define	ACPI_OSC_BAD_REVISION	(1 << 3)
#define	ACPI_OSC_CAPS_MASKED	(1 << 4)

#define ACPI_DEVINFO_PRESENT(x, flags)					\
	(((x) & (flags)) == (flags))
#define ACPI_DEVICE_PRESENT(x)						\
	ACPI_DEVINFO_PRESENT(x, ACPI_STA_DEVICE_PRESENT |		\
	    ACPI_STA_DEVICE_FUNCTIONING)
#define ACPI_BATTERY_PRESENT(x)						\
	ACPI_DEVINFO_PRESENT(x, ACPI_STA_DEVICE_PRESENT |		\
	    ACPI_STA_DEVICE_FUNCTIONING | ACPI_STA_BATTERY_PRESENT)

/* Callback function type for walking subtables within a table. */
typedef void acpi_subtable_handler(ACPI_SUBTABLE_HEADER *, void *);

BOOLEAN		acpi_DeviceIsPresent(device_t dev);
BOOLEAN		acpi_BatteryIsPresent(device_t dev);
ACPI_STATUS	acpi_GetHandleInScope(ACPI_HANDLE parent, char *path,
		    ACPI_HANDLE *result);
ACPI_BUFFER	*acpi_AllocBuffer(int size);
ACPI_STATUS	acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp,
		    UINT32 *number);
ACPI_STATUS	acpi_GetInteger(ACPI_HANDLE handle, char *path,
		    UINT32 *number);
ACPI_STATUS	acpi_SetInteger(ACPI_HANDLE handle, char *path,
		    UINT32 number);
ACPI_STATUS	acpi_ForeachPackageObject(ACPI_OBJECT *obj,
		    void (*func)(ACPI_OBJECT *comp, void *arg), void *arg);
ACPI_STATUS	acpi_FindIndexedResource(ACPI_BUFFER *buf, int index,
		    ACPI_RESOURCE **resp);
ACPI_STATUS	acpi_AppendBufferResource(ACPI_BUFFER *buf,
		    ACPI_RESOURCE *res);
UINT8		acpi_DSMQuery(ACPI_HANDLE h, uint8_t *uuid, int revision);
ACPI_STATUS	acpi_EvaluateDSM(ACPI_HANDLE handle, uint8_t *uuid,
		    int revision, uint64_t function,
		    union acpi_object *package, ACPI_BUFFER *out_buf);
ACPI_STATUS	acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid,
		    int revision, int count, uint32_t *caps_in,
		    uint32_t *caps_out, bool query);
ACPI_STATUS	acpi_OverrideInterruptLevel(UINT32 InterruptNumber);
ACPI_STATUS	acpi_SetIntrModel(int model);
int		acpi_ReqSleepState(struct acpi_softc *sc, int state);
int		acpi_AckSleepState(struct apm_clone_data *clone, int error);
ACPI_STATUS	acpi_SetSleepState(struct acpi_softc *sc, int state);
int		acpi_wake_set_enable(device_t dev, int enable);
int		acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw);
ACPI_STATUS	acpi_Startup(void);
void		acpi_UserNotify(const char *subsystem, ACPI_HANDLE h,
		    uint8_t notify);
int		acpi_bus_alloc_gas(device_t dev, int *type, int *rid,
		    ACPI_GENERIC_ADDRESS *gas, struct resource **res,
		    u_int flags);
void		acpi_walk_subtables(void *first, void *end,
		    acpi_subtable_handler *handler, void *arg);
BOOLEAN		acpi_has_hid(ACPI_HANDLE handle);
int		acpi_MatchHid(ACPI_HANDLE h, const char *hid);
#define	ACPI_MATCHHID_NOMATCH	0
#define	ACPI_MATCHHID_HID	1
#define	ACPI_MATCHHID_CID	2

struct acpi_parse_resource_set {
	void	(*set_init)(device_t dev, void *arg, void **context);
	void	(*set_done)(device_t dev, void *context);
	void	(*set_ioport)(device_t dev, void *context, uint64_t base,
		    uint64_t length);
	void	(*set_iorange)(device_t dev, void *context, uint64_t low,
		    uint64_t high, uint64_t length, uint64_t align);
	void	(*set_memory)(device_t dev, void *context, uint64_t base,
		    uint64_t length);
	void	(*set_memoryrange)(device_t dev, void *context, uint64_t low,
		    uint64_t high, uint64_t length, uint64_t align);
	void	(*set_irq)(device_t dev, void *context, uint8_t *irq,
		    int count, int trig, int pol);
	void	(*set_ext_irq)(device_t dev, void *context, uint32_t *irq,
		    int count, int trig, int pol);
	void	(*set_drq)(device_t dev, void *context, uint8_t *drq,
		    int count);
	void	(*set_start_dependent)(device_t dev, void *context,
		    int preference);
	void	(*set_end_dependent)(device_t dev, void *context);
};

extern struct acpi_parse_resource_set acpi_res_parse_set;

int		acpi_identify(void);
void		acpi_config_intr(device_t dev, ACPI_RESOURCE *res);
#ifdef INTRNG
int		acpi_map_intr(device_t dev, u_int irq, ACPI_HANDLE handle);
#endif
ACPI_STATUS	acpi_lookup_irq_resource(device_t dev, int rid,
		    struct resource *res, ACPI_RESOURCE *acpi_res);
ACPI_STATUS	acpi_parse_resources(device_t dev, ACPI_HANDLE handle,
		    struct acpi_parse_resource_set *set, void *arg);
struct resource *acpi_alloc_sysres(device_t child, int type, int *rid,
		    rman_res_t start, rman_res_t end, rman_res_t count,
		    u_int flags);

/* ACPI event handling */
UINT32		acpi_event_power_button_sleep(void *context);
UINT32		acpi_event_power_button_wake(void *context);
UINT32		acpi_event_sleep_button_sleep(void *context);
UINT32		acpi_event_sleep_button_wake(void *context);

#define	ACPI_EVENT_PRI_FIRST	0
#define	ACPI_EVENT_PRI_DEFAULT	10000
#define	ACPI_EVENT_PRI_LAST	20000

typedef void (*acpi_event_handler_t)(void *, int);

EVENTHANDLER_DECLARE(acpi_sleep_event, acpi_event_handler_t);
EVENTHANDLER_DECLARE(acpi_wakeup_event, acpi_event_handler_t);

/* Device power control. */
ACPI_STATUS	acpi_pwr_wake_enable(ACPI_HANDLE consumer, int enable);
ACPI_STATUS	acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state);
int		acpi_device_pwr_for_sleep(device_t bus, device_t dev,
		    int *dstate);
int		acpi_set_powerstate(device_t child, int state);

/* APM emulation */
void		acpi_apm_init(struct acpi_softc *);

/* Misc. */
static __inline struct acpi_softc *
acpi_device_get_parent_softc(device_t child)
{
	device_t parent;

	parent = device_get_parent(child);
	if (parent == NULL)
		return (NULL);
	return (device_get_softc(parent));
}

static __inline int
acpi_get_verbose(struct acpi_softc *sc)
{
	if (sc)
		return (sc->acpi_verbose);
	return (0);
}

char		*acpi_name(ACPI_HANDLE handle);
int		acpi_avoid(ACPI_HANDLE handle);
int		acpi_disabled(char *subsys);
int		acpi_machdep_init(device_t dev);
void		acpi_install_wakeup_handler(struct acpi_softc *sc);
int		acpi_sleep_machdep(struct acpi_softc *sc, int state);
int		acpi_wakeup_machdep(struct acpi_softc *sc, int state,
		    int sleep_result, int intr_enabled);
int		acpi_table_quirks(int *quirks);
int		acpi_machdep_quirks(int *quirks);
int		acpi_pnpinfo_str(ACPI_HANDLE handle, char *buf, size_t buflen);

uint32_t	hpet_get_uid(device_t dev);

/* Battery Abstraction. */
struct acpi_battinfo;

int		acpi_battery_register(device_t dev);
int		acpi_battery_remove(device_t dev);
int		acpi_battery_get_units(void);
int		acpi_battery_get_info_expire(void);
int		acpi_battery_bst_valid(struct acpi_bst *bst);
int		acpi_battery_bix_valid(struct acpi_bix *bix);
int		acpi_battery_get_battinfo(device_t dev,
		    struct acpi_battinfo *info);

/* Embedded controller. */
void		acpi_ec_ecdt_probe(device_t);

/* AC adapter interface. */
int		acpi_acad_get_acline(int *);

/* Package manipulation convenience functions. */
#define ACPI_PKG_VALID(pkg, size)				\
	((pkg) != NULL && (pkg)->Type == ACPI_TYPE_PACKAGE &&	\
	 (pkg)->Package.Count >= (size))
#define	ACPI_PKG_VALID_EQ(pkg, size)				\
	((pkg) != NULL && (pkg)->Type == ACPI_TYPE_PACKAGE &&	\
	 (pkg)->Package.Count == (size))
int		acpi_PkgInt(ACPI_OBJECT *res, int idx, UINT64 *dst);
int		acpi_PkgInt32(ACPI_OBJECT *res, int idx, uint32_t *dst);
int		acpi_PkgInt16(ACPI_OBJECT *res, int idx, uint16_t *dst);
int		acpi_PkgStr(ACPI_OBJECT *res, int idx, void *dst, size_t size);
int		acpi_PkgGas(device_t dev, ACPI_OBJECT *res, int idx, int *type,
		    int *rid, struct resource **dst, u_int flags);
int		acpi_PkgFFH_IntelCpu(ACPI_OBJECT *res, int idx, int *vendor,
		    int *class, uint64_t *address, int *accsize);
ACPI_HANDLE	acpi_GetReference(ACPI_HANDLE scope, ACPI_OBJECT *obj);

/*
 * Base level for BUS_ADD_CHILD. Special devices are added at orders less
 * than this, and normal devices at or above this level. This keeps the
 * probe order sorted so that things like sysresource are available before
 * their children need them.
 */
#define	ACPI_DEV_BASE_ORDER	100

/* Default maximum number of tasks to enqueue. */
#ifndef ACPI_MAX_TASKS
#define	ACPI_MAX_TASKS		MAX(32, MAXCPU * 4)
#endif

/* Default number of task queue threads to start. */
#ifndef ACPI_MAX_THREADS
#define	ACPI_MAX_THREADS	3
#endif

/* Use the device logging level for ktr(4). */
#define	KTR_ACPI		KTR_DEV

SYSCTL_DECL(_debug_acpi);

/*
 * Parse and use proximity information in SRAT and SLIT.
 */
int		acpi_pxm_init(int ncpus, vm_paddr_t maxphys);
void		acpi_pxm_parse_tables(void);
void		acpi_pxm_set_mem_locality(void);
void		acpi_pxm_set_cpu_locality(void);
int		acpi_pxm_get_cpu_locality(int apic_id);

/*
 * Map a PXM to a VM domain.
 *
 * Returns the VM domain ID if found, or -1 if not found / invalid.
 */
int		acpi_map_pxm_to_vm_domainid(int pxm);
int		acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op,
		    size_t setsize, cpuset_t *cpuset);
int		acpi_get_domain(device_t dev, device_t child, int *domain);

#ifdef __aarch64__
/*
 * ARM specific ACPI interfaces, relating to IORT table.
 */
int	acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid);
+int	acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, u_int *xref,
+	    u_int *devid);
int	acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm);
#endif
#endif /* _KERNEL */
#endif /* !_ACPIVAR_H_ */