Index: head/sys/amd64/vmm/amd/amdvi_hw.c =================================================================== --- head/sys/amd64/vmm/amd/amdvi_hw.c (revision 329359) +++ head/sys/amd64/vmm/amd/amdvi_hw.c (revision 329360) @@ -1,1473 +1,1459 @@ /*- * Copyright (c) 2016, Anish Gupta (anish@freebsd.org) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "io/iommu.h" #include "amdvi_priv.h" SYSCTL_DECL(_hw_vmm); SYSCTL_NODE(_hw_vmm, OID_AUTO, amdvi, CTLFLAG_RW, NULL, NULL); #define MOD_INC(a, s, m) (((a) + (s)) % ((m) * (s))) #define MOD_DEC(a, s, m) (((a) - (s)) % ((m) * (s))) /* Print RID or device ID in PCI string format. */ #define RID2PCI_STR(d) PCI_RID2BUS(d), PCI_RID2SLOT(d), PCI_RID2FUNC(d) static void amdvi_dump_cmds(struct amdvi_softc *softc); static void amdvi_print_dev_cap(struct amdvi_softc *softc); MALLOC_DEFINE(M_AMDVI, "amdvi", "amdvi"); extern device_t *ivhd_devs; extern int ivhd_count; SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, count, CTLFLAG_RDTUN, &ivhd_count, 0, NULL); static int amdvi_enable_user = 0; SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, enable, CTLFLAG_RDTUN, &amdvi_enable_user, 0, NULL); TUNABLE_INT("hw.vmm.amdvi_enable", &amdvi_enable_user); #ifdef AMDVI_ATS_ENABLE /* XXX: ATS is not tested. */ static int amdvi_enable_iotlb = 1; SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, iotlb_enabled, CTLFLAG_RDTUN, &amdvi_enable_iotlb, 0, NULL); TUNABLE_INT("hw.vmm.enable_iotlb", &amdvi_enable_iotlb); #endif static int amdvi_host_ptp = 1; /* Use page tables for host. */ SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, host_ptp, CTLFLAG_RDTUN, &amdvi_host_ptp, 0, NULL); TUNABLE_INT("hw.vmm.amdvi.host_ptp", &amdvi_host_ptp); /* Page table level used <= supported by h/w[v1=7]. */ static int amdvi_ptp_level = 4; SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, ptp_level, CTLFLAG_RDTUN, &amdvi_ptp_level, 0, NULL); TUNABLE_INT("hw.vmm.amdvi.ptp_level", &amdvi_ptp_level); /* Disable fault event reporting. 
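Setting this tunable causes amdvi_set_dte() to set sup_all_io_fault in each device table entry, so I/O page faults from that device are suppressed rather than logged.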
*/ static int amdvi_disable_io_fault = 0; SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, disable_io_fault, CTLFLAG_RDTUN, &amdvi_disable_io_fault, 0, NULL); TUNABLE_INT("hw.vmm.amdvi.disable_io_fault", &amdvi_disable_io_fault); static uint32_t amdvi_dom_id = 0; /* 0 is reserved for host. */ SYSCTL_UINT(_hw_vmm_amdvi, OID_AUTO, domain_id, CTLFLAG_RD, &amdvi_dom_id, 0, NULL); /* * Device table entry. * Bus(256) x Dev(32) x Fun(8) x DTE(256 bits or 32 bytes). * = 256 * 2 * PAGE_SIZE. */ static struct amdvi_dte amdvi_dte[PCI_NUM_DEV_MAX] __aligned(PAGE_SIZE); CTASSERT(PCI_NUM_DEV_MAX == 0x10000); CTASSERT(sizeof(amdvi_dte) == 0x200000); static SLIST_HEAD (, amdvi_domain) dom_head; static inline uint32_t amdvi_pci_read(struct amdvi_softc *softc, int off) { return (pci_cfgregread(PCI_RID2BUS(softc->pci_rid), PCI_RID2SLOT(softc->pci_rid), PCI_RID2FUNC(softc->pci_rid), off, 4)); } #ifdef AMDVI_ATS_ENABLE /* XXX: Should be in pci.c */ /* * Check if device has ATS capability and its enabled. * If ATS is absent or disabled, return (-1), otherwise ATS * queue length. */ static int amdvi_find_ats_qlen(uint16_t devid) { device_t dev; uint32_t off, cap; int qlen = -1; dev = pci_find_bsf(PCI_RID2BUS(devid), PCI_RID2SLOT(devid), PCI_RID2FUNC(devid)); if (!dev) { return (-1); } #define PCIM_ATS_EN BIT(31) if (pci_find_extcap(dev, PCIZ_ATS, &off) == 0) { cap = pci_read_config(dev, off + 4, 4); qlen = (cap & 0x1F); qlen = qlen ? qlen : 32; printf("AMD-Vi: PCI device %d.%d.%d ATS %s qlen=%d\n", RID2PCI_STR(devid), (cap & PCIM_ATS_EN) ? "enabled" : "Disabled", qlen); qlen = (cap & PCIM_ATS_EN) ? qlen : -1; } return (qlen); } /* * Check if an endpoint device support device IOTLB or ATS. */ static inline bool amdvi_dev_support_iotlb(struct amdvi_softc *softc, uint16_t devid) { struct ivhd_dev_cfg *cfg; int qlen, i; bool pci_ats, ivhd_ats; qlen = amdvi_find_ats_qlen(devid); if (qlen < 0) return (false); KASSERT(softc, ("softc is NULL")); cfg = softc->dev_cfg; ivhd_ats = false; for (i = 0; i < softc->dev_cfg_cnt; i++) { if ((cfg->start_id <= devid) && (cfg->end_id >= devid)) { ivhd_ats = cfg->enable_ats; break; } cfg++; } pci_ats = (qlen < 0) ? false : true; if (pci_ats != ivhd_ats) device_printf(softc->dev, "BIOS bug: mismatch in ATS setting for %d.%d.%d," "ATS inv qlen = %d\n", RID2PCI_STR(devid), qlen); /* Ignore IVRS setting and respect PCI setting. */ return (pci_ats); } #endif /* Enable IOTLB support for IOMMU if its supported. */ static inline void amdvi_hw_enable_iotlb(struct amdvi_softc *softc) { #ifndef AMDVI_ATS_ENABLE softc->iotlb = false; #else bool supported; supported = (softc->ivhd_flag & IVHD_FLAG_IOTLB) ? true : false; if (softc->pci_cap & AMDVI_PCI_CAP_IOTLB) { if (!supported) device_printf(softc->dev, "IOTLB disabled by BIOS.\n"); if (supported && !amdvi_enable_iotlb) { device_printf(softc->dev, "IOTLB disabled by user.\n"); supported = false; } } else supported = false; softc->iotlb = supported; #endif } static int amdvi_init_cmd(struct amdvi_softc *softc) { struct amdvi_ctrl *ctrl = softc->ctrl; ctrl->cmd.len = 8; /* Use 256 command buffer entries. */ softc->cmd_max = 1 << ctrl->cmd.len; softc->cmd = malloc(sizeof(struct amdvi_cmd) * softc->cmd_max, M_AMDVI, M_WAITOK | M_ZERO); if ((uintptr_t)softc->cmd & PAGE_MASK) panic("AMDVi: Command buffer not aligned on page boundary."); ctrl->cmd.base = vtophys(softc->cmd) / PAGE_SIZE; /* * XXX: Reset the h/w pointers in case IOMMU is restarting, * h/w doesn't clear these pointers based on empirical data. 
*/ ctrl->cmd_tail = 0; ctrl->cmd_head = 0; return (0); } /* * Note: Update tail pointer after we have written the command since tail * pointer update cause h/w to execute new commands, see section 3.3 * of AMD IOMMU spec ver 2.0. */ /* Get the command tail pointer w/o updating it. */ static struct amdvi_cmd * amdvi_get_cmd_tail(struct amdvi_softc *softc) { struct amdvi_ctrl *ctrl; struct amdvi_cmd *tail; KASSERT(softc, ("softc is NULL")); KASSERT(softc->cmd != NULL, ("cmd is NULL")); ctrl = softc->ctrl; KASSERT(ctrl != NULL, ("ctrl is NULL")); tail = (struct amdvi_cmd *)((uint8_t *)softc->cmd + ctrl->cmd_tail); return (tail); } /* * Update the command tail pointer which will start command execution. */ static void amdvi_update_cmd_tail(struct amdvi_softc *softc) { struct amdvi_ctrl *ctrl; int size; size = sizeof(struct amdvi_cmd); KASSERT(softc->cmd != NULL, ("cmd is NULL")); ctrl = softc->ctrl; KASSERT(ctrl != NULL, ("ctrl is NULL")); ctrl->cmd_tail = MOD_INC(ctrl->cmd_tail, size, softc->cmd_max); softc->total_cmd++; #ifdef AMDVI_DEBUG_CMD device_printf(softc->dev, "cmd_tail: %s Tail:0x%x, Head:0x%x.\n", ctrl->cmd_tail, ctrl->cmd_head); #endif } /* * Various commands supported by IOMMU. */ /* Completion wait command. */ static void amdvi_cmd_cmp(struct amdvi_softc *softc, const uint64_t data) { struct amdvi_cmd *cmd; uint64_t pa; cmd = amdvi_get_cmd_tail(softc); KASSERT(cmd != NULL, ("Cmd is NULL")); pa = vtophys(&softc->cmp_data); cmd->opcode = AMDVI_CMP_WAIT_OPCODE; cmd->word0 = (pa & 0xFFFFFFF8) | (AMDVI_CMP_WAIT_STORE); //(AMDVI_CMP_WAIT_FLUSH | AMDVI_CMP_WAIT_STORE); cmd->word1 = (pa >> 32) & 0xFFFFF; cmd->addr = data; amdvi_update_cmd_tail(softc); } /* Invalidate device table entry. */ static void amdvi_cmd_inv_dte(struct amdvi_softc *softc, uint16_t devid) { struct amdvi_cmd *cmd; cmd = amdvi_get_cmd_tail(softc); KASSERT(cmd != NULL, ("Cmd is NULL")); cmd->opcode = AMDVI_INVD_DTE_OPCODE; cmd->word0 = devid; amdvi_update_cmd_tail(softc); #ifdef AMDVI_DEBUG_CMD device_printf(softc->dev, "Invalidated DTE:0x%x\n", devid); #endif } /* Invalidate IOMMU page, use for invalidation of domain. */ static void amdvi_cmd_inv_iommu_pages(struct amdvi_softc *softc, uint16_t domain_id, uint64_t addr, bool guest_nested, bool pde, bool page) { struct amdvi_cmd *cmd; cmd = amdvi_get_cmd_tail(softc); KASSERT(cmd != NULL, ("Cmd is NULL")); cmd->opcode = AMDVI_INVD_PAGE_OPCODE; cmd->word1 = domain_id; /* * Invalidate all addresses for this domain. */ cmd->addr = addr; cmd->addr |= pde ? AMDVI_INVD_PAGE_PDE : 0; cmd->addr |= page ? AMDVI_INVD_PAGE_S : 0; amdvi_update_cmd_tail(softc); } #ifdef AMDVI_ATS_ENABLE /* Invalidate device IOTLB. */ static void amdvi_cmd_inv_iotlb(struct amdvi_softc *softc, uint16_t devid) { struct amdvi_cmd *cmd; int qlen; if (!softc->iotlb) return; qlen = amdvi_find_ats_qlen(devid); if (qlen < 0) { panic("AMDVI: Invalid ATS qlen(%d) for device %d.%d.%d\n", qlen, RID2PCI_STR(devid)); } cmd = amdvi_get_cmd_tail(softc); KASSERT(cmd != NULL, ("Cmd is NULL")); #ifdef AMDVI_DEBUG_CMD device_printf(softc->dev, "Invalidate IOTLB devID 0x%x" " Qlen:%d\n", devid, qlen); #endif cmd->opcode = AMDVI_INVD_IOTLB_OPCODE; cmd->word0 = devid; cmd->word1 = qlen; cmd->addr = AMDVI_INVD_IOTLB_ALL_ADDR | AMDVI_INVD_IOTLB_S; amdvi_update_cmd_tail(softc); } #endif #ifdef notyet /* For Interrupt Remap. 
*/ static void amdvi_cmd_inv_intr_map(struct amdvi_softc *softc, uint16_t devid) { struct amdvi_cmd *cmd; cmd = amdvi_get_cmd_tail(softc); KASSERT(cmd != NULL, ("Cmd is NULL")); cmd->opcode = AMDVI_INVD_INTR_OPCODE; cmd->word0 = devid; amdvi_update_cmd_tail(softc); #ifdef AMDVI_DEBUG_CMD device_printf(softc->dev, "Invalidate INTR map of devID 0x%x\n", devid); #endif } #endif /* Invalidate domain using INVALIDATE_IOMMU_PAGES command. */ static void amdvi_inv_domain(struct amdvi_softc *softc, uint16_t domain_id) { struct amdvi_cmd *cmd; cmd = amdvi_get_cmd_tail(softc); KASSERT(cmd != NULL, ("Cmd is NULL")); /* * See section 3.3.3 of IOMMU spec rev 2.0, software note * for invalidating domain. */ amdvi_cmd_inv_iommu_pages(softc, domain_id, AMDVI_INVD_PAGE_ALL_ADDR, false, true, true); #ifdef AMDVI_DEBUG_CMD device_printf(softc->dev, "Invalidate domain:0x%x\n", domain_id); #endif } static bool amdvi_cmp_wait(struct amdvi_softc *softc) { struct amdvi_ctrl *ctrl; const uint64_t VERIFY = 0xA5A5; volatile uint64_t *read; int i; bool status; ctrl = softc->ctrl; read = &softc->cmp_data; *read = 0; amdvi_cmd_cmp(softc, VERIFY); /* Wait for h/w to update completion data. */ for (i = 0; i < 100 && (*read != VERIFY); i++) { DELAY(1000); /* 1 ms */ } status = (VERIFY == softc->cmp_data) ? true : false; #ifdef AMDVI_DEBUG_CMD if (status) device_printf(softc->dev, "CMD completion DONE Tail:0x%x, " "Head:0x%x, loop:%d.\n", ctrl->cmd_tail, ctrl->cmd_head, loop); #endif return (status); } static void amdvi_wait(struct amdvi_softc *softc) { struct amdvi_ctrl *ctrl; int i; KASSERT(softc, ("softc is NULL")); ctrl = softc->ctrl; KASSERT(ctrl != NULL, ("ctrl is NULL")); /* Don't wait if h/w is not enabled. */ if ((ctrl->control & AMDVI_CTRL_EN) == 0) return; for (i = 0; i < 10; i++) { if (amdvi_cmp_wait(softc)) return; } device_printf(softc->dev, "Error: completion failed" " tail:0x%x, head:0x%x.\n", ctrl->cmd_tail, ctrl->cmd_head); amdvi_dump_cmds(softc); } static void amdvi_dump_cmds(struct amdvi_softc *softc) { struct amdvi_ctrl *ctrl; struct amdvi_cmd *cmd; int off, i; ctrl = softc->ctrl; device_printf(softc->dev, "Dump all the commands:\n"); /* * If h/w is stuck in completion, it is the previous command, * start dumping from previous command onward. */ off = MOD_DEC(ctrl->cmd_head, sizeof(struct amdvi_cmd), softc->cmd_max); for (i = 0; off != ctrl->cmd_tail && i < softc->cmd_max; i++) { cmd = (struct amdvi_cmd *)((uint8_t *)softc->cmd + off); printf(" [CMD%d, off:0x%x] opcode= 0x%x 0x%x" " 0x%x 0x%lx\n", i, off, cmd->opcode, cmd->word0, cmd->word1, cmd->addr); off = (off + sizeof(struct amdvi_cmd)) % (softc->cmd_max * sizeof(struct amdvi_cmd)); } } static int amdvi_init_event(struct amdvi_softc *softc) { struct amdvi_ctrl *ctrl; ctrl = softc->ctrl; ctrl->event.len = 8; softc->event_max = 1 << ctrl->event.len; softc->event = malloc(sizeof(struct amdvi_event) * softc->event_max, M_AMDVI, M_WAITOK | M_ZERO); if ((uintptr_t)softc->event & PAGE_MASK) { device_printf(softc->dev, "Event buffer not aligned on page."); return (false); } ctrl->event.base = vtophys(softc->event) / PAGE_SIZE; /* Reset the pointers. 
*/ ctrl->evt_head = 0; ctrl->evt_tail = 0; return (0); } static inline void amdvi_decode_evt_flag(uint16_t flag) { flag &= AMDVI_EVENT_FLAG_MASK; printf(" 0x%b]\n", flag, "\020" "\001GN" "\002NX" "\003US" "\004I" "\005PR" "\006RW" "\007PE" "\010RZ" "\011TR" ); } /* See section 2.5.4 of AMD IOMMU spec ver 2.62.*/ static inline void amdvi_decode_evt_flag_type(uint8_t type) { switch (AMDVI_EVENT_FLAG_TYPE(type)) { case 0: printf("RSVD\n"); break; case 1: printf("Master Abort\n"); break; case 2: printf("Target Abort\n"); break; case 3: printf("Data Err\n"); break; default: break; } } static void amdvi_decode_inv_dte_evt(uint16_t devid, uint16_t domid, uint64_t addr, uint16_t flag) { printf("\t[IO_PAGE_FAULT EVT: devId:0x%x DomId:0x%x" " Addr:0x%lx", devid, domid, addr); amdvi_decode_evt_flag(flag); } static void amdvi_decode_pf_evt(uint16_t devid, uint16_t domid, uint64_t addr, uint16_t flag) { printf("\t[IO_PAGE_FAULT EVT: devId:0x%x DomId:0x%x" " Addr:0x%lx", devid, domid, addr); amdvi_decode_evt_flag(flag); } static void amdvi_decode_dte_hwerr_evt(uint16_t devid, uint16_t domid, uint64_t addr, uint16_t flag) { printf("\t[DEV_TAB_HW_ERR EVT: devId:0x%x DomId:0x%x" " Addr:0x%lx", devid, domid, addr); amdvi_decode_evt_flag(flag); amdvi_decode_evt_flag_type(flag); } static void amdvi_decode_page_hwerr_evt(uint16_t devid, uint16_t domid, uint64_t addr, uint16_t flag) { printf("\t[PAGE_TAB_HW_ERR EVT: devId:0x%x DomId:0x%x" " Addr:0x%lx", devid, domid, addr); amdvi_decode_evt_flag(flag); amdvi_decode_evt_flag_type(AMDVI_EVENT_FLAG_TYPE(flag)); } static void amdvi_decode_evt(struct amdvi_event *evt) { struct amdvi_cmd *cmd; switch (evt->opcode) { case AMDVI_EVENT_INVALID_DTE: amdvi_decode_inv_dte_evt(evt->devid, evt->pasid_domid, evt->addr, evt->flag); break; case AMDVI_EVENT_PFAULT: amdvi_decode_pf_evt(evt->devid, evt->pasid_domid, evt->addr, evt->flag); break; case AMDVI_EVENT_DTE_HW_ERROR: amdvi_decode_dte_hwerr_evt(evt->devid, evt->pasid_domid, evt->addr, evt->flag); break; case AMDVI_EVENT_PAGE_HW_ERROR: amdvi_decode_page_hwerr_evt(evt->devid, evt->pasid_domid, evt->addr, evt->flag); break; case AMDVI_EVENT_ILLEGAL_CMD: /* FALL THROUGH */ case AMDVI_EVENT_CMD_HW_ERROR: printf("\t[%s EVT]\n", (evt->opcode == AMDVI_EVENT_ILLEGAL_CMD) ? 
"ILLEGAL CMD" : "CMD HW ERR"); cmd = (struct amdvi_cmd *)PHYS_TO_DMAP(evt->addr); printf("\tCMD opcode= 0x%x 0x%x 0x%x 0x%lx\n", cmd->opcode, cmd->word0, cmd->word1, cmd->addr); break; case AMDVI_EVENT_IOTLB_TIMEOUT: printf("\t[IOTLB_INV_TIMEOUT devid:0x%x addr:0x%lx]\n", evt->devid, evt->addr); break; case AMDVI_EVENT_INVALID_DTE_REQ: printf("\t[INV_DTE devid:0x%x addr:0x%lx type:0x%x tr:%d]\n", evt->devid, evt->addr, evt->flag >> 9, (evt->flag >> 8) & 1); break; case AMDVI_EVENT_INVALID_PPR_REQ: case AMDVI_EVENT_COUNTER_ZERO: printf("AMD-Vi: v2 events.\n"); break; default: printf("Unsupported AMD-Vi event:%d\n", evt->opcode); } } static void amdvi_print_events(struct amdvi_softc *softc) { struct amdvi_ctrl *ctrl; struct amdvi_event *event; int i, size; ctrl = softc->ctrl; size = sizeof(struct amdvi_event); for (i = 0; i < softc->event_max; i++) { event = &softc->event[ctrl->evt_head / size]; if (!event->opcode) break; device_printf(softc->dev, "\t[Event%d: Head:0x%x Tail:0x%x]\n", i, ctrl->evt_head, ctrl->evt_tail); amdvi_decode_evt(event); ctrl->evt_head = MOD_INC(ctrl->evt_head, size, softc->event_max); } } static int amdvi_init_dte(struct amdvi_softc *softc) { struct amdvi_ctrl *ctrl; ctrl = softc->ctrl; ctrl->dte.base = vtophys(amdvi_dte) / PAGE_SIZE; ctrl->dte.size = 0x1FF; /* 2MB device table. */ return (0); } /* * Not all capabilities of IOMMU are available in ACPI IVHD flag * or EFR entry, read directly from device. */ static int amdvi_print_pci_cap(device_t dev) { struct amdvi_softc *softc; uint32_t off, cap; softc = device_get_softc(dev); off = softc->cap_off; /* * Section 3.7.1 of IOMMU sepc rev 2.0. * Read capability from device. */ cap = amdvi_pci_read(softc, off); /* Make sure capability type[18:16] is 3. */ KASSERT((((cap >> 16) & 0x7) == 0x3), ("Not a IOMMU capability 0x%x@0x%x", cap, off)); softc->pci_cap = cap >> 24; device_printf(softc->dev, "PCI cap 0x%x@0x%x feature:%b\n", cap, off, softc->pci_cap, - "\020\001IOTLB\002HT\003NPCache\004EFR"); + "\20\1IOTLB\2HT\3NPCache\4EFR\5CapExt"); - /* IOMMU spec Rev 2.0, section 3.7.2.1 */ - softc->pci_efr = softc->ctrl->ex_feature; - if (softc->pci_efr) { - device_printf(softc->dev, "PCI extended Feature:%b\n", - (int)softc->pci_efr, - "\020\001PreFSup\002PPRSup\003XTSup\004NXSup\006IASup" - "\007GASup\008HESup\009PCSup"); - device_printf(softc->dev, - "PCI HATS = %d GATS = %d GLXSup = %d, max PASID: 0x%x ", - (int)((softc->pci_efr >> 10) & 0x3), - (int)((softc->pci_efr >> 12) & 0x3), - (int)((softc->pci_efr >> 14) & 0x3), - (int)((softc->pci_efr >> 32) & 0x1F) + 1); - } - return (0); } static void amdvi_event_intr(void *arg) { struct amdvi_softc *softc; struct amdvi_ctrl *ctrl; softc = (struct amdvi_softc *)arg; ctrl = softc->ctrl; device_printf(softc->dev, "EVT INTR %ld Status:0x%x" " EVT Head:0x%x Tail:0x%x]\n", softc->event_intr_cnt++, ctrl->status, ctrl->evt_head, ctrl->evt_tail); printf(" [CMD Total 0x%lx] Tail:0x%x, Head:0x%x.\n", softc->total_cmd, ctrl->cmd_tail, ctrl->cmd_head); amdvi_print_events(softc); ctrl->status &= AMDVI_STATUS_EV_OF | AMDVI_STATUS_EV_INTR; } static void amdvi_free_evt_intr_res(device_t dev) { struct amdvi_softc *softc; softc = device_get_softc(dev); if (softc->event_tag != NULL) { bus_teardown_intr(dev, softc->event_res, softc->event_tag); } if (softc->event_res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, softc->event_rid, softc->event_res); } bus_delete_resource(dev, SYS_RES_IRQ, softc->event_rid); PCIB_RELEASE_MSI(device_get_parent(device_get_parent(dev)), dev, 1, &softc->event_irq); } 
static bool amdvi_alloc_intr_resources(struct amdvi_softc *softc) { struct amdvi_ctrl *ctrl; device_t dev, pcib; device_t mmio_dev; uint64_t msi_addr; uint32_t msi_data; int err; dev = softc->dev; pcib = device_get_parent(device_get_parent(dev)); mmio_dev = pci_find_bsf(PCI_RID2BUS(softc->pci_rid), PCI_RID2SLOT(softc->pci_rid), PCI_RID2FUNC(softc->pci_rid)); if (device_is_attached(mmio_dev)) { device_printf(dev, "warning: IOMMU device is claimed by another driver %s\n", device_get_driver(mmio_dev)->name); } softc->event_irq = -1; softc->event_rid = 0; /* * Section 3.7.1 of IOMMU rev 2.0. With MSI, there is only one * interrupt. XXX: Enable MSI/X support. */ err = PCIB_ALLOC_MSI(pcib, dev, 1, 1, &softc->event_irq); if (err) { device_printf(dev, "Couldn't find event MSI IRQ resource.\n"); return (ENOENT); } err = bus_set_resource(dev, SYS_RES_IRQ, softc->event_rid, softc->event_irq, 1); if (err) { device_printf(dev, "Couldn't set event MSI resource.\n"); return (ENXIO); } softc->event_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &softc->event_rid, RF_ACTIVE); if (!softc->event_res) { device_printf(dev, "Unable to allocate event INTR resource.\n"); return (ENOMEM); } if (bus_setup_intr(dev, softc->event_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, amdvi_event_intr, softc, &softc->event_tag)) { device_printf(dev, "Fail to setup event intr\n"); bus_release_resource(softc->dev, SYS_RES_IRQ, softc->event_rid, softc->event_res); softc->event_res = NULL; return (ENXIO); } bus_describe_intr(dev, softc->event_res, softc->event_tag, "fault"); err = PCIB_MAP_MSI(pcib, dev, softc->event_irq, &msi_addr, &msi_data); if (err) { device_printf(dev, "Event interrupt config failed, err=%d.\n", err); amdvi_free_evt_intr_res(softc->dev); return (err); } /* Clear interrupt status bits. */ ctrl = softc->ctrl; ctrl->status &= AMDVI_STATUS_EV_OF | AMDVI_STATUS_EV_INTR; /* Now enable MSI interrupt. */ pci_enable_msi(mmio_dev, msi_addr, msi_data); return (0); } static void amdvi_print_dev_cap(struct amdvi_softc *softc) { struct ivhd_dev_cfg *cfg; int i; cfg = softc->dev_cfg; for (i = 0; i < softc->dev_cfg_cnt; i++) { device_printf(softc->dev, "device [0x%x - 0x%x]" "config:%b%s\n", cfg->start_id, cfg->end_id, cfg->data, "\020\001INIT\002ExtInt\003NMI" "\007LINT0\008LINT1", cfg->enable_ats ? 
"ATS enabled" : ""); cfg++; } } static int amdvi_handle_sysctl(SYSCTL_HANDLER_ARGS) { struct amdvi_softc *softc; int result, type, error = 0; softc = (struct amdvi_softc *)arg1; type = arg2; switch (type) { case 0: result = softc->ctrl->cmd_head; error = sysctl_handle_int(oidp, &result, 0, req); break; case 1: result = softc->ctrl->cmd_tail; error = sysctl_handle_int(oidp, &result, 0, req); break; case 2: result = softc->ctrl->evt_head; error = sysctl_handle_int(oidp, &result, 0, req); break; case 3: result = softc->ctrl->evt_tail; error = sysctl_handle_int(oidp, &result, 0, req); break; default: device_printf(softc->dev, "Unknown sysctl:%d\n", type); } return (error); } static void amdvi_add_sysctl(struct amdvi_softc *softc) { struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; device_t dev; dev = softc->dev; ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "event_intr_count", CTLFLAG_RD, &softc->event_intr_cnt, "Event interrupt count"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "command_count", CTLFLAG_RD, &softc->total_cmd, "Command submitted count"); SYSCTL_ADD_U16(ctx, child, OID_AUTO, "pci_rid", CTLFLAG_RD, &softc->pci_rid, 0, "IOMMU RID"); SYSCTL_ADD_U16(ctx, child, OID_AUTO, "start_dev_rid", CTLFLAG_RD, &softc->start_dev_rid, 0, "Start of device under this IOMMU"); SYSCTL_ADD_U16(ctx, child, OID_AUTO, "end_dev_rid", CTLFLAG_RD, &softc->end_dev_rid, 0, "End of device under this IOMMU"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "command_head", CTLTYPE_UINT | CTLFLAG_RD, softc, 0, amdvi_handle_sysctl, "IU", "Command head"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "command_tail", CTLTYPE_UINT | CTLFLAG_RD, softc, 1, amdvi_handle_sysctl, "IU", "Command tail"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "event_head", CTLTYPE_UINT | CTLFLAG_RD, softc, 2, amdvi_handle_sysctl, "IU", "Command head"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "event_tail", CTLTYPE_UINT | CTLFLAG_RD, softc, 3, amdvi_handle_sysctl, "IU", "Command tail"); } int amdvi_setup_hw(struct amdvi_softc *softc) { device_t dev; int status; dev = softc->dev; amdvi_hw_enable_iotlb(softc); amdvi_print_dev_cap(softc); if ((status = amdvi_print_pci_cap(dev)) != 0) { device_printf(dev, "PCI capability.\n"); return (status); } if ((status = amdvi_init_cmd(softc)) != 0) { device_printf(dev, "Couldn't configure command buffer.\n"); return (status); } if ((status = amdvi_init_event(softc)) != 0) { device_printf(dev, "Couldn't configure event buffer.\n"); return (status); } if ((status = amdvi_init_dte(softc)) != 0) { device_printf(dev, "Couldn't configure device table.\n"); return (status); } if ((status = amdvi_alloc_intr_resources(softc)) != 0) { return (status); } amdvi_add_sysctl(softc); return (0); } int amdvi_teardown_hw(struct amdvi_softc *softc) { device_t dev; dev = softc->dev; /* * Called after disable, h/w is stopped by now, free all the resources. */ amdvi_free_evt_intr_res(dev); if (softc->cmd) free(softc->cmd, M_AMDVI); if (softc->event) free(softc->event, M_AMDVI); return (0); } /*********** bhyve interfaces *********************/ static int amdvi_init(void) { if (!ivhd_count) { return (EIO); } if (!amdvi_enable_user && ivhd_count) { printf("bhyve: Found %d AMD-Vi/IOMMU device(s), " - "use hw.vmm.amdvi_enable=1 to enable pass-through.\n", + "use hw.vmm.amdvi.enable=1 to enable pass-through.\n", ivhd_count); return (EINVAL); } return (0); } static void amdvi_cleanup(void) { /* Nothing. 
*/ } static uint16_t amdvi_domainId(void) { /* * If we hit maximum domain limit, rollover leaving host * domain(0). * XXX: make sure that this domain is not used. */ if (amdvi_dom_id == AMDVI_MAX_DOMAIN) amdvi_dom_id = 1; return ((uint16_t)amdvi_dom_id++); } static void amdvi_do_inv_domain(uint16_t domain_id, bool create) { struct amdvi_softc *softc; int i; for (i = 0; i < ivhd_count; i++) { softc = device_get_softc(ivhd_devs[i]); KASSERT(softc, ("softc is NULL")); /* * If not present pages are cached, invalidate page after * creating domain. */ #if 0 if (create && ((softc->pci_cap & AMDVI_PCI_CAP_NPCACHE) == 0)) continue; #endif amdvi_inv_domain(softc, domain_id); amdvi_wait(softc); } } static void * amdvi_create_domain(vm_paddr_t maxaddr) { struct amdvi_domain *dom; dom = malloc(sizeof(struct amdvi_domain), M_AMDVI, M_ZERO | M_WAITOK); dom->id = amdvi_domainId(); //dom->maxaddr = maxaddr; #ifdef AMDVI_DEBUG_CMD printf("Created domain #%d\n", dom->id); #endif /* * Host domain(#0) don't create translation table. */ if (dom->id || amdvi_host_ptp) dom->ptp = malloc(PAGE_SIZE, M_AMDVI, M_WAITOK | M_ZERO); dom->ptp_level = amdvi_ptp_level; amdvi_do_inv_domain(dom->id, true); SLIST_INSERT_HEAD(&dom_head, dom, next); return (dom); } static void amdvi_free_ptp(uint64_t *ptp, int level) { int i; if (level < 1) return; for (i = 0; i < NPTEPG ; i++) { if ((ptp[i] & AMDVI_PT_PRESENT) == 0) continue; /* XXX: Add super-page or PTE mapping > 4KB. */ #ifdef notyet /* Super-page mapping. */ if (AMDVI_PD_SUPER(ptp[i])) continue; #endif amdvi_free_ptp((uint64_t *)PHYS_TO_DMAP(ptp[i] & AMDVI_PT_MASK), level - 1); } free(ptp, M_AMDVI); } static void amdvi_destroy_domain(void *arg) { struct amdvi_domain *domain; domain = (struct amdvi_domain *)arg; KASSERT(domain, ("domain is NULL")); #ifdef AMDVI_DEBUG_CMD printf("Destroying domain %d\n", domain->id); #endif if (domain->ptp) amdvi_free_ptp(domain->ptp, domain->ptp_level); amdvi_do_inv_domain(domain->id, false); SLIST_REMOVE(&dom_head, domain, amdvi_domain, next); free(domain, M_AMDVI); } static uint64_t amdvi_set_pt(uint64_t *pt, int level, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t pg_size, bool create) { uint64_t *page, pa; int shift, index; const int PT_SHIFT = 9; const int PT_INDEX_MASK = (1 << PT_SHIFT) - 1; /* Based on PT_SHIFT */ if (!pg_size) return (0); if (hpa & (pg_size - 1)) { printf("HPA is not size aligned.\n"); return (0); } if (gpa & (pg_size - 1)) { printf("HPA is not size aligned.\n"); return (0); } shift = PML4SHIFT; while ((shift > PAGE_SHIFT) && (pg_size < (1UL << shift))) { index = (gpa >> shift) & PT_INDEX_MASK; if ((pt[index] == 0) && create) { page = malloc(PAGE_SIZE, M_AMDVI, M_WAITOK | M_ZERO); pa = vtophys(page); pt[index] = pa | AMDVI_PT_PRESENT | AMDVI_PT_RW | ((level - 1) << AMDVI_PD_LEVEL_SHIFT); } #ifdef AMDVI_DEBUG_PTE if ((gpa % 0x1000000) == 0) printf("[level%d, shift = %d]PTE:0x%lx\n", level, shift, pt[index]); #endif #define PTE2PA(x) ((uint64_t)(x) & AMDVI_PT_MASK) pa = PTE2PA(pt[index]); pt = (uint64_t *)PHYS_TO_DMAP(pa); shift -= PT_SHIFT; level--; } /* Leaf entry. 
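At this point the walk above has descended to the table whose entries map pages of the requested size; the slot selected below is written with hpa (create) or cleared (destroy), and the caller advances by the 1 << shift bytes returned.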
*/ index = (gpa >> shift) & PT_INDEX_MASK; if (create) { pt[index] = hpa | AMDVI_PT_RW | AMDVI_PT_PRESENT; } else pt[index] = 0; #ifdef AMDVI_DEBUG_PTE if ((gpa % 0x1000000) == 0) printf("[Last level%d, shift = %d]PTE:0x%lx\n", level, shift, pt[index]); #endif return (1ULL << shift); } static uint64_t amdvi_update_mapping(struct amdvi_domain *domain, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t size, bool create) { uint64_t mapped, *ptp, len; int level; KASSERT(domain, ("domain is NULL")); level = domain->ptp_level; KASSERT(level, ("Page table level is 0")); ptp = domain->ptp; KASSERT(ptp, ("PTP is NULL")); mapped = 0; while (mapped < size) { len = amdvi_set_pt(ptp, level, gpa + mapped, hpa + mapped, PAGE_SIZE, create); if (!len) { printf("Error: Couldn't map HPA:0x%lx GPA:0x%lx\n", hpa, gpa); return (0); } mapped += len; } return (mapped); } static uint64_t amdvi_create_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t len) { struct amdvi_domain *domain; domain = (struct amdvi_domain *)arg; if (domain->id && !domain->ptp) { printf("ptp is NULL"); return (-1); } /* * If host domain is created w/o page table, skip IOMMU page * table set-up. */ if (domain->ptp) return (amdvi_update_mapping(domain, gpa, hpa, len, true)); else return (len); } static uint64_t amdvi_destroy_mapping(void *arg, vm_paddr_t gpa, uint64_t len) { struct amdvi_domain *domain; domain = (struct amdvi_domain *)arg; /* * If host domain is created w/o page table, skip IOMMU page * table set-up. */ if (domain->ptp) return (amdvi_update_mapping(domain, gpa, 0, len, false)); return (len); } static struct amdvi_softc * amdvi_find_iommu(uint16_t devid) { struct amdvi_softc *softc; int i; for (i = 0; i < ivhd_count; i++) { softc = device_get_softc(ivhd_devs[i]); if ((devid >= softc->start_dev_rid) && (devid <= softc->end_dev_rid)) return (softc); } /* * XXX: BIOS bug, device not in IVRS table, assume its from first IOMMU. */ printf("BIOS bug device(%d.%d.%d) doesn't have IVHD entry.\n", RID2PCI_STR(devid)); return (device_get_softc(ivhd_devs[0])); } /* * Set-up device table entry. * IOMMU spec Rev 2.0, section 3.2.2.2, some of the fields must * be set concurrently, e.g. read and write bits. */ static void amdvi_set_dte(struct amdvi_domain *domain, uint16_t devid, bool enable) { struct amdvi_softc *softc; - struct amdvi_dte temp; + struct amdvi_dte* temp; + KASSERT(domain, ("domain is NULL for pci_rid:0x%x\n", devid)); + softc = amdvi_find_iommu(devid); KASSERT(softc, ("softc is NULL for pci_rid:0x%x\n", devid)); - memset(&temp, 0, sizeof(struct amdvi_dte)); + temp = &amdvi_dte[devid]; #ifdef AMDVI_ATS_ENABLE /* If IOMMU and device support IOTLB, enable it. */ if (amdvi_dev_support_iotlb(softc, devid) && softc->iotlb) - temp.iotlb_enable = 1; + temp->iotlb_enable = 1; #endif /* Avoid duplicate I/O faults. */ - temp.sup_second_io_fault = 1; - temp.sup_all_io_fault = amdvi_disable_io_fault; + temp->sup_second_io_fault = 1; + temp->sup_all_io_fault = amdvi_disable_io_fault; - temp.dt_valid = 1; - temp.domain_id = domain->id; + temp->dt_valid = 1; + temp->domain_id = domain->id; if (enable) { if (domain->ptp) { - temp.pt_base = vtophys(domain->ptp) >> 12; - temp.pt_level = amdvi_ptp_level; + temp->pt_base = vtophys(domain->ptp) >> 12; + temp->pt_level = amdvi_ptp_level; } /* * XXX: Page table valid[TV] bit must be set even if host domain * page tables are not enabled. 
*/ - temp.pt_valid = 1; - temp.read_allow = 1; - temp.write_allow = 1; + temp->pt_valid = 1; + temp->read_allow = 1; + temp->write_allow = 1; } - amdvi_dte[devid] = temp; } static void amdvi_inv_device(uint16_t devid) { struct amdvi_softc *softc; softc = amdvi_find_iommu(devid); KASSERT(softc, ("softc is NULL")); amdvi_cmd_inv_dte(softc, devid); #ifdef AMDVI_ATS_ENABLE if (amdvi_dev_support_iotlb(softc, devid)) amdvi_cmd_inv_iotlb(softc, devid); #endif amdvi_wait(softc); } static void amdvi_add_device(void *arg, uint16_t devid) { struct amdvi_domain *domain; domain = (struct amdvi_domain *)arg; KASSERT(domain != NULL, ("domain is NULL")); #ifdef AMDVI_DEBUG_CMD printf("Assigning device(%d.%d.%d) to domain:%d\n", RID2PCI_STR(devid), domain->id); #endif amdvi_set_dte(domain, devid, true); amdvi_inv_device(devid); } static void amdvi_remove_device(void *arg, uint16_t devid) { struct amdvi_domain *domain; domain = (struct amdvi_domain *)arg; #ifdef AMDVI_DEBUG_CMD printf("Remove device(0x%x) from domain:%d\n", devid, domain->id); #endif amdvi_set_dte(domain, devid, false); amdvi_inv_device(devid); } static void amdvi_enable(void) { struct amdvi_ctrl *ctrl; struct amdvi_softc *softc; uint64_t val; int i; for (i = 0; i < ivhd_count; i++) { softc = device_get_softc(ivhd_devs[i]); KASSERT(softc, ("softc is NULL\n")); ctrl = softc->ctrl; KASSERT(ctrl, ("ctrl is NULL\n")); val = ( AMDVI_CTRL_EN | AMDVI_CTRL_CMD | AMDVI_CTRL_ELOG | AMDVI_CTRL_ELOGINT | AMDVI_CTRL_INV_TO_1S); if (softc->ivhd_flag & IVHD_FLAG_COH) val |= AMDVI_CTRL_COH; if (softc->ivhd_flag & IVHD_FLAG_HTT) val |= AMDVI_CTRL_HTT; if (softc->ivhd_flag & IVHD_FLAG_RPPW) val |= AMDVI_CTRL_RPPW; if (softc->ivhd_flag & IVHD_FLAG_PPW) val |= AMDVI_CTRL_PPW; if (softc->ivhd_flag & IVHD_FLAG_ISOC) val |= AMDVI_CTRL_ISOC; ctrl->control = val; } } static void amdvi_disable(void) { struct amdvi_ctrl *ctrl; struct amdvi_softc *softc; int i; for (i = 0; i < ivhd_count; i++) { softc = device_get_softc(ivhd_devs[i]); KASSERT(softc, ("softc is NULL\n")); ctrl = softc->ctrl; KASSERT(ctrl, ("ctrl is NULL\n")); ctrl->control = 0; } } static void amdvi_inv_tlb(void *arg) { struct amdvi_domain *domain; domain = (struct amdvi_domain *)arg; KASSERT(domain, ("domain is NULL")); amdvi_do_inv_domain(domain->id, false); } struct iommu_ops iommu_ops_amd = { amdvi_init, amdvi_cleanup, amdvi_enable, amdvi_disable, amdvi_create_domain, amdvi_destroy_domain, amdvi_create_mapping, amdvi_destroy_mapping, amdvi_add_device, amdvi_remove_device, amdvi_inv_tlb }; Index: head/sys/amd64/vmm/amd/amdvi_priv.h =================================================================== --- head/sys/amd64/vmm/amd/amdvi_priv.h (revision 329359) +++ head/sys/amd64/vmm/amd/amdvi_priv.h (revision 329360) @@ -1,395 +1,398 @@ /*- * Copyright (c) 2016 Anish Gupta (anish@freebsd.org) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _AMDVI_PRIV_H_ #define _AMDVI_PRIV_H_ +#include + #define BIT(n) (1ULL << (n)) /* Return value of bits[n:m] where n and (n >= ) m are bit positions. */ #define REG_BITS(x, n, m) (((x) >> (m)) & \ ((1 << (((n) - (m)) + 1)) - 1)) /* * IOMMU PCI capability. */ #define AMDVI_PCI_CAP_IOTLB BIT(0) /* IOTLB is supported. */ #define AMDVI_PCI_CAP_HT BIT(1) /* HyperTransport tunnel support. */ #define AMDVI_PCI_CAP_NPCACHE BIT(2) /* Not present page cached. */ #define AMDVI_PCI_CAP_EFR BIT(3) /* Extended features. */ #define AMDVI_PCI_CAP_EXT BIT(4) /* Miscellaneous information reg. */ /* * IOMMU extended features. */ #define AMDVI_EX_FEA_PREFSUP BIT(0) /* Prefetch command support. */ #define AMDVI_EX_FEA_PPRSUP BIT(1) /* PPR support */ #define AMDVI_EX_FEA_XTSUP BIT(2) /* Reserved */ #define AMDVI_EX_FEA_NXSUP BIT(3) /* No-execute. */ #define AMDVI_EX_FEA_GTSUP BIT(4) /* Guest translation support. */ #define AMDVI_EX_FEA_EFRW BIT(5) /* Reserved */ #define AMDVI_EX_FEA_IASUP BIT(6) /* Invalidate all command supp. */ #define AMDVI_EX_FEA_GASUP BIT(7) /* Guest APIC or AVIC support. */ #define AMDVI_EX_FEA_HESUP BIT(8) /* Hardware Error. */ #define AMDVI_EX_FEA_PCSUP BIT(9) /* Performance counters support. */ /* XXX: add more EFER bits. */ /* * Device table entry or DTE * NOTE: Must be 256-bits/32 bytes aligned. */ struct amdvi_dte { uint32_t dt_valid:1; /* Device Table valid. */ uint32_t pt_valid:1; /* Page translation valid. */ uint16_t :7; /* Reserved[8:2] */ uint8_t pt_level:3; /* Paging level, 0 to disable. */ uint64_t pt_base:40; /* Page table root pointer. */ uint8_t :3; /* Reserved[54:52] */ uint8_t gv_valid:1; /* Revision 2, GVA to SPA. */ uint8_t gv_level:2; /* Revision 2, GLX level. */ uint8_t gv_cr3_lsb:3; /* Revision 2, GCR3[14:12] */ uint8_t read_allow:1; /* I/O read enabled. */ uint8_t write_allow:1; /* I/O write enabled. */ uint8_t :1; /* Reserved[63] */ uint16_t domain_id:16; /* Domain ID */ uint16_t gv_cr3_lsb2:16; /* Revision 2, GCR3[30:15] */ uint8_t iotlb_enable:1; /* Device support IOTLB */ uint8_t sup_second_io_fault:1; /* Suppress subsequent I/O faults. */ uint8_t sup_all_io_fault:1; /* Suppress all I/O page faults. */ uint8_t IOctl:2; /* Port I/O control. */ uint8_t iotlb_cache_disable:1; /* IOTLB cache hints. */ uint8_t snoop_disable:1; /* Snoop disable. */ uint8_t allow_ex:1; /* Allow exclusion. */ uint8_t sysmgmt:2; /* System management message.*/ uint8_t :1; /* Reserved[106] */ uint32_t gv_cr3_msb:21; /* Revision 2, GCR3[51:31] */ uint8_t intmap_valid:1; /* Interrupt map valid. */ uint8_t intmap_len:4; /* Interrupt map table length. */ uint8_t intmap_ign:1; /* Ignore unmapped interrupts. */ uint64_t intmap_base:46; /* IntMap base. 
*/ uint8_t :4; /* Reserved[183:180] */ uint8_t init_pass:1; /* INIT pass through or PT */ uint8_t extintr_pass:1; /* External Interrupt PT */ uint8_t nmi_pass:1; /* NMI PT */ uint8_t :1; /* Reserved[187] */ uint8_t intr_ctrl:2; /* Interrupt control */ uint8_t lint0_pass:1; /* LINT0 PT */ uint8_t lint1_pass:1; /* LINT1 PT */ uint64_t :64; /* Reserved[255:192] */ } __attribute__((__packed__)); CTASSERT(sizeof(struct amdvi_dte) == 32); /* * IOMMU command entry. */ struct amdvi_cmd { uint32_t word0; uint32_t word1:28; uint8_t opcode:4; uint64_t addr; } __attribute__((__packed__)); /* Command opcodes. */ #define AMDVI_CMP_WAIT_OPCODE 0x1 /* Completion wait. */ #define AMDVI_INVD_DTE_OPCODE 0x2 /* Invalidate device table entry. */ #define AMDVI_INVD_PAGE_OPCODE 0x3 /* Invalidate pages. */ #define AMDVI_INVD_IOTLB_OPCODE 0x4 /* Invalidate IOTLB pages. */ #define AMDVI_INVD_INTR_OPCODE 0x5 /* Invalidate Interrupt table. */ #define AMDVI_PREFETCH_PAGES_OPCODE 0x6 /* Prefetch IOMMU pages. */ #define AMDVI_COMP_PPR_OPCODE 0x7 /* Complete PPR request. */ #define AMDVI_INV_ALL_OPCODE 0x8 /* Invalidate all. */ /* Completion wait attributes. */ #define AMDVI_CMP_WAIT_STORE BIT(0) /* Write back data. */ #define AMDVI_CMP_WAIT_INTR BIT(1) /* Completion wait interrupt. */ #define AMDVI_CMP_WAIT_FLUSH BIT(2) /* Flush queue. */ /* Invalidate page. */ #define AMDVI_INVD_PAGE_S BIT(0) /* Invalidation size. */ #define AMDVI_INVD_PAGE_PDE BIT(1) /* Invalidate PDE. */ #define AMDVI_INVD_PAGE_GN_GVA BIT(2) /* GPA or GVA. */ #define AMDVI_INVD_PAGE_ALL_ADDR (0x7FFFFFFFFFFFFULL << 12) /* Invalidate IOTLB. */ #define AMDVI_INVD_IOTLB_S BIT(0) /* Invalidation size 4k or addr */ #define AMDVI_INVD_IOTLB_GN_GVA BIT(2) /* GPA or GVA. */ #define AMDVI_INVD_IOTLB_ALL_ADDR (0x7FFFFFFFFFFFFULL << 12) /* XXX: add more command entries. */ /* * IOMMU event entry. */ struct amdvi_event { uint16_t devid; uint16_t pasid_hi; uint16_t pasid_domid; /* PASID low or DomainID */ uint16_t flag:12; uint8_t opcode:4; uint64_t addr; } __attribute__((__packed__)); CTASSERT(sizeof(struct amdvi_event) == 16); /* Various event types. */ #define AMDVI_EVENT_INVALID_DTE 0x1 #define AMDVI_EVENT_PFAULT 0x2 #define AMDVI_EVENT_DTE_HW_ERROR 0x3 #define AMDVI_EVENT_PAGE_HW_ERROR 0x4 #define AMDVI_EVENT_ILLEGAL_CMD 0x5 #define AMDVI_EVENT_CMD_HW_ERROR 0x6 #define AMDVI_EVENT_IOTLB_TIMEOUT 0x7 #define AMDVI_EVENT_INVALID_DTE_REQ 0x8 #define AMDVI_EVENT_INVALID_PPR_REQ 0x9 #define AMDVI_EVENT_COUNTER_ZERO 0xA #define AMDVI_EVENT_FLAG_MASK 0x1FF /* Mask for event flags. */ #define AMDVI_EVENT_FLAG_TYPE(x) (((x) >> 9) & 0x3) /* * IOMMU control block. */ struct amdvi_ctrl { struct { uint16_t size:9; uint16_t :3; uint64_t base:40; /* Devtable register base. */ uint16_t :12; } dte; struct { uint16_t :12; uint64_t base:40; uint8_t :4; uint8_t len:4; uint8_t :4; } cmd; struct { uint16_t :12; uint64_t base:40; uint8_t :4; uint8_t len:4; uint8_t :4; } event; uint16_t control :13; uint64_t :51; struct { uint8_t enable:1; uint8_t allow:1; uint16_t :10; uint64_t base:40; uint16_t :12; uint16_t :12; uint64_t limit:40; uint16_t :12; } excl; /* * Revision 2 only. */ uint64_t ex_feature; struct { uint16_t :12; uint64_t base:40; uint8_t :4; uint8_t len:4; uint8_t :4; } ppr; uint64_t first_event; uint64_t second_event; uint64_t event_status; /* Revision 2 only, end. */ uint8_t pad1[0x1FA8]; /* Padding. 
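This padding places cmd_head at offset 0x2000 (0x58 + 0x1FA8), so the head/tail registers below line up with the hardware MMIO layout; the CTASSERTs following the structure check the resulting offsets.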
*/ uint32_t cmd_head:19; uint64_t :45; uint32_t cmd_tail:19; uint64_t :45; uint32_t evt_head:19; uint64_t :45; uint32_t evt_tail:19; uint64_t :45; uint32_t status:19; uint64_t :45; uint64_t pad2; uint8_t :4; uint16_t ppr_head:15; uint64_t :45; uint8_t :4; uint16_t ppr_tail:15; uint64_t :45; uint8_t pad3[0x1FC0]; /* Padding. */ /* XXX: More for rev2. */ } __attribute__((__packed__)); CTASSERT(offsetof(struct amdvi_ctrl, pad1)== 0x58); CTASSERT(offsetof(struct amdvi_ctrl, pad2)== 0x2028); CTASSERT(offsetof(struct amdvi_ctrl, pad3)== 0x2040); #define AMDVI_MMIO_V1_SIZE (4 * PAGE_SIZE) /* v1 size */ /* * AMF IOMMU v2 size including event counters */ #define AMDVI_MMIO_V2_SIZE (8 * PAGE_SIZE) CTASSERT(sizeof(struct amdvi_ctrl) == 0x4000); CTASSERT(sizeof(struct amdvi_ctrl) == AMDVI_MMIO_V1_SIZE); /* IVHD flag */ #define IVHD_FLAG_HTT BIT(0) /* Hypertransport Tunnel. */ #define IVHD_FLAG_PPW BIT(1) /* Pass posted write. */ #define IVHD_FLAG_RPPW BIT(2) /* Response pass posted write. */ #define IVHD_FLAG_ISOC BIT(3) /* Isoc support. */ #define IVHD_FLAG_IOTLB BIT(4) /* IOTLB support. */ #define IVHD_FLAG_COH BIT(5) /* Coherent control, default 1 */ #define IVHD_FLAG_PFS BIT(6) /* Prefetch IOMMU pages. */ #define IVHD_FLAG_PPRS BIT(7) /* Peripheral page support. */ /* IVHD device entry data setting. */ #define IVHD_DEV_LINT0_PASS BIT(6) /* LINT0 interrupts. */ #define IVHD_DEV_LINT1_PASS BIT(7) /* LINT1 interrupts. */ /* Bit[5:4] for System Mgmt. Bit3 is reserved. */ #define IVHD_DEV_INIT_PASS BIT(0) /* INIT */ #define IVHD_DEV_EXTINTR_PASS BIT(1) /* ExtInt */ #define IVHD_DEV_NMI_PASS BIT(2) /* NMI */ /* IVHD 8-byte extended data settings. */ #define IVHD_DEV_EXT_ATS_DISABLE BIT(31) /* Disable ATS */ /* IOMMU control register. */ #define AMDVI_CTRL_EN BIT(0) /* IOMMU enable. */ #define AMDVI_CTRL_HTT BIT(1) /* Hypertransport tunnel enable. */ #define AMDVI_CTRL_ELOG BIT(2) /* Event log enable. */ #define AMDVI_CTRL_ELOGINT BIT(3) /* Event log interrupt. */ #define AMDVI_CTRL_COMINT BIT(4) /* Completion wait interrupt. */ #define AMDVI_CTRL_PPW BIT(8) #define AMDVI_CTRL_RPPW BIT(9) #define AMDVI_CTRL_COH BIT(10) #define AMDVI_CTRL_ISOC BIT(11) #define AMDVI_CTRL_CMD BIT(12) /* Command buffer enable. */ #define AMDVI_CTRL_PPRLOG BIT(13) #define AMDVI_CTRL_PPRINT BIT(14) #define AMDVI_CTRL_PPREN BIT(15) #define AMDVI_CTRL_GTE BIT(16) /* Guest translation enable. */ #define AMDVI_CTRL_GAE BIT(17) /* Guest APIC enable. */ /* Invalidation timeout. */ #define AMDVI_CTRL_INV_NO_TO 0 /* No timeout. */ #define AMDVI_CTRL_INV_TO_1ms 1 /* 1 ms */ #define AMDVI_CTRL_INV_TO_10ms 2 /* 10 ms */ #define AMDVI_CTRL_INV_TO_100ms 3 /* 100 ms */ #define AMDVI_CTRL_INV_TO_1S 4 /* 1 second */ #define AMDVI_CTRL_INV_TO_10S 5 /* 10 second */ #define AMDVI_CTRL_INV_TO_100S 6 /* 100 second */ /* * Max number of PCI devices. * 256 bus x 32 slot/devices x 8 functions. */ #define PCI_NUM_DEV_MAX 0x10000 /* Maximum number of domains supported by IOMMU. */ #define AMDVI_MAX_DOMAIN (BIT(16) - 1) /* * IOMMU Page Table attributes. */ #define AMDVI_PT_PRESENT BIT(0) #define AMDVI_PT_COHERENT BIT(60) #define AMDVI_PT_READ BIT(61) #define AMDVI_PT_WRITE BIT(62) #define AMDVI_PT_RW (AMDVI_PT_READ | AMDVI_PT_WRITE) #define AMDVI_PT_MASK 0xFFFFFFFFFF000UL /* Only [51:12] for PA */ #define AMDVI_PD_LEVEL_SHIFT 9 #define AMDVI_PD_SUPER(x) (((x) >> AMDVI_PD_LEVEL_SHIFT) == 7) /* * IOMMU Status, offset 0x2020 */ #define AMDVI_STATUS_EV_OF BIT(0) /* Event overflow. */ #define AMDVI_STATUS_EV_INTR BIT(1) /* Event interrupt. 
*/ /* Completion wait command completed. */ #define AMDVI_STATUS_CMP BIT(2) #define IVRS_CTRL_RID 1 /* MMIO RID */ /* ACPI IVHD */ struct ivhd_dev_cfg { uint32_t start_id; uint32_t end_id; uint8_t data; /* Device configuration. */ bool enable_ats; /* ATS enabled for the device. */ int ats_qlen; /* ATS invalidation queue depth. */ }; struct amdvi_domain { uint64_t *ptp; /* Highest level page table */ int ptp_level; /* Level of page tables */ u_int id; /* Domain id */ SLIST_ENTRY (amdvi_domain) next; }; /* * AMD IOMMU softc. */ struct amdvi_softc { struct amdvi_ctrl *ctrl; /* Control area. */ device_t dev; /* IOMMU device. */ + enum AcpiIvrsType ivhd_type; /* IOMMU IVHD type 0x10/0x11 or 0x40 */ bool iotlb; /* IOTLB supported by IOMMU */ struct amdvi_cmd *cmd; /* Command descriptor area. */ int cmd_max; /* Max number of commands. */ uint64_t cmp_data; /* Command completion write back. */ struct amdvi_event *event; /* Event descriptor area. */ struct resource *event_res; /* Event interrupt resource. */ void *event_tag; /* Event interrupt tag. */ int event_max; /* Max number of events. */ int event_irq; int event_rid; /* ACPI various flags. */ uint32_t ivhd_flag; /* ACPI IVHD flag. */ - uint32_t ivhd_efr; /* ACPI v1 Reserved or v2 EFR . */ + uint32_t ivhd_feature; /* ACPI v1 Reserved or v2 attribute. */ + uint64_t ext_feature; /* IVHD EFR */ /* PCI related. */ uint16_t cap_off; /* PCI Capability offset. */ uint8_t pci_cap; /* PCI capability. */ - uint64_t pci_efr; /* PCI EFR for rev2.0 */ uint16_t pci_seg; /* IOMMU PCI domain/segment. */ uint16_t pci_rid; /* PCI BDF of IOMMU */ /* Device range under this IOMMU. */ uint16_t start_dev_rid; /* First device under this IOMMU. */ uint16_t end_dev_rid; /* Last device under this IOMMU. */ /* BIOS provided device configuration for end points. */ struct ivhd_dev_cfg dev_cfg[10]; int dev_cfg_cnt; /* Software statistics. */ uint64_t event_intr_cnt; /* Total event INTR count. */ uint64_t total_cmd; /* Total number of commands. */ }; int amdvi_setup_hw(struct amdvi_softc *softc); int amdvi_teardown_hw(struct amdvi_softc *softc); #endif /* _AMDVI_PRIV_H_ */ Index: head/sys/amd64/vmm/amd/ivrs_drv.c =================================================================== --- head/sys/amd64/vmm/amd/ivrs_drv.c (revision 329359) +++ head/sys/amd64/vmm/amd/ivrs_drv.c (revision 329360) @@ -1,518 +1,727 @@ /*- * Copyright (c) 2016, Anish Gupta (anish@freebsd.org) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include "io/iommu.h" #include "amdvi_priv.h" device_t *ivhd_devs; /* IVHD or AMD-Vi device list. */ -int ivhd_count; /* Number of IVHD or AMD-Vi devices. */ +int ivhd_count; /* Number of IVHD header. */ +/* + * Cached IVHD header list. + * Single entry for each IVHD, filtered the legacy one. + */ +ACPI_IVRS_HARDWARE *ivhd_hdrs[10]; extern int amdvi_ptp_level; /* Page table levels. */ -typedef int (*ivhd_iter_t)(ACPI_IVRS_HEADER * ptr, void *arg); - +typedef int (*ivhd_iter_t)(ACPI_IVRS_HEADER *ptr, void *arg); /* * Iterate IVRS table for IVHD and IVMD device type. */ static void ivrs_hdr_iterate_tbl(ivhd_iter_t iter, void *arg) { ACPI_TABLE_IVRS *ivrs; ACPI_IVRS_HEADER *ivrs_hdr, *end; ACPI_STATUS status; status = AcpiGetTable(ACPI_SIG_IVRS, 1, (ACPI_TABLE_HEADER **)&ivrs); if (ACPI_FAILURE(status)) return; if (ivrs->Header.Length == 0) { return; } ivrs_hdr = (ACPI_IVRS_HEADER *)(ivrs + 1); end = (ACPI_IVRS_HEADER *)((char *)ivrs + ivrs->Header.Length); while (ivrs_hdr < end) { if ((uint8_t *)ivrs_hdr + ivrs_hdr->Length > (uint8_t *)end) { printf("AMD-Vi:IVHD/IVMD is corrupted, length : %d\n", ivrs_hdr->Length); break; } switch (ivrs_hdr->Type) { case ACPI_IVRS_TYPE_HARDWARE: /* Legacy */ case 0x11: case 0x40: /* ACPI HID */ if (!iter(ivrs_hdr, arg)) return; break; case ACPI_IVRS_TYPE_MEMORY1: case ACPI_IVRS_TYPE_MEMORY2: case ACPI_IVRS_TYPE_MEMORY3: if (!iter(ivrs_hdr, arg)) return; break; default: printf("AMD-Vi:Not IVHD/IVMD type(%d)", ivrs_hdr->Type); } ivrs_hdr = (ACPI_IVRS_HEADER *)((uint8_t *)ivrs_hdr + ivrs_hdr->Length); } } -static int +static bool ivrs_is_ivhd(UINT8 type) { - if ((type == ACPI_IVRS_TYPE_HARDWARE) || (type == 0x11) || (type == 0x40)) - return (1); + switch(type) { + case ACPI_IVRS_TYPE_HARDWARE: + case ACPI_IVRS_TYPE_HARDWARE_EXT1: + case ACPI_IVRS_TYPE_HARDWARE_EXT2: + return (true); - return (0); + default: + return (false); + } } /* Count the number of AMD-Vi devices in the system. */ static int ivhd_count_iter(ACPI_IVRS_HEADER * ivrs_he, void *arg) { if (ivrs_is_ivhd(ivrs_he->Type)) ivhd_count++; return (1); } struct find_ivrs_hdr_args { int i; ACPI_IVRS_HEADER *ptr; }; static int ivrs_hdr_find_iter(ACPI_IVRS_HEADER * ivrs_hdr, void *args) { struct find_ivrs_hdr_args *fi; fi = (struct find_ivrs_hdr_args *)args; if (ivrs_is_ivhd(ivrs_hdr->Type)) { if (fi->i == 0) { fi->ptr = ivrs_hdr; return (0); } fi->i--; } return (1); } static ACPI_IVRS_HARDWARE * ivhd_find_by_index(int idx) { struct find_ivrs_hdr_args fi; fi.i = idx; fi.ptr = NULL; ivrs_hdr_iterate_tbl(ivrs_hdr_find_iter, &fi); return ((ACPI_IVRS_HARDWARE *)fi.ptr); } static void ivhd_dev_add_entry(struct amdvi_softc *softc, uint32_t start_id, uint32_t end_id, uint8_t cfg, bool ats) { struct ivhd_dev_cfg *dev_cfg; /* If device doesn't have special data, don't add it. 
*/ if (!cfg) return; dev_cfg = &softc->dev_cfg[softc->dev_cfg_cnt++]; dev_cfg->start_id = start_id; dev_cfg->end_id = end_id; dev_cfg->data = cfg; dev_cfg->enable_ats = ats; } /* * Record device attributes as suggested by BIOS. */ static int -ivhd_dev_parse(ACPI_IVRS_HARDWARE * ivhd, struct amdvi_softc *softc) +ivhd_dev_parse(ACPI_IVRS_HARDWARE* ivhd, struct amdvi_softc *softc) { ACPI_IVRS_DE_HEADER *de; uint8_t *p, *end; int range_start_id = 0, range_end_id = 0; uint32_t *extended; uint8_t all_data = 0, range_data = 0; bool range_enable_ats = false, enable_ats; softc->start_dev_rid = ~0; softc->end_dev_rid = 0; - /* - * XXX The following actually depends on Header.Type and - * is only true for 0x10. - */ - p = (uint8_t *)ivhd + sizeof(ACPI_IVRS_HARDWARE); + switch (ivhd->Header.Type) { + case ACPI_IVRS_TYPE_HARDWARE_EXT1: + case ACPI_IVRS_TYPE_HARDWARE_EXT2: + p = (uint8_t *)ivhd + sizeof(ACPI_IVRS_HARDWARE_NEW); + de = (ACPI_IVRS_DE_HEADER *) ((uint8_t *)ivhd + + sizeof(ACPI_IVRS_HARDWARE_NEW)); + break; + + case ACPI_IVRS_TYPE_HARDWARE: + p = (uint8_t *)ivhd + sizeof(ACPI_IVRS_HARDWARE); + de = (ACPI_IVRS_DE_HEADER *) ((uint8_t *)ivhd + + sizeof(ACPI_IVRS_HARDWARE)); + break; + + default: + device_printf(softc->dev, + "unknown type: 0x%x\n", ivhd->Header.Type); + return (-1); + } + end = (uint8_t *)ivhd + ivhd->Header.Length; while (p < end) { de = (ACPI_IVRS_DE_HEADER *)p; softc->start_dev_rid = MIN(softc->start_dev_rid, de->Id); softc->end_dev_rid = MAX(softc->end_dev_rid, de->Id); switch (de->Type) { case ACPI_IVRS_TYPE_ALL: all_data = de->DataSetting; break; case ACPI_IVRS_TYPE_SELECT: case ACPI_IVRS_TYPE_ALIAS_SELECT: case ACPI_IVRS_TYPE_EXT_SELECT: enable_ats = false; if (de->Type == ACPI_IVRS_TYPE_EXT_SELECT) { extended = (uint32_t *)(de + 1); enable_ats = (*extended & IVHD_DEV_EXT_ATS_DISABLE) ? false : true; } ivhd_dev_add_entry(softc, de->Id, de->Id, de->DataSetting | all_data, enable_ats); break; case ACPI_IVRS_TYPE_START: case ACPI_IVRS_TYPE_ALIAS_START: case ACPI_IVRS_TYPE_EXT_START: range_start_id = de->Id; range_data = de->DataSetting; if (de->Type == ACPI_IVRS_TYPE_EXT_START) { extended = (uint32_t *)(de + 1); range_enable_ats = (*extended & IVHD_DEV_EXT_ATS_DISABLE) ? false : true; } break; case ACPI_IVRS_TYPE_END: range_end_id = de->Id; ivhd_dev_add_entry(softc, range_start_id, range_end_id, range_data | all_data, range_enable_ats); range_start_id = range_end_id = 0; range_data = 0; all_data = 0; break; case ACPI_IVRS_TYPE_PAD4: break; case ACPI_IVRS_TYPE_SPECIAL: /* HPET or IOAPIC */ break; default: if ((de->Type < 5) || (de->Type >= ACPI_IVRS_TYPE_PAD8)) device_printf(softc->dev, "Unknown dev entry:0x%x\n", de->Type); } if (softc->dev_cfg_cnt > (sizeof(softc->dev_cfg) / sizeof(softc->dev_cfg[0]))) { device_printf(softc->dev, "WARN Too many device entries.\n"); return (EINVAL); } if (de->Type < 0x40) p += sizeof(ACPI_IVRS_DEVICE4); else if (de->Type < 0x80) p += sizeof(ACPI_IVRS_DEVICE8A); else { printf("Variable size IVHD type 0x%x not supported\n", de->Type); break; } } KASSERT((softc->end_dev_rid >= softc->start_dev_rid), ("Device end[0x%x] < start[0x%x.\n", softc->end_dev_rid, softc->start_dev_rid)); return (0); } +static bool +ivhd_is_newer(ACPI_IVRS_HEADER *old, ACPI_IVRS_HEADER *new) +{ + /* + * Newer IVRS header type take precedence. 
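+ * When a legacy type 0x10 IVHD and a type 0x11/0x40 IVHD describe the same DeviceId, the caller keeps only the newer entry.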
+ */ + if ((old->DeviceId == new->DeviceId) && + (old->Type == ACPI_IVRS_TYPE_HARDWARE) && + ((new->Type == ACPI_IVRS_TYPE_HARDWARE_EXT1) || + (new->Type == ACPI_IVRS_TYPE_HARDWARE_EXT2))) { + return (true); + } + + return (false); +} + static void ivhd_identify(driver_t *driver, device_t parent) { ACPI_TABLE_IVRS *ivrs; ACPI_IVRS_HARDWARE *ivhd; ACPI_STATUS status; - uint32_t info; int i, count = 0; + uint32_t ivrs_ivinfo; if (acpi_disabled("ivhd")) return; status = AcpiGetTable(ACPI_SIG_IVRS, 1, (ACPI_TABLE_HEADER **)&ivrs); if (ACPI_FAILURE(status)) return; if (ivrs->Header.Length == 0) { return; } - info = ivrs->Info; - printf("AMD-Vi IVRS VAsize = %d PAsize = %d GVAsize = %d flags:%b\n", - REG_BITS(info, 21, 15), REG_BITS(info, 14, 8), - REG_BITS(info, 7, 5), REG_BITS(info, 22, 22), - "\020\001HtAtsResv"); + ivrs_ivinfo = ivrs->Info; + printf("AMD-Vi: IVRS Info VAsize = %d PAsize = %d GVAsize = %d" + " flags:%b\n", + REG_BITS(ivrs_ivinfo, 21, 15), REG_BITS(ivrs_ivinfo, 14, 8), + REG_BITS(ivrs_ivinfo, 7, 5), REG_BITS(ivrs_ivinfo, 22, 22), + "\020\001EFRSup"); ivrs_hdr_iterate_tbl(ivhd_count_iter, NULL); if (!ivhd_count) return; - ivhd_devs = malloc(sizeof(device_t) * ivhd_count, M_DEVBUF, - M_WAITOK | M_ZERO); for (i = 0; i < ivhd_count; i++) { ivhd = ivhd_find_by_index(i); - if (ivhd == NULL) { - printf("Can't find IVHD entry%d\n", i); - continue; + KASSERT(ivhd, ("ivhd%d is NULL\n", i)); + ivhd_hdrs[i] = ivhd; + } + + /* + * Scan for presence of legacy and non-legacy device type + * for same AMD-Vi device and override the old one. + */ + for (i = ivhd_count - 1 ; i > 0 ; i--){ + if (ivhd_is_newer(&ivhd_hdrs[i-1]->Header, + &ivhd_hdrs[i]->Header)) { + ivhd_hdrs[i-1] = ivhd_hdrs[i]; + ivhd_count--; } + } + ivhd_devs = malloc(sizeof(device_t) * ivhd_count, M_DEVBUF, + M_WAITOK | M_ZERO); + for (i = 0; i < ivhd_count; i++) { + ivhd = ivhd_hdrs[i]; + KASSERT(ivhd, ("ivhd%d is NULL\n", i)); + /* * Use a high order to ensure that this driver is probed after * the Host-PCI bridge and the root PCI bus. */ ivhd_devs[i] = BUS_ADD_CHILD(parent, ACPI_DEV_BASE_ORDER + 10 * 10, "ivhd", i); /* * XXX: In case device was not destroyed before, add will fail. * locate the old device instance. */ if (ivhd_devs[i] == NULL) { ivhd_devs[i] = device_find_child(parent, "ivhd", i); if (ivhd_devs[i] == NULL) { - printf("AMD-Vi: cant find AMD-Vi dev%d\n", i); + printf("AMD-Vi: can't find ivhd%d\n", i); break; } } count++; } /* * Update device count in case failed to attach. */ ivhd_count = count; } static int ivhd_probe(device_t dev) { + ACPI_IVRS_HARDWARE *ivhd; + int unit; if (acpi_get_handle(dev) != NULL) return (ENXIO); - device_set_desc(dev, "AMD-Vi/IOMMU or ivhd"); + unit = device_get_unit(dev); + KASSERT((unit < ivhd_count), + ("ivhd unit %d > count %d", unit, ivhd_count)); + ivhd = ivhd_hdrs[unit]; + KASSERT(ivhd, ("ivhd is NULL")); + + if (ivhd->Header.Type == ACPI_IVRS_TYPE_HARDWARE) + device_set_desc(dev, "AMD-Vi/IOMMU ivhd"); + else + device_set_desc(dev, "AMD-Vi/IOMMU ivhd with EFR"); + return (BUS_PROBE_NOWILDCARD); } +static void +ivhd_print_flag(device_t dev, enum AcpiIvrsType ivhd_type, uint8_t flag) +{ + /* + * The legacy IVHD type has two extra high bits in its flags which have + * been moved to the EFR for the non-legacy types. 
+ */ + switch (ivhd_type) { + case ACPI_IVRS_TYPE_HARDWARE: + device_printf(dev, "Flag:%b\n", flag, + "\020" + "\001HtTunEn" + "\002PassPW" + "\003ResPassPW" + "\004Isoc" + "\005IotlbSup" + "\006Coherent" + "\007PreFSup" + "\010PPRSup"); + break; + + case ACPI_IVRS_TYPE_HARDWARE_EXT1: + case ACPI_IVRS_TYPE_HARDWARE_EXT2: + device_printf(dev, "Flag:%b\n", flag, + "\020" + "\001HtTunEn" + "\002PassPW" + "\003ResPassPW" + "\004Isoc" + "\005IotlbSup" + "\006Coherent"); + break; + + default: + device_printf(dev, "Can't decode flag of ivhd type: 0x%x\n", + ivhd_type); + break; + } +} + +/* + * Feature in legacy IVHD type (0x10) and attribute in newer types (0x11 and 0x40). + */ +static void +ivhd_print_feature(device_t dev, enum AcpiIvrsType ivhd_type, uint32_t feature) +{ + switch (ivhd_type) { + case ACPI_IVRS_TYPE_HARDWARE: + device_printf(dev, "Features(type:0x%x) HATS = %d GATS = %d" + " MsiNumPPR = %d PNBanks= %d PNCounters= %d\n", + ivhd_type, + REG_BITS(feature, 31, 30), + REG_BITS(feature, 29, 28), + REG_BITS(feature, 27, 23), + REG_BITS(feature, 22, 17), + REG_BITS(feature, 16, 13)); + device_printf(dev, "max PASID = %d GLXSup = %d Feature:%b\n", + REG_BITS(feature, 12, 8), + REG_BITS(feature, 4, 3), + feature, + "\020" + "\002NXSup" + "\003GTSup" + "\004" + "\005IASup" + "\006GASup" + "\007HESup"); + break; + + /* Fewer features or attributes are reported in non-legacy type. */ + case ACPI_IVRS_TYPE_HARDWARE_EXT1: + case ACPI_IVRS_TYPE_HARDWARE_EXT2: + device_printf(dev, "Features(type:0x%x) MsiNumPPR = %d" + " PNBanks= %d PNCounters= %d\n", + ivhd_type, + REG_BITS(feature, 27, 23), + REG_BITS(feature, 22, 17), + REG_BITS(feature, 16, 13)); + break; + + default: /* Other ivhd type features are not decoded. */ + device_printf(dev, "Can't decode ivhd type: 0x%x\n", ivhd_type); + } +} + +/* Print extended features of IOMMU. */ +static void +ivhd_print_ext_feature(device_t dev, uint64_t ext_feature) +{ + uint32_t ext_low, ext_high; + + if (!ext_feature) + return; + + ext_low = ext_feature; + device_printf(dev, "Extended features[31:0]:%b " + "HATS = 0x%x GATS = 0x%x " + "GLXSup = 0x%x SmiFSup = 0x%x SmiFRC = 0x%x " + "GAMSup = 0x%x DualPortLogSup = 0x%x DualEventLogSup = 0x%x\n", + (int)ext_low, + "\020" + "\001PreFSup" + "\002PPRSup" + "\003" + "\004NXSup" + "\005GTSup" + "\006" + "\007IASup" + "\010GASup" + "\011HESup" + "\012PCSup", + REG_BITS(ext_low, 11, 10), + REG_BITS(ext_low, 13, 12), + REG_BITS(ext_low, 15, 14), + REG_BITS(ext_low, 17, 16), + REG_BITS(ext_low, 20, 18), + REG_BITS(ext_low, 23, 21), + REG_BITS(ext_low, 25, 24), + REG_BITS(ext_low, 29, 28)); + + ext_high = ext_feature >> 32; + device_printf(dev, "Extended features[62:32]:%b " + "Max PASID: 0x%x DevTblSegSup = 0x%x " + "MarcSup = 0x%x\n", + (int)(ext_high), + "\020" + "\006USSup" + "\009PprOvrflwEarlySup" + "\010PPRAutoRspSup" + "\013BlKStopMrkSup" + "\014PerfOptSup" + "\015MsiCapMmioSup" + "\017GIOSup" + "\018HASup" + "\019EPHSup" + "\020AttrFWSup" + "\021HDSup" + "\023InvIotlbSup", + REG_BITS(ext_high, 5, 0), + REG_BITS(ext_high, 8, 7), + REG_BITS(ext_high, 11, 10)); +} + static int ivhd_print_cap(struct amdvi_softc *softc, ACPI_IVRS_HARDWARE * ivhd) { device_t dev; int max_ptp_level; dev = softc->dev; - device_printf(dev, "Flag:%b\n", softc->ivhd_flag, - "\020\001HtTunEn\002PassPW\003ResPassPW\004Isoc\005IotlbSup" - "\006Coherent\007PreFSup\008PPRSup"); - /* - * If no extended feature[EFR], its rev1 with maximum paging level as 7.
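The printing helpers above rely on two idioms: REG_BITS(reg, high, low), which extracts an inclusive bit range, and the FreeBSD kernel printf %b conversion, whose control string begins with the output base (\020 is octal for 16, i.e. hex) followed by one-based bit positions given as raw byte values, conventionally written as octal escapes (which is why bit position 8 must be spelled \010, not \008). A self-contained sketch of the bit-range extraction; the BITS macro and the main() demo are illustrative, not the driver's actual REG_BITS definition:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for REG_BITS(reg, high, low): bits high..low, inclusive. */
#define BITS(reg, high, low) \
    (((reg) >> (low)) & ((1U << ((high) - (low) + 1)) - 1))

int
main(void)
{
    uint32_t feature = 0xC0000000u;    /* bits 31:30 set */

    /* Matches what REG_BITS(feature, 31, 30) yields above: prints HATS = 3. */
    printf("HATS = %u\n", BITS(feature, 31, 30));
    return (0);
}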
- */ + + ivhd_print_flag(dev, softc->ivhd_type, softc->ivhd_flag); + ivhd_print_feature(dev, softc->ivhd_type, softc->ivhd_feature); + ivhd_print_ext_feature(dev, softc->ext_feature); max_ptp_level = 7; - if (softc->ivhd_efr) { - device_printf(dev, "EFR HATS = %d GATS = %d GLXSup = %d " - "MsiNumPr = %d PNBanks= %d PNCounters= %d\n" - "max PASID = %d EFR: %b \n", - REG_BITS(softc->ivhd_efr, 31, 30), - REG_BITS(softc->ivhd_efr, 29, 28), - REG_BITS(softc->ivhd_efr, 4, 3), - REG_BITS(softc->ivhd_efr, 27, 23), - REG_BITS(softc->ivhd_efr, 22, 17), - REG_BITS(softc->ivhd_efr, 16, 13), - REG_BITS(softc->ivhd_efr, 12, 8), - softc->ivhd_efr, "\020\001XTSup\002NXSup\003GTSup\005IASup" - "\006GASup\007HESup\008PPRSup"); - - max_ptp_level = REG_BITS(softc->ivhd_efr, 31, 30) + 4; - } - /* Make sure device support minimum page level as requested by user. */ if (max_ptp_level < amdvi_ptp_level) { - device_printf(dev, "Insufficient PTP level:%d\n", - max_ptp_level); + device_printf(dev, "insufficient PTP level:%d\n", + max_ptp_level); return (EINVAL); + } else { + device_printf(softc->dev, "supported paging level:%d, will use only: %d\n", + max_ptp_level, amdvi_ptp_level); } - device_printf(softc->dev, "max supported paging level:%d restricting to: %d\n", - max_ptp_level, amdvi_ptp_level); - device_printf(softc->dev, "device supported range " - "[0x%x - 0x%x]\n", softc->start_dev_rid, softc->end_dev_rid); + device_printf(softc->dev, "device range: 0x%x - 0x%x\n", + softc->start_dev_rid, softc->end_dev_rid); return (0); } static int ivhd_attach(device_t dev) { ACPI_IVRS_HARDWARE *ivhd; + ACPI_IVRS_HARDWARE_NEW *ivhd1; struct amdvi_softc *softc; int status, unit; unit = device_get_unit(dev); + KASSERT((unit < ivhd_count), + ("ivhd unit %d > count %d", unit, ivhd_count)); /* Make sure its same device for which attach is called. */ - if (ivhd_devs[unit] != dev) - panic("Not same device old %p new %p", ivhd_devs[unit], dev); + KASSERT((ivhd_devs[unit] == dev), + ("Not same device old %p new %p", ivhd_devs[unit], dev)); softc = device_get_softc(dev); softc->dev = dev; - ivhd = ivhd_find_by_index(unit); - if (ivhd == NULL) - return (EINVAL); + ivhd = ivhd_hdrs[unit]; + KASSERT(ivhd, ("ivhd is NULL")); + softc->ivhd_type = ivhd->Header.Type; softc->pci_seg = ivhd->PciSegmentGroup; softc->pci_rid = ivhd->Header.DeviceId; softc->ivhd_flag = ivhd->Header.Flags; - softc->ivhd_efr = ivhd->Reserved; /* + * On legacy IVHD type (0x10), it is documented as a feature + * but in newer types it is an attribute. + */ + softc->ivhd_feature = ivhd->Reserved; + /* * PCI capability has more capabilities that are not part of IVRS. */ softc->cap_off = ivhd->CapabilityOffset; #ifdef notyet /* IVHD Info bit[4:0] is event MSI/X number.
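The attach path above reads ivhd->Reserved through the legacy ACPI_IVRS_HARDWARE layout even when the block is really the longer 0x11/0x40 layout; that is only safe because the two structures (defined in actbl2.h later in this diff) are identical up to that offset. A self-contained sketch that documents the assumption with local packed mirrors and compile-time checks; the struct names here are illustrative copies, not the ACPICA definitions themselves:

#include <stddef.h>
#include <stdint.h>

/* Local packed mirrors of ACPI_IVRS_HARDWARE and ACPI_IVRS_HARDWARE_NEW. */
#pragma pack(1)
struct ivhd_legacy {                /* type 0x10 */
    uint8_t     Type;
    uint8_t     Flags;
    uint16_t    Length;
    uint16_t    DeviceId;
    uint16_t    CapabilityOffset;
    uint64_t    BaseAddress;
    uint16_t    PciSegmentGroup;
    uint16_t    Info;
    uint32_t    FeatureReporting;   /* named "Reserved" by ACPICA */
};
struct ivhd_efr {                   /* types 0x11 and 0x40 */
    uint8_t     Type;
    uint8_t     Flags;
    uint16_t    Length;
    uint16_t    DeviceId;
    uint16_t    CapabilityOffset;
    uint64_t    BaseAddress;
    uint16_t    PciSegmentGroup;
    uint16_t    Info;
    uint32_t    Attr;
    uint64_t    ExtFR;
    uint64_t    Reserved;
};
#pragma pack()

/* IVHD block sizes per the IVRS spec: 24 bytes for 0x10, 40 bytes for 0x11/0x40. */
_Static_assert(sizeof(struct ivhd_legacy) == 24, "legacy IVHD block size");
_Static_assert(sizeof(struct ivhd_efr) == 40, "EFR IVHD block size");
/* The v1 feature word and the v2 attribute word share offset 20. */
_Static_assert(offsetof(struct ivhd_legacy, FeatureReporting) ==
    offsetof(struct ivhd_efr, Attr), "Feature/Attr overlap");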
*/ softc->event_msix = ivhd->Info & 0x1F; #endif + switch (ivhd->Header.Type) { + case ACPI_IVRS_TYPE_HARDWARE_EXT1: + case ACPI_IVRS_TYPE_HARDWARE_EXT2: + ivhd1 = (ACPI_IVRS_HARDWARE_NEW *)ivhd; + softc->ext_feature = ivhd1->ExtFR; + break; + + } + softc->ctrl = (struct amdvi_ctrl *) PHYS_TO_DMAP(ivhd->BaseAddress); status = ivhd_dev_parse(ivhd, softc); if (status != 0) { device_printf(dev, "endpoint device parsing error=%d\n", status); } status = ivhd_print_cap(softc, ivhd); if (status != 0) { return (status); } status = amdvi_setup_hw(softc); if (status != 0) { device_printf(dev, "couldn't be initialised, error=%d\n", status); return (status); } return (0); } static int ivhd_detach(device_t dev) { struct amdvi_softc *softc; softc = device_get_softc(dev); amdvi_teardown_hw(softc); /* * XXX: delete the device. * don't allow detach, return EBUSY. */ return (0); } static int ivhd_suspend(device_t dev) { return (0); } static int ivhd_resume(device_t dev) { return (0); } static device_method_t ivhd_methods[] = { DEVMETHOD(device_identify, ivhd_identify), DEVMETHOD(device_probe, ivhd_probe), DEVMETHOD(device_attach, ivhd_attach), DEVMETHOD(device_detach, ivhd_detach), DEVMETHOD(device_suspend, ivhd_suspend), DEVMETHOD(device_resume, ivhd_resume), DEVMETHOD_END }; static driver_t ivhd_driver = { "ivhd", ivhd_methods, sizeof(struct amdvi_softc), }; static devclass_t ivhd_devclass; /* * Load this module at the end after PCI re-probing to configure interrupt. */ DRIVER_MODULE_ORDERED(ivhd, acpi, ivhd_driver, ivhd_devclass, 0, 0, SI_ORDER_ANY); MODULE_DEPEND(ivhd, acpi, 1, 1, 1); MODULE_DEPEND(ivhd, pci, 1, 1, 1); Index: head/sys/contrib/dev/acpica/include/actbl2.h =================================================================== --- head/sys/contrib/dev/acpica/include/actbl2.h (revision 329359) +++ head/sys/contrib/dev/acpica/include/actbl2.h (revision 329360) @@ -1,2150 +1,2167 @@ /****************************************************************************** * * Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec) * *****************************************************************************/ /****************************************************************************** * * 1. Copyright Notice * * Some or all of this work - Copyright (c) 1999 - 2018, Intel Corp. * All rights reserved. * * 2. License * * 2.1. This is your license from Intel Corp. under its intellectual property * rights. You may have additional license terms from the party that provided * you this software, covering your right to use that party's intellectual * property rights. * * 2.2. Intel grants, free of charge, to any person ("Licensee") obtaining a * copy of the source code appearing in this file ("Covered Code") an * irrevocable, perpetual, worldwide license under Intel's copyrights in the * base code distributed originally by Intel ("Original Intel Code") to copy, * make derivatives, distribute, use and display any portion of the Covered * Code in any form, with the right to sublicense such rights; and * * 2.3. Intel grants Licensee a non-exclusive and non-transferable patent * license (with the right to sublicense), under only those claims of Intel * patents that are infringed by the Original Intel Code, to make, use, sell, * offer to sell, and import the Covered Code and derivative works thereof * solely to the minimum extent necessary to exercise the above copyright * license, and in no event shall the patent license extend to any additions * to or modifications of the Original Intel Code. 
No other license or right * is granted directly or by implication, estoppel or otherwise; * * The above copyright and patent license is granted only if the following * conditions are met: * * 3. Conditions * * 3.1. Redistribution of Source with Rights to Further Distribute Source. * Redistribution of source code of any substantial portion of the Covered * Code or modification with rights to further distribute source must include * the above Copyright Notice, the above License, this list of Conditions, * and the following Disclaimer and Export Compliance provision. In addition, * Licensee must cause all Covered Code to which Licensee contributes to * contain a file documenting the changes Licensee made to create that Covered * Code and the date of any change. Licensee must include in that file the * documentation of any changes made by any predecessor Licensee. Licensee * must include a prominent statement that the modification is derived, * directly or indirectly, from Original Intel Code. * * 3.2. Redistribution of Source with no Rights to Further Distribute Source. * Redistribution of source code of any substantial portion of the Covered * Code or modification without rights to further distribute source must * include the following Disclaimer and Export Compliance provision in the * documentation and/or other materials provided with distribution. In * addition, Licensee may not authorize further sublicense of source of any * portion of the Covered Code, and must include terms to the effect that the * license from Licensee to its licensee is limited to the intellectual * property embodied in the software Licensee provides to its licensee, and * not to intellectual property embodied in modifications its licensee may * make. * * 3.3. Redistribution of Executable. Redistribution in executable form of any * substantial portion of the Covered Code or modification must reproduce the * above Copyright Notice, and the following Disclaimer and Export Compliance * provision in the documentation and/or other materials provided with the * distribution. * * 3.4. Intel retains all right, title, and interest in and to the Original * Intel Code. * * 3.5. Neither the name Intel nor any other trademark owned or controlled by * Intel shall be used in advertising or otherwise to promote the sale, use or * other dealings in products derived from or relating to the Covered Code * without prior written authorization from Intel. * * 4. Disclaimer and Export Compliance * * 4.1. INTEL MAKES NO WARRANTY OF ANY KIND REGARDING ANY SOFTWARE PROVIDED * HERE. ANY SOFTWARE ORIGINATING FROM INTEL OR DERIVED FROM INTEL SOFTWARE * IS PROVIDED "AS IS," AND INTEL WILL NOT PROVIDE ANY SUPPORT, ASSISTANCE, * INSTALLATION, TRAINING OR OTHER SERVICES. INTEL WILL NOT PROVIDE ANY * UPDATES, ENHANCEMENTS OR EXTENSIONS. INTEL SPECIFICALLY DISCLAIMS ANY * IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT AND FITNESS FOR A * PARTICULAR PURPOSE. * * 4.2. IN NO EVENT SHALL INTEL HAVE ANY LIABILITY TO LICENSEE, ITS LICENSEES * OR ANY OTHER THIRD PARTY, FOR ANY LOST PROFITS, LOST DATA, LOSS OF USE OR * COSTS OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY INDIRECT, * SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, UNDER ANY * CAUSE OF ACTION OR THEORY OF LIABILITY, AND IRRESPECTIVE OF WHETHER INTEL * HAS ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS * SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE ESSENTIAL PURPOSE OF ANY * LIMITED REMEDY. * * 4.3. 
Licensee shall not export, either directly or indirectly, any of this * software or system incorporating such software without first obtaining any * required license or other approval from the U. S. Department of Commerce or * any other agency or department of the United States Government. In the * event Licensee exports any such software from the United States or * re-exports any such software from a foreign destination, Licensee shall * ensure that the distribution and export/re-export of the software is in * compliance with all laws, regulations, orders, or other restrictions of the * U.S. Export Administration Regulations. Licensee agrees that neither it nor * any of its subsidiaries will export/re-export any technical data, process, * software, or service, directly or indirectly, to any country for which the * United States government or any agency thereof requires an export license, * other governmental approval, or letter of assurance, without first obtaining * such license, approval or letter. * ***************************************************************************** * * Alternatively, you may choose to be licensed under the terms of the * following license: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, you may choose to be licensed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * *****************************************************************************/ #ifndef __ACTBL2_H__ #define __ACTBL2_H__ /******************************************************************************* * * Additional ACPI Tables (2) * * These tables are not consumed directly by the ACPICA subsystem, but are * included here to support device drivers and the AML disassembler. * ******************************************************************************/ /* * Values for description table header signatures for tables defined in this * file. 
Useful because they make it more difficult to inadvertently type in * the wrong signature. */ #define ACPI_SIG_IORT "IORT" /* IO Remapping Table */ #define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */ #define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */ #define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */ #define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */ #define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */ #define ACPI_SIG_MPST "MPST" /* Memory Power State Table */ #define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */ #define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */ #define ACPI_SIG_MTMR "MTMR" /* MID Timer table */ #define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */ #define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */ #define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */ #define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */ #define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */ #define ACPI_SIG_RASF "RASF" /* RAS Feature table */ #define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ #define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */ #define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */ /* * All tables must be byte-packed to match the ACPI specification, since * the tables are provided by the system BIOS. */ #pragma pack(1) /* * Note: C bitfields are not used for this reason: * * "Bitfields are great and easy to read, but unfortunately the C language * does not specify the layout of bitfields in memory, which means they are * essentially useless for dealing with packed data in on-disk formats or * binary wire protocols." (Or ACPI tables and buffers.) "If you ask me, * this decision was a design error in C. Ritchie could have picked an order * and stuck with it." Norman Ramsey. 
* See http://stackoverflow.com/a/1053662/41661 */ /******************************************************************************* * * IORT - IO Remapping Table * * Conforms to "IO Remapping Table System Software on ARM Platforms", * Document number: ARM DEN 0049C, May 2017 * ******************************************************************************/ typedef struct acpi_table_iort { ACPI_TABLE_HEADER Header; UINT32 NodeCount; UINT32 NodeOffset; UINT32 Reserved; } ACPI_TABLE_IORT; /* * IORT subtables */ typedef struct acpi_iort_node { UINT8 Type; UINT16 Length; UINT8 Revision; UINT32 Reserved; UINT32 MappingCount; UINT32 MappingOffset; char NodeData[1]; } ACPI_IORT_NODE; /* Values for subtable Type above */ enum AcpiIortNodeType { ACPI_IORT_NODE_ITS_GROUP = 0x00, ACPI_IORT_NODE_NAMED_COMPONENT = 0x01, ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02, ACPI_IORT_NODE_SMMU = 0x03, ACPI_IORT_NODE_SMMU_V3 = 0x04 }; typedef struct acpi_iort_id_mapping { UINT32 InputBase; /* Lowest value in input range */ UINT32 IdCount; /* Number of IDs */ UINT32 OutputBase; /* Lowest value in output range */ UINT32 OutputReference; /* A reference to the output node */ UINT32 Flags; } ACPI_IORT_ID_MAPPING; /* Masks for Flags field above for IORT subtable */ #define ACPI_IORT_ID_SINGLE_MAPPING (1) typedef struct acpi_iort_memory_access { UINT32 CacheCoherency; UINT8 Hints; UINT16 Reserved; UINT8 MemoryFlags; } ACPI_IORT_MEMORY_ACCESS; /* Values for CacheCoherency field above */ #define ACPI_IORT_NODE_COHERENT 0x00000001 /* The device node is fully coherent */ #define ACPI_IORT_NODE_NOT_COHERENT 0x00000000 /* The device node is not coherent */ /* Masks for Hints field above */ #define ACPI_IORT_HT_TRANSIENT (1) #define ACPI_IORT_HT_WRITE (1<<1) #define ACPI_IORT_HT_READ (1<<2) #define ACPI_IORT_HT_OVERRIDE (1<<3) /* Masks for MemoryFlags field above */ #define ACPI_IORT_MF_COHERENCY (1) #define ACPI_IORT_MF_ATTRIBUTES (1<<1) /* * IORT node specific subtables */ typedef struct acpi_iort_its_group { UINT32 ItsCount; UINT32 Identifiers[1]; /* GIC ITS identifier arrary */ } ACPI_IORT_ITS_GROUP; typedef struct acpi_iort_named_component { UINT32 NodeFlags; UINT64 MemoryProperties; /* Memory access properties */ UINT8 MemoryAddressLimit; /* Memory address size limit */ char DeviceName[1]; /* Path of namespace object */ } ACPI_IORT_NAMED_COMPONENT; typedef struct acpi_iort_root_complex { UINT64 MemoryProperties; /* Memory access properties */ UINT32 AtsAttribute; UINT32 PciSegmentNumber; } ACPI_IORT_ROOT_COMPLEX; /* Values for AtsAttribute field above */ #define ACPI_IORT_ATS_SUPPORTED 0x00000001 /* The root complex supports ATS */ #define ACPI_IORT_ATS_UNSUPPORTED 0x00000000 /* The root complex doesn't support ATS */ typedef struct acpi_iort_smmu { UINT64 BaseAddress; /* SMMU base address */ UINT64 Span; /* Length of memory range */ UINT32 Model; UINT32 Flags; UINT32 GlobalInterruptOffset; UINT32 ContextInterruptCount; UINT32 ContextInterruptOffset; UINT32 PmuInterruptCount; UINT32 PmuInterruptOffset; UINT64 Interrupts[1]; /* Interrupt array */ } ACPI_IORT_SMMU; /* Values for Model field above */ #define ACPI_IORT_SMMU_V1 0x00000000 /* Generic SMMUv1 */ #define ACPI_IORT_SMMU_V2 0x00000001 /* Generic SMMUv2 */ #define ACPI_IORT_SMMU_CORELINK_MMU400 0x00000002 /* ARM Corelink MMU-400 */ #define ACPI_IORT_SMMU_CORELINK_MMU500 0x00000003 /* ARM Corelink MMU-500 */ #define ACPI_IORT_SMMU_CORELINK_MMU401 0x00000004 /* ARM Corelink MMU-401 */ #define ACPI_IORT_SMMU_CAVIUM_THUNDERX 0x00000005 /* Cavium ThunderX SMMUv2 */ /* Masks for 
Flags field above */ #define ACPI_IORT_SMMU_DVM_SUPPORTED (1) #define ACPI_IORT_SMMU_COHERENT_WALK (1<<1) /* Global interrupt format */ typedef struct acpi_iort_smmu_gsi { UINT32 NSgIrpt; UINT32 NSgIrptFlags; UINT32 NSgCfgIrpt; UINT32 NSgCfgIrptFlags; } ACPI_IORT_SMMU_GSI; typedef struct acpi_iort_smmu_v3 { UINT64 BaseAddress; /* SMMUv3 base address */ UINT32 Flags; UINT32 Reserved; UINT64 VatosAddress; UINT32 Model; UINT32 EventGsiv; UINT32 PriGsiv; UINT32 GerrGsiv; UINT32 SyncGsiv; UINT8 Pxm; UINT8 Reserved1; UINT16 Reserved2; UINT32 IdMappingIndex; } ACPI_IORT_SMMU_V3; /* Values for Model field above */ #define ACPI_IORT_SMMU_V3_GENERIC 0x00000000 /* Generic SMMUv3 */ #define ACPI_IORT_SMMU_V3_HISILICON_HI161X 0x00000001 /* HiSilicon Hi161x SMMUv3 */ #define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x00000002 /* Cavium CN99xx SMMUv3 */ /* Masks for Flags field above */ #define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE (1) #define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (1<<1) #define ACPI_IORT_SMMU_V3_PXM_VALID (1<<3) /******************************************************************************* * * IVRS - I/O Virtualization Reporting Structure * Version 1 * * Conforms to "AMD I/O Virtualization Technology (IOMMU) Specification", * Revision 1.26, February 2009. * ******************************************************************************/ typedef struct acpi_table_ivrs { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ UINT32 Info; /* Common virtualization info */ UINT64 Reserved; } ACPI_TABLE_IVRS; /* Values for Info field above */ #define ACPI_IVRS_PHYSICAL_SIZE 0x00007F00 /* 7 bits, physical address size */ #define ACPI_IVRS_VIRTUAL_SIZE 0x003F8000 /* 7 bits, virtual address size */ #define ACPI_IVRS_ATS_RESERVED 0x00400000 /* ATS address translation range reserved */ /* IVRS subtable header */ typedef struct acpi_ivrs_header { UINT8 Type; /* Subtable type */ UINT8 Flags; UINT16 Length; /* Subtable length */ UINT16 DeviceId; /* ID of IOMMU */ } ACPI_IVRS_HEADER; /* Values for subtable Type above */ enum AcpiIvrsType { ACPI_IVRS_TYPE_HARDWARE = 0x10, + ACPI_IVRS_TYPE_HARDWARE_EXT1 = 0x11, + ACPI_IVRS_TYPE_HARDWARE_EXT2 = 0x40, ACPI_IVRS_TYPE_MEMORY1 = 0x20, ACPI_IVRS_TYPE_MEMORY2 = 0x21, ACPI_IVRS_TYPE_MEMORY3 = 0x22 }; /* Masks for Flags field above for IVHD subtable */ #define ACPI_IVHD_TT_ENABLE (1) #define ACPI_IVHD_PASS_PW (1<<1) #define ACPI_IVHD_RES_PASS_PW (1<<2) #define ACPI_IVHD_ISOC (1<<3) #define ACPI_IVHD_IOTLB (1<<4) /* Masks for Flags field above for IVMD subtable */ #define ACPI_IVMD_UNITY (1) #define ACPI_IVMD_READ (1<<1) #define ACPI_IVMD_WRITE (1<<2) #define ACPI_IVMD_EXCLUSION_RANGE (1<<3) /* * IVRS subtables, correspond to Type in ACPI_IVRS_HEADER */ /* 0x10: I/O Virtualization Hardware Definition Block (IVHD) */ typedef struct acpi_ivrs_hardware { ACPI_IVRS_HEADER Header; UINT16 CapabilityOffset; /* Offset for IOMMU control fields */ UINT64 BaseAddress; /* IOMMU control registers */ UINT16 PciSegmentGroup; UINT16 Info; /* MSI number and unit ID */ UINT32 Reserved; } ACPI_IVRS_HARDWARE; + +/* 0x11 and 0x40: I/O Virtualization Hardware Definition Block (IVHD) */ + +typedef struct acpi_ivrs_hardware_new +{ + ACPI_IVRS_HEADER Header; + UINT16 CapabilityOffset; /* Offset for IOMMU control fields */ + UINT64 BaseAddress; /* IOMMU control registers */ + UINT16 PciSegmentGroup; + UINT16 Info; /* MSI number and unit ID */ + UINT32 Attr; /* IOMMU Feature */ + UINT64 ExtFR; /* IOMMU Extended Feature */ + UINT64 Reserved; /* v1 feature or v2 attribute */ + +} ACPI_IVRS_HARDWARE_NEW; /* 
Masks for Info field above */ #define ACPI_IVHD_MSI_NUMBER_MASK 0x001F /* 5 bits, MSI message number */ #define ACPI_IVHD_UNIT_ID_MASK 0x1F00 /* 5 bits, UnitID */ /* * Device Entries for IVHD subtable, appear after ACPI_IVRS_HARDWARE structure. * Upper two bits of the Type field are the (encoded) length of the structure. * Currently, only 4 and 8 byte entries are defined. 16 and 32 byte entries * are reserved for future use but not defined. */ typedef struct acpi_ivrs_de_header { UINT8 Type; UINT16 Id; UINT8 DataSetting; } ACPI_IVRS_DE_HEADER; /* Length of device entry is in the top two bits of Type field above */ #define ACPI_IVHD_ENTRY_LENGTH 0xC0 /* Values for device entry Type field above */ enum AcpiIvrsDeviceEntryType { /* 4-byte device entries, all use ACPI_IVRS_DEVICE4 */ ACPI_IVRS_TYPE_PAD4 = 0, ACPI_IVRS_TYPE_ALL = 1, ACPI_IVRS_TYPE_SELECT = 2, ACPI_IVRS_TYPE_START = 3, ACPI_IVRS_TYPE_END = 4, /* 8-byte device entries */ ACPI_IVRS_TYPE_PAD8 = 64, ACPI_IVRS_TYPE_NOT_USED = 65, ACPI_IVRS_TYPE_ALIAS_SELECT = 66, /* Uses ACPI_IVRS_DEVICE8A */ ACPI_IVRS_TYPE_ALIAS_START = 67, /* Uses ACPI_IVRS_DEVICE8A */ ACPI_IVRS_TYPE_EXT_SELECT = 70, /* Uses ACPI_IVRS_DEVICE8B */ ACPI_IVRS_TYPE_EXT_START = 71, /* Uses ACPI_IVRS_DEVICE8B */ ACPI_IVRS_TYPE_SPECIAL = 72 /* Uses ACPI_IVRS_DEVICE8C */ }; /* Values for Data field above */ #define ACPI_IVHD_INIT_PASS (1) #define ACPI_IVHD_EINT_PASS (1<<1) #define ACPI_IVHD_NMI_PASS (1<<2) #define ACPI_IVHD_SYSTEM_MGMT (3<<4) #define ACPI_IVHD_LINT0_PASS (1<<6) #define ACPI_IVHD_LINT1_PASS (1<<7) /* Types 0-4: 4-byte device entry */ typedef struct acpi_ivrs_device4 { ACPI_IVRS_DE_HEADER Header; } ACPI_IVRS_DEVICE4; /* Types 66-67: 8-byte device entry */ typedef struct acpi_ivrs_device8a { ACPI_IVRS_DE_HEADER Header; UINT8 Reserved1; UINT16 UsedId; UINT8 Reserved2; } ACPI_IVRS_DEVICE8A; /* Types 70-71: 8-byte device entry */ typedef struct acpi_ivrs_device8b { ACPI_IVRS_DE_HEADER Header; UINT32 ExtendedData; } ACPI_IVRS_DEVICE8B; /* Values for ExtendedData above */ #define ACPI_IVHD_ATS_DISABLED (1<<31) /* Type 72: 8-byte device entry */ typedef struct acpi_ivrs_device8c { ACPI_IVRS_DE_HEADER Header; UINT8 Handle; UINT16 UsedId; UINT8 Variety; } ACPI_IVRS_DEVICE8C; /* Values for Variety field above */ #define ACPI_IVHD_IOAPIC 1 #define ACPI_IVHD_HPET 2 /* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */ typedef struct acpi_ivrs_memory { ACPI_IVRS_HEADER Header; UINT16 AuxData; UINT64 Reserved; UINT64 StartAddress; UINT64 MemoryLength; } ACPI_IVRS_MEMORY; /******************************************************************************* * * LPIT - Low Power Idle Table * * Conforms to "ACPI Low Power Idle Table (LPIT)" July 2014. 
* ******************************************************************************/ typedef struct acpi_table_lpit { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ } ACPI_TABLE_LPIT; /* LPIT subtable header */ typedef struct acpi_lpit_header { UINT32 Type; /* Subtable type */ UINT32 Length; /* Subtable length */ UINT16 UniqueId; UINT16 Reserved; UINT32 Flags; } ACPI_LPIT_HEADER; /* Values for subtable Type above */ enum AcpiLpitType { ACPI_LPIT_TYPE_NATIVE_CSTATE = 0x00, ACPI_LPIT_TYPE_RESERVED = 0x01 /* 1 and above are reserved */ }; /* Masks for Flags field above */ #define ACPI_LPIT_STATE_DISABLED (1) #define ACPI_LPIT_NO_COUNTER (1<<1) /* * LPIT subtables, correspond to Type in ACPI_LPIT_HEADER */ /* 0x00: Native C-state instruction based LPI structure */ typedef struct acpi_lpit_native { ACPI_LPIT_HEADER Header; ACPI_GENERIC_ADDRESS EntryTrigger; UINT32 Residency; UINT32 Latency; ACPI_GENERIC_ADDRESS ResidencyCounter; UINT64 CounterFrequency; } ACPI_LPIT_NATIVE; /******************************************************************************* * * MADT - Multiple APIC Description Table * Version 3 * ******************************************************************************/ typedef struct acpi_table_madt { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ UINT32 Address; /* Physical address of local APIC */ UINT32 Flags; } ACPI_TABLE_MADT; /* Masks for Flags field above */ #define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */ /* Values for PCATCompat flag */ #define ACPI_MADT_DUAL_PIC 1 #define ACPI_MADT_MULTIPLE_APIC 0 /* Values for MADT subtable type in ACPI_SUBTABLE_HEADER */ enum AcpiMadtType { ACPI_MADT_TYPE_LOCAL_APIC = 0, ACPI_MADT_TYPE_IO_APIC = 1, ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2, ACPI_MADT_TYPE_NMI_SOURCE = 3, ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4, ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5, ACPI_MADT_TYPE_IO_SAPIC = 6, ACPI_MADT_TYPE_LOCAL_SAPIC = 7, ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8, ACPI_MADT_TYPE_LOCAL_X2APIC = 9, ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10, ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12, ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */ }; /* * MADT Subtables, correspond to Type in ACPI_SUBTABLE_HEADER */ /* 0: Processor Local APIC */ typedef struct acpi_madt_local_apic { ACPI_SUBTABLE_HEADER Header; UINT8 ProcessorId; /* ACPI processor id */ UINT8 Id; /* Processor's local APIC id */ UINT32 LapicFlags; } ACPI_MADT_LOCAL_APIC; /* 1: IO APIC */ typedef struct acpi_madt_io_apic { ACPI_SUBTABLE_HEADER Header; UINT8 Id; /* I/O APIC ID */ UINT8 Reserved; /* Reserved - must be zero */ UINT32 Address; /* APIC physical address */ UINT32 GlobalIrqBase; /* Global system interrupt where INTI lines start */ } ACPI_MADT_IO_APIC; /* 2: Interrupt Override */ typedef struct acpi_madt_interrupt_override { ACPI_SUBTABLE_HEADER Header; UINT8 Bus; /* 0 - ISA */ UINT8 SourceIrq; /* Interrupt source (IRQ) */ UINT32 GlobalIrq; /* Global system interrupt */ UINT16 IntiFlags; } ACPI_MADT_INTERRUPT_OVERRIDE; /* 3: NMI Source */ typedef struct acpi_madt_nmi_source { ACPI_SUBTABLE_HEADER Header; UINT16 IntiFlags; UINT32 GlobalIrq; /* Global system interrupt */ } ACPI_MADT_NMI_SOURCE; /* 4: Local APIC NMI */ typedef struct acpi_madt_local_apic_nmi { ACPI_SUBTABLE_HEADER Header; UINT8 ProcessorId; /* ACPI processor id */ UINT16 IntiFlags; UINT8 Lint; /* LINTn to which NMI is connected */ } 
ACPI_MADT_LOCAL_APIC_NMI; /* 5: Address Override */ typedef struct acpi_madt_local_apic_override { ACPI_SUBTABLE_HEADER Header; UINT16 Reserved; /* Reserved, must be zero */ UINT64 Address; /* APIC physical address */ } ACPI_MADT_LOCAL_APIC_OVERRIDE; /* 6: I/O Sapic */ typedef struct acpi_madt_io_sapic { ACPI_SUBTABLE_HEADER Header; UINT8 Id; /* I/O SAPIC ID */ UINT8 Reserved; /* Reserved, must be zero */ UINT32 GlobalIrqBase; /* Global interrupt for SAPIC start */ UINT64 Address; /* SAPIC physical address */ } ACPI_MADT_IO_SAPIC; /* 7: Local Sapic */ typedef struct acpi_madt_local_sapic { ACPI_SUBTABLE_HEADER Header; UINT8 ProcessorId; /* ACPI processor id */ UINT8 Id; /* SAPIC ID */ UINT8 Eid; /* SAPIC EID */ UINT8 Reserved[3]; /* Reserved, must be zero */ UINT32 LapicFlags; UINT32 Uid; /* Numeric UID - ACPI 3.0 */ char UidString[1]; /* String UID - ACPI 3.0 */ } ACPI_MADT_LOCAL_SAPIC; /* 8: Platform Interrupt Source */ typedef struct acpi_madt_interrupt_source { ACPI_SUBTABLE_HEADER Header; UINT16 IntiFlags; UINT8 Type; /* 1=PMI, 2=INIT, 3=corrected */ UINT8 Id; /* Processor ID */ UINT8 Eid; /* Processor EID */ UINT8 IoSapicVector; /* Vector value for PMI interrupts */ UINT32 GlobalIrq; /* Global system interrupt */ UINT32 Flags; /* Interrupt Source Flags */ } ACPI_MADT_INTERRUPT_SOURCE; /* Masks for Flags field above */ #define ACPI_MADT_CPEI_OVERRIDE (1) /* 9: Processor Local X2APIC (ACPI 4.0) */ typedef struct acpi_madt_local_x2apic { ACPI_SUBTABLE_HEADER Header; UINT16 Reserved; /* Reserved - must be zero */ UINT32 LocalApicId; /* Processor x2APIC ID */ UINT32 LapicFlags; UINT32 Uid; /* ACPI processor UID */ } ACPI_MADT_LOCAL_X2APIC; /* 10: Local X2APIC NMI (ACPI 4.0) */ typedef struct acpi_madt_local_x2apic_nmi { ACPI_SUBTABLE_HEADER Header; UINT16 IntiFlags; UINT32 Uid; /* ACPI processor UID */ UINT8 Lint; /* LINTn to which NMI is connected */ UINT8 Reserved[3]; /* Reserved - must be zero */ } ACPI_MADT_LOCAL_X2APIC_NMI; /* 11: Generic Interrupt (ACPI 5.0 + ACPI 6.0 changes) */ typedef struct acpi_madt_generic_interrupt { ACPI_SUBTABLE_HEADER Header; UINT16 Reserved; /* Reserved - must be zero */ UINT32 CpuInterfaceNumber; UINT32 Uid; UINT32 Flags; UINT32 ParkingVersion; UINT32 PerformanceInterrupt; UINT64 ParkedAddress; UINT64 BaseAddress; UINT64 GicvBaseAddress; UINT64 GichBaseAddress; UINT32 VgicInterrupt; UINT64 GicrBaseAddress; UINT64 ArmMpidr; UINT8 EfficiencyClass; UINT8 Reserved2[3]; } ACPI_MADT_GENERIC_INTERRUPT; /* Masks for Flags field above */ /* ACPI_MADT_ENABLED (1) Processor is usable if set */ #define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */ #define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */ /* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */ typedef struct acpi_madt_generic_distributor { ACPI_SUBTABLE_HEADER Header; UINT16 Reserved; /* Reserved - must be zero */ UINT32 GicId; UINT64 BaseAddress; UINT32 GlobalIrqBase; UINT8 Version; UINT8 Reserved2[3]; /* Reserved - must be zero */ } ACPI_MADT_GENERIC_DISTRIBUTOR; /* Values for Version field above */ enum AcpiMadtGicVersion { ACPI_MADT_GIC_VERSION_NONE = 0, ACPI_MADT_GIC_VERSION_V1 = 1, ACPI_MADT_GIC_VERSION_V2 = 2, ACPI_MADT_GIC_VERSION_V3 = 3, ACPI_MADT_GIC_VERSION_V4 = 4, ACPI_MADT_GIC_VERSION_RESERVED = 5 /* 5 and greater are reserved */ }; /* 13: Generic MSI Frame (ACPI 5.1) */ typedef struct acpi_madt_generic_msi_frame { ACPI_SUBTABLE_HEADER Header; UINT16 Reserved; /* Reserved - must be zero */ UINT32 MsiFrameId; UINT64 BaseAddress; 
UINT32 Flags; UINT16 SpiCount; UINT16 SpiBase; } ACPI_MADT_GENERIC_MSI_FRAME; /* Masks for Flags field above */ #define ACPI_MADT_OVERRIDE_SPI_VALUES (1) /* 14: Generic Redistributor (ACPI 5.1) */ typedef struct acpi_madt_generic_redistributor { ACPI_SUBTABLE_HEADER Header; UINT16 Reserved; /* reserved - must be zero */ UINT64 BaseAddress; UINT32 Length; } ACPI_MADT_GENERIC_REDISTRIBUTOR; /* 15: Generic Translator (ACPI 6.0) */ typedef struct acpi_madt_generic_translator { ACPI_SUBTABLE_HEADER Header; UINT16 Reserved; /* reserved - must be zero */ UINT32 TranslationId; UINT64 BaseAddress; UINT32 Reserved2; } ACPI_MADT_GENERIC_TRANSLATOR; /* * Common flags fields for MADT subtables */ /* MADT Local APIC flags */ #define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */ /* MADT MPS INTI flags (IntiFlags) */ #define ACPI_MADT_POLARITY_MASK (3) /* 00-01: Polarity of APIC I/O input signals */ #define ACPI_MADT_TRIGGER_MASK (3<<2) /* 02-03: Trigger mode of APIC input signals */ /* Values for MPS INTI flags */ #define ACPI_MADT_POLARITY_CONFORMS 0 #define ACPI_MADT_POLARITY_ACTIVE_HIGH 1 #define ACPI_MADT_POLARITY_RESERVED 2 #define ACPI_MADT_POLARITY_ACTIVE_LOW 3 #define ACPI_MADT_TRIGGER_CONFORMS (0) #define ACPI_MADT_TRIGGER_EDGE (1<<2) #define ACPI_MADT_TRIGGER_RESERVED (2<<2) #define ACPI_MADT_TRIGGER_LEVEL (3<<2) /******************************************************************************* * * MCFG - PCI Memory Mapped Configuration table and subtable * Version 1 * * Conforms to "PCI Firmware Specification", Revision 3.0, June 20, 2005 * ******************************************************************************/ typedef struct acpi_table_mcfg { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ UINT8 Reserved[8]; } ACPI_TABLE_MCFG; /* Subtable */ typedef struct acpi_mcfg_allocation { UINT64 Address; /* Base address, processor-relative */ UINT16 PciSegment; /* PCI segment group number */ UINT8 StartBusNumber; /* Starting PCI Bus number */ UINT8 EndBusNumber; /* Final PCI Bus number */ UINT32 Reserved; } ACPI_MCFG_ALLOCATION; /******************************************************************************* * * MCHI - Management Controller Host Interface Table * Version 1 * * Conforms to "Management Component Transport Protocol (MCTP) Host * Interface Specification", Revision 1.0.0a, October 13, 2009 * ******************************************************************************/ typedef struct acpi_table_mchi { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ UINT8 InterfaceType; UINT8 Protocol; UINT64 ProtocolData; UINT8 InterruptType; UINT8 Gpe; UINT8 PciDeviceFlag; UINT32 GlobalInterrupt; ACPI_GENERIC_ADDRESS ControlRegister; UINT8 PciSegment; UINT8 PciBus; UINT8 PciDevice; UINT8 PciFunction; } ACPI_TABLE_MCHI; /******************************************************************************* * * MPST - Memory Power State Table (ACPI 5.0) * Version 1 * ******************************************************************************/ #define ACPI_MPST_CHANNEL_INFO \ UINT8 ChannelId; \ UINT8 Reserved1[3]; \ UINT16 PowerNodeCount; \ UINT16 Reserved2; /* Main table */ typedef struct acpi_table_mpst { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */ } ACPI_TABLE_MPST; /* Memory Platform Communication Channel Info */ typedef struct acpi_mpst_channel { ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */ } ACPI_MPST_CHANNEL; /* Memory Power Node Structure */ typedef struct acpi_mpst_power_node { 
UINT8 Flags; UINT8 Reserved1; UINT16 NodeId; UINT32 Length; UINT64 RangeAddress; UINT64 RangeLength; UINT32 NumPowerStates; UINT32 NumPhysicalComponents; } ACPI_MPST_POWER_NODE; /* Values for Flags field above */ #define ACPI_MPST_ENABLED 1 #define ACPI_MPST_POWER_MANAGED 2 #define ACPI_MPST_HOT_PLUG_CAPABLE 4 /* Memory Power State Structure (follows POWER_NODE above) */ typedef struct acpi_mpst_power_state { UINT8 PowerState; UINT8 InfoIndex; } ACPI_MPST_POWER_STATE; /* Physical Component ID Structure (follows POWER_STATE above) */ typedef struct acpi_mpst_component { UINT16 ComponentId; } ACPI_MPST_COMPONENT; /* Memory Power State Characteristics Structure (follows all POWER_NODEs) */ typedef struct acpi_mpst_data_hdr { UINT16 CharacteristicsCount; UINT16 Reserved; } ACPI_MPST_DATA_HDR; typedef struct acpi_mpst_power_data { UINT8 StructureId; UINT8 Flags; UINT16 Reserved1; UINT32 AveragePower; UINT32 PowerSaving; UINT64 ExitLatency; UINT64 Reserved2; } ACPI_MPST_POWER_DATA; /* Values for Flags field above */ #define ACPI_MPST_PRESERVE 1 #define ACPI_MPST_AUTOENTRY 2 #define ACPI_MPST_AUTOEXIT 4 /* Shared Memory Region (not part of an ACPI table) */ typedef struct acpi_mpst_shared { UINT32 Signature; UINT16 PccCommand; UINT16 PccStatus; UINT32 CommandRegister; UINT32 StatusRegister; UINT32 PowerStateId; UINT32 PowerNodeId; UINT64 EnergyConsumed; UINT64 AveragePower; } ACPI_MPST_SHARED; /******************************************************************************* * * MSCT - Maximum System Characteristics Table (ACPI 4.0) * Version 1 * ******************************************************************************/ typedef struct acpi_table_msct { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ UINT32 ProximityOffset; /* Location of proximity info struct(s) */ UINT32 MaxProximityDomains;/* Max number of proximity domains */ UINT32 MaxClockDomains; /* Max number of clock domains */ UINT64 MaxAddress; /* Max physical address in system */ } ACPI_TABLE_MSCT; /* Subtable - Maximum Proximity Domain Information. Version 1 */ typedef struct acpi_msct_proximity { UINT8 Revision; UINT8 Length; UINT32 RangeStart; /* Start of domain range */ UINT32 RangeEnd; /* End of domain range */ UINT32 ProcessorCapacity; UINT64 MemoryCapacity; /* In bytes */ } ACPI_MSCT_PROXIMITY; /******************************************************************************* * * MSDM - Microsoft Data Management table * * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)", * November 29, 2011. Copyright 2011 Microsoft * ******************************************************************************/ /* Basic MSDM table is only the common ACPI header */ typedef struct acpi_table_msdm { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ } ACPI_TABLE_MSDM; /******************************************************************************* * * MTMR - MID Timer Table * Version 1 * * Conforms to "Simple Firmware Interface Specification", * Draft 0.8.2, Oct 19, 2010 * NOTE: The ACPI MTMR is equivalent to the SFI MTMR table. 
* ******************************************************************************/ typedef struct acpi_table_mtmr { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ } ACPI_TABLE_MTMR; /* MTMR entry */ typedef struct acpi_mtmr_entry { ACPI_GENERIC_ADDRESS PhysicalAddress; UINT32 Frequency; UINT32 Irq; } ACPI_MTMR_ENTRY; /******************************************************************************* * * NFIT - NVDIMM Interface Table (ACPI 6.0+) * Version 1 * ******************************************************************************/ typedef struct acpi_table_nfit { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ UINT32 Reserved; /* Reserved, must be zero */ } ACPI_TABLE_NFIT; /* Subtable header for NFIT */ typedef struct acpi_nfit_header { UINT16 Type; UINT16 Length; } ACPI_NFIT_HEADER; /* Values for subtable type in ACPI_NFIT_HEADER */ enum AcpiNfitType { ACPI_NFIT_TYPE_SYSTEM_ADDRESS = 0, ACPI_NFIT_TYPE_MEMORY_MAP = 1, ACPI_NFIT_TYPE_INTERLEAVE = 2, ACPI_NFIT_TYPE_SMBIOS = 3, ACPI_NFIT_TYPE_CONTROL_REGION = 4, ACPI_NFIT_TYPE_DATA_REGION = 5, ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6, ACPI_NFIT_TYPE_CAPABILITIES = 7, ACPI_NFIT_TYPE_RESERVED = 8 /* 8 and greater are reserved */ }; /* * NFIT Subtables */ /* 0: System Physical Address Range Structure */ typedef struct acpi_nfit_system_address { ACPI_NFIT_HEADER Header; UINT16 RangeIndex; UINT16 Flags; UINT32 Reserved; /* Reserved, must be zero */ UINT32 ProximityDomain; UINT8 RangeGuid[16]; UINT64 Address; UINT64 Length; UINT64 MemoryMapping; } ACPI_NFIT_SYSTEM_ADDRESS; /* Flags */ #define ACPI_NFIT_ADD_ONLINE_ONLY (1) /* 00: Add/Online Operation Only */ #define ACPI_NFIT_PROXIMITY_VALID (1<<1) /* 01: Proximity Domain Valid */ /* Range Type GUIDs appear in the include/acuuid.h file */ /* 1: Memory Device to System Address Range Map Structure */ typedef struct acpi_nfit_memory_map { ACPI_NFIT_HEADER Header; UINT32 DeviceHandle; UINT16 PhysicalId; UINT16 RegionId; UINT16 RangeIndex; UINT16 RegionIndex; UINT64 RegionSize; UINT64 RegionOffset; UINT64 Address; UINT16 InterleaveIndex; UINT16 InterleaveWays; UINT16 Flags; UINT16 Reserved; /* Reserved, must be zero */ } ACPI_NFIT_MEMORY_MAP; /* Flags */ #define ACPI_NFIT_MEM_SAVE_FAILED (1) /* 00: Last SAVE to Memory Device failed */ #define ACPI_NFIT_MEM_RESTORE_FAILED (1<<1) /* 01: Last RESTORE from Memory Device failed */ #define ACPI_NFIT_MEM_FLUSH_FAILED (1<<2) /* 02: Platform flush failed */ #define ACPI_NFIT_MEM_NOT_ARMED (1<<3) /* 03: Memory Device is not armed */ #define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */ #define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */ #define ACPI_NFIT_MEM_MAP_FAILED (1<<6) /* 06: Mapping to SPA failed */ /* 2: Interleave Structure */ typedef struct acpi_nfit_interleave { ACPI_NFIT_HEADER Header; UINT16 InterleaveIndex; UINT16 Reserved; /* Reserved, must be zero */ UINT32 LineCount; UINT32 LineSize; UINT32 LineOffset[1]; /* Variable length */ } ACPI_NFIT_INTERLEAVE; /* 3: SMBIOS Management Information Structure */ typedef struct acpi_nfit_smbios { ACPI_NFIT_HEADER Header; UINT32 Reserved; /* Reserved, must be zero */ UINT8 Data[1]; /* Variable length */ } ACPI_NFIT_SMBIOS; /* 4: NVDIMM Control Region Structure */ typedef struct acpi_nfit_control_region { ACPI_NFIT_HEADER Header; UINT16 RegionIndex; UINT16 VendorId; UINT16 DeviceId; UINT16 RevisionId; UINT16 SubsystemVendorId; UINT16 SubsystemDeviceId; UINT16 SubsystemRevisionId; UINT8 ValidFields; UINT8 ManufacturingLocation; 
UINT16 ManufacturingDate; UINT8 Reserved[2]; /* Reserved, must be zero */ UINT32 SerialNumber; UINT16 Code; UINT16 Windows; UINT64 WindowSize; UINT64 CommandOffset; UINT64 CommandSize; UINT64 StatusOffset; UINT64 StatusSize; UINT16 Flags; UINT8 Reserved1[6]; /* Reserved, must be zero */ } ACPI_NFIT_CONTROL_REGION; /* Flags */ #define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */ /* ValidFields bits */ #define ACPI_NFIT_CONTROL_MFG_INFO_VALID (1) /* Manufacturing fields are valid */ /* 5: NVDIMM Block Data Window Region Structure */ typedef struct acpi_nfit_data_region { ACPI_NFIT_HEADER Header; UINT16 RegionIndex; UINT16 Windows; UINT64 Offset; UINT64 Size; UINT64 Capacity; UINT64 StartAddress; } ACPI_NFIT_DATA_REGION; /* 6: Flush Hint Address Structure */ typedef struct acpi_nfit_flush_address { ACPI_NFIT_HEADER Header; UINT32 DeviceHandle; UINT16 HintCount; UINT8 Reserved[6]; /* Reserved, must be zero */ UINT64 HintAddress[1]; /* Variable length */ } ACPI_NFIT_FLUSH_ADDRESS; /* 7: Platform Capabilities Structure */ typedef struct acpi_nfit_capabilities { ACPI_NFIT_HEADER Header; UINT8 HighestCapability; UINT8 Reserved[3]; /* Reserved, must be zero */ UINT32 Capabilities; UINT32 Reserved2; } ACPI_NFIT_CAPABILITIES; /* Capabilities Flags */ #define ACPI_NFIT_CAPABILITY_CACHE_FLUSH (1) /* 00: Cache Flush to NVDIMM capable */ #define ACPI_NFIT_CAPABILITY_MEM_FLUSH (1<<1) /* 01: Memory Flush to NVDIMM capable */ #define ACPI_NFIT_CAPABILITY_MEM_MIRRORING (1<<2) /* 02: Memory Mirroring capable */ /* * NFIT/DVDIMM device handle support - used as the _ADR for each NVDIMM */ typedef struct nfit_device_handle { UINT32 Handle; } NFIT_DEVICE_HANDLE; /* Device handle construction and extraction macros */ #define ACPI_NFIT_DIMM_NUMBER_MASK 0x0000000F #define ACPI_NFIT_CHANNEL_NUMBER_MASK 0x000000F0 #define ACPI_NFIT_MEMORY_ID_MASK 0x00000F00 #define ACPI_NFIT_SOCKET_ID_MASK 0x0000F000 #define ACPI_NFIT_NODE_ID_MASK 0x0FFF0000 #define ACPI_NFIT_DIMM_NUMBER_OFFSET 0 #define ACPI_NFIT_CHANNEL_NUMBER_OFFSET 4 #define ACPI_NFIT_MEMORY_ID_OFFSET 8 #define ACPI_NFIT_SOCKET_ID_OFFSET 12 #define ACPI_NFIT_NODE_ID_OFFSET 16 /* Macro to construct a NFIT/NVDIMM device handle */ #define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \ ((dimm) | \ ((channel) << ACPI_NFIT_CHANNEL_NUMBER_OFFSET) | \ ((memory) << ACPI_NFIT_MEMORY_ID_OFFSET) | \ ((socket) << ACPI_NFIT_SOCKET_ID_OFFSET) | \ ((node) << ACPI_NFIT_NODE_ID_OFFSET)) /* Macros to extract individual fields from a NFIT/NVDIMM device handle */ #define ACPI_NFIT_GET_DIMM_NUMBER(handle) \ ((handle) & ACPI_NFIT_DIMM_NUMBER_MASK) #define ACPI_NFIT_GET_CHANNEL_NUMBER(handle) \ (((handle) & ACPI_NFIT_CHANNEL_NUMBER_MASK) >> ACPI_NFIT_CHANNEL_NUMBER_OFFSET) #define ACPI_NFIT_GET_MEMORY_ID(handle) \ (((handle) & ACPI_NFIT_MEMORY_ID_MASK) >> ACPI_NFIT_MEMORY_ID_OFFSET) #define ACPI_NFIT_GET_SOCKET_ID(handle) \ (((handle) & ACPI_NFIT_SOCKET_ID_MASK) >> ACPI_NFIT_SOCKET_ID_OFFSET) #define ACPI_NFIT_GET_NODE_ID(handle) \ (((handle) & ACPI_NFIT_NODE_ID_MASK) >> ACPI_NFIT_NODE_ID_OFFSET) /******************************************************************************* * * PCCT - Platform Communications Channel Table (ACPI 5.0) * Version 2 (ACPI 6.2) * ******************************************************************************/ typedef struct acpi_table_pcct { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ UINT32 Flags; UINT64 Reserved; } ACPI_TABLE_PCCT; /* Values for Flags field above */ #define 
ACPI_PCCT_DOORBELL 1 /* Values for subtable type in ACPI_SUBTABLE_HEADER */ enum AcpiPcctType { ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0, ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1, ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, /* ACPI 6.1 */ ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, /* ACPI 6.2 */ ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, /* ACPI 6.2 */ ACPI_PCCT_TYPE_RESERVED = 5 /* 5 and greater are reserved */ }; /* * PCCT Subtables, correspond to Type in ACPI_SUBTABLE_HEADER */ /* 0: Generic Communications Subspace */ typedef struct acpi_pcct_subspace { ACPI_SUBTABLE_HEADER Header; UINT8 Reserved[6]; UINT64 BaseAddress; UINT64 Length; ACPI_GENERIC_ADDRESS DoorbellRegister; UINT64 PreserveMask; UINT64 WriteMask; UINT32 Latency; UINT32 MaxAccessRate; UINT16 MinTurnaroundTime; } ACPI_PCCT_SUBSPACE; /* 1: HW-reduced Communications Subspace (ACPI 5.1) */ typedef struct acpi_pcct_hw_reduced { ACPI_SUBTABLE_HEADER Header; UINT32 PlatformInterrupt; UINT8 Flags; UINT8 Reserved; UINT64 BaseAddress; UINT64 Length; ACPI_GENERIC_ADDRESS DoorbellRegister; UINT64 PreserveMask; UINT64 WriteMask; UINT32 Latency; UINT32 MaxAccessRate; UINT16 MinTurnaroundTime; } ACPI_PCCT_HW_REDUCED; /* 2: HW-reduced Communications Subspace Type 2 (ACPI 6.1) */ typedef struct acpi_pcct_hw_reduced_type2 { ACPI_SUBTABLE_HEADER Header; UINT32 PlatformInterrupt; UINT8 Flags; UINT8 Reserved; UINT64 BaseAddress; UINT64 Length; ACPI_GENERIC_ADDRESS DoorbellRegister; UINT64 PreserveMask; UINT64 WriteMask; UINT32 Latency; UINT32 MaxAccessRate; UINT16 MinTurnaroundTime; ACPI_GENERIC_ADDRESS PlatformAckRegister; UINT64 AckPreserveMask; UINT64 AckWriteMask; } ACPI_PCCT_HW_REDUCED_TYPE2; /* 3: Extended PCC Master Subspace Type 3 (ACPI 6.2) */ typedef struct acpi_pcct_ext_pcc_master { ACPI_SUBTABLE_HEADER Header; UINT32 PlatformInterrupt; UINT8 Flags; UINT8 Reserved1; UINT64 BaseAddress; UINT32 Length; ACPI_GENERIC_ADDRESS DoorbellRegister; UINT64 PreserveMask; UINT64 WriteMask; UINT32 Latency; UINT32 MaxAccessRate; UINT32 MinTurnaroundTime; ACPI_GENERIC_ADDRESS PlatformAckRegister; UINT64 AckPreserveMask; UINT64 AckSetMask; UINT64 Reserved2; ACPI_GENERIC_ADDRESS CmdCompleteRegister; UINT64 CmdCompleteMask; ACPI_GENERIC_ADDRESS CmdUpdateRegister; UINT64 CmdUpdatePreserveMask; UINT64 CmdUpdateSetMask; ACPI_GENERIC_ADDRESS ErrorStatusRegister; UINT64 ErrorStatusMask; } ACPI_PCCT_EXT_PCC_MASTER; /* 4: Extended PCC Slave Subspace Type 4 (ACPI 6.2) */ typedef struct acpi_pcct_ext_pcc_slave { ACPI_SUBTABLE_HEADER Header; UINT32 PlatformInterrupt; UINT8 Flags; UINT8 Reserved1; UINT64 BaseAddress; UINT32 Length; ACPI_GENERIC_ADDRESS DoorbellRegister; UINT64 PreserveMask; UINT64 WriteMask; UINT32 Latency; UINT32 MaxAccessRate; UINT32 MinTurnaroundTime; ACPI_GENERIC_ADDRESS PlatformAckRegister; UINT64 AckPreserveMask; UINT64 AckSetMask; UINT64 Reserved2; ACPI_GENERIC_ADDRESS CmdCompleteRegister; UINT64 CmdCompleteMask; ACPI_GENERIC_ADDRESS CmdUpdateRegister; UINT64 CmdUpdatePreserveMask; UINT64 CmdUpdateSetMask; ACPI_GENERIC_ADDRESS ErrorStatusRegister; UINT64 ErrorStatusMask; } ACPI_PCCT_EXT_PCC_SLAVE; /* Values for doorbell flags above */ #define ACPI_PCCT_INTERRUPT_POLARITY (1) #define ACPI_PCCT_INTERRUPT_MODE (1<<1) /* * PCC memory structures (not part of the ACPI table) */ /* Shared Memory Region */ typedef struct acpi_pcct_shared_memory { UINT32 Signature; UINT16 Command; UINT16 Status; } ACPI_PCCT_SHARED_MEMORY; /* Extended PCC Subspace Shared Memory Region (ACPI 6.2) */ typedef struct acpi_pcct_ext_pcc_shared_memory { UINT32 Signature; 
UINT32 Flags; UINT32 Length; UINT32 Command; } ACPI_PCCT_EXT_PCC_SHARED_MEMORY; /******************************************************************************* * * PDTT - Platform Debug Trigger Table (ACPI 6.2) * Version 0 * ******************************************************************************/ typedef struct acpi_table_pdtt { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ UINT8 TriggerCount; UINT8 Reserved[3]; UINT32 ArrayOffset; } ACPI_TABLE_PDTT; /* * PDTT Communication Channel Identifier Structure. * The number of these structures is defined by TriggerCount above, * starting at ArrayOffset. */ typedef struct acpi_pdtt_channel { UINT8 SubchannelId; UINT8 Flags; } ACPI_PDTT_CHANNEL; /* Flags for above */ #define ACPI_PDTT_RUNTIME_TRIGGER (1) #define ACPI_PDTT_WAIT_COMPLETION (1<<1) /******************************************************************************* * * PMTT - Platform Memory Topology Table (ACPI 5.0) * Version 1 * ******************************************************************************/ typedef struct acpi_table_pmtt { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ UINT32 Reserved; } ACPI_TABLE_PMTT; /* Common header for PMTT subtables that follow main table */ typedef struct acpi_pmtt_header { UINT8 Type; UINT8 Reserved1; UINT16 Length; UINT16 Flags; UINT16 Reserved2; } ACPI_PMTT_HEADER; /* Values for Type field above */ #define ACPI_PMTT_TYPE_SOCKET 0 #define ACPI_PMTT_TYPE_CONTROLLER 1 #define ACPI_PMTT_TYPE_DIMM 2 #define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFF are reserved */ /* Values for Flags field above */ #define ACPI_PMTT_TOP_LEVEL 0x0001 #define ACPI_PMTT_PHYSICAL 0x0002 #define ACPI_PMTT_MEMORY_TYPE 0x000C /* * PMTT subtables, correspond to Type in acpi_pmtt_header */ /* 0: Socket Structure */ typedef struct acpi_pmtt_socket { ACPI_PMTT_HEADER Header; UINT16 SocketId; UINT16 Reserved; } ACPI_PMTT_SOCKET; /* 1: Memory Controller subtable */ typedef struct acpi_pmtt_controller { ACPI_PMTT_HEADER Header; UINT32 ReadLatency; UINT32 WriteLatency; UINT32 ReadBandwidth; UINT32 WriteBandwidth; UINT16 AccessWidth; UINT16 Alignment; UINT16 Reserved; UINT16 DomainCount; } ACPI_PMTT_CONTROLLER; /* 1a: Proximity Domain substructure */ typedef struct acpi_pmtt_domain { UINT32 ProximityDomain; } ACPI_PMTT_DOMAIN; /* 2: Physical Component Identifier (DIMM) */ typedef struct acpi_pmtt_physical_component { ACPI_PMTT_HEADER Header; UINT16 ComponentId; UINT16 Reserved; UINT32 MemorySize; UINT32 BiosHandle; } ACPI_PMTT_PHYSICAL_COMPONENT; /******************************************************************************* * * PPTT - Processor Properties Topology Table (ACPI 6.2) * Version 1 * ******************************************************************************/ typedef struct acpi_table_pptt { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ } ACPI_TABLE_PPTT; /* Values for Type field above */ enum AcpiPpttType { ACPI_PPTT_TYPE_PROCESSOR = 0, ACPI_PPTT_TYPE_CACHE = 1, ACPI_PPTT_TYPE_ID = 2, ACPI_PPTT_TYPE_RESERVED = 3 }; /* 0: Processor Hierarchy Node Structure */ typedef struct acpi_pptt_processor { ACPI_SUBTABLE_HEADER Header; UINT16 Reserved; UINT32 Flags; UINT32 Parent; UINT32 AcpiProcessorId; UINT32 NumberOfPrivResources; } ACPI_PPTT_PROCESSOR; /* Flags */ #define ACPI_PPTT_PHYSICAL_PACKAGE (1) /* Physical package */ #define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID (2) /* ACPI Processor ID valid */ /* 1: Cache Type Structure */ typedef struct acpi_pptt_cache { ACPI_SUBTABLE_HEADER Header; UINT16 Reserved; UINT32 Flags; UINT32 
NextLevelOfCache; UINT32 Size; UINT32 NumberOfSets; UINT8 Associativity; UINT8 Attributes; UINT16 LineSize; } ACPI_PPTT_CACHE; /* Flags */ #define ACPI_PPTT_SIZE_PROPERTY_VALID (1) /* Physical property valid */ #define ACPI_PPTT_NUMBER_OF_SETS_VALID (1<<1) /* Number of sets valid */ #define ACPI_PPTT_ASSOCIATIVITY_VALID (1<<2) /* Associativity valid */ #define ACPI_PPTT_ALLOCATION_TYPE_VALID (1<<3) /* Allocation type valid */ #define ACPI_PPTT_CACHE_TYPE_VALID (1<<4) /* Cache type valid */ #define ACPI_PPTT_WRITE_POLICY_VALID (1<<5) /* Write policy valid */ #define ACPI_PPTT_LINE_SIZE_VALID (1<<6) /* Line size valid */ /* Masks for Attributes */ #define ACPI_PPTT_MASK_ALLOCATION_TYPE (0x03) /* Allocation type */ #define ACPI_PPTT_MASK_CACHE_TYPE (0x0C) /* Cache type */ #define ACPI_PPTT_MASK_WRITE_POLICY (0x10) /* Write policy */ /* Attributes describing cache */ #define ACPI_PPTT_CACHE_READ_ALLOCATE (0x0) /* Cache line is allocated on read */ #define ACPI_PPTT_CACHE_WRITE_ALLOCATE (0x01) /* Cache line is allocated on write */ #define ACPI_PPTT_CACHE_RW_ALLOCATE (0x02) /* Cache line is allocated on read and write */ #define ACPI_PPTT_CACHE_RW_ALLOCATE_ALT (0x03) /* Alternate representation of above */ #define ACPI_PPTT_CACHE_TYPE_DATA (0x0) /* Data cache */ #define ACPI_PPTT_CACHE_TYPE_INSTR (1<<2) /* Instruction cache */ #define ACPI_PPTT_CACHE_TYPE_UNIFIED (2<<2) /* Unified I & D cache */ #define ACPI_PPTT_CACHE_TYPE_UNIFIED_ALT (3<<2) /* Alternate representation of above */ #define ACPI_PPTT_CACHE_POLICY_WB (0x0) /* Cache is write back */ #define ACPI_PPTT_CACHE_POLICY_WT (1<<4) /* Cache is write through */ /* 2: ID Structure */ typedef struct acpi_pptt_id { ACPI_SUBTABLE_HEADER Header; UINT16 Reserved; UINT32 VendorId; UINT64 Level1Id; UINT64 Level2Id; UINT16 MajorRev; UINT16 MinorRev; UINT16 SpinRev; } ACPI_PPTT_ID; /******************************************************************************* * * RASF - RAS Feature Table (ACPI 5.0) * Version 1 * ******************************************************************************/ typedef struct acpi_table_rasf { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ UINT8 ChannelId[12]; } ACPI_TABLE_RASF; /* RASF Platform Communication Channel Shared Memory Region */ typedef struct acpi_rasf_shared_memory { UINT32 Signature; UINT16 Command; UINT16 Status; UINT16 Version; UINT8 Capabilities[16]; UINT8 SetCapabilities[16]; UINT16 NumParameterBlocks; UINT32 SetCapabilitiesStatus; } ACPI_RASF_SHARED_MEMORY; /* RASF Parameter Block Structure Header */ typedef struct acpi_rasf_parameter_block { UINT16 Type; UINT16 Version; UINT16 Length; } ACPI_RASF_PARAMETER_BLOCK; /* RASF Parameter Block Structure for PATROL_SCRUB */ typedef struct acpi_rasf_patrol_scrub_parameter { ACPI_RASF_PARAMETER_BLOCK Header; UINT16 PatrolScrubCommand; UINT64 RequestedAddressRange[2]; UINT64 ActualAddressRange[2]; UINT16 Flags; UINT8 RequestedSpeed; } ACPI_RASF_PATROL_SCRUB_PARAMETER; /* Masks for Flags and Speed fields above */ #define ACPI_RASF_SCRUBBER_RUNNING 1 #define ACPI_RASF_SPEED (7<<1) #define ACPI_RASF_SPEED_SLOW (0<<1) #define ACPI_RASF_SPEED_MEDIUM (4<<1) #define ACPI_RASF_SPEED_FAST (7<<1) /* Channel Commands */ enum AcpiRasfCommands { ACPI_RASF_EXECUTE_RASF_COMMAND = 1 }; /* Platform RAS Capabilities */ enum AcpiRasfCapabiliities { ACPI_HW_PATROL_SCRUB_SUPPORTED = 0, ACPI_SW_PATROL_SCRUB_EXPOSED = 1 }; /* Patrol Scrub Commands */ enum AcpiRasfPatrolScrubCommands { ACPI_RASF_GET_PATROL_PARAMETERS = 1, ACPI_RASF_START_PATROL_SCRUBBER = 2, 
ACPI_RASF_STOP_PATROL_SCRUBBER = 3 }; /* Channel Command flags */ #define ACPI_RASF_GENERATE_SCI (1<<15) /* Status values */ enum AcpiRasfStatus { ACPI_RASF_SUCCESS = 0, ACPI_RASF_NOT_VALID = 1, ACPI_RASF_NOT_SUPPORTED = 2, ACPI_RASF_BUSY = 3, ACPI_RASF_FAILED = 4, ACPI_RASF_ABORTED = 5, ACPI_RASF_INVALID_DATA = 6 }; /* Status flags */ #define ACPI_RASF_COMMAND_COMPLETE (1) #define ACPI_RASF_SCI_DOORBELL (1<<1) #define ACPI_RASF_ERROR (1<<2) #define ACPI_RASF_STATUS (0x1F<<3) /******************************************************************************* * * SBST - Smart Battery Specification Table * Version 1 * ******************************************************************************/ typedef struct acpi_table_sbst { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ UINT32 WarningLevel; UINT32 LowLevel; UINT32 CriticalLevel; } ACPI_TABLE_SBST; /******************************************************************************* * * SDEI - Software Delegated Exception Interface Descriptor Table * * Conforms to "Software Delegated Exception Interface (SDEI)" ARM DEN0054A, * May 8th, 2017. Copyright 2017 ARM Ltd. * ******************************************************************************/ typedef struct acpi_table_sdei { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ } ACPI_TABLE_SDEI; /******************************************************************************* * * SDEV - Secure Devices Table (ACPI 6.2) * Version 1 * ******************************************************************************/ typedef struct acpi_table_sdev { ACPI_TABLE_HEADER Header; /* Common ACPI table header */ } ACPI_TABLE_SDEV; typedef struct acpi_sdev_header { UINT8 Type; UINT8 Flags; UINT16 Length; } ACPI_SDEV_HEADER; /* Values for subtable type above */ enum AcpiSdevType { ACPI_SDEV_TYPE_NAMESPACE_DEVICE = 0, ACPI_SDEV_TYPE_PCIE_ENDPOINT_DEVICE = 1, ACPI_SDEV_TYPE_RESERVED = 2 /* 2 and greater are reserved */ }; /* Values for flags above */ #define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS (1) /* * SDEV subtables */ /* 0: Namespace Device Based Secure Device Structure */ typedef struct acpi_sdev_namespace { ACPI_SDEV_HEADER Header; UINT16 DeviceIdOffset; UINT16 DeviceIdLength; UINT16 VendorDataOffset; UINT16 VendorDataLength; } ACPI_SDEV_NAMESPACE; /* 1: PCIe Endpoint Device Based Device Structure */ typedef struct acpi_sdev_pcie { ACPI_SDEV_HEADER Header; UINT16 Segment; UINT16 StartBus; UINT16 PathOffset; UINT16 PathLength; UINT16 VendorDataOffset; UINT16 VendorDataLength; } ACPI_SDEV_PCIE; /* 1a: PCIe Endpoint path entry */ typedef struct acpi_sdev_pcie_path { UINT8 Device; UINT8 Function; } ACPI_SDEV_PCIE_PATH; /* Reset to default packing */ #pragma pack() #endif /* __ACTBL2_H__ */
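As a closing illustration of how the IVRS definitions above are consumed, the driver's ivrs_hdr_iterate_tbl() (referenced from ivhd_identify() but not shown in this diff) has to step through the subtables that follow ACPI_TABLE_IVRS, using each ACPI_IVRS_HEADER's Length to reach the next one. A sketch of such a walk, assuming the types and enum values defined above are in scope; ivrs_walk_subtables() and the ivrs_visit_t callback are illustrative names, not the driver's API:

/*
 * Illustrative only: visit every IVHD subtable (types 0x10, 0x11, 0x40)
 * of an IVRS table.  Not the driver's ivrs_hdr_iterate_tbl().
 */
typedef void (*ivrs_visit_t)(ACPI_IVRS_HEADER *hdr, void *arg);

static void
ivrs_walk_subtables(ACPI_TABLE_IVRS *ivrs, ivrs_visit_t visit, void *arg)
{
    char *p = (char *)ivrs + sizeof(ACPI_TABLE_IVRS);
    char *end = (char *)ivrs + ivrs->Header.Length;

    while (p + sizeof(ACPI_IVRS_HEADER) <= end) {
        ACPI_IVRS_HEADER *hdr = (ACPI_IVRS_HEADER *)p;

        if (hdr->Length < sizeof(ACPI_IVRS_HEADER))
            break;    /* malformed subtable; stop walking */
        if (hdr->Type == ACPI_IVRS_TYPE_HARDWARE ||
            hdr->Type == ACPI_IVRS_TYPE_HARDWARE_EXT1 ||
            hdr->Type == ACPI_IVRS_TYPE_HARDWARE_EXT2)
            visit(hdr, arg);
        p += hdr->Length;
    }
}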