Index: head/sys/sys/iommu.h =================================================================== --- head/sys/sys/iommu.h (revision 363309) +++ head/sys/sys/iommu.h (revision 363310) @@ -1,155 +1,168 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_IOMMU_H_ #define _SYS_IOMMU_H_ #include +#include #include #include /* Host or physical memory address, after translation. */ typedef uint64_t iommu_haddr_t; /* Guest or bus address, before translation. 
*/ typedef uint64_t iommu_gaddr_t; +struct bus_dma_tag_common; struct iommu_map_entry; TAILQ_HEAD(iommu_map_entries_tailq, iommu_map_entry); struct iommu_qi_genseq { u_int gen; uint32_t seq; }; struct iommu_map_entry { iommu_gaddr_t start; iommu_gaddr_t end; iommu_gaddr_t first; /* Least start in subtree */ iommu_gaddr_t last; /* Greatest end in subtree */ iommu_gaddr_t free_down; /* Max free space below the current R/B tree node */ u_int flags; TAILQ_ENTRY(iommu_map_entry) dmamap_link; /* Link for dmamap entries */ RB_ENTRY(iommu_map_entry) rb_entry; /* Links for domain entries */ TAILQ_ENTRY(iommu_map_entry) unroll_link; /* Link for unroll after dmamap_load failure */ struct iommu_domain *domain; struct iommu_qi_genseq gseq; }; #define IOMMU_MAP_ENTRY_PLACE 0x0001 /* Fake entry */ #define IOMMU_MAP_ENTRY_RMRR 0x0002 /* Permanent, not linked by dmamap_link */ #define IOMMU_MAP_ENTRY_MAP 0x0004 /* Busdma created, linked by dmamap_link */ #define IOMMU_MAP_ENTRY_UNMAPPED 0x0010 /* No backing pages */ #define IOMMU_MAP_ENTRY_QI_NF 0x0020 /* qi task, do not free entry */ #define IOMMU_MAP_ENTRY_READ 0x1000 /* Read permitted */ #define IOMMU_MAP_ENTRY_WRITE 0x2000 /* Write permitted */ #define IOMMU_MAP_ENTRY_SNOOP 0x4000 /* Snoop */ #define IOMMU_MAP_ENTRY_TM 0x8000 /* Transient */ struct iommu_unit { struct mtx lock; int unit; int dma_enabled; /* Busdma delayed map load */ struct task dmamap_load_task; TAILQ_HEAD(, bus_dmamap_iommu) delayed_maps; struct taskqueue *delayed_taskqueue; }; /* * Locking annotations: * (u) - Protected by iommu unit lock * (d) - Protected by domain lock * (c) - Immutable after initialization */ struct iommu_domain { struct iommu_unit *iommu; /* (c) */ struct mtx lock; /* (c) */ struct task unload_task; /* (c) */ + u_int entries_cnt; /* (d) */ struct iommu_map_entries_tailq unload_entries; /* (d) Entries to unload */ }; struct iommu_ctx { struct iommu_domain *domain; /* (c) */ struct bus_dma_tag_iommu *tag; /* (c) Root tag */ u_long loads; /* atomic updates, for stat only */ u_long unloads; /* same */ u_int flags; /* (u) */ }; /* struct iommu_ctx flags */ #define IOMMU_CTX_FAULTED 0x0001 /* Fault was reported, last_fault_rec is valid */ #define IOMMU_CTX_DISABLED 0x0002 /* Device is disabled, the ephemeral reference is kept to prevent context destruction */ #define IOMMU_LOCK(unit) mtx_lock(&(unit)->lock) #define IOMMU_UNLOCK(unit) mtx_unlock(&(unit)->lock) #define IOMMU_ASSERT_LOCKED(unit) mtx_assert(&(unit)->lock, MA_OWNED) #define IOMMU_DOMAIN_LOCK(dom) mtx_lock(&(dom)->lock) #define IOMMU_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock) #define IOMMU_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED) + +static inline bool +iommu_test_boundary(iommu_gaddr_t start, iommu_gaddr_t size, + iommu_gaddr_t boundary) +{ + + if (boundary == 0) + return (true); + return (start + size <= ((start + boundary) & ~(boundary - 1))); +} void iommu_free_ctx(struct iommu_ctx *ctx); void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ctx); struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev, uint16_t rid, bool id_mapped, bool rmrr_init); struct iommu_unit *iommu_find(device_t dev, bool verbose); void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free); void iommu_domain_unload(struct iommu_domain *domain, struct iommu_map_entries_tailq *entries, bool cansleep); struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *iommu, device_t dev, bool rmrr); device_t iommu_get_requester(device_t dev, uint16_t *rid); int 
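/*
 * Worked example of iommu_test_boundary() above (a busdma boundary is
 * zero or a power of two, see bus_dma_tag_create(9)); the check passes
 * iff [start, start + size) does not cross the next boundary multiple:
 *
 *	iommu_test_boundary(0xf00, 0x100, 0x1000) -> true, ends at 0x1000;
 *	iommu_test_boundary(0xf00, 0x200, 0x1000) -> false, crosses 0x1000;
 *	iommu_test_boundary(0xf00, 0x200, 0)      -> true, no restriction.
 */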
iommu_init_busdma(struct iommu_unit *unit); void iommu_fini_busdma(struct iommu_unit *unit); struct iommu_map_entry *iommu_map_alloc_entry(struct iommu_domain *iodom, u_int flags); void iommu_map_free_entry(struct iommu_domain *, struct iommu_map_entry *); int iommu_map(struct iommu_domain *iodom, const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset, u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res); int iommu_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma); #endif /* !_SYS_IOMMU_H_ */ Index: head/sys/x86/iommu/busdma_dmar.c =================================================================== --- head/sys/x86/iommu/busdma_dmar.c (revision 363309) +++ head/sys/x86/iommu/busdma_dmar.c (revision 363310) @@ -1,1044 +1,1050 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#if defined(__amd64__) || defined(__i386__) #include #include #include #include #include +#endif /* * busdma_dmar.c, the implementation of the busdma(9) interface using * DMAR units from Intel VT-d. 
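 *
 * Whether a device gets IOMMU translation or bounce buffering is chosen
 * by the tunables consulted in iommu_bus_dma_is_dev_disabled() below.
 * A loader.conf(5) sketch (pci0.0.3.0 is a made-up selector naming a
 * device by domain.bus.slot.function):
 *
 *	hw.busdma.default="iommu"	# or "bounce"
 *	hw.busdma.pci0.0.3.0="bounce"	# per-device override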
*/ static bool iommu_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func) { char str[128], *env; int default_bounce; bool ret; static const char bounce_str[] = "bounce"; static const char iommu_str[] = "iommu"; static const char dmar_str[] = "dmar"; /* compatibility */ default_bounce = 0; env = kern_getenv("hw.busdma.default"); if (env != NULL) { if (strcmp(env, bounce_str) == 0) default_bounce = 1; else if (strcmp(env, iommu_str) == 0 || strcmp(env, dmar_str) == 0) default_bounce = 0; freeenv(env); } snprintf(str, sizeof(str), "hw.busdma.pci%d.%d.%d.%d", domain, bus, slot, func); env = kern_getenv(str); if (env == NULL) return (default_bounce != 0); if (strcmp(env, bounce_str) == 0) ret = true; else if (strcmp(env, iommu_str) == 0 || strcmp(env, dmar_str) == 0) ret = false; else ret = default_bounce != 0; freeenv(env); return (ret); } /* * Given original device, find the requester ID that will be seen by - * the DMAR unit and used for page table lookup. PCI bridges may take + * the IOMMU unit and used for page table lookup. PCI bridges may take * ownership of transactions from downstream devices, so it may not be * the same as the BSF of the target device. In those cases, all * devices downstream of the bridge must share a single mapping - * domain, and must collectively be assigned to use either DMAR or + * domain, and must collectively be assigned to use either IOMMU or * bounce mapping. */ device_t iommu_get_requester(device_t dev, uint16_t *rid) { devclass_t pci_class; device_t l, pci, pcib, pcip, pcibp, requester; int cap_offset; uint16_t pcie_flags; bool bridge_is_pcie; pci_class = devclass_find("pci"); l = requester = dev; *rid = pci_get_rid(dev); /* * Walk the bridge hierarchy from the target device to the - * host port to find the translating bridge nearest the DMAR + * host port to find the translating bridge nearest the IOMMU * unit. */ for (;;) { pci = device_get_parent(l); KASSERT(pci != NULL, ("iommu_get_requester(%s): NULL parent " "for %s", device_get_name(dev), device_get_name(l))); KASSERT(device_get_devclass(pci) == pci_class, ("iommu_get_requester(%s): non-pci parent %s for %s", device_get_name(dev), device_get_name(pci), device_get_name(l))); pcib = device_get_parent(pci); KASSERT(pcib != NULL, ("iommu_get_requester(%s): NULL bridge " "for %s", device_get_name(dev), device_get_name(pci))); /* * The parent of our "bridge" isn't another PCI bus, * so pcib isn't a PCI->PCI bridge but rather a host * port, and the requester ID won't be translated * further. */ pcip = device_get_parent(pcib); if (device_get_devclass(pcip) != pci_class) break; pcibp = device_get_parent(pcip); if (pci_find_cap(l, PCIY_EXPRESS, &cap_offset) == 0) { /* * Do not stop the loop even if the target * device is PCIe, because it is possible (but * unlikely) to have a PCI->PCIe bridge * somewhere in the hierarchy. */ l = pcib; } else { /* * Device is not PCIe, it cannot be seen as a - * requester by DMAR unit. Check whether the + * requester by IOMMU unit. Check whether the * bridge is PCIe. */ bridge_is_pcie = pci_find_cap(pcib, PCIY_EXPRESS, &cap_offset) == 0; requester = pcib; /* * Check for a buggy PCIe/PCI bridge that * doesn't report the express capability. If * the bridge above it is express but isn't a * PCI bridge, then we know pcib is actually a * PCIe/PCI bridge. 
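 * A requester ID packs the address as (bus << 8) | (slot << 3) | func,
 * so PCI_RID(pci_get_bus(l), 0, 0) below yields the bridge's secondary
 * bus number shifted into the bus field, with slot and function zero.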
*/ if (!bridge_is_pcie && pci_find_cap(pcibp, PCIY_EXPRESS, &cap_offset) == 0) { pcie_flags = pci_read_config(pcibp, cap_offset + PCIER_FLAGS, 2); if ((pcie_flags & PCIEM_FLAGS_TYPE) != PCIEM_TYPE_PCI_BRIDGE) bridge_is_pcie = true; } if (bridge_is_pcie) { /* * The current device is not PCIe, but * the bridge above it is. This is a * PCIe->PCI bridge. Assume that the * requester ID will be the secondary * bus number with slot and function * set to zero. * * XXX: Doesn't handle the case where * the bridge is PCIe->PCI-X, and the * bridge will only take ownership of * requests in some cases. We should * provide context entries with the * same page tables for taken and * non-taken transactions. */ *rid = PCI_RID(pci_get_bus(l), 0, 0); l = pcibp; } else { /* * Neither the device nor the bridge * above it is PCIe. This is a * conventional PCI->PCI bridge, which * will use the bridge's BSF as the * requester ID. */ *rid = pci_get_rid(pcib); l = pcib; } } } return (requester); } struct iommu_ctx * iommu_instantiate_ctx(struct iommu_unit *unit, device_t dev, bool rmrr) { device_t requester; struct iommu_ctx *ctx; bool disabled; uint16_t rid; requester = iommu_get_requester(dev, &rid); /* * If the user requested the IOMMU disabled for the device, we - * cannot disable the DMAR, due to possibility of other - * devices on the same DMAR still requiring translation. + * cannot disable the IOMMU unit, due to the possibility of other + * devices on the same IOMMU unit still requiring translation. * Instead provide the identity mapping for the device * context. */ disabled = iommu_bus_dma_is_dev_disabled(pci_get_domain(requester), pci_get_bus(requester), pci_get_slot(requester), pci_get_function(requester)); ctx = iommu_get_ctx(unit, requester, rid, disabled, rmrr); if (ctx == NULL) return (NULL); if (disabled) { /* * Keep the first reference on context, release the * later refs. */ IOMMU_LOCK(unit); if ((ctx->flags & IOMMU_CTX_DISABLED) == 0) { ctx->flags |= IOMMU_CTX_DISABLED; IOMMU_UNLOCK(unit); } else { iommu_free_ctx_locked(unit, ctx); } ctx = NULL; } return (ctx); } bus_dma_tag_t acpi_iommu_get_dma_tag(device_t dev, device_t child) { struct iommu_unit *unit; struct iommu_ctx *ctx; bus_dma_tag_t res; unit = iommu_find(child, bootverbose); - /* Not in scope of any DMAR ? */ + /* Not in scope of any IOMMU? */ if (unit == NULL) return (NULL); if (!unit->dma_enabled) return (NULL); + +#if defined(__amd64__) || defined(__i386__) dmar_quirks_pre_use(unit); dmar_instantiate_rmrr_ctxs(unit); +#endif ctx = iommu_instantiate_ctx(unit, child, false); res = ctx == NULL ? 
NULL : (bus_dma_tag_t)ctx->tag; return (res); } bool bus_dma_dmar_set_buswide(device_t dev) { struct iommu_unit *unit; device_t parent; u_int busno, slot, func; parent = device_get_parent(dev); if (device_get_devclass(parent) != devclass_find("pci")) return (false); unit = iommu_find(dev, bootverbose); if (unit == NULL) return (false); busno = pci_get_bus(dev); slot = pci_get_slot(dev); func = pci_get_function(dev); if (slot != 0 || func != 0) { if (bootverbose) { device_printf(dev, "dmar%d pci%d:%d:%d requested buswide busdma\n", unit->unit, busno, slot, func); } return (false); } dmar_set_buswide_ctx(unit, busno); return (true); } static MALLOC_DEFINE(M_IOMMU_DMAMAP, "iommu_dmamap", "IOMMU DMA Map"); static void iommu_bus_schedule_dmamap(struct iommu_unit *unit, struct bus_dmamap_iommu *map); static int iommu_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat) { struct bus_dma_tag_iommu *newtag, *oldtag; int error; *dmat = NULL; error = common_bus_dma_tag_create(parent != NULL ? &((struct bus_dma_tag_iommu *)parent)->common : NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg, maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, sizeof(struct bus_dma_tag_iommu), (void **)&newtag); if (error != 0) goto out; oldtag = (struct bus_dma_tag_iommu *)parent; newtag->common.impl = &bus_dma_iommu_impl; newtag->ctx = oldtag->ctx; newtag->owner = oldtag->owner; *dmat = (bus_dma_tag_t)newtag; out: CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, (newtag != NULL ? newtag->common.flags : 0), error); return (error); } static int iommu_bus_dma_tag_set_domain(bus_dma_tag_t dmat) { return (0); } static int iommu_bus_dma_tag_destroy(bus_dma_tag_t dmat1) { struct bus_dma_tag_iommu *dmat, *dmat_copy, *parent; int error; error = 0; dmat_copy = dmat = (struct bus_dma_tag_iommu *)dmat1; if (dmat != NULL) { if (dmat->map_count != 0) { error = EBUSY; goto out; } while (dmat != NULL) { parent = (struct bus_dma_tag_iommu *)dmat->common.parent; if (atomic_fetchadd_int(&dmat->common.ref_count, -1) == 1) { if (dmat == dmat->ctx->tag) iommu_free_ctx(dmat->ctx); free_domain(dmat->segments, M_IOMMU_DMAMAP); free(dmat, M_DEVBUF); dmat = parent; } else dmat = NULL; } } out: CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); return (error); } static bool iommu_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen) { return (false); } static int iommu_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { struct bus_dma_tag_iommu *tag; struct bus_dmamap_iommu *map; tag = (struct bus_dma_tag_iommu *)dmat; map = malloc_domainset(sizeof(*map), M_IOMMU_DMAMAP, DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO); if (map == NULL) { *mapp = NULL; return (ENOMEM); } if (tag->segments == NULL) { tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) * tag->common.nsegments, M_IOMMU_DMAMAP, DOMAINSET_PREF(tag->common.domain), M_NOWAIT); if (tag->segments == NULL) { free_domain(map, M_IOMMU_DMAMAP); *mapp = NULL; return (ENOMEM); } } TAILQ_INIT(&map->map_entries); map->tag = tag; map->locked = true; map->cansleep = false; tag->map_count++; *mapp = (bus_dmamap_t)map; return (0); } static int iommu_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1) { struct bus_dma_tag_iommu 
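/*
 * A map may be destroyed only after it has been fully unloaded; an
 * entry still linked on map_entries denotes a loaded buffer, and the
 * destroy below fails with EBUSY.
 */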
*tag; struct bus_dmamap_iommu *map; struct iommu_domain *domain; tag = (struct bus_dma_tag_iommu *)dmat; map = (struct bus_dmamap_iommu *)map1; if (map != NULL) { domain = tag->ctx->domain; IOMMU_DOMAIN_LOCK(domain); if (!TAILQ_EMPTY(&map->map_entries)) { IOMMU_DOMAIN_UNLOCK(domain); return (EBUSY); } IOMMU_DOMAIN_UNLOCK(domain); free_domain(map, M_IOMMU_DMAMAP); } tag->map_count--; return (0); } static int iommu_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { struct bus_dma_tag_iommu *tag; struct bus_dmamap_iommu *map; int error, mflags; vm_memattr_t attr; error = iommu_bus_dmamap_create(dmat, flags, mapp); if (error != 0) return (error); mflags = (flags & BUS_DMA_NOWAIT) != 0 ? M_NOWAIT : M_WAITOK; mflags |= (flags & BUS_DMA_ZERO) != 0 ? M_ZERO : 0; attr = (flags & BUS_DMA_NOCACHE) != 0 ? VM_MEMATTR_UNCACHEABLE : VM_MEMATTR_DEFAULT; tag = (struct bus_dma_tag_iommu *)dmat; map = (struct bus_dmamap_iommu *)*mapp; if (tag->common.maxsize < PAGE_SIZE && tag->common.alignment <= tag->common.maxsize && attr == VM_MEMATTR_DEFAULT) { *vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF, DOMAINSET_PREF(tag->common.domain), mflags); map->flags |= BUS_DMAMAP_IOMMU_MALLOC; } else { *vaddr = (void *)kmem_alloc_attr_domainset( DOMAINSET_PREF(tag->common.domain), tag->common.maxsize, mflags, 0ul, BUS_SPACE_MAXADDR, attr); map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC; } if (*vaddr == NULL) { iommu_bus_dmamap_destroy(dmat, *mapp); *mapp = NULL; return (ENOMEM); } return (0); } static void iommu_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1) { struct bus_dma_tag_iommu *tag; struct bus_dmamap_iommu *map; tag = (struct bus_dma_tag_iommu *)dmat; map = (struct bus_dmamap_iommu *)map1; if ((map->flags & BUS_DMAMAP_IOMMU_MALLOC) != 0) { free_domain(vaddr, M_DEVBUF); map->flags &= ~BUS_DMAMAP_IOMMU_MALLOC; } else { KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0, ("iommu_bus_dmamem_free for non-allocated map %p", map)); kmem_free((vm_offset_t)vaddr, tag->common.maxsize); map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC; } iommu_bus_dmamap_destroy(dmat, map1); } static int iommu_bus_dmamap_load_something1(struct bus_dma_tag_iommu *tag, struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp, struct iommu_map_entries_tailq *unroll_list) { struct iommu_ctx *ctx; struct iommu_domain *domain; struct iommu_map_entry *entry; iommu_gaddr_t size; bus_size_t buflen1; int error, idx, gas_flags, seg; - KASSERT(offset < DMAR_PAGE_SIZE, ("offset %d", offset)); + KASSERT(offset < IOMMU_PAGE_SIZE, ("offset %d", offset)); if (segs == NULL) segs = tag->segments; ctx = tag->ctx; domain = ctx->domain; seg = *segp; error = 0; idx = 0; while (buflen > 0) { seg++; if (seg >= tag->common.nsegments) { error = EFBIG; break; } buflen1 = buflen > tag->common.maxsegsz ? tag->common.maxsegsz : buflen; size = round_page(offset + buflen1); /* * (Too) optimistically allow split if there is more * than one segment left. */ gas_flags = map->cansleep ? IOMMU_MF_CANWAIT : 0; if (seg + 1 < tag->common.nsegments) gas_flags |= IOMMU_MF_CANSPLIT; error = iommu_map(domain, &tag->common, size, offset, IOMMU_MAP_ENTRY_READ | ((flags & BUS_DMA_NOWRITE) == 0 ? 
IOMMU_MAP_ENTRY_WRITE : 0), gas_flags, ma + idx, &entry); if (error != 0) break; if ((gas_flags & IOMMU_MF_CANSPLIT) != 0) { KASSERT(size >= entry->end - entry->start, ("split increased entry size %jx %jx %jx", (uintmax_t)size, (uintmax_t)entry->start, (uintmax_t)entry->end)); size = entry->end - entry->start; if (buflen1 > size) buflen1 = size; } else { KASSERT(entry->end - entry->start == size, ("no split allowed %jx %jx %jx", (uintmax_t)size, (uintmax_t)entry->start, (uintmax_t)entry->end)); } if (offset + buflen1 > size) buflen1 = size - offset; if (buflen1 > tag->common.maxsegsz) buflen1 = tag->common.maxsegsz; KASSERT(((entry->start + offset) & (tag->common.alignment - 1)) == 0, ("alignment failed: ctx %p start 0x%jx offset %x " "align 0x%jx", ctx, (uintmax_t)entry->start, offset, (uintmax_t)tag->common.alignment)); KASSERT(entry->end <= tag->common.lowaddr || entry->start >= tag->common.highaddr, ("entry placement failed: ctx %p start 0x%jx end 0x%jx " "lowaddr 0x%jx highaddr 0x%jx", ctx, (uintmax_t)entry->start, (uintmax_t)entry->end, (uintmax_t)tag->common.lowaddr, (uintmax_t)tag->common.highaddr)); KASSERT(iommu_test_boundary(entry->start + offset, buflen1, tag->common.boundary), ("boundary failed: ctx %p start 0x%jx end 0x%jx " "boundary 0x%jx", ctx, (uintmax_t)entry->start, (uintmax_t)entry->end, (uintmax_t)tag->common.boundary)); KASSERT(buflen1 <= tag->common.maxsegsz, ("segment too large: ctx %p start 0x%jx end 0x%jx " "buflen1 0x%jx maxsegsz 0x%jx", ctx, (uintmax_t)entry->start, (uintmax_t)entry->end, (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz)); IOMMU_DOMAIN_LOCK(domain); TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link); entry->flags |= IOMMU_MAP_ENTRY_MAP; IOMMU_DOMAIN_UNLOCK(domain); TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link); segs[seg].ds_addr = entry->start + offset; segs[seg].ds_len = buflen1; idx += OFF_TO_IDX(trunc_page(offset + buflen1)); offset += buflen1; - offset &= DMAR_PAGE_MASK; + offset &= IOMMU_PAGE_MASK; buflen -= buflen1; } if (error == 0) *segp = seg; return (error); } static int iommu_bus_dmamap_load_something(struct bus_dma_tag_iommu *tag, struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { struct iommu_ctx *ctx; struct iommu_domain *domain; struct iommu_map_entry *entry, *entry1; struct iommu_map_entries_tailq unroll_list; int error; ctx = tag->ctx; domain = ctx->domain; atomic_add_long(&ctx->loads, 1); TAILQ_INIT(&unroll_list); error = iommu_bus_dmamap_load_something1(tag, map, ma, offset, buflen, flags, segs, segp, &unroll_list); if (error != 0) { /* * The busdma interface does not allow us to report * partial buffer load, so unfortunately we have to * revert all work done. */ IOMMU_DOMAIN_LOCK(domain); TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link, entry1) { /* * No entries other than what we have created * during the failed run might have been * inserted there in between, since we own ctx * pglock. 
*/ TAILQ_REMOVE(&map->map_entries, entry, dmamap_link); TAILQ_REMOVE(&unroll_list, entry, unroll_link); TAILQ_INSERT_TAIL(&domain->unload_entries, entry, dmamap_link); } IOMMU_DOMAIN_UNLOCK(domain); taskqueue_enqueue(domain->iommu->delayed_taskqueue, &domain->unload_task); } if (error == ENOMEM && (flags & BUS_DMA_NOWAIT) == 0 && !map->cansleep) error = EINPROGRESS; if (error == EINPROGRESS) iommu_bus_schedule_dmamap(domain->iommu, map); return (error); } static int iommu_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp) { struct bus_dma_tag_iommu *tag; struct bus_dmamap_iommu *map; tag = (struct bus_dma_tag_iommu *)dmat; map = (struct bus_dmamap_iommu *)map1; return (iommu_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen, flags, segs, segp)); } static int iommu_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1, vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { struct bus_dma_tag_iommu *tag; struct bus_dmamap_iommu *map; vm_page_t *ma, fma; vm_paddr_t pstart, pend, paddr; int error, i, ma_cnt, mflags, offset; tag = (struct bus_dma_tag_iommu *)dmat; map = (struct bus_dmamap_iommu *)map1; pstart = trunc_page(buf); pend = round_page(buf + buflen); offset = buf & PAGE_MASK; ma_cnt = OFF_TO_IDX(pend - pstart); mflags = map->cansleep ? M_WAITOK : M_NOWAIT; ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, mflags); if (ma == NULL) return (ENOMEM); fma = NULL; for (i = 0; i < ma_cnt; i++) { paddr = pstart + ptoa(i); ma[i] = PHYS_TO_VM_PAGE(paddr); if (ma[i] == NULL || VM_PAGE_TO_PHYS(ma[i]) != paddr) { /* * If PHYS_TO_VM_PAGE() returned NULL or the * vm_page was not initialized we'll use a * fake page. */ if (fma == NULL) { fma = malloc(sizeof(struct vm_page) * ma_cnt, M_DEVBUF, M_ZERO | mflags); if (fma == NULL) { free(ma, M_DEVBUF); return (ENOMEM); } } vm_page_initfake(&fma[i], pstart + ptoa(i), VM_MEMATTR_DEFAULT); ma[i] = &fma[i]; } } error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen, flags, segs, segp); free(fma, M_DEVBUF); free(ma, M_DEVBUF); return (error); } static int iommu_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf, bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, int *segp) { struct bus_dma_tag_iommu *tag; struct bus_dmamap_iommu *map; vm_page_t *ma, fma; vm_paddr_t pstart, pend, paddr; int error, i, ma_cnt, mflags, offset; tag = (struct bus_dma_tag_iommu *)dmat; map = (struct bus_dmamap_iommu *)map1; pstart = trunc_page((vm_offset_t)buf); pend = round_page((vm_offset_t)buf + buflen); offset = (vm_offset_t)buf & PAGE_MASK; ma_cnt = OFF_TO_IDX(pend - pstart); mflags = map->cansleep ? M_WAITOK : M_NOWAIT; ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, mflags); if (ma == NULL) return (ENOMEM); fma = NULL; for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) { if (pmap == kernel_pmap) paddr = pmap_kextract(pstart); else paddr = pmap_extract(pmap, pstart); ma[i] = PHYS_TO_VM_PAGE(paddr); if (ma[i] == NULL || VM_PAGE_TO_PHYS(ma[i]) != paddr) { /* * If PHYS_TO_VM_PAGE() returned NULL or the * vm_page was not initialized we'll use a * fake page. 
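 * A fake page, set up by vm_page_initfake(), is a caller-supplied
 * struct vm_page that records only the physical address and memory
 * attribute, letting the mapping loop handle addresses without a real
 * vm_page (e.g. device memory) the same way as ordinary RAM pages.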
*/ if (fma == NULL) { fma = malloc(sizeof(struct vm_page) * ma_cnt, M_DEVBUF, M_ZERO | mflags); if (fma == NULL) { free(ma, M_DEVBUF); return (ENOMEM); } } vm_page_initfake(&fma[i], paddr, VM_MEMATTR_DEFAULT); ma[i] = &fma[i]; } } error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen, flags, segs, segp); free(ma, M_DEVBUF); free(fma, M_DEVBUF); return (error); } static void iommu_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) { struct bus_dmamap_iommu *map; if (map1 == NULL) return; map = (struct bus_dmamap_iommu *)map1; map->mem = *mem; map->tag = (struct bus_dma_tag_iommu *)dmat; map->callback = callback; map->callback_arg = callback_arg; } static bus_dma_segment_t * iommu_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1, bus_dma_segment_t *segs, int nsegs, int error) { struct bus_dma_tag_iommu *tag; struct bus_dmamap_iommu *map; tag = (struct bus_dma_tag_iommu *)dmat; map = (struct bus_dmamap_iommu *)map1; if (!map->locked) { KASSERT(map->cansleep, ("map not locked and not sleepable context %p", map)); /* * We are called from the delayed context. Relock the * driver. */ (tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_LOCK); map->locked = true; } if (segs == NULL) segs = tag->segments; return (segs); } /* - * The limitations of busdma KPI forces the dmar to perform the actual + * The limitations of the busdma KPI force the iommu to perform the actual * unload, consisting of the unmapping of the map entries' page tables, * from the delayed context on i386, since page table page mapping * might require a sleep to be successful. The unfortunate * consequence is that the DMA requests can be served some time after * the bus_dmamap_unload() call has returned. * * On amd64, we assume that sf allocation cannot fail. 
*/ static void iommu_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1) { struct bus_dma_tag_iommu *tag; struct bus_dmamap_iommu *map; struct iommu_ctx *ctx; struct iommu_domain *domain; #if defined(__amd64__) struct iommu_map_entries_tailq entries; #endif tag = (struct bus_dma_tag_iommu *)dmat; map = (struct bus_dmamap_iommu *)map1; ctx = tag->ctx; domain = ctx->domain; atomic_add_long(&ctx->unloads, 1); #if defined(__i386__) IOMMU_DOMAIN_LOCK(domain); TAILQ_CONCAT(&domain->unload_entries, &map->map_entries, dmamap_link); IOMMU_DOMAIN_UNLOCK(domain); taskqueue_enqueue(domain->iommu->delayed_taskqueue, &domain->unload_task); #else /* defined(__amd64__) */ TAILQ_INIT(&entries); IOMMU_DOMAIN_LOCK(domain); TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link); IOMMU_DOMAIN_UNLOCK(domain); THREAD_NO_SLEEPING(); iommu_domain_unload(domain, &entries, false); THREAD_SLEEPING_OK(); KASSERT(TAILQ_EMPTY(&entries), ("lazy iommu_ctx_unload %p", ctx)); #endif } static void iommu_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { } struct bus_dma_impl bus_dma_iommu_impl = { .tag_create = iommu_bus_dma_tag_create, .tag_destroy = iommu_bus_dma_tag_destroy, .tag_set_domain = iommu_bus_dma_tag_set_domain, .id_mapped = iommu_bus_dma_id_mapped, .map_create = iommu_bus_dmamap_create, .map_destroy = iommu_bus_dmamap_destroy, .mem_alloc = iommu_bus_dmamem_alloc, .mem_free = iommu_bus_dmamem_free, .load_phys = iommu_bus_dmamap_load_phys, .load_buffer = iommu_bus_dmamap_load_buffer, .load_ma = iommu_bus_dmamap_load_ma, .map_waitok = iommu_bus_dmamap_waitok, .map_complete = iommu_bus_dmamap_complete, .map_unload = iommu_bus_dmamap_unload, .map_sync = iommu_bus_dmamap_sync, }; static void iommu_bus_task_dmamap(void *arg, int pending) { struct bus_dma_tag_iommu *tag; struct bus_dmamap_iommu *map; struct iommu_unit *unit; unit = arg; IOMMU_LOCK(unit); while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) { TAILQ_REMOVE(&unit->delayed_maps, map, delay_link); IOMMU_UNLOCK(unit); tag = map->tag; map->cansleep = true; map->locked = false; bus_dmamap_load_mem((bus_dma_tag_t)tag, (bus_dmamap_t)map, &map->mem, map->callback, map->callback_arg, BUS_DMA_WAITOK); map->cansleep = false; if (map->locked) { (tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_UNLOCK); } else map->locked = true; map->cansleep = false; IOMMU_LOCK(unit); } IOMMU_UNLOCK(unit); } static void iommu_bus_schedule_dmamap(struct iommu_unit *unit, struct bus_dmamap_iommu *map) { map->locked = false; IOMMU_LOCK(unit); TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link); IOMMU_UNLOCK(unit); taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task); } int iommu_init_busdma(struct iommu_unit *unit) { int error; unit->dma_enabled = 1; error = TUNABLE_INT_FETCH("hw.iommu.dma", &unit->dma_enabled); if (error == 0) /* compatibility */ TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled); TAILQ_INIT(&unit->delayed_maps); TASK_INIT(&unit->dmamap_load_task, 0, iommu_bus_task_dmamap, unit); unit->delayed_taskqueue = taskqueue_create("iommu", M_WAITOK, taskqueue_thread_enqueue, &unit->delayed_taskqueue); taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK, "iommu%d busdma taskq", unit->unit); return (0); } void iommu_fini_busdma(struct iommu_unit *unit) { if (unit->delayed_taskqueue == NULL) return; taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task); taskqueue_free(unit->delayed_taskqueue); unit->delayed_taskqueue = NULL; } int bus_dma_dmar_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map1, 
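    /*
     * (start, length describe the physical range to identity-map:
     * bus address == physical address for [start, start + length),
     * both page-aligned; flags may be BUS_DMA_NOWAIT and/or
     * BUS_DMA_NOWRITE.)
     */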
vm_paddr_t start, vm_size_t length, int flags) { struct bus_dma_tag_common *tc; struct bus_dma_tag_iommu *tag; struct bus_dmamap_iommu *map; struct iommu_ctx *ctx; struct iommu_domain *domain; struct iommu_map_entry *entry; vm_page_t *ma; vm_size_t i; int error; bool waitok; MPASS((start & PAGE_MASK) == 0); MPASS((length & PAGE_MASK) == 0); MPASS(length > 0); MPASS(start + length >= start); MPASS((flags & ~(BUS_DMA_NOWAIT | BUS_DMA_NOWRITE)) == 0); tc = (struct bus_dma_tag_common *)dmat; if (tc->impl != &bus_dma_iommu_impl) return (0); tag = (struct bus_dma_tag_iommu *)dmat; ctx = tag->ctx; domain = ctx->domain; map = (struct bus_dmamap_iommu *)map1; waitok = (flags & BUS_DMA_NOWAIT) == 0; entry = iommu_map_alloc_entry(domain, waitok ? DMAR_PGF_WAITOK : 0); if (entry == NULL) return (ENOMEM); entry->start = start; entry->end = start + length; ma = malloc(sizeof(vm_page_t) * atop(length), M_TEMP, waitok ? M_WAITOK : M_NOWAIT); if (ma == NULL) { iommu_map_free_entry(domain, entry); return (ENOMEM); } for (i = 0; i < atop(length); i++) { ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i, VM_MEMATTR_DEFAULT); } error = iommu_map_region(domain, entry, IOMMU_MAP_ENTRY_READ | ((flags & BUS_DMA_NOWRITE) ? 0 : IOMMU_MAP_ENTRY_WRITE), waitok ? IOMMU_MF_CANWAIT : 0, ma); if (error == 0) { IOMMU_DOMAIN_LOCK(domain); TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link); entry->flags |= IOMMU_MAP_ENTRY_MAP; IOMMU_DOMAIN_UNLOCK(domain); } else { iommu_domain_unload_entry(entry, true); } for (i = 0; i < atop(length); i++) vm_page_putfake(ma[i]); free(ma, M_TEMP); return (error); } Index: head/sys/x86/iommu/intel_dmar.h =================================================================== --- head/sys/x86/iommu/intel_dmar.h (revision 363309) +++ head/sys/x86/iommu/intel_dmar.h (revision 363310) @@ -1,515 +1,504 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013-2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef __X86_IOMMU_INTEL_DMAR_H #define __X86_IOMMU_INTEL_DMAR_H #include struct dmar_unit; RB_HEAD(dmar_gas_entries_tree, iommu_map_entry); RB_PROTOTYPE(dmar_gas_entries_tree, iommu_map_entry, rb_entry, dmar_gas_cmp_entries); /* * Locking annotations: * (u) - Protected by iommu unit lock * (d) - Protected by domain lock * (c) - Immutable after initialization */ /* * The domain abstraction. Most non-constant members of the domain * are protected by owning dmar unit lock, not by the domain lock. * Most important, the dmar lock protects the contexts list. * * The domain lock protects the address map for the domain, and list * of unload entries delayed. * * Page tables pages and pages content is protected by the vm object * lock pgtbl_obj, which contains the page tables pages. */ struct dmar_domain { struct iommu_domain iodom; int domain; /* (c) DID, written in context entry */ int mgaw; /* (c) Real max address width */ int agaw; /* (c) Adjusted guest address width */ int pglvl; /* (c) The pagelevel */ int awlvl; /* (c) The pagelevel as the bitmask, to set in context entry */ iommu_gaddr_t end; /* (c) Highest address + 1 in the guest AS */ u_int ctx_cnt; /* (u) Number of contexts owned */ u_int refs; /* (u) Refs, including ctx */ struct dmar_unit *dmar; /* (c) */ LIST_ENTRY(dmar_domain) link; /* (u) Member in the dmar list */ LIST_HEAD(, dmar_ctx) contexts; /* (u) */ vm_object_t pgtbl_obj; /* (c) Page table pages */ u_int flags; /* (u) */ - u_int entries_cnt; /* (d) */ struct dmar_gas_entries_tree rb_root; /* (d) */ struct iommu_map_entry *first_place, *last_place; /* (d) */ u_int batch_no; }; struct dmar_ctx { struct iommu_ctx context; uint16_t rid; /* (c) pci RID */ uint64_t last_fault_rec[2]; /* Last fault reported */ LIST_ENTRY(dmar_ctx) link; /* (u) Member in the domain list */ u_int refs; /* (u) References from tags */ }; #define DMAR_DOMAIN_GAS_INITED 0x0001 #define DMAR_DOMAIN_PGTBL_INITED 0x0002 #define DMAR_DOMAIN_IDMAP 0x0010 /* Domain uses identity page table */ #define DMAR_DOMAIN_RMRR 0x0020 /* Domain contains RMRR entry, cannot be turned off */ #define DMAR_DOMAIN_PGLOCK(dom) VM_OBJECT_WLOCK((dom)->pgtbl_obj) #define DMAR_DOMAIN_PGTRYLOCK(dom) VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj) #define DMAR_DOMAIN_PGUNLOCK(dom) VM_OBJECT_WUNLOCK((dom)->pgtbl_obj) #define DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \ VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj) #define DMAR_DOMAIN_LOCK(dom) mtx_lock(&(dom)->iodom.lock) #define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->iodom.lock) #define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED) struct dmar_msi_data { int irq; int irq_rid; struct resource *irq_res; void *intr_handle; int (*handler)(void *); int msi_data_reg; int msi_addr_reg; int msi_uaddr_reg; void (*enable_intr)(struct dmar_unit *); void (*disable_intr)(struct dmar_unit *); const char *name; }; #define DMAR_INTR_FAULT 0 #define DMAR_INTR_QI 1 #define DMAR_INTR_TOTAL 2 struct dmar_unit { struct iommu_unit iommu; device_t dev; uint16_t segment; uint64_t base; /* Resources */ int reg_rid; struct resource *regs; struct dmar_msi_data intrs[DMAR_INTR_TOTAL]; /* Hardware registers cache */ uint32_t hw_ver; uint64_t hw_cap; uint64_t hw_ecap; uint32_t hw_gcmd; /* Data for being a dmar */ LIST_HEAD(, dmar_domain) domains; struct unrhdr *domids; vm_object_t ctx_obj; u_int barrier_flags; /* Fault handler data */ struct mtx fault_lock; uint64_t *fault_log; int fault_log_head; int fault_log_tail; int fault_log_size; struct task fault_task; struct taskqueue 
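	/*
	 * (fault_log_head/tail above index fault_log as a ring of
	 * hardware fault records, presumably drained by fault_task on
	 * the taskqueue below.)
	 */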
*fault_taskqueue; /* QI */ int qi_enabled; vm_offset_t inv_queue; vm_size_t inv_queue_size; uint32_t inv_queue_avail; uint32_t inv_queue_tail; volatile uint32_t inv_waitd_seq_hw; /* hw writes there on wait descr completion */ uint64_t inv_waitd_seq_hw_phys; uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */ u_int inv_waitd_gen; /* seq number generation AKA seq overflows */ u_int inv_seq_waiters; /* count of waiters for seq */ u_int inv_queue_full; /* informational counter */ /* IR */ int ir_enabled; vm_paddr_t irt_phys; dmar_irte_t *irt; u_int irte_cnt; vmem_t *irtids; /* Delayed freeing of map entries queue processing */ struct iommu_map_entries_tailq tlb_flush_entries; struct task qi_task; struct taskqueue *qi_taskqueue; /* * Bitmap of buses for which context must ignore slot:func, * duplicating the page table pointer into all context table * entries. This is a client-controlled quirk to support some * NTBs. */ uint32_t buswide_ctxs[(PCI_BUSMAX + 1) / NBBY / sizeof(uint32_t)]; }; #define DMAR_LOCK(dmar) mtx_lock(&(dmar)->iommu.lock) #define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->iommu.lock) #define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED) #define DMAR_FAULT_LOCK(dmar) mtx_lock_spin(&(dmar)->fault_lock) #define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock) #define DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED) #define DMAR_IS_COHERENT(dmar) (((dmar)->hw_ecap & DMAR_ECAP_C) != 0) #define DMAR_HAS_QI(dmar) (((dmar)->hw_ecap & DMAR_ECAP_QI) != 0) #define DMAR_X2APIC(dmar) \ (x2apic_mode && ((dmar)->hw_ecap & DMAR_ECAP_EIM) != 0) /* Barrier ids */ #define DMAR_BARRIER_RMRR 0 #define DMAR_BARRIER_USEQ 1 struct dmar_unit *dmar_find(device_t dev, bool verbose); struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid); struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid); u_int dmar_nd2mask(u_int nd); bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl); int domain_set_agaw(struct dmar_domain *domain, int mgaw); int dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr, bool allow_less); vm_pindex_t pglvl_max_pages(int pglvl); int domain_is_sp_lvl(struct dmar_domain *domain, int lvl); iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl); iommu_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl); int calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size, iommu_gaddr_t *isizep); struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags); void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags); void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags, struct sf_buf **sf); void dmar_unmap_pgtbl(struct sf_buf *sf); int dmar_load_root_entry_ptr(struct dmar_unit *unit); int dmar_inv_ctx_glob(struct dmar_unit *unit); int dmar_inv_iotlb_glob(struct dmar_unit *unit); int dmar_flush_write_bufs(struct dmar_unit *unit); void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst); void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst); void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst); int dmar_enable_translation(struct dmar_unit *unit); int dmar_disable_translation(struct dmar_unit *unit); int dmar_load_irt_ptr(struct dmar_unit *unit); int dmar_enable_ir(struct dmar_unit *unit); int dmar_disable_ir(struct dmar_unit *unit); bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id); void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id); uint64_t 
dmar_get_timeout(void); void dmar_update_timeout(uint64_t newval); int dmar_fault_intr(void *arg); void dmar_enable_fault_intr(struct dmar_unit *unit); void dmar_disable_fault_intr(struct dmar_unit *unit); int dmar_init_fault_log(struct dmar_unit *unit); void dmar_fini_fault_log(struct dmar_unit *unit); int dmar_qi_intr(void *arg); void dmar_enable_qi_intr(struct dmar_unit *unit); void dmar_disable_qi_intr(struct dmar_unit *unit); int dmar_init_qi(struct dmar_unit *unit); void dmar_fini_qi(struct dmar_unit *unit); void dmar_qi_invalidate_locked(struct dmar_domain *domain, iommu_gaddr_t start, iommu_gaddr_t size, struct iommu_qi_genseq *psec, bool emit_wait); void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit); void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit); void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit); void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt); vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain, iommu_gaddr_t maxaddr); void put_idmap_pgtbl(vm_object_t obj); int domain_map_buf(struct dmar_domain *domain, iommu_gaddr_t base, iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags); int domain_unmap_buf(struct dmar_domain *domain, iommu_gaddr_t base, iommu_gaddr_t size, int flags); void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base, iommu_gaddr_t size); int domain_alloc_pgtbl(struct dmar_domain *domain); void domain_free_pgtbl(struct dmar_domain *domain); int dmar_dev_depth(device_t child); void dmar_dev_path(device_t child, int *busno, void *path1, int depth); struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid, bool id_mapped, bool rmrr_init); struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid, int dev_domain, int dev_busno, const void *dev_path, int dev_path_len, bool id_mapped, bool rmrr_init); int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx); void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx); void dmar_free_ctx(struct dmar_ctx *ctx); struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid); void dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free); void dmar_domain_unload(struct dmar_domain *domain, struct iommu_map_entries_tailq *entries, bool cansleep); void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free); void dmar_gas_init_domain(struct dmar_domain *domain); void dmar_gas_fini_domain(struct dmar_domain *domain); struct iommu_map_entry *dmar_gas_alloc_entry(struct dmar_domain *domain, u_int flags); void dmar_gas_free_entry(struct dmar_domain *domain, struct iommu_map_entry *entry); void dmar_gas_free_space(struct dmar_domain *domain, struct iommu_map_entry *entry); int dmar_gas_map(struct dmar_domain *domain, const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset, u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res); void dmar_gas_free_region(struct dmar_domain *domain, struct iommu_map_entry *entry); int dmar_gas_map_region(struct dmar_domain *domain, struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma); int dmar_gas_reserve_region(struct dmar_domain *domain, iommu_gaddr_t start, iommu_gaddr_t end); void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain, int dev_busno, const void *dev_path, int dev_path_len, struct iommu_map_entries_tailq *rmrr_entries); int dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar); void 
dmar_quirks_post_ident(struct dmar_unit *dmar); void dmar_quirks_pre_use(struct iommu_unit *dmar); int dmar_init_irt(struct dmar_unit *unit); void dmar_fini_irt(struct dmar_unit *unit); void dmar_set_buswide_ctx(struct iommu_unit *unit, u_int busno); bool dmar_is_buswide_ctx(struct dmar_unit *unit, u_int busno); /* Map flags */ #define IOMMU_MF_CANWAIT 0x0001 #define IOMMU_MF_CANSPLIT 0x0002 #define IOMMU_MF_RMRR 0x0004 #define DMAR_PGF_WAITOK 0x0001 #define DMAR_PGF_ZERO 0x0002 #define DMAR_PGF_ALLOC 0x0004 #define DMAR_PGF_NOALLOC 0x0008 #define DMAR_PGF_OBJL 0x0010 extern iommu_haddr_t dmar_high; extern int haw; extern int dmar_tbl_pagecnt; extern int dmar_batch_coalesce; extern int dmar_check_free; static inline uint32_t dmar_read4(const struct dmar_unit *unit, int reg) { return (bus_read_4(unit->regs, reg)); } static inline uint64_t dmar_read8(const struct dmar_unit *unit, int reg) { #ifdef __i386__ uint32_t high, low; low = bus_read_4(unit->regs, reg); high = bus_read_4(unit->regs, reg + 4); return (low | ((uint64_t)high << 32)); #else return (bus_read_8(unit->regs, reg)); #endif } static inline void dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val) { KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) == (unit->hw_gcmd & DMAR_GCMD_TE), ("dmar%d clearing TE 0x%08x 0x%08x", unit->iommu.unit, unit->hw_gcmd, val)); bus_write_4(unit->regs, reg, val); } static inline void dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val) { KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write")); #ifdef __i386__ uint32_t high, low; low = val; high = val >> 32; bus_write_4(unit->regs, reg, low); bus_write_4(unit->regs, reg + 4, high); #else bus_write_8(unit->regs, reg, val); #endif } /* * dmar_pte_store and dmar_pte_clear ensure that on i386, 32bit writes * are issued in the correct order. For store, the lower word, * containing the P or R and W bits, is set only after the high word * is written. For clear, the P bit is cleared first, then the high * word is cleared. * * dmar_pte_update updates the pte. For amd64, the update is atomic. * For i386, it first disables the entry by clearing the word * containing the P bit, and then defer to dmar_pte_store. The locked * cmpxchg8b is probably available on any machine having DMAR support, * but interrupt translation table may be mapped uncached. 
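 *
 * The ordering matters because the hardware may walk the tables at any
 * moment.  On store, the high word is written while the entry is still
 * not-present (low word zero), and the final 32bit write of the low
 * word publishes the whole entry.  On clear, zeroing the P bit first
 * hides the entry before the high word is torn down, so the walker can
 * never observe a half-written entry.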
*/ static inline void dmar_pte_store1(volatile uint64_t *dst, uint64_t val) { #ifdef __i386__ volatile uint32_t *p; uint32_t hi, lo; hi = val >> 32; lo = val; p = (volatile uint32_t *)dst; *(p + 1) = hi; *p = lo; #else *dst = val; #endif } static inline void dmar_pte_store(volatile uint64_t *dst, uint64_t val) { KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx", dst, (uintmax_t)*dst, (uintmax_t)val)); dmar_pte_store1(dst, val); } static inline void dmar_pte_update(volatile uint64_t *dst, uint64_t val) { #ifdef __i386__ volatile uint32_t *p; p = (volatile uint32_t *)dst; *p = 0; #endif dmar_pte_store1(dst, val); } static inline void dmar_pte_clear(volatile uint64_t *dst) { #ifdef __i386__ volatile uint32_t *p; p = (volatile uint32_t *)dst; *p = 0; *(p + 1) = 0; #else *dst = 0; #endif -} - -static inline bool -iommu_test_boundary(iommu_gaddr_t start, iommu_gaddr_t size, - iommu_gaddr_t boundary) -{ - - if (boundary == 0) - return (true); - return (start + size <= ((start + boundary) & ~(boundary - 1))); } extern struct timespec dmar_hw_timeout; #define DMAR_WAIT_UNTIL(cond) \ { \ struct timespec last, curr; \ bool forever; \ \ if (dmar_hw_timeout.tv_sec == 0 && \ dmar_hw_timeout.tv_nsec == 0) { \ forever = true; \ } else { \ forever = false; \ nanouptime(&curr); \ timespecadd(&curr, &dmar_hw_timeout, &last); \ } \ for (;;) { \ if (cond) { \ error = 0; \ break; \ } \ nanouptime(&curr); \ if (!forever && timespeccmp(&last, &curr, <)) { \ error = ETIMEDOUT; \ break; \ } \ cpu_spinwait(); \ } \ } #ifdef INVARIANTS #define TD_PREP_PINNED_ASSERT \ int old_td_pinned; \ old_td_pinned = curthread->td_pinned #define TD_PINNED_ASSERT \ KASSERT(curthread->td_pinned == old_td_pinned, \ ("pin count leak: %d %d %s:%d", curthread->td_pinned, \ old_td_pinned, __FILE__, __LINE__)) #else #define TD_PREP_PINNED_ASSERT #define TD_PINNED_ASSERT #endif #endif Index: head/sys/x86/iommu/intel_drv.c =================================================================== --- head/sys/x86/iommu/intel_drv.c (revision 363309) +++ head/sys/x86/iommu/intel_drv.c (revision 363310) @@ -1,1362 +1,1362 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013-2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #if defined(__amd64__) #define DEV_APIC #else #include "opt_apic.h" #endif #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_APIC #include "pcib_if.h" #include #include #include #endif #define DMAR_FAULT_IRQ_RID 0 #define DMAR_QI_IRQ_RID 1 #define DMAR_REG_RID 2 static devclass_t dmar_devclass; static device_t *dmar_devs; static int dmar_devcnt; typedef int (*dmar_iter_t)(ACPI_DMAR_HEADER *, void *); static void dmar_iterate_tbl(dmar_iter_t iter, void *arg) { ACPI_TABLE_DMAR *dmartbl; ACPI_DMAR_HEADER *dmarh; char *ptr, *ptrend; ACPI_STATUS status; status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl); if (ACPI_FAILURE(status)) return; ptr = (char *)dmartbl + sizeof(*dmartbl); ptrend = (char *)dmartbl + dmartbl->Header.Length; for (;;) { if (ptr >= ptrend) break; dmarh = (ACPI_DMAR_HEADER *)ptr; if (dmarh->Length <= 0) { printf("dmar_identify: corrupted DMAR table, l %d\n", dmarh->Length); break; } ptr += dmarh->Length; if (!iter(dmarh, arg)) break; } AcpiPutTable((ACPI_TABLE_HEADER *)dmartbl); } struct find_iter_args { int i; ACPI_DMAR_HARDWARE_UNIT *res; }; static int dmar_find_iter(ACPI_DMAR_HEADER *dmarh, void *arg) { struct find_iter_args *fia; if (dmarh->Type != ACPI_DMAR_TYPE_HARDWARE_UNIT) return (1); fia = arg; if (fia->i == 0) { fia->res = (ACPI_DMAR_HARDWARE_UNIT *)dmarh; return (0); } fia->i--; return (1); } static ACPI_DMAR_HARDWARE_UNIT * dmar_find_by_index(int idx) { struct find_iter_args fia; fia.i = idx; fia.res = NULL; dmar_iterate_tbl(dmar_find_iter, &fia); return (fia.res); } static int dmar_count_iter(ACPI_DMAR_HEADER *dmarh, void *arg) { if (dmarh->Type == ACPI_DMAR_TYPE_HARDWARE_UNIT) dmar_devcnt++; return (1); } static int dmar_enable = 0; static void dmar_identify(driver_t *driver, device_t parent) { ACPI_TABLE_DMAR *dmartbl; ACPI_DMAR_HARDWARE_UNIT *dmarh; ACPI_STATUS status; int i, error; if (acpi_disabled("dmar")) return; TUNABLE_INT_FETCH("hw.dmar.enable", &dmar_enable); if (!dmar_enable) return; #ifdef INVARIANTS TUNABLE_INT_FETCH("hw.dmar.check_free", &dmar_check_free); #endif status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl); if (ACPI_FAILURE(status)) return; haw = dmartbl->Width + 1; if ((1ULL << (haw + 1)) > BUS_SPACE_MAXADDR) dmar_high = BUS_SPACE_MAXADDR; else dmar_high = 1ULL << (haw + 1); if (bootverbose) { printf("DMAR HAW=%d flags=<%b>\n", dmartbl->Width, (unsigned)dmartbl->Flags, "\020\001INTR_REMAP\002X2APIC_OPT_OUT"); } AcpiPutTable((ACPI_TABLE_HEADER *)dmartbl); dmar_iterate_tbl(dmar_count_iter, NULL); if (dmar_devcnt == 0) return; dmar_devs = malloc(sizeof(device_t) * dmar_devcnt, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < dmar_devcnt; i++) { dmarh = dmar_find_by_index(i); 
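		/*
		 * Each ACPI hardware unit gets its own dmar%d child
		 * device, with the unit's register page attached as a
		 * memory resource.
		 */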
if (dmarh == NULL) { printf("dmar_identify: cannot find HWUNIT %d\n", i); continue; } dmar_devs[i] = BUS_ADD_CHILD(parent, 1, "dmar", i); if (dmar_devs[i] == NULL) { printf("dmar_identify: cannot create instance %d\n", i); continue; } error = bus_set_resource(dmar_devs[i], SYS_RES_MEMORY, DMAR_REG_RID, dmarh->Address, PAGE_SIZE); if (error != 0) { printf( "dmar%d: unable to alloc register window at 0x%08jx: error %d\n", i, (uintmax_t)dmarh->Address, error); device_delete_child(parent, dmar_devs[i]); dmar_devs[i] = NULL; } } } static int dmar_probe(device_t dev) { if (acpi_get_handle(dev) != NULL) return (ENXIO); device_set_desc(dev, "DMA remap"); return (BUS_PROBE_NOWILDCARD); } static void dmar_release_intr(device_t dev, struct dmar_unit *unit, int idx) { struct dmar_msi_data *dmd; dmd = &unit->intrs[idx]; if (dmd->irq == -1) return; bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle); bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res); bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid); PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)), dev, dmd->irq); dmd->irq = -1; } static void dmar_release_resources(device_t dev, struct dmar_unit *unit) { int i; iommu_fini_busdma(&unit->iommu); dmar_fini_irt(unit); dmar_fini_qi(unit); dmar_fini_fault_log(unit); for (i = 0; i < DMAR_INTR_TOTAL; i++) dmar_release_intr(dev, unit, i); if (unit->regs != NULL) { bus_deactivate_resource(dev, SYS_RES_MEMORY, unit->reg_rid, unit->regs); bus_release_resource(dev, SYS_RES_MEMORY, unit->reg_rid, unit->regs); unit->regs = NULL; } if (unit->domids != NULL) { delete_unrhdr(unit->domids); unit->domids = NULL; } if (unit->ctx_obj != NULL) { vm_object_deallocate(unit->ctx_obj); unit->ctx_obj = NULL; } } static int dmar_alloc_irq(device_t dev, struct dmar_unit *unit, int idx) { device_t pcib; struct dmar_msi_data *dmd; uint64_t msi_addr; uint32_t msi_data; int error; dmd = &unit->intrs[idx]; pcib = device_get_parent(device_get_parent(dev)); /* Really not pcib */ error = PCIB_ALLOC_MSIX(pcib, dev, &dmd->irq); if (error != 0) { device_printf(dev, "cannot allocate %s interrupt, %d\n", dmd->name, error); goto err1; } error = bus_set_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq, 1); if (error != 0) { device_printf(dev, "cannot set %s interrupt resource, %d\n", dmd->name, error); goto err2; } dmd->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &dmd->irq_rid, RF_ACTIVE); if (dmd->irq_res == NULL) { device_printf(dev, "cannot allocate resource for %s interrupt\n", dmd->name); error = ENXIO; goto err3; } error = bus_setup_intr(dev, dmd->irq_res, INTR_TYPE_MISC, dmd->handler, NULL, unit, &dmd->intr_handle); if (error != 0) { device_printf(dev, "cannot setup %s interrupt, %d\n", dmd->name, error); goto err4; } bus_describe_intr(dev, dmd->irq_res, dmd->intr_handle, "%s", dmd->name); error = PCIB_MAP_MSI(pcib, dev, dmd->irq, &msi_addr, &msi_data); if (error != 0) { device_printf(dev, "cannot map %s interrupt, %d\n", dmd->name, error); goto err5; } dmar_write4(unit, dmd->msi_data_reg, msi_data); dmar_write4(unit, dmd->msi_addr_reg, msi_addr); /* Only for xAPIC mode */ dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32); return (0); err5: bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle); err4: bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res); err3: bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid); err2: PCIB_RELEASE_MSIX(pcib, dev, dmd->irq); dmd->irq = -1; err1: return (error); } #ifdef DEV_APIC static int dmar_remap_intr(device_t dev, device_t child, u_int irq) { 
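	/*
	 * bus_remap_intr() method: recompute the MSI address/data pair
	 * for the moved interrupt and rewrite the unit's registers,
	 * with the interrupt source disabled around the update.
	 */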
struct dmar_unit *unit; struct dmar_msi_data *dmd; uint64_t msi_addr; uint32_t msi_data; int i, error; unit = device_get_softc(dev); for (i = 0; i < DMAR_INTR_TOTAL; i++) { dmd = &unit->intrs[i]; if (irq == dmd->irq) { error = PCIB_MAP_MSI(device_get_parent( device_get_parent(dev)), dev, irq, &msi_addr, &msi_data); if (error != 0) return (error); DMAR_LOCK(unit); (dmd->disable_intr)(unit); dmar_write4(unit, dmd->msi_data_reg, msi_data); dmar_write4(unit, dmd->msi_addr_reg, msi_addr); dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32); (dmd->enable_intr)(unit); DMAR_UNLOCK(unit); return (0); } } return (ENOENT); } #endif static void dmar_print_caps(device_t dev, struct dmar_unit *unit, ACPI_DMAR_HARDWARE_UNIT *dmaru) { uint32_t caphi, ecaphi; device_printf(dev, "regs@0x%08jx, ver=%d.%d, seg=%d, flags=<%b>\n", (uintmax_t)dmaru->Address, DMAR_MAJOR_VER(unit->hw_ver), DMAR_MINOR_VER(unit->hw_ver), dmaru->Segment, dmaru->Flags, "\020\001INCLUDE_ALL_PCI"); caphi = unit->hw_cap >> 32; device_printf(dev, "cap=%b,", (u_int)unit->hw_cap, "\020\004AFL\005WBF\006PLMR\007PHMR\010CM\027ZLR\030ISOCH"); printf("%b, ", caphi, "\020\010PSI\027DWD\030DRD\031FL1GP\034PSI"); printf("ndoms=%d, sagaw=%d, mgaw=%d, fro=%d, nfr=%d, superp=%d", DMAR_CAP_ND(unit->hw_cap), DMAR_CAP_SAGAW(unit->hw_cap), DMAR_CAP_MGAW(unit->hw_cap), DMAR_CAP_FRO(unit->hw_cap), DMAR_CAP_NFR(unit->hw_cap), DMAR_CAP_SPS(unit->hw_cap)); if ((unit->hw_cap & DMAR_CAP_PSI) != 0) printf(", mamv=%d", DMAR_CAP_MAMV(unit->hw_cap)); printf("\n"); ecaphi = unit->hw_ecap >> 32; device_printf(dev, "ecap=%b,", (u_int)unit->hw_ecap, "\020\001C\002QI\003DI\004IR\005EIM\007PT\010SC\031ECS\032MTS" "\033NEST\034DIS\035PASID\036PRS\037ERS\040SRS"); printf("%b, ", ecaphi, "\020\002NWFS\003EAFS"); printf("mhmw=%d, iro=%d\n", DMAR_ECAP_MHMV(unit->hw_ecap), DMAR_ECAP_IRO(unit->hw_ecap)); } static int dmar_attach(device_t dev) { struct dmar_unit *unit; ACPI_DMAR_HARDWARE_UNIT *dmaru; uint64_t timeout; int i, error; unit = device_get_softc(dev); unit->dev = dev; unit->iommu.unit = device_get_unit(dev); dmaru = dmar_find_by_index(unit->iommu.unit); if (dmaru == NULL) return (EINVAL); unit->segment = dmaru->Segment; unit->base = dmaru->Address; unit->reg_rid = DMAR_REG_RID; unit->regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &unit->reg_rid, RF_ACTIVE); if (unit->regs == NULL) { device_printf(dev, "cannot allocate register window\n"); return (ENOMEM); } unit->hw_ver = dmar_read4(unit, DMAR_VER_REG); unit->hw_cap = dmar_read8(unit, DMAR_CAP_REG); unit->hw_ecap = dmar_read8(unit, DMAR_ECAP_REG); if (bootverbose) dmar_print_caps(dev, unit, dmaru); dmar_quirks_post_ident(unit); timeout = dmar_get_timeout(); TUNABLE_UINT64_FETCH("hw.dmar.timeout", &timeout); dmar_update_timeout(timeout); for (i = 0; i < DMAR_INTR_TOTAL; i++) unit->intrs[i].irq = -1; unit->intrs[DMAR_INTR_FAULT].name = "fault"; unit->intrs[DMAR_INTR_FAULT].irq_rid = DMAR_FAULT_IRQ_RID; unit->intrs[DMAR_INTR_FAULT].handler = dmar_fault_intr; unit->intrs[DMAR_INTR_FAULT].msi_data_reg = DMAR_FEDATA_REG; unit->intrs[DMAR_INTR_FAULT].msi_addr_reg = DMAR_FEADDR_REG; unit->intrs[DMAR_INTR_FAULT].msi_uaddr_reg = DMAR_FEUADDR_REG; unit->intrs[DMAR_INTR_FAULT].enable_intr = dmar_enable_fault_intr; unit->intrs[DMAR_INTR_FAULT].disable_intr = dmar_disable_fault_intr; error = dmar_alloc_irq(dev, unit, DMAR_INTR_FAULT); if (error != 0) { dmar_release_resources(dev, unit); return (error); } if (DMAR_HAS_QI(unit)) { unit->intrs[DMAR_INTR_QI].name = "qi"; unit->intrs[DMAR_INTR_QI].irq_rid = DMAR_QI_IRQ_RID; 
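The %b conversion used by dmar_print_caps() is a kernel printf extension: the extra string argument begins with a byte giving the numeric base (\020, octal 20, means print the value in hex) followed by <bit-number><name> groups, and each set bit is printed inside <...> by name. A rough userland re-implementation for single-bit flags, with a CAP-like sample value (per the format strings above, AFL is bit 4 and CM is bit 8):

#include <stdio.h>

static void
print_b(unsigned int v, const char *bfmt)
{
    const char *p;
    int any, base, bit;

    p = bfmt;
    base = *p++;                    /* first byte: output base */
    any = 0;
    printf(base == 8 ? "%o" : base == 10 ? "%u" : "%x", v);
    while (*p != '\0') {
        bit = *p++;                 /* 1-origin bit number, always <= 32 */
        if (v & (1u << (bit - 1))) {
            putchar(any ? ',' : '<');
            any = 1;
            while (*p > ' ')        /* name characters are printable */
                putchar(*p++);
        } else {
            while (*p > ' ')
                p++;                /* skip the name of a clear bit */
        }
    }
    if (any)
        putchar('>');
}

int
main(void)
{
    print_b((1u << 3) | (1u << 7),
        "\020\004AFL\005WBF\006PLMR\007PHMR\010CM");
    putchar('\n');                  /* prints: 88<AFL,CM> */
    return (0);
}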
unit->intrs[DMAR_INTR_QI].handler = dmar_qi_intr; unit->intrs[DMAR_INTR_QI].msi_data_reg = DMAR_IEDATA_REG; unit->intrs[DMAR_INTR_QI].msi_addr_reg = DMAR_IEADDR_REG; unit->intrs[DMAR_INTR_QI].msi_uaddr_reg = DMAR_IEUADDR_REG; unit->intrs[DMAR_INTR_QI].enable_intr = dmar_enable_qi_intr; unit->intrs[DMAR_INTR_QI].disable_intr = dmar_disable_qi_intr; error = dmar_alloc_irq(dev, unit, DMAR_INTR_QI); if (error != 0) { dmar_release_resources(dev, unit); return (error); } } mtx_init(&unit->iommu.lock, "dmarhw", NULL, MTX_DEF); unit->domids = new_unrhdr(0, dmar_nd2mask(DMAR_CAP_ND(unit->hw_cap)), &unit->iommu.lock); LIST_INIT(&unit->domains); /* * 9.2 "Context Entry": * When Caching Mode (CM) field is reported as Set, the * domain-id value of zero is architecturally reserved. * Software must not use domain-id value of zero * when CM is Set. */ if ((unit->hw_cap & DMAR_CAP_CM) != 0) alloc_unr_specific(unit->domids, 0); unit->ctx_obj = vm_pager_allocate(OBJT_PHYS, NULL, IDX_TO_OFF(1 + DMAR_CTX_CNT), 0, 0, NULL); /* * Allocate and load the root entry table pointer. Enable the * address translation after the required invalidations are * done. */ dmar_pgalloc(unit->ctx_obj, 0, DMAR_PGF_WAITOK | DMAR_PGF_ZERO); DMAR_LOCK(unit); error = dmar_load_root_entry_ptr(unit); if (error != 0) { DMAR_UNLOCK(unit); dmar_release_resources(dev, unit); return (error); } error = dmar_inv_ctx_glob(unit); if (error != 0) { DMAR_UNLOCK(unit); dmar_release_resources(dev, unit); return (error); } if ((unit->hw_ecap & DMAR_ECAP_DI) != 0) { error = dmar_inv_iotlb_glob(unit); if (error != 0) { DMAR_UNLOCK(unit); dmar_release_resources(dev, unit); return (error); } } DMAR_UNLOCK(unit); error = dmar_init_fault_log(unit); if (error != 0) { dmar_release_resources(dev, unit); return (error); } error = dmar_init_qi(unit); if (error != 0) { dmar_release_resources(dev, unit); return (error); } error = dmar_init_irt(unit); if (error != 0) { dmar_release_resources(dev, unit); return (error); } error = iommu_init_busdma(&unit->iommu); if (error != 0) { dmar_release_resources(dev, unit); return (error); } #ifdef NOTYET DMAR_LOCK(unit); error = dmar_enable_translation(unit); if (error != 0) { DMAR_UNLOCK(unit); dmar_release_resources(dev, unit); return (error); } DMAR_UNLOCK(unit); #endif return (0); } static int dmar_detach(device_t dev) { return (EBUSY); } static int dmar_suspend(device_t dev) { return (0); } static int dmar_resume(device_t dev) { /* XXXKIB */ return (0); } static device_method_t dmar_methods[] = { DEVMETHOD(device_identify, dmar_identify), DEVMETHOD(device_probe, dmar_probe), DEVMETHOD(device_attach, dmar_attach), DEVMETHOD(device_detach, dmar_detach), DEVMETHOD(device_suspend, dmar_suspend), DEVMETHOD(device_resume, dmar_resume), #ifdef DEV_APIC DEVMETHOD(bus_remap_intr, dmar_remap_intr), #endif DEVMETHOD_END }; static driver_t dmar_driver = { "dmar", dmar_methods, sizeof(struct dmar_unit), }; DRIVER_MODULE(dmar, acpi, dmar_driver, dmar_devclass, 0, 0); MODULE_DEPEND(dmar, acpi, 1, 1, 1); void dmar_set_buswide_ctx(struct iommu_unit *unit, u_int busno) { struct dmar_unit *dmar; dmar = (struct dmar_unit *)unit; MPASS(busno <= PCI_BUSMAX); DMAR_LOCK(dmar); dmar->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |= 1 << (busno % (NBBY * sizeof(uint32_t))); DMAR_UNLOCK(dmar); } bool dmar_is_buswide_ctx(struct dmar_unit *unit, u_int busno) { MPASS(busno <= PCI_BUSMAX); return ((unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] & (1U << (busno % (NBBY * sizeof(uint32_t))))) != 0); } static void dmar_print_path(int busno, int 
depth, const ACPI_DMAR_PCI_PATH *path) { int i; printf("[%d, ", busno); for (i = 0; i < depth; i++) { if (i != 0) printf(", "); printf("(%d, %d)", path[i].Device, path[i].Function); } printf("]"); } int dmar_dev_depth(device_t child) { devclass_t pci_class; device_t bus, pcib; int depth; pci_class = devclass_find("pci"); for (depth = 1; ; depth++) { bus = device_get_parent(child); pcib = device_get_parent(bus); if (device_get_devclass(device_get_parent(pcib)) != pci_class) return (depth); child = pcib; } } void dmar_dev_path(device_t child, int *busno, void *path1, int depth) { devclass_t pci_class; device_t bus, pcib; ACPI_DMAR_PCI_PATH *path; pci_class = devclass_find("pci"); path = path1; for (depth--; depth != -1; depth--) { path[depth].Device = pci_get_slot(child); path[depth].Function = pci_get_function(child); bus = device_get_parent(child); pcib = device_get_parent(bus); if (device_get_devclass(device_get_parent(pcib)) != pci_class) { /* reached a host bridge */ *busno = pcib_get_bus(bus); return; } child = pcib; } panic("wrong depth"); } static int dmar_match_pathes(int busno1, const ACPI_DMAR_PCI_PATH *path1, int depth1, int busno2, const ACPI_DMAR_PCI_PATH *path2, int depth2, enum AcpiDmarScopeType scope_type) { int i, depth; if (busno1 != busno2) return (0); if (scope_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && depth1 != depth2) return (0); depth = depth1; if (depth2 < depth) depth = depth2; for (i = 0; i < depth; i++) { if (path1[i].Device != path2[i].Device || path1[i].Function != path2[i].Function) return (0); } return (1); } static int dmar_match_devscope(ACPI_DMAR_DEVICE_SCOPE *devscope, int dev_busno, const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len) { ACPI_DMAR_PCI_PATH *path; int path_len; if (devscope->Length < sizeof(*devscope)) { printf("dmar_match_devscope: corrupted DMAR table, dl %d\n", devscope->Length); return (-1); } if (devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_ENDPOINT && devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_BRIDGE) return (0); path_len = devscope->Length - sizeof(*devscope); if (path_len % 2 != 0) { printf("dmar_match_devscope: corrupted DMAR table, dl %d\n", devscope->Length); return (-1); } path_len /= 2; path = (ACPI_DMAR_PCI_PATH *)(devscope + 1); if (path_len == 0) { printf("dmar_match_devscope: corrupted DMAR table, dl %d\n", devscope->Length); return (-1); } return (dmar_match_pathes(devscope->Bus, path, path_len, dev_busno, dev_path, dev_path_len, devscope->EntryType)); } static bool dmar_match_by_path(struct dmar_unit *unit, int dev_domain, int dev_busno, const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len, const char **banner) { ACPI_DMAR_HARDWARE_UNIT *dmarh; ACPI_DMAR_DEVICE_SCOPE *devscope; char *ptr, *ptrend; int match; dmarh = dmar_find_by_index(unit->iommu.unit); if (dmarh == NULL) return (false); if (dmarh->Segment != dev_domain) return (false); if ((dmarh->Flags & ACPI_DMAR_INCLUDE_ALL) != 0) { if (banner != NULL) *banner = "INCLUDE_ALL"; return (true); } ptr = (char *)dmarh + sizeof(*dmarh); ptrend = (char *)dmarh + dmarh->Header.Length; while (ptr < ptrend) { devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr; ptr += devscope->Length; match = dmar_match_devscope(devscope, dev_busno, dev_path, dev_path_len); if (match == -1) return (false); if (match == 1) { if (banner != NULL) *banner = "specific match"; return (true); } } return (false); } static struct dmar_unit * dmar_find_by_scope(int dev_domain, int dev_busno, const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len) { struct dmar_unit *unit; int i; for (i = 0; i < dmar_devcnt; i++) { 
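dmar_match_pathes() encodes the DMAR device-scope rule: the start bus numbers must agree, an ENDPOINT scope must match the device's full depth while a BRIDGE scope may match a prefix of it, and every compared (device, function) hop must be identical. A small self-contained sketch of the same comparison, with invented sample paths:

#include <stdio.h>

struct pci_path { int dev, func; };

static int
match_paths(int busno1, const struct pci_path *p1, int d1,
    int busno2, const struct pci_path *p2, int d2, int endpoint)
{
    int i, depth;

    if (busno1 != busno2)
        return (0);
    if (endpoint && d1 != d2)       /* endpoints match exactly */
        return (0);
    depth = d1 < d2 ? d1 : d2;      /* bridges may match a prefix */
    for (i = 0; i < depth; i++)
        if (p1[i].dev != p2[i].dev || p1[i].func != p2[i].func)
            return (0);
    return (1);
}

int
main(void)
{
    /* scope: bus 0 -> bridge (2,0) -> endpoint (0,1) */
    struct pci_path scope[] = { { 2, 0 }, { 0, 1 } };
    struct pci_path dev[] = { { 2, 0 }, { 0, 1 } };

    printf("endpoint: %d\n", match_paths(0, scope, 2, 0, dev, 2, 1));  /* 1 */
    printf("bridge prefix: %d\n",
        match_paths(0, scope, 1, 0, dev, 2, 0));                       /* 1 */
    return (0);
}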
if (dmar_devs[i] == NULL) continue; unit = device_get_softc(dmar_devs[i]); if (dmar_match_by_path(unit, dev_domain, dev_busno, dev_path, dev_path_len, NULL)) return (unit); } return (NULL); } struct dmar_unit * dmar_find(device_t dev, bool verbose) { device_t dmar_dev; struct dmar_unit *unit; const char *banner; int i, dev_domain, dev_busno, dev_path_len; /* * This function can only handle PCI(e) devices. */ if (device_get_devclass(device_get_parent(dev)) != devclass_find("pci")) return (NULL); dmar_dev = NULL; dev_domain = pci_get_domain(dev); dev_path_len = dmar_dev_depth(dev); ACPI_DMAR_PCI_PATH dev_path[dev_path_len]; dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len); banner = ""; for (i = 0; i < dmar_devcnt; i++) { if (dmar_devs[i] == NULL) continue; unit = device_get_softc(dmar_devs[i]); if (dmar_match_by_path(unit, dev_domain, dev_busno, dev_path, dev_path_len, &banner)) break; } if (i == dmar_devcnt) return (NULL); if (verbose) { device_printf(dev, "pci%d:%d:%d:%d matched dmar%d by %s", dev_domain, pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), unit->iommu.unit, banner); printf(" scope path "); dmar_print_path(dev_busno, dev_path_len, dev_path); printf("\n"); } return (unit); } static struct dmar_unit * dmar_find_nonpci(u_int id, u_int entry_type, uint16_t *rid) { device_t dmar_dev; struct dmar_unit *unit; ACPI_DMAR_HARDWARE_UNIT *dmarh; ACPI_DMAR_DEVICE_SCOPE *devscope; ACPI_DMAR_PCI_PATH *path; char *ptr, *ptrend; #ifdef DEV_APIC int error; #endif int i; for (i = 0; i < dmar_devcnt; i++) { dmar_dev = dmar_devs[i]; if (dmar_dev == NULL) continue; unit = (struct dmar_unit *)device_get_softc(dmar_dev); dmarh = dmar_find_by_index(i); if (dmarh == NULL) continue; ptr = (char *)dmarh + sizeof(*dmarh); ptrend = (char *)dmarh + dmarh->Header.Length; for (;;) { if (ptr >= ptrend) break; devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr; ptr += devscope->Length; if (devscope->EntryType != entry_type) continue; if (devscope->EnumerationId != id) continue; #ifdef DEV_APIC if (entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) { error = ioapic_get_rid(id, rid); /* * If our IOAPIC has PCI bindings then * use the PCI device rid. 
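* A requester id packs the source as (bus << 8) | (device << 3) | function; the IOAPIC behind pci0:3:0:1, for example, gets rid 0x0301, the same encoding PCI_RID() builds below from the device-scope path.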
*/ if (error == 0) return (unit); } #endif if (devscope->Length - sizeof(ACPI_DMAR_DEVICE_SCOPE) == 2) { if (rid != NULL) { path = (ACPI_DMAR_PCI_PATH *) (devscope + 1); *rid = PCI_RID(devscope->Bus, path->Device, path->Function); } return (unit); } printf( "dmar_find_nonpci: id %d type %d path length != 2\n", id, entry_type); break; } } return (NULL); } struct dmar_unit * dmar_find_hpet(device_t dev, uint16_t *rid) { return (dmar_find_nonpci(hpet_get_uid(dev), ACPI_DMAR_SCOPE_TYPE_HPET, rid)); } struct dmar_unit * dmar_find_ioapic(u_int apic_id, uint16_t *rid) { return (dmar_find_nonpci(apic_id, ACPI_DMAR_SCOPE_TYPE_IOAPIC, rid)); } struct rmrr_iter_args { struct dmar_domain *domain; int dev_domain; int dev_busno; const ACPI_DMAR_PCI_PATH *dev_path; int dev_path_len; struct iommu_map_entries_tailq *rmrr_entries; }; static int dmar_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg) { struct rmrr_iter_args *ria; ACPI_DMAR_RESERVED_MEMORY *resmem; ACPI_DMAR_DEVICE_SCOPE *devscope; struct iommu_map_entry *entry; char *ptr, *ptrend; int match; if (dmarh->Type != ACPI_DMAR_TYPE_RESERVED_MEMORY) return (1); ria = arg; resmem = (ACPI_DMAR_RESERVED_MEMORY *)dmarh; if (resmem->Segment != ria->dev_domain) return (1); ptr = (char *)resmem + sizeof(*resmem); ptrend = (char *)resmem + resmem->Header.Length; for (;;) { if (ptr >= ptrend) break; devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr; ptr += devscope->Length; match = dmar_match_devscope(devscope, ria->dev_busno, ria->dev_path, ria->dev_path_len); if (match == 1) { entry = dmar_gas_alloc_entry(ria->domain, DMAR_PGF_WAITOK); entry->start = resmem->BaseAddress; /* The RMRR entry end address is inclusive. */ entry->end = resmem->EndAddress; TAILQ_INSERT_TAIL(ria->rmrr_entries, entry, unroll_link); } } return (1); } void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain, int dev_busno, const void *dev_path, int dev_path_len, struct iommu_map_entries_tailq *rmrr_entries) { struct rmrr_iter_args ria; ria.domain = domain; ria.dev_domain = dev_domain; ria.dev_busno = dev_busno; ria.dev_path = (const ACPI_DMAR_PCI_PATH *)dev_path; ria.dev_path_len = dev_path_len; ria.rmrr_entries = rmrr_entries; dmar_iterate_tbl(dmar_rmrr_iter, &ria); } struct inst_rmrr_iter_args { struct dmar_unit *dmar; }; static device_t dmar_path_dev(int segment, int path_len, int busno, const ACPI_DMAR_PCI_PATH *path, uint16_t *rid) { device_t dev; int i; dev = NULL; for (i = 0; i < path_len; i++) { dev = pci_find_dbsf(segment, busno, path->Device, path->Function); if (i != path_len - 1) { busno = pci_cfgregread(busno, path->Device, path->Function, PCIR_SECBUS_1, 1); path++; } } *rid = PCI_RID(busno, path->Device, path->Function); return (dev); } static int dmar_inst_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg) { const ACPI_DMAR_RESERVED_MEMORY *resmem; const ACPI_DMAR_DEVICE_SCOPE *devscope; struct inst_rmrr_iter_args *iria; const char *ptr, *ptrend; device_t dev; struct dmar_unit *unit; int dev_path_len; uint16_t rid; iria = arg; if (dmarh->Type != ACPI_DMAR_TYPE_RESERVED_MEMORY) return (1); resmem = (ACPI_DMAR_RESERVED_MEMORY *)dmarh; if (resmem->Segment != iria->dmar->segment) return (1); ptr = (const char *)resmem + sizeof(*resmem); ptrend = (const char *)resmem + resmem->Header.Length; for (;;) { if (ptr >= ptrend) break; devscope = (const ACPI_DMAR_DEVICE_SCOPE *)ptr; ptr += devscope->Length; /* XXXKIB bridge */ if (devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_ENDPOINT) continue; rid = 0; dev_path_len = (devscope->Length - sizeof(ACPI_DMAR_DEVICE_SCOPE)) / 2; dev = 
dmar_path_dev(resmem->Segment, dev_path_len, devscope->Bus, (const ACPI_DMAR_PCI_PATH *)(devscope + 1), &rid); if (dev == NULL) { if (bootverbose) { printf("dmar%d no dev found for RMRR " "[%#jx, %#jx] rid %#x scope path ", iria->dmar->iommu.unit, (uintmax_t)resmem->BaseAddress, (uintmax_t)resmem->EndAddress, rid); dmar_print_path(devscope->Bus, dev_path_len, (const ACPI_DMAR_PCI_PATH *)(devscope + 1)); printf("\n"); } unit = dmar_find_by_scope(resmem->Segment, devscope->Bus, (const ACPI_DMAR_PCI_PATH *)(devscope + 1), dev_path_len); if (iria->dmar != unit) continue; dmar_get_ctx_for_devpath(iria->dmar, rid, resmem->Segment, devscope->Bus, (const ACPI_DMAR_PCI_PATH *)(devscope + 1), dev_path_len, false, true); } else { unit = dmar_find(dev, false); if (iria->dmar != unit) continue; iommu_instantiate_ctx(&(iria)->dmar->iommu, dev, true); } } return (1); } /* * Pre-create all contexts for the DMAR which have RMRR entries. */ int dmar_instantiate_rmrr_ctxs(struct iommu_unit *unit) { struct dmar_unit *dmar; struct inst_rmrr_iter_args iria; int error; dmar = (struct dmar_unit *)unit; if (!dmar_barrier_enter(dmar, DMAR_BARRIER_RMRR)) return (0); error = 0; iria.dmar = dmar; dmar_iterate_tbl(dmar_inst_rmrr_iter, &iria); DMAR_LOCK(dmar); if (!LIST_EMPTY(&dmar->domains)) { KASSERT((dmar->hw_gcmd & DMAR_GCMD_TE) == 0, ("dmar%d: RMRR not handled but translation is already enabled", dmar->iommu.unit)); error = dmar_enable_translation(dmar); if (bootverbose) { if (error == 0) { printf("dmar%d: enabled translation\n", dmar->iommu.unit); } else { printf("dmar%d: enabling translation failed, " "error %d\n", dmar->iommu.unit, error); } } } dmar_barrier_exit(dmar, DMAR_BARRIER_RMRR); return (error); } #ifdef DDB #include #include static void dmar_print_domain_entry(const struct iommu_map_entry *entry) { struct iommu_map_entry *l, *r; db_printf( " start %jx end %jx first %jx last %jx free_down %jx flags %x ", entry->start, entry->end, entry->first, entry->last, entry->free_down, entry->flags); db_printf("left "); l = RB_LEFT(entry, rb_entry); if (l == NULL) db_printf("NULL "); else db_printf("%jx ", l->start); db_printf("right "); r = RB_RIGHT(entry, rb_entry); if (r == NULL) db_printf("NULL"); else db_printf("%jx", r->start); db_printf("\n"); } static void dmar_print_ctx(struct dmar_ctx *ctx) { db_printf( " @%p pci%d:%d:%d refs %d flags %x loads %lu unloads %lu\n", ctx, pci_get_bus(ctx->context.tag->owner), pci_get_slot(ctx->context.tag->owner), pci_get_function(ctx->context.tag->owner), ctx->refs, ctx->context.flags, ctx->context.loads, ctx->context.unloads); } static void dmar_print_domain(struct dmar_domain *domain, bool show_mappings) { struct iommu_map_entry *entry; struct dmar_ctx *ctx; db_printf( " @%p dom %d mgaw %d agaw %d pglvl %d end %jx refs %d\n" " ctx_cnt %d flags %x pgobj %p map_ents %u\n", domain, domain->domain, domain->mgaw, domain->agaw, domain->pglvl, (uintmax_t)domain->end, domain->refs, domain->ctx_cnt, - domain->flags, domain->pgtbl_obj, domain->entries_cnt); + domain->flags, domain->pgtbl_obj, domain->iodom.entries_cnt); if (!LIST_EMPTY(&domain->contexts)) { db_printf(" Contexts:\n"); LIST_FOREACH(ctx, &domain->contexts, link) dmar_print_ctx(ctx); } if (!show_mappings) return; db_printf(" mapped:\n"); RB_FOREACH(entry, dmar_gas_entries_tree, &domain->rb_root) { dmar_print_domain_entry(entry); if (db_pager_quit) break; } if (db_pager_quit) return; db_printf(" unloading:\n"); TAILQ_FOREACH(entry, &domain->iodom.unload_entries, dmamap_link) { dmar_print_domain_entry(entry); if 
(db_pager_quit) break; } } DB_FUNC(dmar_domain, db_dmar_print_domain, db_show_table, CS_OWN, NULL) { struct dmar_unit *unit; struct dmar_domain *domain; struct dmar_ctx *ctx; bool show_mappings, valid; int pci_domain, bus, device, function, i, t; db_expr_t radix; valid = false; radix = db_radix; db_radix = 10; t = db_read_token(); if (t == tSLASH) { t = db_read_token(); if (t != tIDENT) { db_printf("Bad modifier\n"); db_radix = radix; db_skip_to_eol(); return; } show_mappings = strchr(db_tok_string, 'm') != NULL; t = db_read_token(); } else { show_mappings = false; } if (t == tNUMBER) { pci_domain = db_tok_number; t = db_read_token(); if (t == tNUMBER) { bus = db_tok_number; t = db_read_token(); if (t == tNUMBER) { device = db_tok_number; t = db_read_token(); if (t == tNUMBER) { function = db_tok_number; valid = true; } } } } db_radix = radix; db_skip_to_eol(); if (!valid) { db_printf("usage: show dmar_domain [/m] " "<domain> <bus> <device> <function>\n"); return; } for (i = 0; i < dmar_devcnt; i++) { unit = device_get_softc(dmar_devs[i]); LIST_FOREACH(domain, &unit->domains, link) { LIST_FOREACH(ctx, &domain->contexts, link) { if (pci_domain == unit->segment && bus == pci_get_bus(ctx->context.tag->owner) && device == pci_get_slot(ctx->context.tag->owner) && function == pci_get_function(ctx->context.tag->owner)) { dmar_print_domain(domain, show_mappings); goto out; } } } } out:; } static void dmar_print_one(int idx, bool show_domains, bool show_mappings) { struct dmar_unit *unit; struct dmar_domain *domain; int i, frir; unit = device_get_softc(dmar_devs[idx]); db_printf("dmar%d at %p, root at 0x%jx, ver 0x%x\n", unit->iommu.unit, unit, dmar_read8(unit, DMAR_RTADDR_REG), dmar_read4(unit, DMAR_VER_REG)); db_printf("cap 0x%jx ecap 0x%jx gsts 0x%x fsts 0x%x fectl 0x%x\n", (uintmax_t)dmar_read8(unit, DMAR_CAP_REG), (uintmax_t)dmar_read8(unit, DMAR_ECAP_REG), dmar_read4(unit, DMAR_GSTS_REG), dmar_read4(unit, DMAR_FSTS_REG), dmar_read4(unit, DMAR_FECTL_REG)); if (unit->ir_enabled) { db_printf("ir is enabled; IRT @%p phys 0x%jx maxcnt %d\n", unit->irt, (uintmax_t)unit->irt_phys, unit->irte_cnt); } db_printf("fed 0x%x fea 0x%x feua 0x%x\n", dmar_read4(unit, DMAR_FEDATA_REG), dmar_read4(unit, DMAR_FEADDR_REG), dmar_read4(unit, DMAR_FEUADDR_REG)); db_printf("primary fault log:\n"); for (i = 0; i < DMAR_CAP_NFR(unit->hw_cap); i++) { frir = (DMAR_CAP_FRO(unit->hw_cap) + i) * 16; db_printf(" %d at 0x%x: %jx %jx\n", i, frir, (uintmax_t)dmar_read8(unit, frir), (uintmax_t)dmar_read8(unit, frir + 8)); } if (DMAR_HAS_QI(unit)) { db_printf("ied 0x%x iea 0x%x ieua 0x%x\n", dmar_read4(unit, DMAR_IEDATA_REG), dmar_read4(unit, DMAR_IEADDR_REG), dmar_read4(unit, DMAR_IEUADDR_REG)); if (unit->qi_enabled) { db_printf("qi is enabled: queue @0x%jx (IQA 0x%jx) " "size 0x%jx\n" " head 0x%x tail 0x%x avail 0x%x status 0x%x ctrl 0x%x\n" " hw compl 0x%x@%p/phys@%jx next seq 0x%x gen 0x%x\n", (uintmax_t)unit->inv_queue, (uintmax_t)dmar_read8(unit, DMAR_IQA_REG), (uintmax_t)unit->inv_queue_size, dmar_read4(unit, DMAR_IQH_REG), dmar_read4(unit, DMAR_IQT_REG), unit->inv_queue_avail, dmar_read4(unit, DMAR_ICS_REG), dmar_read4(unit, DMAR_IECTL_REG), unit->inv_waitd_seq_hw, &unit->inv_waitd_seq_hw, (uintmax_t)unit->inv_waitd_seq_hw_phys, unit->inv_waitd_seq, unit->inv_waitd_gen); } else { db_printf("qi is disabled\n"); } } if (show_domains) { db_printf("domains:\n"); LIST_FOREACH(domain, &unit->domains, link) { dmar_print_domain(domain, show_mappings); if (db_pager_quit) break; } } } DB_SHOW_COMMAND(dmar, db_dmar_print) { bool show_domains, show_mappings;
show_domains = strchr(modif, 'd') != NULL; show_mappings = strchr(modif, 'm') != NULL; if (!have_addr) { db_printf("usage: show dmar [/d] [/m] index\n"); return; } dmar_print_one((int)addr, show_domains, show_mappings); } DB_SHOW_ALL_COMMAND(dmars, db_show_all_dmars) { int i; bool show_domains, show_mappings; show_domains = strchr(modif, 'd') != NULL; show_mappings = strchr(modif, 'm') != NULL; for (i = 0; i < dmar_devcnt; i++) { dmar_print_one(i, show_domains, show_mappings); if (db_pager_quit) break; } } #endif struct iommu_unit * iommu_find(device_t dev, bool verbose) { struct dmar_unit *dmar; dmar = dmar_find(dev, verbose); return (&dmar->iommu); } Index: head/sys/x86/iommu/intel_gas.c =================================================================== --- head/sys/x86/iommu/intel_gas.c (revision 363309) +++ head/sys/x86/iommu/intel_gas.c (revision 363310) @@ -1,741 +1,742 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #define RB_AUGMENT(entry) dmar_gas_augment_entry(entry) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Guest Address Space management. */ static uma_zone_t iommu_map_entry_zone; static void intel_gas_init(void) { iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY", sizeof(struct iommu_map_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP); } SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL); struct iommu_map_entry * dmar_gas_alloc_entry(struct dmar_domain *domain, u_int flags) { struct iommu_map_entry *res; KASSERT((flags & ~(DMAR_PGF_WAITOK)) == 0, ("unsupported flags %x", flags)); res = uma_zalloc(iommu_map_entry_zone, ((flags & DMAR_PGF_WAITOK) != 0 ? 
M_WAITOK : M_NOWAIT) | M_ZERO); if (res != NULL) { res->domain = (struct iommu_domain *)domain; - atomic_add_int(&domain->entries_cnt, 1); + atomic_add_int(&domain->iodom.entries_cnt, 1); } return (res); } void dmar_gas_free_entry(struct dmar_domain *domain, struct iommu_map_entry *entry) { KASSERT(domain == (struct dmar_domain *)entry->domain, ("mismatched free domain %p entry %p entry->domain %p", domain, entry, entry->domain)); - atomic_subtract_int(&domain->entries_cnt, 1); + atomic_subtract_int(&domain->iodom.entries_cnt, 1); uma_zfree(iommu_map_entry_zone, entry); } static int dmar_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b) { /* Last entry have zero size, so <= */ KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)", a, (uintmax_t)a->start, (uintmax_t)a->end)); KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)", b, (uintmax_t)b->start, (uintmax_t)b->end)); KASSERT(a->end <= b->start || b->end <= a->start || a->end == a->start || b->end == b->start, ("overlapping entries %p (%jx, %jx) %p (%jx, %jx)", a, (uintmax_t)a->start, (uintmax_t)a->end, b, (uintmax_t)b->start, (uintmax_t)b->end)); if (a->end < b->end) return (-1); else if (b->end < a->end) return (1); return (0); } static void dmar_gas_augment_entry(struct iommu_map_entry *entry) { struct iommu_map_entry *child; iommu_gaddr_t free_down; free_down = 0; if ((child = RB_LEFT(entry, rb_entry)) != NULL) { free_down = MAX(free_down, child->free_down); free_down = MAX(free_down, entry->start - child->last); entry->first = child->first; } else entry->first = entry->start; if ((child = RB_RIGHT(entry, rb_entry)) != NULL) { free_down = MAX(free_down, child->free_down); free_down = MAX(free_down, child->first - entry->end); entry->last = child->last; } else entry->last = entry->end; entry->free_down = free_down; } RB_GENERATE(dmar_gas_entries_tree, iommu_map_entry, rb_entry, dmar_gas_cmp_entries); #ifdef INVARIANTS static void dmar_gas_check_free(struct dmar_domain *domain) { struct iommu_map_entry *entry, *l, *r; iommu_gaddr_t v; RB_FOREACH(entry, dmar_gas_entries_tree, &domain->rb_root) { KASSERT(domain == (struct dmar_domain *)entry->domain, ("mismatched free domain %p entry %p entry->domain %p", domain, entry, entry->domain)); l = RB_LEFT(entry, rb_entry); r = RB_RIGHT(entry, rb_entry); v = 0; if (l != NULL) { v = MAX(v, l->free_down); v = MAX(v, entry->start - l->last); } if (r != NULL) { v = MAX(v, r->free_down); v = MAX(v, r->first - entry->end); } MPASS(entry->free_down == v); } } #endif static bool dmar_gas_rb_insert(struct dmar_domain *domain, struct iommu_map_entry *entry) { struct iommu_map_entry *found; found = RB_INSERT(dmar_gas_entries_tree, &domain->rb_root, entry); return (found == NULL); } static void dmar_gas_rb_remove(struct dmar_domain *domain, struct iommu_map_entry *entry) { RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry); } void dmar_gas_init_domain(struct dmar_domain *domain) { struct iommu_map_entry *begin, *end; begin = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK); end = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK); DMAR_DOMAIN_LOCK(domain); - KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain)); + KASSERT(domain->iodom.entries_cnt == 2, ("dirty domain %p", domain)); KASSERT(RB_EMPTY(&domain->rb_root), ("non-empty entries %p", domain)); begin->start = 0; begin->end = DMAR_PAGE_SIZE; begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED; dmar_gas_rb_insert(domain, begin); end->start = domain->end; end->end = domain->end; end->flags = 
IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED; dmar_gas_rb_insert(domain, end); domain->first_place = begin; domain->last_place = end; domain->flags |= DMAR_DOMAIN_GAS_INITED; DMAR_DOMAIN_UNLOCK(domain); } void dmar_gas_fini_domain(struct dmar_domain *domain) { struct iommu_map_entry *entry, *entry1; DMAR_DOMAIN_ASSERT_LOCKED(domain); - KASSERT(domain->entries_cnt == 2, ("domain still in use %p", domain)); + KASSERT(domain->iodom.entries_cnt == 2, + ("domain still in use %p", domain)); entry = RB_MIN(dmar_gas_entries_tree, &domain->rb_root); KASSERT(entry->start == 0, ("start entry start %p", domain)); KASSERT(entry->end == DMAR_PAGE_SIZE, ("start entry end %p", domain)); KASSERT(entry->flags == IOMMU_MAP_ENTRY_PLACE, ("start entry flags %p", domain)); RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry); dmar_gas_free_entry(domain, entry); entry = RB_MAX(dmar_gas_entries_tree, &domain->rb_root); KASSERT(entry->start == domain->end, ("end entry start %p", domain)); KASSERT(entry->end == domain->end, ("end entry end %p", domain)); KASSERT(entry->flags == IOMMU_MAP_ENTRY_PLACE, ("end entry flags %p", domain)); RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry); dmar_gas_free_entry(domain, entry); RB_FOREACH_SAFE(entry, dmar_gas_entries_tree, &domain->rb_root, entry1) { KASSERT((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0, ("non-RMRR entry left %p", domain)); RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry); dmar_gas_free_entry(domain, entry); } } struct dmar_gas_match_args { struct dmar_domain *domain; iommu_gaddr_t size; int offset; const struct bus_dma_tag_common *common; u_int gas_flags; struct iommu_map_entry *entry; }; /* * The interval [beg, end) is a free interval between two dmar_map_entries. * maxaddr is an upper bound on addresses that can be allocated. Try to * allocate space in the free interval, subject to the conditions expressed * by a, and return 'true' if and only if the allocation attempt succeeds. */ static bool dmar_gas_match_one(struct dmar_gas_match_args *a, iommu_gaddr_t beg, iommu_gaddr_t end, iommu_gaddr_t maxaddr) { iommu_gaddr_t bs, start; a->entry->start = roundup2(beg + DMAR_PAGE_SIZE, a->common->alignment); if (a->entry->start + a->size > maxaddr) return (false); /* DMAR_PAGE_SIZE to create gap after new entry. */ if (a->entry->start < beg + DMAR_PAGE_SIZE || a->entry->start + a->size + a->offset + DMAR_PAGE_SIZE > end) return (false); /* No boundary crossing. */ if (iommu_test_boundary(a->entry->start + a->offset, a->size, a->common->boundary)) return (true); /* * The start + offset to start + offset + size region crosses * the boundary. Check if there is enough space after the * next boundary after the beg. */ bs = rounddown2(a->entry->start + a->offset + a->common->boundary, a->common->boundary); start = roundup2(bs, a->common->alignment); /* DMAR_PAGE_SIZE to create gap after new entry. */ if (start + a->offset + a->size + DMAR_PAGE_SIZE <= end && start + a->offset + a->size <= maxaddr && iommu_test_boundary(start + a->offset, a->size, a->common->boundary)) { a->entry->start = start; return (true); } /* * Not enough space to align at the requested boundary, or * boundary is smaller than the size, but allowed to split. * We already checked that start + size does not overlap maxaddr. * * XXXKIB. It is possible that bs is exactly at the start of * the next entry, then we do not have gap. Ignore for now. 
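* As a worked example: with boundary 0x1000, alignment 0x100 and offset 0, a candidate start of 0xf00 for a 0x400 request spans [0xf00, 0x1300) and crosses the boundary at 0x1000; then bs = rounddown2(0xf00 + 0x1000, 0x1000) = 0x1000, the retried start is roundup2(0x1000, 0x100) = 0x1000, and [0x1000, 0x1400) stays inside one boundary window.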
*/ if ((a->gas_flags & IOMMU_MF_CANSPLIT) != 0) { a->size = bs - a->entry->start; return (true); } return (false); } static void dmar_gas_match_insert(struct dmar_gas_match_args *a) { bool found; /* * The prev->end is always aligned on the page size, which * causes page alignment for the entry->start too. The size * is checked to be multiple of the page size. * * The page sized gap is created between consequent * allocations to ensure that out-of-bounds accesses fault. */ a->entry->end = a->entry->start + a->size; found = dmar_gas_rb_insert(a->domain, a->entry); KASSERT(found, ("found dup %p start %jx size %jx", a->domain, (uintmax_t)a->entry->start, (uintmax_t)a->size)); a->entry->flags = IOMMU_MAP_ENTRY_MAP; } static int dmar_gas_lowermatch(struct dmar_gas_match_args *a, struct iommu_map_entry *entry) { struct iommu_map_entry *child; child = RB_RIGHT(entry, rb_entry); if (child != NULL && entry->end < a->common->lowaddr && dmar_gas_match_one(a, entry->end, child->first, a->common->lowaddr)) { dmar_gas_match_insert(a); return (0); } if (entry->free_down < a->size + a->offset + DMAR_PAGE_SIZE) return (ENOMEM); if (entry->first >= a->common->lowaddr) return (ENOMEM); child = RB_LEFT(entry, rb_entry); if (child != NULL && 0 == dmar_gas_lowermatch(a, child)) return (0); if (child != NULL && child->last < a->common->lowaddr && dmar_gas_match_one(a, child->last, entry->start, a->common->lowaddr)) { dmar_gas_match_insert(a); return (0); } child = RB_RIGHT(entry, rb_entry); if (child != NULL && 0 == dmar_gas_lowermatch(a, child)) return (0); return (ENOMEM); } static int dmar_gas_uppermatch(struct dmar_gas_match_args *a, struct iommu_map_entry *entry) { struct iommu_map_entry *child; if (entry->free_down < a->size + a->offset + DMAR_PAGE_SIZE) return (ENOMEM); if (entry->last < a->common->highaddr) return (ENOMEM); child = RB_LEFT(entry, rb_entry); if (child != NULL && 0 == dmar_gas_uppermatch(a, child)) return (0); if (child != NULL && child->last >= a->common->highaddr && dmar_gas_match_one(a, child->last, entry->start, a->domain->end)) { dmar_gas_match_insert(a); return (0); } child = RB_RIGHT(entry, rb_entry); if (child != NULL && entry->end >= a->common->highaddr && dmar_gas_match_one(a, entry->end, child->first, a->domain->end)) { dmar_gas_match_insert(a); return (0); } if (child != NULL && 0 == dmar_gas_uppermatch(a, child)) return (0); return (ENOMEM); } static int dmar_gas_find_space(struct dmar_domain *domain, const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset, u_int flags, struct iommu_map_entry *entry) { struct dmar_gas_match_args a; int error; DMAR_DOMAIN_ASSERT_LOCKED(domain); KASSERT(entry->flags == 0, ("dirty entry %p %p", domain, entry)); KASSERT((size & DMAR_PAGE_MASK) == 0, ("size %jx", (uintmax_t)size)); a.domain = domain; a.size = size; a.offset = offset; a.common = common; a.gas_flags = flags; a.entry = entry; /* Handle lower region. */ if (common->lowaddr > 0) { error = dmar_gas_lowermatch(&a, RB_ROOT(&domain->rb_root)); if (error == 0) return (0); KASSERT(error == ENOMEM, ("error %d from dmar_gas_lowermatch", error)); } /* Handle upper region. 
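* Both dmar_gas_lowermatch and dmar_gas_uppermatch prune the search with the free_down augmentation: a subtree is entered only if its free_down records a gap of at least size + offset plus the page-sized guard. A subtree holding [0, 0x1000) and [0x3000, 0x4000) has free_down 0x2000, for instance, so with a 0x1000 guard page it is still worth visiting for requests with size + offset up to 0x1000 and is skipped outright for anything larger.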
*/ if (common->highaddr >= domain->end) return (ENOMEM); error = dmar_gas_uppermatch(&a, RB_ROOT(&domain->rb_root)); KASSERT(error == ENOMEM, ("error %d from dmar_gas_uppermatch", error)); return (error); } static int dmar_gas_alloc_region(struct dmar_domain *domain, struct iommu_map_entry *entry, u_int flags) { struct iommu_map_entry *next, *prev; bool found; DMAR_DOMAIN_ASSERT_LOCKED(domain); if ((entry->start & DMAR_PAGE_MASK) != 0 || (entry->end & DMAR_PAGE_MASK) != 0) return (EINVAL); if (entry->start >= entry->end) return (EINVAL); if (entry->end >= domain->end) return (EINVAL); next = RB_NFIND(dmar_gas_entries_tree, &domain->rb_root, entry); KASSERT(next != NULL, ("next must be non-null %p %jx", domain, (uintmax_t)entry->start)); prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, next); /* prev could be NULL */ /* * Adapt to broken BIOSes which specify overlapping RMRR * entries. * * XXXKIB: this does not handle a case when prev or next * entries are completely covered by the current one, which * extends both ways. */ if (prev != NULL && prev->end > entry->start && (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) { if ((flags & IOMMU_MF_RMRR) == 0 || (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0) return (EBUSY); entry->start = prev->end; } if (next->start < entry->end && (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) { if ((flags & IOMMU_MF_RMRR) == 0 || (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0) return (EBUSY); entry->end = next->start; } if (entry->end == entry->start) return (0); if (prev != NULL && prev->end > entry->start) { /* This assumes that prev is the placeholder entry. */ dmar_gas_rb_remove(domain, prev); prev = NULL; } if (next->start < entry->end) { dmar_gas_rb_remove(domain, next); next = NULL; } found = dmar_gas_rb_insert(domain, entry); KASSERT(found, ("found RMRR dup %p start %jx end %jx", domain, (uintmax_t)entry->start, (uintmax_t)entry->end)); if ((flags & IOMMU_MF_RMRR) != 0) entry->flags = IOMMU_MAP_ENTRY_RMRR; #ifdef INVARIANTS struct iommu_map_entry *ip, *in; ip = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry); in = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry); KASSERT(prev == NULL || ip == prev, ("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)", entry, entry->start, entry->end, prev, prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end, ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end)); KASSERT(next == NULL || in == next, ("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)", entry, entry->start, entry->end, next, next == NULL ? 0 : next->start, next == NULL ? 0 : next->end, in, in == NULL ? 0 : in->start, in == NULL ? 
0 : in->end)); #endif return (0); } void dmar_gas_free_space(struct dmar_domain *domain, struct iommu_map_entry *entry) { DMAR_DOMAIN_ASSERT_LOCKED(domain); KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR | IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP, ("permanent entry %p %p", domain, entry)); dmar_gas_rb_remove(domain, entry); entry->flags &= ~IOMMU_MAP_ENTRY_MAP; #ifdef INVARIANTS if (dmar_check_free) dmar_gas_check_free(domain); #endif } void dmar_gas_free_region(struct dmar_domain *domain, struct iommu_map_entry *entry) { struct iommu_map_entry *next, *prev; DMAR_DOMAIN_ASSERT_LOCKED(domain); KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR | IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR, ("non-RMRR entry %p %p", domain, entry)); prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry); next = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry); dmar_gas_rb_remove(domain, entry); entry->flags &= ~IOMMU_MAP_ENTRY_RMRR; if (prev == NULL) dmar_gas_rb_insert(domain, domain->first_place); if (next == NULL) dmar_gas_rb_insert(domain, domain->last_place); } int dmar_gas_map(struct dmar_domain *domain, const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset, u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res) { struct iommu_map_entry *entry; int error; KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0, ("invalid flags 0x%x", flags)); entry = dmar_gas_alloc_entry(domain, (flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0); if (entry == NULL) return (ENOMEM); DMAR_DOMAIN_LOCK(domain); error = dmar_gas_find_space(domain, common, size, offset, flags, entry); if (error == ENOMEM) { DMAR_DOMAIN_UNLOCK(domain); dmar_gas_free_entry(domain, entry); return (error); } #ifdef INVARIANTS if (dmar_check_free) dmar_gas_check_free(domain); #endif KASSERT(error == 0, ("unexpected error %d from dmar_gas_find_entry", error)); KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx", (uintmax_t)entry->end, (uintmax_t)domain->end)); entry->flags |= eflags; DMAR_DOMAIN_UNLOCK(domain); error = domain_map_buf(domain, entry->start, entry->end - entry->start, ma, ((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) | ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) | ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) | ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0), (flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0); if (error == ENOMEM) { dmar_domain_unload_entry(entry, true); return (error); } KASSERT(error == 0, ("unexpected error %d from domain_map_buf", error)); *res = entry; return (0); } int dmar_gas_map_region(struct dmar_domain *domain, struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma) { iommu_gaddr_t start; int error; KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain, entry, entry->flags)); KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0, ("invalid flags 0x%x", flags)); start = entry->start; DMAR_DOMAIN_LOCK(domain); error = dmar_gas_alloc_region(domain, entry, flags); if (error != 0) { DMAR_DOMAIN_UNLOCK(domain); return (error); } entry->flags |= eflags; DMAR_DOMAIN_UNLOCK(domain); if (entry->end == entry->start) return (0); error = domain_map_buf(domain, entry->start, entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start), ((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) | ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) | ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? 
DMAR_PTE_SNP : 0) | ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0), (flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0); if (error == ENOMEM) { dmar_domain_unload_entry(entry, false); return (error); } KASSERT(error == 0, ("unexpected error %d from domain_map_buf", error)); return (0); } int dmar_gas_reserve_region(struct dmar_domain *domain, iommu_gaddr_t start, iommu_gaddr_t end) { struct iommu_map_entry *entry; int error; entry = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK); entry->start = start; entry->end = end; DMAR_DOMAIN_LOCK(domain); error = dmar_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT); if (error == 0) entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED; DMAR_DOMAIN_UNLOCK(domain); if (error != 0) dmar_gas_free_entry(domain, entry); return (error); } struct iommu_map_entry * iommu_map_alloc_entry(struct iommu_domain *iodom, u_int flags) { struct dmar_domain *domain; struct iommu_map_entry *res; domain = (struct dmar_domain *)iodom; res = dmar_gas_alloc_entry(domain, flags); return (res); } void iommu_map_free_entry(struct iommu_domain *iodom, struct iommu_map_entry *entry) { struct dmar_domain *domain; domain = (struct dmar_domain *)iodom; dmar_gas_free_entry(domain, entry); } int iommu_map(struct iommu_domain *iodom, const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset, u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res) { struct dmar_domain *domain; int error; domain = (struct dmar_domain *)iodom; error = dmar_gas_map(domain, common, size, offset, eflags, flags, ma, res); return (error); } int iommu_map_region(struct iommu_domain *iodom, struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma) { struct dmar_domain *domain; int error; domain = (struct dmar_domain *)iodom; error = dmar_gas_map_region(domain, entry, eflags, flags, ma); return (error); } Index: head/sys/x86/iommu/intel_reg.h =================================================================== --- head/sys/x86/iommu/intel_reg.h (revision 363309) +++ head/sys/x86/iommu/intel_reg.h (revision 363310) @@ -1,413 +1,416 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013-2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __X86_IOMMU_INTEL_REG_H #define __X86_IOMMU_INTEL_REG_H #define DMAR_PAGE_SIZE PAGE_SIZE #define DMAR_PAGE_MASK (DMAR_PAGE_SIZE - 1) #define DMAR_PAGE_SHIFT PAGE_SHIFT #define DMAR_NPTEPG (DMAR_PAGE_SIZE / sizeof(dmar_pte_t)) #define DMAR_NPTEPGSHIFT 9 #define DMAR_PTEMASK (DMAR_NPTEPG - 1) +#define IOMMU_PAGE_SIZE DMAR_PAGE_SIZE +#define IOMMU_PAGE_MASK DMAR_PAGE_MASK + typedef struct dmar_root_entry { uint64_t r1; uint64_t r2; } dmar_root_entry_t; #define DMAR_ROOT_R1_P 1 /* Present */ #define DMAR_ROOT_R1_CTP_MASK 0xfffffffffffff000 /* Mask for Context-Entry Table Pointer */ #define DMAR_CTX_CNT (DMAR_PAGE_SIZE / sizeof(dmar_root_entry_t)) typedef struct dmar_ctx_entry { uint64_t ctx1; uint64_t ctx2; } dmar_ctx_entry_t; #define DMAR_CTX1_P 1 /* Present */ #define DMAR_CTX1_FPD 2 /* Fault Processing Disable */ /* Translation Type: */ #define DMAR_CTX1_T_UNTR 0 /* only Untranslated */ #define DMAR_CTX1_T_TR 4 /* both Untranslated and Translated */ #define DMAR_CTX1_T_PASS 8 /* Pass-Through */ #define DMAR_CTX1_ASR_MASK 0xfffffffffffff000 /* Mask for the Address Space Root */ #define DMAR_CTX2_AW_2LVL 0 /* 2-level page tables */ #define DMAR_CTX2_AW_3LVL 1 /* 3-level page tables */ #define DMAR_CTX2_AW_4LVL 2 /* 4-level page tables */ #define DMAR_CTX2_AW_5LVL 3 /* 5-level page tables */ #define DMAR_CTX2_AW_6LVL 4 /* 6-level page tables */ #define DMAR_CTX2_DID_MASK 0xffff00 #define DMAR_CTX2_DID(x) ((x) << 8) /* Domain Identifier */ #define DMAR_CTX2_GET_DID(ctx2) (((ctx2) & DMAR_CTX2_DID_MASK) >> 8) typedef struct dmar_pte { uint64_t pte; } dmar_pte_t; #define DMAR_PTE_R 1 /* Read */ #define DMAR_PTE_W (1 << 1) /* Write */ #define DMAR_PTE_SP (1 << 7) /* Super Page */ #define DMAR_PTE_SNP (1 << 11) /* Snoop Behaviour */ #define DMAR_PTE_ADDR_MASK 0xffffffffff000 /* Address Mask */ #define DMAR_PTE_TM (1ULL << 62) /* Transient Mapping */ typedef struct dmar_irte { uint64_t irte1; uint64_t irte2; } dmar_irte_t; /* Source Validation Type */ #define DMAR_IRTE2_SVT_NONE (0ULL << (82 - 64)) #define DMAR_IRTE2_SVT_RID (1ULL << (82 - 64)) #define DMAR_IRTE2_SVT_BUS (2ULL << (82 - 64)) /* Source-id Qualifier */ #define DMAR_IRTE2_SQ_RID (0ULL << (80 - 64)) #define DMAR_IRTE2_SQ_RID_N2 (1ULL << (80 - 64)) #define DMAR_IRTE2_SQ_RID_N21 (2ULL << (80 - 64)) #define DMAR_IRTE2_SQ_RID_N210 (3ULL << (80 - 64)) /* Source Identifier */ #define DMAR_IRTE2_SID_RID(x) ((uint64_t)(x)) #define DMAR_IRTE2_SID_BUS(start, end) ((((uint64_t)(start)) << 8) | (end)) /* Destination Id */ #define DMAR_IRTE1_DST_xAPIC(x) (((uint64_t)(x)) << 40) #define DMAR_IRTE1_DST_x2APIC(x) (((uint64_t)(x)) << 32) /* Vector */ #define DMAR_IRTE1_V(x) (((uint64_t)x) << 16) #define DMAR_IRTE1_IM_POSTED (1ULL << 15) /* Posted */ /* Delivery Mode */ #define DMAR_IRTE1_DLM_FM (0ULL << 5) #define DMAR_IRTE1_DLM_LP (1ULL << 5) #define DMAR_IRTE1_DLM_SMI (2ULL << 5) #define DMAR_IRTE1_DLM_NMI (4ULL << 5) #define DMAR_IRTE1_DLM_INIT (5ULL << 5) #define DMAR_IRTE1_DLM_ExtINT (7ULL << 5)
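In the ctx2 word of a context entry, bits 2:0 hold the address width and bits 23:8 hold the domain id, which is why DMAR_CTX2_GET_DID() masks with DMAR_CTX2_DID_MASK before shifting. A self-contained userland sketch of the packing and unpacking; the macros are restated here and the sample values are arbitrary:

#include <stdio.h>
#include <stdint.h>

#define DMAR_CTX2_AW_4LVL       2
#define DMAR_CTX2_DID_MASK      0xffff00
#define DMAR_CTX2_DID(x)        ((x) << 8)
#define DMAR_CTX2_GET_DID(ctx2) (((ctx2) & DMAR_CTX2_DID_MASK) >> 8)

int
main(void)
{
    /* pack domain id 0xabc and a 4-level address width, then unpack */
    uint64_t ctx2 = DMAR_CTX2_DID(0xabcULL) | DMAR_CTX2_AW_4LVL;

    printf("ctx2 %#llx did %#llx aw %llu\n", (unsigned long long)ctx2,
        (unsigned long long)DMAR_CTX2_GET_DID(ctx2),
        (unsigned long long)(ctx2 & 0x7));  /* ctx2 0xabc02 did 0xabc aw 2 */
    return (0);
}

/* Trigger Mode */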
#define DMAR_IRTE1_TM_EDGE (0ULL << 4) #define DMAR_IRTE1_TM_LEVEL (1ULL << 4) /* Redirection Hint */ #define DMAR_IRTE1_RH_DIRECT (0ULL << 3) #define DMAR_IRTE1_RH_SELECT (1ULL << 3) /* Destination Mode */ #define DMAR_IRTE1_DM_PHYSICAL (0ULL << 2) #define DMAR_IRTE1_DM_LOGICAL (1ULL << 2) #define DMAR_IRTE1_FPD (1ULL << 1) /* Fault Processing Disable */ #define DMAR_IRTE1_P (1ULL) /* Present */ /* Version register */ #define DMAR_VER_REG 0 #define DMAR_MAJOR_VER(x) (((x) >> 4) & 0xf) #define DMAR_MINOR_VER(x) ((x) & 0xf) /* Capabilities register */ #define DMAR_CAP_REG 0x8 #define DMAR_CAP_PI (1ULL << 59) /* Posted Interrupts */ #define DMAR_CAP_FL1GP (1ULL << 56) /* First Level 1GByte Page */ #define DMAR_CAP_DRD (1ULL << 55) /* DMA Read Draining */ #define DMAR_CAP_DWD (1ULL << 54) /* DMA Write Draining */ #define DMAR_CAP_MAMV(x) ((u_int)(((x) >> 48) & 0x3f)) /* Maximum Address Mask */ #define DMAR_CAP_NFR(x) ((u_int)(((x) >> 40) & 0xff) + 1) /* Num of Fault-recording regs */ #define DMAR_CAP_PSI (1ULL << 39) /* Page Selective Invalidation */ #define DMAR_CAP_SPS(x) ((u_int)(((x) >> 34) & 0xf)) /* Super-Page Support */ #define DMAR_CAP_SPS_2M 0x1 #define DMAR_CAP_SPS_1G 0x2 #define DMAR_CAP_SPS_512G 0x4 #define DMAR_CAP_SPS_1T 0x8 #define DMAR_CAP_FRO(x) ((u_int)(((x) >> 24) & 0x1ff)) /* Fault-recording reg offset */ #define DMAR_CAP_ISOCH (1 << 23) /* Isochrony */ #define DMAR_CAP_ZLR (1 << 22) /* Zero-length reads */ #define DMAR_CAP_MGAW(x) ((u_int)(((x) >> 16) & 0x3f)) /* Max Guest Address Width */ #define DMAR_CAP_SAGAW(x) ((u_int)(((x) >> 8) & 0x1f)) /* Adjusted Guest Address Width */ #define DMAR_CAP_SAGAW_2LVL 0x01 #define DMAR_CAP_SAGAW_3LVL 0x02 #define DMAR_CAP_SAGAW_4LVL 0x04 #define DMAR_CAP_SAGAW_5LVL 0x08 #define DMAR_CAP_SAGAW_6LVL 0x10 #define DMAR_CAP_CM (1 << 7) /* Caching mode */ #define DMAR_CAP_PHMR (1 << 6) /* Protected High-mem Region */ #define DMAR_CAP_PLMR (1 << 5) /* Protected Low-mem Region */ #define DMAR_CAP_RWBF (1 << 4) /* Required Write-Buffer Flushing */ #define DMAR_CAP_AFL (1 << 3) /* Advanced Fault Logging */ #define DMAR_CAP_ND(x) ((u_int)((x) & 0x3)) /* Number of domains */ /* Extended Capabilities register */ #define DMAR_ECAP_REG 0x10 #define DMAR_ECAP_PSS(x) (((x) >> 35) & 0xf) /* PASID Size Supported */ #define DMAR_ECAP_EAFS (1ULL << 34) /* Extended Accessed Flag */ #define DMAR_ECAP_NWFS (1ULL << 33) /* No Write Flag */ #define DMAR_ECAP_SRS (1ULL << 31) /* Supervisor Request */ #define DMAR_ECAP_ERS (1ULL << 30) /* Execute Request */ #define DMAR_ECAP_PRS (1ULL << 29) /* Page Request */ #define DMAR_ECAP_PASID (1ULL << 28) /* Process Address Space Id */ #define DMAR_ECAP_DIS (1ULL << 27) /* Deferred Invalidate */ #define DMAR_ECAP_NEST (1ULL << 26) /* Nested Translation */ #define DMAR_ECAP_MTS (1ULL << 25) /* Memory Type */ #define DMAR_ECAP_ECS (1ULL << 24) /* Extended Context */ #define DMAR_ECAP_MHMV(x) ((u_int)(((x) >> 20) & 0xf)) /* Maximum Handle Mask Value */ #define DMAR_ECAP_IRO(x) ((u_int)(((x) >> 8) & 0x3ff)) /* IOTLB Register Offset */ #define DMAR_ECAP_SC (1 << 7) /* Snoop Control */ #define DMAR_ECAP_PT (1 << 6) /* Pass Through */ #define DMAR_ECAP_EIM (1 << 4) /* Extended Interrupt Mode (x2APIC) */ #define DMAR_ECAP_IR (1 << 3) /* Interrupt Remapping */ #define DMAR_ECAP_DI (1 << 2) /* Device IOTLB */ #define DMAR_ECAP_QI (1 << 1) /* Queued Invalidation */ #define DMAR_ECAP_C (1 << 0) /* Coherency */ /* Global Command register */ #define DMAR_GCMD_REG 0x18 #define DMAR_GCMD_TE (1U << 31) /* Translation Enable */ #define 
DMAR_GCMD_SRTP (1 << 30) /* Set Root Table Pointer */ #define DMAR_GCMD_SFL (1 << 29) /* Set Fault Log */ #define DMAR_GCMD_EAFL (1 << 28) /* Enable Advanced Fault Logging */ #define DMAR_GCMD_WBF (1 << 27) /* Write Buffer Flush */ #define DMAR_GCMD_QIE (1 << 26) /* Queued Invalidation Enable */ #define DMAR_GCMD_IRE (1 << 25) /* Interrupt Remapping Enable */ #define DMAR_GCMD_SIRTP (1 << 24) /* Set Interrupt Remap Table Pointer */ #define DMAR_GCMD_CFI (1 << 23) /* Compatibility Format Interrupt */ /* Global Status register */ #define DMAR_GSTS_REG 0x1c #define DMAR_GSTS_TES (1U << 31) /* Translation Enable Status */ #define DMAR_GSTS_RTPS (1 << 30) /* Root Table Pointer Status */ #define DMAR_GSTS_FLS (1 << 29) /* Fault Log Status */ #define DMAR_GSTS_AFLS (1 << 28) /* Advanced Fault Logging Status */ #define DMAR_GSTS_WBFS (1 << 27) /* Write Buffer Flush Status */ #define DMAR_GSTS_QIES (1 << 26) /* Queued Invalidation Enable Status */ #define DMAR_GSTS_IRES (1 << 25) /* Interrupt Remapping Enable Status */ #define DMAR_GSTS_IRTPS (1 << 24) /* Interrupt Remapping Table Pointer Status */ #define DMAR_GSTS_CFIS (1 << 23) /* Compatibility Format Interrupt Status */ /* Root-Entry Table Address register */ #define DMAR_RTADDR_REG 0x20 #define DMAR_RTADDR_RTT (1 << 11) /* Root Table Type */ #define DMAR_RTADDR_RTA_MASK 0xfffffffffffff000 /* Context Command register */ #define DMAR_CCMD_REG 0x28 #define DMAR_CCMD_ICC (1ULL << 63) /* Invalidate Context-Cache */ #define DMAR_CCMD_ICC32 (1U << 31) #define DMAR_CCMD_CIRG_MASK (0x3ULL << 61) /* Context Invalidation Request Granularity */ #define DMAR_CCMD_CIRG_GLOB (0x1ULL << 61) /* Global */ #define DMAR_CCMD_CIRG_DOM (0x2ULL << 61) /* Domain */ #define DMAR_CCMD_CIRG_DEV (0x3ULL << 61) /* Device */ #define DMAR_CCMD_CAIG(x) (((x) >> 59) & 0x3) /* Context Actual Invalidation Granularity */ #define DMAR_CCMD_CAIG_GLOB 0x1 /* Global */ #define DMAR_CCMD_CAIG_DOM 0x2 /* Domain */ #define DMAR_CCMD_CAIG_DEV 0x3 /* Device */ #define DMAR_CCMD_FM (0x3ULL << 32) /* Function Mask */ #define DMAR_CCMD_SID(x) (((x) & 0xffff) << 16) /* Source-ID */ #define DMAR_CCMD_DID(x) ((x) & 0xffff) /* Domain-ID */ /* Invalidate Address register */ #define DMAR_IVA_REG_OFF 0 #define DMAR_IVA_IH (1 << 6) /* Invalidation Hint */ #define DMAR_IVA_AM(x) ((x) & 0x1f) /* Address Mask */ #define DMAR_IVA_ADDR(x) ((x) & ~0xfffULL) /* Address */ /* IOTLB Invalidate register */ #define DMAR_IOTLB_REG_OFF 0x8 #define DMAR_IOTLB_IVT (1ULL << 63) /* Invalidate IOTLB */ #define DMAR_IOTLB_IVT32 (1U << 31) #define DMAR_IOTLB_IIRG_MASK (0x3ULL << 60) /* Invalidation Request Granularity */ #define DMAR_IOTLB_IIRG_GLB (0x1ULL << 60) /* Global */ #define DMAR_IOTLB_IIRG_DOM (0x2ULL << 60) /* Domain-selective */ #define DMAR_IOTLB_IIRG_PAGE (0x3ULL << 60) /* Page-selective */ #define DMAR_IOTLB_IAIG_MASK (0x3ULL << 57) /* Actual Invalidation Granularity */ #define DMAR_IOTLB_IAIG_INVLD 0 /* Hw detected error */ #define DMAR_IOTLB_IAIG_GLB (0x1ULL << 57) /* Global */ #define DMAR_IOTLB_IAIG_DOM (0x2ULL << 57) /* Domain-selective */ #define DMAR_IOTLB_IAIG_PAGE (0x3ULL << 57) /* Page-selective */ #define DMAR_IOTLB_DR (0x1ULL << 49) /* Drain Reads */ #define DMAR_IOTLB_DW (0x1ULL << 48) /* Drain Writes */ #define DMAR_IOTLB_DID(x) (((uint64_t)(x) & 0xffff) << 32) /* Domain Id */ /* Fault Status register */ #define DMAR_FSTS_REG 0x34 #define DMAR_FSTS_FRI(x) (((x) >> 8) & 0xff) /* Fault Record Index */ #define DMAR_FSTS_ITE (1 << 6) /* Invalidation Time-out */
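As a worked illustration of the context-cache invalidation interface above: software sets ICC together with a requested granularity, and hardware clears ICC and reports the granularity it actually used in CAIG. A userland sketch, with the relevant CCMD macros restated and an arbitrary domain id:

#include <stdio.h>
#include <stdint.h>

#define DMAR_CCMD_ICC       (1ULL << 63)
#define DMAR_CCMD_CIRG_DOM  (0x2ULL << 61)
#define DMAR_CCMD_DID(x)    ((x) & 0xffff)
#define DMAR_CCMD_CAIG(x)   (((x) >> 59) & 0x3)

int
main(void)
{
    /* compose a domain-selective context-cache invalidation for domain 5 */
    uint64_t ccmd = DMAR_CCMD_ICC | DMAR_CCMD_CIRG_DOM | DMAR_CCMD_DID(5);

    printf("ccmd %#llx\n", (unsigned long long)ccmd); /* 0xc000000000000005 */
    return (0);
}

#define DMAR_FSTS_ICE (1 << 5)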
/* Invalidation Completion */ #define DMAR_FSTS_IQE (1 << 4) /* Invalidation Queue */ #define DMAR_FSTS_APF (1 << 3) /* Advanced Pending Fault */ #define DMAR_FSTS_AFO (1 << 2) /* Advanced Fault Overflow */ #define DMAR_FSTS_PPF (1 << 1) /* Primary Pending Fault */ #define DMAR_FSTS_PFO 1 /* Fault Overflow */ /* Fault Event Control register */ #define DMAR_FECTL_REG 0x38 #define DMAR_FECTL_IM (1U << 31) /* Interrupt Mask */ #define DMAR_FECTL_IP (1 << 30) /* Interrupt Pending */ /* Fault Event Data register */ #define DMAR_FEDATA_REG 0x3c /* Fault Event Address register */ #define DMAR_FEADDR_REG 0x40 /* Fault Event Upper Address register */ #define DMAR_FEUADDR_REG 0x44 /* Advanced Fault Log register */ #define DMAR_AFLOG_REG 0x58 /* Fault Recording Register, also usable for Advanced Fault Log records */ #define DMAR_FRCD2_F (1ULL << 63) /* Fault */ #define DMAR_FRCD2_F32 (1U << 31) #define DMAR_FRCD2_T(x) ((int)((x >> 62) & 1)) /* Type */ #define DMAR_FRCD2_T_W 0 /* Write request */ #define DMAR_FRCD2_T_R 1 /* Read or AtomicOp */ #define DMAR_FRCD2_AT(x) ((int)((x >> 60) & 0x3)) /* Address Type */ #define DMAR_FRCD2_FR(x) ((int)((x >> 32) & 0xff)) /* Fault Reason */ #define DMAR_FRCD2_SID(x) ((int)(x & 0xffff)) /* Source Identifier */ #define DMAR_FRCS1_FI_MASK 0xffffffffff000 /* Fault Info, Address Mask */ /* Protected Memory Enable register */ #define DMAR_PMEN_REG 0x64 #define DMAR_PMEN_EPM (1U << 31) /* Enable Protected Memory */ #define DMAR_PMEN_PRS 1 /* Protected Region Status */ /* Protected Low-Memory Base register */ #define DMAR_PLMBASE_REG 0x68 /* Protected Low-Memory Limit register */ #define DMAR_PLMLIMIT_REG 0x6c /* Protected High-Memory Base register */ #define DMAR_PHMBASE_REG 0x70 /* Protected High-Memory Limit register */ #define DMAR_PHMLIMIT_REG 0x78 /* Queued Invalidation Descriptors */ #define DMAR_IQ_DESCR_SZ_SHIFT 4 /* Shift for descriptor count to ring offset */ #define DMAR_IQ_DESCR_SZ (1 << DMAR_IQ_DESCR_SZ_SHIFT) /* Descriptor size */ /* Context-cache Invalidate Descriptor */ #define DMAR_IQ_DESCR_CTX_INV 0x1 #define DMAR_IQ_DESCR_CTX_GLOB (0x1 << 4) /* Granularity: Global */ #define DMAR_IQ_DESCR_CTX_DOM (0x2 << 4) /* Granularity: Domain */ #define DMAR_IQ_DESCR_CTX_DEV (0x3 << 4) /* Granularity: Device */ #define DMAR_IQ_DESCR_CTX_DID(x) (((uint32_t)(x)) << 16) /* Domain Id */ #define DMAR_IQ_DESCR_CTX_SRC(x) (((uint64_t)(x)) << 32) /* Source Id */ #define DMAR_IQ_DESCR_CTX_FM(x) (((uint64_t)(x)) << 48) /* Function Mask */ /* IOTLB Invalidate Descriptor */ #define DMAR_IQ_DESCR_IOTLB_INV 0x2 #define DMAR_IQ_DESCR_IOTLB_GLOB (0x1 << 4) /* Granularity: Global */ #define DMAR_IQ_DESCR_IOTLB_DOM (0x2 << 4) /* Granularity: Domain */ #define DMAR_IQ_DESCR_IOTLB_PAGE (0x3 << 4) /* Granularity: Page */ #define DMAR_IQ_DESCR_IOTLB_DW (1 << 6) /* Drain Writes */ #define DMAR_IQ_DESCR_IOTLB_DR (1 << 7) /* Drain Reads */ #define DMAR_IQ_DESCR_IOTLB_DID(x) (((uint32_t)(x)) << 16) /* Domain Id */ /* Device-TLB Invalidate Descriptor */ #define DMAR_IQ_DESCR_DTLB_INV 0x3 /* Invalidate Interrupt Entry Cache */ #define DMAR_IQ_DESCR_IEC_INV 0x4 #define DMAR_IQ_DESCR_IEC_IDX (1 << 4) /* Index-Selective Invalidation */ #define DMAR_IQ_DESCR_IEC_IIDX(x) (((uint64_t)x) << 32) /* Interrupt Index */ #define DMAR_IQ_DESCR_IEC_IM(x) ((x) << 27) /* Index Mask */ /* Invalidation Wait Descriptor */ #define DMAR_IQ_DESCR_WAIT_ID 0x5 #define DMAR_IQ_DESCR_WAIT_IF (1 << 4) /* Interrupt Flag */ #define DMAR_IQ_DESCR_WAIT_SW (1 << 5) /* Status Write */ #define DMAR_IQ_DESCR_WAIT_FN (1 << 6) 
/* Fence */ #define DMAR_IQ_DESCR_WAIT_SD(x) (((uint64_t)(x)) << 32) /* Status Data */ /* Extended IOTLB Invalidate Descriptor */ #define DMAR_IQ_DESCR_EIOTLB_INV 0x6 /* PASID-Cache Invalidate Descriptor */ #define DMAR_IQ_DESCR_PASIDC_INV 0x7 /* Extended Device-TLB Invalidate Descriptor */ #define DMAR_IQ_DESCR_EDTLB_INV 0x8 /* Invalidation Queue Head register */ #define DMAR_IQH_REG 0x80 #define DMAR_IQH_MASK 0x7fff0 /* Next cmd index mask */ /* Invalidation Queue Tail register */ #define DMAR_IQT_REG 0x88 #define DMAR_IQT_MASK 0x7fff0 /* Invalidation Queue Address register */ #define DMAR_IQA_REG 0x90 #define DMAR_IQA_IQA_MASK 0xfffffffffffff000 /* Invalidation Queue Base Address mask */ #define DMAR_IQA_QS_MASK 0x7 /* Queue Size in pages */ #define DMAR_IQA_QS_MAX 0x7 /* Max Queue size */ #define DMAR_IQA_QS_DEF 3 /* Invalidation Completion Status register */ #define DMAR_ICS_REG 0x9c #define DMAR_ICS_IWC 1 /* Invalidation Wait Descriptor Complete */ /* Invalidation Event Control register */ #define DMAR_IECTL_REG 0xa0 #define DMAR_IECTL_IM (1U << 31) /* Interrupt Mask */ #define DMAR_IECTL_IP (1 << 30) /* Interrupt Pending */ /* Invalidation Event Data register */ #define DMAR_IEDATA_REG 0xa4 /* Invalidation Event Address register */ #define DMAR_IEADDR_REG 0xa8 /* Invalidation Event Upper Address register */ #define DMAR_IEUADDR_REG 0xac /* Interrupt Remapping Table Address register */ #define DMAR_IRTA_REG 0xb8 #define DMAR_IRTA_EIME (1 << 11) /* Extended Interrupt Mode Enable */ #define DMAR_IRTA_S_MASK 0xf /* Size Mask */ #endif
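Tying the queued-invalidation constants together: IQA.QS selects an invalidation queue of 2^QS 4 KB pages, each holding 4096 / DMAR_IQ_DESCR_SZ descriptors, and an invalidation-wait descriptor lets software learn that earlier descriptors completed by having hardware write a status word and optionally raise an interrupt. A small userland sketch of the arithmetic and of assembling the low qword of a wait descriptor; the macros are restated from this header:

#include <stdio.h>
#include <stdint.h>

#define DMAR_IQ_DESCR_SZ_SHIFT   4
#define DMAR_IQ_DESCR_SZ         (1 << DMAR_IQ_DESCR_SZ_SHIFT)
#define DMAR_IQ_DESCR_WAIT_ID    0x5
#define DMAR_IQ_DESCR_WAIT_IF    (1 << 4)
#define DMAR_IQ_DESCR_WAIT_SW    (1 << 5)
#define DMAR_IQ_DESCR_WAIT_SD(x) (((uint64_t)(x)) << 32)
#define DMAR_IQA_QS_DEF          3

int
main(void)
{
    /* wait descriptor: interrupt on completion, store status word 1 */
    uint64_t wait_lo = DMAR_IQ_DESCR_WAIT_ID | DMAR_IQ_DESCR_WAIT_IF |
        DMAR_IQ_DESCR_WAIT_SW | DMAR_IQ_DESCR_WAIT_SD(1);
    unsigned int pages = 1u << DMAR_IQA_QS_DEF;

    printf("wait descriptor low qword %#llx\n",
        (unsigned long long)wait_lo);               /* 0x100000035 */
    printf("QS=%d -> %u pages, %u descriptors\n", DMAR_IQA_QS_DEF,
        pages, pages * 4096 / DMAR_IQ_DESCR_SZ);    /* 8 pages, 2048 */
    return (0);
}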