Index: head/sys/x86/iommu/busdma_dmar.c =================================================================== --- head/sys/x86/iommu/busdma_dmar.c (revision 284868) +++ head/sys/x86/iommu/busdma_dmar.c (revision 284869) @@ -1,881 +1,890 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * busdma_dmar.c, the implementation of the busdma(9) interface using * DMAR units from Intel VT-d. */ static bool dmar_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func) { char str[128], *env; snprintf(str, sizeof(str), "hw.busdma.pci%d.%d.%d.%d.bounce", domain, bus, slot, func); env = kern_getenv(str); if (env == NULL) return (false); freeenv(env); return (true); } /* * Given original device, find the requester ID that will be seen by * the DMAR unit and used for page table lookup. PCI bridges may take * ownership of transactions from downstream devices, so it may not be * the same as the BSF of the target device. In those cases, all * devices downstream of the bridge must share a single mapping * domain, and must collectively be assigned to use either DMAR or * bounce mapping. */ device_t dmar_get_requester(device_t dev, uint16_t *rid) { devclass_t pci_class; device_t l, pci, pcib, pcip, pcibp, requester; int cap_offset; uint16_t pcie_flags; bool bridge_is_pcie; pci_class = devclass_find("pci"); l = requester = dev; *rid = pci_get_rid(dev); /* * Walk the bridge hierarchy from the target device to the * host port to find the translating bridge nearest the DMAR * unit. 
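 *
 * An illustrative (hypothetical) caller; "dev" is assumed to be any
 * attached PCI device:
 *
 *    uint16_t rid;
 *    device_t requester;
 *
 *    requester = dmar_get_requester(dev, &rid);
 *    device_printf(dev, "requester %s rid 0x%x\n",
 *        device_get_nameunit(requester), rid);
 *
 * The returned rid, not the device's own bus/slot/function, is what
 * must be used for the context table lookup.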
*/ for (;;) { pci = device_get_parent(l); KASSERT(pci != NULL, ("dmar_get_requester(%s): NULL parent " "for %s", device_get_name(dev), device_get_name(l))); KASSERT(device_get_devclass(pci) == pci_class, ("dmar_get_requester(%s): non-pci parent %s for %s", device_get_name(dev), device_get_name(pci), device_get_name(l))); pcib = device_get_parent(pci); KASSERT(pcib != NULL, ("dmar_get_requester(%s): NULL bridge " "for %s", device_get_name(dev), device_get_name(pci))); /* * The parent of our "bridge" isn't another PCI bus, * so pcib isn't a PCI->PCI bridge but rather a host * port, and the requester ID won't be translated * further. */ pcip = device_get_parent(pcib); if (device_get_devclass(pcip) != pci_class) break; pcibp = device_get_parent(pcip); if (pci_find_cap(l, PCIY_EXPRESS, &cap_offset) == 0) { /* * Do not stop the loop even if the target * device is PCIe, because it is possible (but * unlikely) to have a PCI->PCIe bridge * somewhere in the hierarchy. */ l = pcib; } else { /* * Device is not PCIe, it cannot be seen as a * requester by DMAR unit. Check whether the * bridge is PCIe. */ bridge_is_pcie = pci_find_cap(pcib, PCIY_EXPRESS, &cap_offset) == 0; requester = pcib; /* * Check for a buggy PCIe/PCI bridge that * doesn't report the express capability. If * the bridge above it is express but isn't a * PCI bridge, then we know pcib is actually a * PCIe/PCI bridge. */ if (!bridge_is_pcie && pci_find_cap(pcibp, PCIY_EXPRESS, &cap_offset) == 0) { pcie_flags = pci_read_config(pcibp, cap_offset + PCIER_FLAGS, 2); if ((pcie_flags & PCIEM_FLAGS_TYPE) != PCIEM_TYPE_PCI_BRIDGE) bridge_is_pcie = true; } if (bridge_is_pcie) { /* * The current device is not PCIe, but * the bridge above it is. This is a * PCIe->PCI bridge. Assume that the * requester ID will be the secondary * bus number with slot and function * set to zero. * * XXX: Doesn't handle the case where * the bridge is PCIe->PCI-X, and the * bridge will only take ownership of * requests in some cases. We should * provide context entries with the * same page tables for taken and * non-taken transactions. */ *rid = PCI_RID(pci_get_bus(l), 0, 0); l = pcibp; } else { /* * Neither the device nor the bridge * above it are PCIe. This is a * conventional PCI->PCI bridge, which * will use the bridge's BSF as the * requester ID. */ *rid = pci_get_rid(pcib); l = pcib; } } } return (requester); } struct dmar_ctx * dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr) { device_t requester; struct dmar_ctx *ctx; bool disabled; uint16_t rid; requester = dmar_get_requester(dev, &rid); /* * If the user requested the IOMMU disabled for the device, we * cannot disable the DMAR, due to possibility of other * devices on the same DMAR still requiring translation. * Instead provide the identity mapping for the device * context. */ disabled = dmar_bus_dma_is_dev_disabled(pci_get_domain(requester), pci_get_bus(requester), pci_get_slot(requester), pci_get_function(requester)); - ctx = dmar_get_ctx(dmar, requester, rid, disabled, rmrr); + ctx = dmar_get_ctx_for_dev(dmar, requester, rid, disabled, rmrr); if (ctx == NULL) return (NULL); if (disabled) { /* * Keep the first reference on context, release the * later refs. 
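 * The kept reference prevents the disabled context from being
 * destroyed and re-created on every instantiation (see
 * DMAR_CTX_DISABLED).
 *
 * For reference, bounce (non-DMAR) operation for a single device is
 * requested with a loader tunable keyed by the requester's
 * domain/bus/slot/function, e.g. for a hypothetical device at
 * pci0:0:31:2:
 *
 *    hw.busdma.pci0.0.31.2.bounce=1
 *
 * Only the presence of the variable matters; its value is ignored by
 * dmar_bus_dma_is_dev_disabled().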
*/ DMAR_LOCK(dmar); if ((ctx->flags & DMAR_CTX_DISABLED) == 0) { ctx->flags |= DMAR_CTX_DISABLED; DMAR_UNLOCK(dmar); } else { dmar_free_ctx_locked(dmar, ctx); } ctx = NULL; } return (ctx); } bus_dma_tag_t dmar_get_dma_tag(device_t dev, device_t child) { struct dmar_unit *dmar; struct dmar_ctx *ctx; bus_dma_tag_t res; dmar = dmar_find(child); /* Not in scope of any DMAR ? */ if (dmar == NULL) return (NULL); if (!dmar->dma_enabled) return (NULL); dmar_quirks_pre_use(dmar); dmar_instantiate_rmrr_ctxs(dmar); ctx = dmar_instantiate_ctx(dmar, child, false); res = ctx == NULL ? NULL : (bus_dma_tag_t)&ctx->ctx_tag; return (res); } static MALLOC_DEFINE(M_DMAR_DMAMAP, "dmar_dmamap", "Intel DMAR DMA Map"); static void dmar_bus_schedule_dmamap(struct dmar_unit *unit, struct bus_dmamap_dmar *map); static int dmar_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat) { struct bus_dma_tag_dmar *newtag, *oldtag; int error; *dmat = NULL; error = common_bus_dma_tag_create(parent != NULL ? &((struct bus_dma_tag_dmar *)parent)->common : NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg, maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, sizeof(struct bus_dma_tag_dmar), (void **)&newtag); if (error != 0) goto out; oldtag = (struct bus_dma_tag_dmar *)parent; newtag->common.impl = &bus_dma_dmar_impl; newtag->ctx = oldtag->ctx; newtag->owner = oldtag->owner; *dmat = (bus_dma_tag_t)newtag; out: CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, (newtag != NULL ? newtag->common.flags : 0), error); return (error); } static int dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1) { struct bus_dma_tag_dmar *dmat, *dmat_copy, *parent; int error; error = 0; dmat_copy = dmat = (struct bus_dma_tag_dmar *)dmat1; if (dmat != NULL) { if (dmat->map_count != 0) { error = EBUSY; goto out; } while (dmat != NULL) { parent = (struct bus_dma_tag_dmar *)dmat->common.parent; if (atomic_fetchadd_int(&dmat->common.ref_count, -1) == 1) { if (dmat == &dmat->ctx->ctx_tag) dmar_free_ctx(dmat->ctx); free(dmat->segments, M_DMAR_DMAMAP); free(dmat, M_DEVBUF); dmat = parent; } else dmat = NULL; } } out: CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); return (error); } static int dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; tag = (struct bus_dma_tag_dmar *)dmat; map = malloc(sizeof(*map), M_DMAR_DMAMAP, M_NOWAIT | M_ZERO); if (map == NULL) { *mapp = NULL; return (ENOMEM); } if (tag->segments == NULL) { tag->segments = malloc(sizeof(bus_dma_segment_t) * tag->common.nsegments, M_DMAR_DMAMAP, M_NOWAIT); if (tag->segments == NULL) { free(map, M_DMAR_DMAMAP); *mapp = NULL; return (ENOMEM); } } TAILQ_INIT(&map->map_entries); map->tag = tag; map->locked = true; map->cansleep = false; tag->map_count++; *mapp = (bus_dmamap_t)map; return (0); } static int dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; + struct dmar_domain *domain; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; if (map != NULL) { - DMAR_CTX_LOCK(tag->ctx); + domain = tag->ctx->domain; + DMAR_DOMAIN_LOCK(domain); if (!TAILQ_EMPTY(&map->map_entries)) { - DMAR_CTX_UNLOCK(tag->ctx); + 
DMAR_DOMAIN_UNLOCK(domain); return (EBUSY); } - DMAR_CTX_UNLOCK(tag->ctx); + DMAR_DOMAIN_UNLOCK(domain); free(map, M_DMAR_DMAMAP); } tag->map_count--; return (0); } static int dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; int error, mflags; vm_memattr_t attr; error = dmar_bus_dmamap_create(dmat, flags, mapp); if (error != 0) return (error); mflags = (flags & BUS_DMA_NOWAIT) != 0 ? M_NOWAIT : M_WAITOK; mflags |= (flags & BUS_DMA_ZERO) != 0 ? M_ZERO : 0; attr = (flags & BUS_DMA_NOCACHE) != 0 ? VM_MEMATTR_UNCACHEABLE : VM_MEMATTR_DEFAULT; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)*mapp; if (tag->common.maxsize < PAGE_SIZE && tag->common.alignment <= tag->common.maxsize && attr == VM_MEMATTR_DEFAULT) { *vaddr = malloc(tag->common.maxsize, M_DEVBUF, mflags); map->flags |= BUS_DMAMAP_DMAR_MALLOC; } else { *vaddr = (void *)kmem_alloc_attr(kernel_arena, tag->common.maxsize, mflags, 0ul, BUS_SPACE_MAXADDR, attr); map->flags |= BUS_DMAMAP_DMAR_KMEM_ALLOC; } if (*vaddr == NULL) { dmar_bus_dmamap_destroy(dmat, *mapp); *mapp = NULL; return (ENOMEM); } return (0); } static void dmar_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; if ((map->flags & BUS_DMAMAP_DMAR_MALLOC) != 0) { free(vaddr, M_DEVBUF); map->flags &= ~BUS_DMAMAP_DMAR_MALLOC; } else { KASSERT((map->flags & BUS_DMAMAP_DMAR_KMEM_ALLOC) != 0, ("dmar_bus_dmamem_free for non alloced map %p", map)); kmem_free(kernel_arena, (vm_offset_t)vaddr, tag->common.maxsize); map->flags &= ~BUS_DMAMAP_DMAR_KMEM_ALLOC; } dmar_bus_dmamap_destroy(dmat, map1); } static int dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag, struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp, struct dmar_map_entries_tailq *unroll_list) { struct dmar_ctx *ctx; + struct dmar_domain *domain; struct dmar_map_entry *entry; dmar_gaddr_t size; bus_size_t buflen1; int error, idx, gas_flags, seg; KASSERT(offset < DMAR_PAGE_SIZE, ("offset %d", offset)); if (segs == NULL) segs = tag->segments; ctx = tag->ctx; + domain = ctx->domain; seg = *segp; error = 0; idx = 0; while (buflen > 0) { seg++; if (seg >= tag->common.nsegments) { error = EFBIG; break; } buflen1 = buflen > tag->common.maxsegsz ? tag->common.maxsegsz : buflen; size = round_page(offset + buflen1); /* * (Too) optimistically allow split if there are more * then one segments left. */ gas_flags = map->cansleep ? 
DMAR_GM_CANWAIT : 0; if (seg + 1 < tag->common.nsegments) gas_flags |= DMAR_GM_CANSPLIT; - error = dmar_gas_map(ctx, &tag->common, size, offset, + error = dmar_gas_map(domain, &tag->common, size, offset, DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE, gas_flags, ma + idx, &entry); if (error != 0) break; if ((gas_flags & DMAR_GM_CANSPLIT) != 0) { KASSERT(size >= entry->end - entry->start, ("split increased entry size %jx %jx %jx", (uintmax_t)size, (uintmax_t)entry->start, (uintmax_t)entry->end)); size = entry->end - entry->start; if (buflen1 > size) buflen1 = size; } else { KASSERT(entry->end - entry->start == size, ("no split allowed %jx %jx %jx", (uintmax_t)size, (uintmax_t)entry->start, (uintmax_t)entry->end)); } if (offset + buflen1 > size) buflen1 = size - offset; if (buflen1 > tag->common.maxsegsz) buflen1 = tag->common.maxsegsz; KASSERT(((entry->start + offset) & (tag->common.alignment - 1)) == 0, ("alignment failed: ctx %p start 0x%jx offset %x " "align 0x%jx", ctx, (uintmax_t)entry->start, offset, (uintmax_t)tag->common.alignment)); KASSERT(entry->end <= tag->common.lowaddr || entry->start >= tag->common.highaddr, ("entry placement failed: ctx %p start 0x%jx end 0x%jx " "lowaddr 0x%jx highaddr 0x%jx", ctx, (uintmax_t)entry->start, (uintmax_t)entry->end, (uintmax_t)tag->common.lowaddr, (uintmax_t)tag->common.highaddr)); KASSERT(dmar_test_boundary(entry->start + offset, buflen1, tag->common.boundary), ("boundary failed: ctx %p start 0x%jx end 0x%jx " "boundary 0x%jx", ctx, (uintmax_t)entry->start, (uintmax_t)entry->end, (uintmax_t)tag->common.boundary)); KASSERT(buflen1 <= tag->common.maxsegsz, ("segment too large: ctx %p start 0x%jx end 0x%jx " "buflen1 0x%jx maxsegsz 0x%jx", ctx, (uintmax_t)entry->start, (uintmax_t)entry->end, (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz)); - DMAR_CTX_LOCK(ctx); + DMAR_DOMAIN_LOCK(domain); TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link); entry->flags |= DMAR_MAP_ENTRY_MAP; - DMAR_CTX_UNLOCK(ctx); + DMAR_DOMAIN_UNLOCK(domain); TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link); segs[seg].ds_addr = entry->start + offset; segs[seg].ds_len = buflen1; idx += OFF_TO_IDX(trunc_page(offset + buflen1)); offset += buflen1; offset &= DMAR_PAGE_MASK; buflen -= buflen1; } if (error == 0) *segp = seg; return (error); } static int dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag, struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { struct dmar_ctx *ctx; + struct dmar_domain *domain; struct dmar_map_entry *entry, *entry1; struct dmar_map_entries_tailq unroll_list; int error; ctx = tag->ctx; + domain = ctx->domain; atomic_add_long(&ctx->loads, 1); TAILQ_INIT(&unroll_list); error = dmar_bus_dmamap_load_something1(tag, map, ma, offset, buflen, flags, segs, segp, &unroll_list); if (error != 0) { /* * The busdma interface does not allow us to report * partial buffer load, so unfortunately we have to * revert all work done. */ - DMAR_CTX_LOCK(ctx); + DMAR_DOMAIN_LOCK(domain); TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link, entry1) { /* * No entries other than what we have created * during the failed run might have been * inserted there in between, since we own ctx * pglock. 
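 *
 * A driver never observes the partial state being unrolled here; a
 * consumer (sketched with hypothetical names) gets all segments in
 * its callback, an error, or EINPROGRESS when the load was deferred
 * to the taskqueue:
 *
 *    error = bus_dmamap_load(sc->dtag, sc->dmap, buf, len,
 *        xxx_dma_callback, sc, BUS_DMA_WAITOK);
 *    if (error == EINPROGRESS)
 *        return;    (xxx_dma_callback will run later)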
*/ TAILQ_REMOVE(&map->map_entries, entry, dmamap_link); TAILQ_REMOVE(&unroll_list, entry, unroll_link); - TAILQ_INSERT_TAIL(&ctx->unload_entries, entry, + TAILQ_INSERT_TAIL(&domain->unload_entries, entry, dmamap_link); } - DMAR_CTX_UNLOCK(ctx); - taskqueue_enqueue(ctx->dmar->delayed_taskqueue, - &ctx->unload_task); + DMAR_DOMAIN_UNLOCK(domain); + taskqueue_enqueue(domain->dmar->delayed_taskqueue, + &domain->unload_task); } if (error == ENOMEM && (flags & BUS_DMA_NOWAIT) == 0 && !map->cansleep) error = EINPROGRESS; if (error == EINPROGRESS) - dmar_bus_schedule_dmamap(ctx->dmar, map); + dmar_bus_schedule_dmamap(domain->dmar, map); return (error); } static int dmar_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; return (dmar_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen, flags, segs, segp)); } static int dmar_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1, vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; vm_page_t *ma; vm_paddr_t pstart, pend; int error, i, ma_cnt, offset; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; pstart = trunc_page(buf); pend = round_page(buf + buflen); offset = buf & PAGE_MASK; ma_cnt = OFF_TO_IDX(pend - pstart); ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, map->cansleep ? M_WAITOK : M_NOWAIT); if (ma == NULL) return (ENOMEM); for (i = 0; i < ma_cnt; i++) ma[i] = PHYS_TO_VM_PAGE(pstart + i * PAGE_SIZE); error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen, flags, segs, segp); free(ma, M_DEVBUF); return (error); } static int dmar_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf, bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, int *segp) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; vm_page_t *ma, fma; vm_paddr_t pstart, pend, paddr; int error, i, ma_cnt, offset; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; pstart = trunc_page((vm_offset_t)buf); pend = round_page((vm_offset_t)buf + buflen); offset = (vm_offset_t)buf & PAGE_MASK; ma_cnt = OFF_TO_IDX(pend - pstart); ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, map->cansleep ? M_WAITOK : M_NOWAIT); if (ma == NULL) return (ENOMEM); if (dumping) { /* * If dumping, do not attempt to call * PHYS_TO_VM_PAGE() at all. It may return non-NULL * but the vm_page returned might be not initialized, * e.g. for the kernel itself. */ KASSERT(pmap == kernel_pmap, ("non-kernel address write")); fma = malloc(sizeof(struct vm_page) * ma_cnt, M_DEVBUF, M_ZERO | (map->cansleep ? 
M_WAITOK : M_NOWAIT)); if (fma == NULL) { free(ma, M_DEVBUF); return (ENOMEM); } for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) { paddr = pmap_kextract(pstart); vm_page_initfake(&fma[i], paddr, VM_MEMATTR_DEFAULT); ma[i] = &fma[i]; } } else { fma = NULL; for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) { if (pmap == kernel_pmap) paddr = pmap_kextract(pstart); else paddr = pmap_extract(pmap, pstart); ma[i] = PHYS_TO_VM_PAGE(paddr); KASSERT(VM_PAGE_TO_PHYS(ma[i]) == paddr, ("PHYS_TO_VM_PAGE failed %jx %jx m %p", (uintmax_t)paddr, (uintmax_t)VM_PAGE_TO_PHYS(ma[i]), ma[i])); } } error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen, flags, segs, segp); free(ma, M_DEVBUF); free(fma, M_DEVBUF); return (error); } static void dmar_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) { struct bus_dmamap_dmar *map; if (map1 == NULL) return; map = (struct bus_dmamap_dmar *)map1; map->mem = *mem; map->tag = (struct bus_dma_tag_dmar *)dmat; map->callback = callback; map->callback_arg = callback_arg; } static bus_dma_segment_t * dmar_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1, bus_dma_segment_t *segs, int nsegs, int error) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; if (!map->locked) { KASSERT(map->cansleep, ("map not locked and not sleepable context %p", map)); /* * We are called from the delayed context. Relock the * driver. */ (tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_LOCK); map->locked = true; } if (segs == NULL) segs = tag->segments; return (segs); } /* * The limitations of busdma KPI forces the dmar to perform the actual * unload, consisting of the unmapping of the map entries page tables, * from the delayed context on i386, since page table page mapping * might require a sleep to be successfull. The unfortunate * consequence is that the DMA requests can be served some time after * the bus_dmamap_unload() call returned. * * On amd64, we assume that sf allocation cannot fail. 
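 *
 * Illustrative consequence for a driver (hypothetical softc names):
 * on i386 the device may still be able to reach the buffer for a
 * short while after the unload call returns, because the unmapping
 * is queued to domain->unload_entries and performed later by
 * dmar_domain_unload_task():
 *
 *    bus_dmamap_sync(sc->dtag, sc->dmap, BUS_DMASYNC_POSTREAD);
 *    bus_dmamap_unload(sc->dtag, sc->dmap);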
*/ static void dmar_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; struct dmar_ctx *ctx; + struct dmar_domain *domain; #if defined(__amd64__) struct dmar_map_entries_tailq entries; #endif tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; ctx = tag->ctx; + domain = ctx->domain; atomic_add_long(&ctx->unloads, 1); #if defined(__i386__) - DMAR_CTX_LOCK(ctx); - TAILQ_CONCAT(&ctx->unload_entries, &map->map_entries, dmamap_link); - DMAR_CTX_UNLOCK(ctx); - taskqueue_enqueue(ctx->dmar->delayed_taskqueue, &ctx->unload_task); + DMAR_DOMAIN_LOCK(domain); + TAILQ_CONCAT(&domain->unload_entries, &map->map_entries, dmamap_link); + DMAR_DOMAIN_UNLOCK(domain); + taskqueue_enqueue(domain->dmar->delayed_taskqueue, + &domain->unload_task); #else /* defined(__amd64__) */ TAILQ_INIT(&entries); - DMAR_CTX_LOCK(ctx); + DMAR_DOMAIN_LOCK(domain); TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link); - DMAR_CTX_UNLOCK(ctx); + DMAR_DOMAIN_UNLOCK(domain); THREAD_NO_SLEEPING(); - dmar_ctx_unload(ctx, &entries, false); + dmar_domain_unload(domain, &entries, false); THREAD_SLEEPING_OK(); KASSERT(TAILQ_EMPTY(&entries), ("lazy dmar_ctx_unload %p", ctx)); #endif } static void dmar_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { } struct bus_dma_impl bus_dma_dmar_impl = { .tag_create = dmar_bus_dma_tag_create, .tag_destroy = dmar_bus_dma_tag_destroy, .map_create = dmar_bus_dmamap_create, .map_destroy = dmar_bus_dmamap_destroy, .mem_alloc = dmar_bus_dmamem_alloc, .mem_free = dmar_bus_dmamem_free, .load_phys = dmar_bus_dmamap_load_phys, .load_buffer = dmar_bus_dmamap_load_buffer, .load_ma = dmar_bus_dmamap_load_ma, .map_waitok = dmar_bus_dmamap_waitok, .map_complete = dmar_bus_dmamap_complete, .map_unload = dmar_bus_dmamap_unload, .map_sync = dmar_bus_dmamap_sync }; static void dmar_bus_task_dmamap(void *arg, int pending) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; struct dmar_unit *unit; unit = arg; DMAR_LOCK(unit); while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) { TAILQ_REMOVE(&unit->delayed_maps, map, delay_link); DMAR_UNLOCK(unit); tag = map->tag; map->cansleep = true; map->locked = false; bus_dmamap_load_mem((bus_dma_tag_t)tag, (bus_dmamap_t)map, &map->mem, map->callback, map->callback_arg, BUS_DMA_WAITOK); map->cansleep = false; if (map->locked) { (tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_UNLOCK); } else map->locked = true; map->cansleep = false; DMAR_LOCK(unit); } DMAR_UNLOCK(unit); } static void dmar_bus_schedule_dmamap(struct dmar_unit *unit, struct bus_dmamap_dmar *map) { map->locked = false; DMAR_LOCK(unit); TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link); DMAR_UNLOCK(unit); taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task); } int dmar_init_busdma(struct dmar_unit *unit) { unit->dma_enabled = 1; TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled); TAILQ_INIT(&unit->delayed_maps); TASK_INIT(&unit->dmamap_load_task, 0, dmar_bus_task_dmamap, unit); unit->delayed_taskqueue = taskqueue_create("dmar", M_WAITOK, taskqueue_thread_enqueue, &unit->delayed_taskqueue); taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK, "dmar%d busdma taskq", unit->unit); return (0); } void dmar_fini_busdma(struct dmar_unit *unit) { if (unit->delayed_taskqueue == NULL) return; taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task); taskqueue_free(unit->delayed_taskqueue); unit->delayed_taskqueue = NULL; } Index: 
head/sys/x86/iommu/intel_ctx.c =================================================================== --- head/sys/x86/iommu/intel_ctx.c (revision 284868) +++ head/sys/x86/iommu/intel_ctx.c (revision 284869) @@ -1,651 +1,777 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context"); +static MALLOC_DEFINE(M_DMAR_DOMAIN, "dmar_dom", "Intel DMAR Domain"); -static void dmar_ctx_unload_task(void *arg, int pending); +static void dmar_domain_unload_task(void *arg, int pending); +static void dmar_unref_domain_locked(struct dmar_unit *dmar, + struct dmar_domain *domain); +static void dmar_domain_destroy(struct dmar_domain *domain); +static void dmar_ctx_dtr(struct dmar_ctx *ctx); static void dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus) { struct sf_buf *sf; dmar_root_entry_t *re; vm_page_t ctxm; /* * Allocated context page must be linked. */ ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_NOALLOC); if (ctxm != NULL) return; /* * Page not present, allocate and link. Note that other * thread might execute this sequence in parallel. This * should be safe, because the context entries written by both * threads are equal. 
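 *
 * For orientation, a sketch of the indexing used here and in
 * dmar_map_ctx_entry() below: page 0 of ctx_obj holds the root
 * table, page 1 + bus the 256 context entries of that bus, indexed
 * by the low byte of the requester id (root_table/ctx_table below
 * are shorthand, not real identifiers):
 *
 *    re   = root_table + PCI_RID2BUS(rid);    root entry of the bus
 *    ctxp = ctx_table(bus) + (rid & 0xff);    entry for slot/function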
*/ TD_PREP_PINNED_ASSERT; ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_ZERO | DMAR_PGF_WAITOK); re = dmar_map_pgtbl(dmar->ctx_obj, 0, DMAR_PGF_NOALLOC, &sf); re += bus; dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK & VM_PAGE_TO_PHYS(ctxm))); dmar_flush_root_to_ram(dmar, re); dmar_unmap_pgtbl(sf); TD_PINNED_ASSERT; } static dmar_ctx_entry_t * dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp) { dmar_ctx_entry_t *ctxp; - ctxp = dmar_map_pgtbl(ctx->dmar->ctx_obj, 1 + PCI_RID2BUS(ctx->rid), - DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp); + ctxp = dmar_map_pgtbl(ctx->domain->dmar->ctx_obj, 1 + + PCI_RID2BUS(ctx->rid), DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp); ctxp += ctx->rid & 0xff; return (ctxp); } static void ctx_tag_init(struct dmar_ctx *ctx, device_t dev) { bus_addr_t maxaddr; - maxaddr = MIN(ctx->end, BUS_SPACE_MAXADDR); + maxaddr = MIN(ctx->domain->end, BUS_SPACE_MAXADDR); ctx->ctx_tag.common.ref_count = 1; /* Prevent free */ ctx->ctx_tag.common.impl = &bus_dma_dmar_impl; ctx->ctx_tag.common.boundary = PCI_DMA_BOUNDARY; ctx->ctx_tag.common.lowaddr = maxaddr; ctx->ctx_tag.common.highaddr = maxaddr; ctx->ctx_tag.common.maxsize = maxaddr; ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED; ctx->ctx_tag.common.maxsegsz = maxaddr; ctx->ctx_tag.ctx = ctx; ctx->ctx_tag.owner = dev; - /* XXXKIB initialize tag further */ } static void -ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp) +ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move) { struct dmar_unit *unit; + struct dmar_domain *domain; vm_page_t ctx_root; - unit = ctx->dmar; - KASSERT(ctxp->ctx1 == 0 && ctxp->ctx2 == 0, + domain = ctx->domain; + unit = domain->dmar; + KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0), ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx", unit->unit, pci_get_bus(ctx->ctx_tag.owner), pci_get_slot(ctx->ctx_tag.owner), pci_get_function(ctx->ctx_tag.owner), - ctxp->ctx1, - ctxp->ctx2)); - ctxp->ctx2 = DMAR_CTX2_DID(ctx->domain); - ctxp->ctx2 |= ctx->awlvl; - if ((ctx->flags & DMAR_CTX_IDMAP) != 0 && + ctxp->ctx1, ctxp->ctx2)); + /* + * For update due to move, the store is not atomic. It is + * possible that DMAR read upper doubleword, while low + * doubleword is not yet updated. The domain id is stored in + * the upper doubleword, while the table pointer in the lower. + * + * There is no good solution, for the same reason it is wrong + * to clear P bit in the ctx entry for update. + */ + dmar_pte_store1(&ctxp->ctx2, DMAR_CTX2_DID(domain->domain) | + domain->awlvl); + if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0 && (unit->hw_ecap & DMAR_ECAP_PT) != 0) { - KASSERT(ctx->pgtbl_obj == NULL, + KASSERT(domain->pgtbl_obj == NULL, ("ctx %p non-null pgtbl_obj", ctx)); - dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P); + dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P); } else { - ctx_root = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_NOALLOC); - dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_UNTR | + ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_NOALLOC); + dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_UNTR | (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) | DMAR_CTX1_P); } dmar_flush_ctx_to_ram(unit, ctxp); } static int -ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev) +dmar_flush_for_ctx_entry(struct dmar_unit *dmar, bool force) { + int error; + + /* + * If dmar declares Caching Mode as Set, follow 11.5 "Caching + * Mode Consideration" and do the (global) invalidation of the + * negative TLB entries. 
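+ *
+ * The force argument requests the invalidation even when CM is
+ * clear; it is passed as true when translation is about to be
+ * enabled and when a context entry is rewritten in place, as in
+ * dmar_move_ctx_to_domain() below:
+ *
+ *    ctx_id_entry_init(ctx, ctxp, true);
+ *    dmar_unmap_pgtbl(sf);
+ *    error = dmar_flush_for_ctx_entry(dmar, true);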
+ */ + if ((dmar->hw_cap & DMAR_CAP_CM) == 0 && !force) + return (0); + if (dmar->qi_enabled) { + dmar_qi_invalidate_ctx_glob_locked(dmar); + if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force) + dmar_qi_invalidate_iotlb_glob_locked(dmar); + return (0); + } + error = dmar_inv_ctx_glob(dmar); + if (error == 0 && ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force)) + error = dmar_inv_iotlb_glob(dmar); + return (error); +} + +static int +domain_init_rmrr(struct dmar_domain *domain, device_t dev) +{ struct dmar_map_entries_tailq rmrr_entries; struct dmar_map_entry *entry, *entry1; vm_page_t *ma; dmar_gaddr_t start, end; vm_pindex_t size, i; int error, error1; error = 0; TAILQ_INIT(&rmrr_entries); - dmar_ctx_parse_rmrr(ctx, dev, &rmrr_entries); + dmar_dev_parse_rmrr(domain, dev, &rmrr_entries); TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) { /* * VT-d specification requires that the start of an * RMRR entry is 4k-aligned. Buggy BIOSes put * anything into the start and end fields. Truncate * and round as neccesary. * * We also allow the overlapping RMRR entries, see * dmar_gas_alloc_region(). */ start = entry->start; end = entry->end; entry->start = trunc_page(start); entry->end = round_page(end); if (entry->start == entry->end) { /* Workaround for some AMI (?) BIOSes */ if (bootverbose) { device_printf(dev, "BIOS bug: dmar%d RMRR " "region (%jx, %jx) corrected\n", - ctx->dmar->unit, start, end); + domain->dmar->unit, start, end); } entry->end += DMAR_PAGE_SIZE * 0x20; } size = OFF_TO_IDX(entry->end - entry->start); ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK); for (i = 0; i < size; i++) { ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i, VM_MEMATTR_DEFAULT); } - error1 = dmar_gas_map_region(ctx, entry, DMAR_MAP_ENTRY_READ | - DMAR_MAP_ENTRY_WRITE, DMAR_GM_CANWAIT, ma); + error1 = dmar_gas_map_region(domain, entry, + DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE, + DMAR_GM_CANWAIT, ma); /* * Non-failed RMRR entries are owned by context rb * tree. Get rid of the failed entry, but do not stop * the loop. Rest of the parsed RMRR entries are * loaded and removed on the context destruction. */ if (error1 == 0 && entry->end != entry->start) { - DMAR_LOCK(ctx->dmar); - ctx->flags |= DMAR_CTX_RMRR; - DMAR_UNLOCK(ctx->dmar); + DMAR_LOCK(domain->dmar); + domain->refs++; /* XXXKIB prevent free */ + domain->flags |= DMAR_DOMAIN_RMRR; + DMAR_UNLOCK(domain->dmar); } else { if (error1 != 0) { device_printf(dev, "dmar%d failed to map RMRR region (%jx, %jx) %d\n", - ctx->dmar->unit, start, end, error1); + domain->dmar->unit, start, end, error1); error = error1; } TAILQ_REMOVE(&rmrr_entries, entry, unroll_link); - dmar_gas_free_entry(ctx, entry); + dmar_gas_free_entry(domain, entry); } for (i = 0; i < size; i++) vm_page_putfake(ma[i]); free(ma, M_TEMP); } return (error); } +static struct dmar_domain * +dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped) +{ + struct dmar_domain *domain; + int error, id, mgaw; + + id = alloc_unr(dmar->domids); + if (id == -1) + return (NULL); + domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO); + domain->domain = id; + LIST_INIT(&domain->contexts); + RB_INIT(&domain->rb_root); + TAILQ_INIT(&domain->unload_entries); + TASK_INIT(&domain->unload_task, 0, dmar_domain_unload_task, domain); + mtx_init(&domain->lock, "dmardom", NULL, MTX_DEF); + domain->dmar = dmar; + + /* + * For now, use the maximal usable physical address of the + * installed memory to calculate the mgaw on id_mapped domain. 
+ * It is useful for the identity mapping, and less so for the + * virtualized bus address space. + */ + domain->end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR; + mgaw = dmar_maxaddr2mgaw(dmar, domain->end, !id_mapped); + error = domain_set_agaw(domain, mgaw); + if (error != 0) + goto fail; + if (!id_mapped) + /* Use all supported address space for remapping. */ + domain->end = 1ULL << (domain->agaw - 1); + + dmar_gas_init_domain(domain); + + if (id_mapped) { + if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) { + domain->pgtbl_obj = domain_get_idmap_pgtbl(domain, + domain->end); + } + domain->flags |= DMAR_DOMAIN_IDMAP; + } else { + error = domain_alloc_pgtbl(domain); + if (error != 0) + goto fail; + /* Disable local apic region access */ + error = dmar_gas_reserve_region(domain, 0xfee00000, + 0xfeefffff + 1); + if (error != 0) + goto fail; + } + return (domain); + +fail: + dmar_domain_destroy(domain); + return (NULL); +} + static struct dmar_ctx * -dmar_get_ctx_alloc(struct dmar_unit *dmar, uint16_t rid) +dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid) { struct dmar_ctx *ctx; ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO); - RB_INIT(&ctx->rb_root); - TAILQ_INIT(&ctx->unload_entries); - TASK_INIT(&ctx->unload_task, 0, dmar_ctx_unload_task, ctx); - mtx_init(&ctx->lock, "dmarctx", NULL, MTX_DEF); - ctx->dmar = dmar; + ctx->domain = domain; ctx->rid = rid; + ctx->refs = 1; return (ctx); } static void -dmar_ctx_dtr(struct dmar_ctx *ctx, bool gas_inited, bool pgtbl_inited) +dmar_ctx_link(struct dmar_ctx *ctx) { + struct dmar_domain *domain; - if (gas_inited) { - DMAR_CTX_LOCK(ctx); - dmar_gas_fini_ctx(ctx); - DMAR_CTX_UNLOCK(ctx); + domain = ctx->domain; + DMAR_ASSERT_LOCKED(domain->dmar); + KASSERT(domain->refs >= domain->ctx_cnt, + ("dom %p ref underflow %d %d", domain, domain->refs, + domain->ctx_cnt)); + domain->refs++; + domain->ctx_cnt++; + LIST_INSERT_HEAD(&domain->contexts, ctx, link); +} + +static void +dmar_ctx_unlink(struct dmar_ctx *ctx) +{ + struct dmar_domain *domain; + + domain = ctx->domain; + DMAR_ASSERT_LOCKED(domain->dmar); + KASSERT(domain->refs > 0, + ("domain %p ctx dtr refs %d", domain, domain->refs)); + KASSERT(domain->ctx_cnt >= domain->refs, + ("domain %p ctx dtr refs %d ctx_cnt %d", domain, + domain->refs, domain->ctx_cnt)); + domain->refs--; + domain->ctx_cnt--; + LIST_REMOVE(ctx, link); +} + +static void +dmar_domain_destroy(struct dmar_domain *domain) +{ + + KASSERT(TAILQ_EMPTY(&domain->unload_entries), + ("unfinished unloads %p", domain)); + KASSERT(LIST_EMPTY(&domain->contexts), + ("destroying dom %p with contexts", domain)); + KASSERT(domain->ctx_cnt == 0, + ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt)); + KASSERT(domain->refs == 0, + ("destroying dom %p with refs %d", domain, domain->refs)); + if ((domain->flags & DMAR_DOMAIN_GAS_INITED) != 0) { + DMAR_DOMAIN_LOCK(domain); + dmar_gas_fini_domain(domain); + DMAR_DOMAIN_UNLOCK(domain); } - if (pgtbl_inited) { - if (ctx->pgtbl_obj != NULL) - DMAR_CTX_PGLOCK(ctx); - ctx_free_pgtbl(ctx); + if ((domain->flags & DMAR_DOMAIN_PGTBL_INITED) != 0) { + if (domain->pgtbl_obj != NULL) + DMAR_DOMAIN_PGLOCK(domain); + domain_free_pgtbl(domain); } - mtx_destroy(&ctx->lock); - free(ctx, M_DMAR_CTX); + mtx_destroy(&domain->lock); + free_unr(domain->dmar->domids, domain->domain); + free(domain, M_DMAR_DOMAIN); } struct dmar_ctx * -dmar_get_ctx(struct dmar_unit *dmar, device_t dev, uint16_t rid, bool id_mapped, - bool rmrr_init) +dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t 
rid, + bool id_mapped, bool rmrr_init) { + struct dmar_domain *domain, *domain1; struct dmar_ctx *ctx, *ctx1; dmar_ctx_entry_t *ctxp; struct sf_buf *sf; - int bus, slot, func, error, mgaw; + int bus, slot, func, error; bool enable; bus = pci_get_bus(dev); slot = pci_get_slot(dev); func = pci_get_function(dev); enable = false; TD_PREP_PINNED_ASSERT; DMAR_LOCK(dmar); ctx = dmar_find_ctx_locked(dmar, rid); error = 0; if (ctx == NULL) { /* * Perform the allocations which require sleep or have * higher chance to succeed if the sleep is allowed. */ DMAR_UNLOCK(dmar); dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid)); - ctx1 = dmar_get_ctx_alloc(dmar, rid); - - if (id_mapped) { - /* - * For now, use the maximal usable physical - * address of the installed memory to - * calculate the mgaw. It is useful for the - * identity mapping, and less so for the - * virtualized bus address space. - */ - ctx1->end = ptoa(Maxmem); - mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, false); - error = ctx_set_agaw(ctx1, mgaw); - if (error != 0) { - dmar_ctx_dtr(ctx1, false, false); - TD_PINNED_ASSERT; - return (NULL); - } - } else { - ctx1->end = BUS_SPACE_MAXADDR; - mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, true); - error = ctx_set_agaw(ctx1, mgaw); - if (error != 0) { - dmar_ctx_dtr(ctx1, false, false); - TD_PINNED_ASSERT; - return (NULL); - } - /* Use all supported address space for remapping. */ - ctx1->end = 1ULL << (ctx1->agaw - 1); + domain1 = dmar_domain_alloc(dmar, id_mapped); + if (domain1 == NULL) { + TD_PINNED_ASSERT; + return (NULL); } - - - dmar_gas_init_ctx(ctx1); - if (id_mapped) { - if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) { - ctx1->pgtbl_obj = ctx_get_idmap_pgtbl(ctx1, - ctx1->end); - } - ctx1->flags |= DMAR_CTX_IDMAP; - } else { - error = ctx_alloc_pgtbl(ctx1); - if (error != 0) { - dmar_ctx_dtr(ctx1, true, false); - TD_PINNED_ASSERT; - return (NULL); - } - /* Disable local apic region access */ - error = dmar_gas_reserve_region(ctx1, 0xfee00000, - 0xfeefffff + 1); - if (error != 0) { - dmar_ctx_dtr(ctx1, true, true); - TD_PINNED_ASSERT; - return (NULL); - } - error = ctx_init_rmrr(ctx1, dev); - if (error != 0) { - dmar_ctx_dtr(ctx1, true, true); - TD_PINNED_ASSERT; - return (NULL); - } + error = domain_init_rmrr(domain1, dev); + if (error != 0) { + dmar_domain_destroy(domain1); + TD_PINNED_ASSERT; + return (NULL); } + ctx1 = dmar_ctx_alloc(domain1, rid); ctxp = dmar_map_ctx_entry(ctx1, &sf); DMAR_LOCK(dmar); /* * Recheck the contexts, other thread might have * already allocated needed one. */ ctx = dmar_find_ctx_locked(dmar, rid); if (ctx == NULL) { + domain = domain1; ctx = ctx1; + dmar_ctx_link(ctx); ctx->ctx_tag.owner = dev; - ctx->domain = alloc_unrl(dmar->domids); - if (ctx->domain == -1) { - DMAR_UNLOCK(dmar); - dmar_unmap_pgtbl(sf); - dmar_ctx_dtr(ctx, true, true); - TD_PINNED_ASSERT; - return (NULL); - } ctx_tag_init(ctx, dev); /* * This is the first activated context for the * DMAR unit. Enable the translation after * everything is set up. */ - if (LIST_EMPTY(&dmar->contexts)) + if (LIST_EMPTY(&dmar->domains)) enable = true; - LIST_INSERT_HEAD(&dmar->contexts, ctx, link); - ctx_id_entry_init(ctx, ctxp); + LIST_INSERT_HEAD(&dmar->domains, domain, link); + ctx_id_entry_init(ctx, ctxp, false); device_printf(dev, "dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d " "agaw %d %s-mapped\n", dmar->unit, dmar->segment, bus, slot, - func, rid, ctx->domain, ctx->mgaw, ctx->agaw, - id_mapped ? "id" : "re"); + func, rid, domain->domain, domain->mgaw, + domain->agaw, id_mapped ? 
"id" : "re"); } else { - dmar_ctx_dtr(ctx1, true, true); + /* Nothing needs to be done to destroy ctx1. */ + dmar_domain_destroy(domain1); + domain = ctx->domain; + ctx->refs++; /* tag referenced us */ } dmar_unmap_pgtbl(sf); + } else { + domain = ctx->domain; + ctx->refs++; /* tag referenced us */ } - ctx->refs++; - if ((ctx->flags & DMAR_CTX_RMRR) != 0) - ctx->refs++; /* XXXKIB */ - /* - * If dmar declares Caching Mode as Set, follow 11.5 "Caching - * Mode Consideration" and do the (global) invalidation of the - * negative TLB entries. - */ - if ((dmar->hw_cap & DMAR_CAP_CM) != 0 || enable) { - if (dmar->qi_enabled) { - dmar_qi_invalidate_ctx_glob_locked(dmar); - if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) - dmar_qi_invalidate_iotlb_glob_locked(dmar); - } else { - error = dmar_inv_ctx_glob(dmar); - if (error == 0 && - (dmar->hw_ecap & DMAR_ECAP_DI) != 0) - error = dmar_inv_iotlb_glob(dmar); - if (error != 0) { - dmar_free_ctx_locked(dmar, ctx); - TD_PINNED_ASSERT; - return (NULL); - } - } + error = dmar_flush_for_ctx_entry(dmar, enable); + if (error != 0) { + dmar_free_ctx_locked(dmar, ctx); + TD_PINNED_ASSERT; + return (NULL); } /* * The dmar lock was potentially dropped between check for the * empty context list and now. Recheck the state of GCMD_TE * to avoid unneeded command. */ if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) { error = dmar_enable_translation(dmar); if (error != 0) { dmar_free_ctx_locked(dmar, ctx); TD_PINNED_ASSERT; return (NULL); } } DMAR_UNLOCK(dmar); TD_PINNED_ASSERT; return (ctx); } +int +dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx) +{ + struct dmar_unit *dmar; + struct dmar_domain *old_domain; + dmar_ctx_entry_t *ctxp; + struct sf_buf *sf; + int error; + + dmar = domain->dmar; + old_domain = ctx->domain; + if (domain == old_domain) + return (0); + KASSERT(old_domain->dmar == dmar, + ("domain %p %u moving between dmars %u %u", domain, + domain->domain, old_domain->dmar->unit, domain->dmar->unit)); + TD_PREP_PINNED_ASSERT; + + ctxp = dmar_map_ctx_entry(ctx, &sf); + DMAR_LOCK(dmar); + dmar_ctx_unlink(ctx); + ctx->domain = domain; + dmar_ctx_link(ctx); + ctx_id_entry_init(ctx, ctxp, true); + dmar_unmap_pgtbl(sf); + error = dmar_flush_for_ctx_entry(dmar, true); + /* If flush failed, rolling back would not work as well. */ + printf("dmar%d rid %x domain %d->%d %s-mapped\n", + dmar->unit, ctx->rid, old_domain->domain, domain->domain, + (domain->flags & DMAR_DOMAIN_IDMAP) != 0 ? 
"id" : "re"); + dmar_unref_domain_locked(dmar, old_domain); + TD_PINNED_ASSERT; + return (error); +} + +static void +dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain) +{ + + DMAR_ASSERT_LOCKED(dmar); + KASSERT(domain->refs >= 1, + ("dmar %d domain %p refs %u", dmar->unit, domain, domain->refs)); + KASSERT(domain->refs > domain->ctx_cnt, + ("dmar %d domain %p refs %d ctx_cnt %d", dmar->unit, domain, + domain->refs, domain->ctx_cnt)); + + if (domain->refs > 1) { + domain->refs--; + DMAR_UNLOCK(dmar); + return; + } + + KASSERT((domain->flags & DMAR_DOMAIN_RMRR) == 0, + ("lost ref on RMRR domain %p", domain)); + + LIST_REMOVE(domain, link); + DMAR_UNLOCK(dmar); + + taskqueue_drain(dmar->delayed_taskqueue, &domain->unload_task); + dmar_domain_destroy(domain); +} + void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx) { struct sf_buf *sf; dmar_ctx_entry_t *ctxp; + struct dmar_domain *domain; DMAR_ASSERT_LOCKED(dmar); KASSERT(ctx->refs >= 1, ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs)); /* * If our reference is not last, only the dereference should * be performed. */ if (ctx->refs > 1) { ctx->refs--; DMAR_UNLOCK(dmar); return; } - KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0, - ("lost ref on RMRR ctx %p", ctx)); KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0, ("lost ref on disabled ctx %p", ctx)); /* * Otherwise, the context entry must be cleared before the * page table is destroyed. The mapping of the context * entries page could require sleep, unlock the dmar. */ DMAR_UNLOCK(dmar); TD_PREP_PINNED_ASSERT; ctxp = dmar_map_ctx_entry(ctx, &sf); DMAR_LOCK(dmar); KASSERT(ctx->refs >= 1, ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs)); /* * Other thread might have referenced the context, in which * case again only the dereference should be performed. */ if (ctx->refs > 1) { ctx->refs--; DMAR_UNLOCK(dmar); dmar_unmap_pgtbl(sf); TD_PINNED_ASSERT; return; } - KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0, - ("lost ref on RMRR ctx %p", ctx)); KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0, ("lost ref on disabled ctx %p", ctx)); /* * Clear the context pointer and flush the caches. * XXXKIB: cannot do this if any RMRR entries are still present. */ dmar_pte_clear(&ctxp->ctx1); ctxp->ctx2 = 0; dmar_flush_ctx_to_ram(dmar, ctxp); dmar_inv_ctx_glob(dmar); if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) { if (dmar->qi_enabled) dmar_qi_invalidate_iotlb_glob_locked(dmar); else dmar_inv_iotlb_glob(dmar); } - LIST_REMOVE(ctx, link); - DMAR_UNLOCK(dmar); - - /* - * The rest of the destruction is invisible for other users of - * the dmar unit. - */ - taskqueue_drain(dmar->delayed_taskqueue, &ctx->unload_task); - KASSERT(TAILQ_EMPTY(&ctx->unload_entries), - ("unfinished unloads %p", ctx)); dmar_unmap_pgtbl(sf); - free_unr(dmar->domids, ctx->domain); - dmar_ctx_dtr(ctx, true, true); + domain = ctx->domain; + dmar_ctx_unlink(ctx); + free(ctx, M_DMAR_CTX); + dmar_unref_domain_locked(dmar, domain); TD_PINNED_ASSERT; } void dmar_free_ctx(struct dmar_ctx *ctx) { struct dmar_unit *dmar; - dmar = ctx->dmar; + dmar = ctx->domain->dmar; DMAR_LOCK(dmar); dmar_free_ctx_locked(dmar, ctx); } +/* + * Returns with the domain locked. 
+ */ struct dmar_ctx * dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid) { + struct dmar_domain *domain; struct dmar_ctx *ctx; DMAR_ASSERT_LOCKED(dmar); - LIST_FOREACH(ctx, &dmar->contexts, link) { - if (ctx->rid == rid) - return (ctx); + LIST_FOREACH(domain, &dmar->domains, link) { + LIST_FOREACH(ctx, &domain->contexts, link) { + if (ctx->rid == rid) + return (ctx); + } } return (NULL); } void -dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free) +dmar_domain_free_entry(struct dmar_map_entry *entry, bool free) { - struct dmar_ctx *ctx; + struct dmar_domain *domain; - ctx = entry->ctx; - DMAR_CTX_LOCK(ctx); + domain = entry->domain; + DMAR_DOMAIN_LOCK(domain); if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0) - dmar_gas_free_region(ctx, entry); + dmar_gas_free_region(domain, entry); else - dmar_gas_free_space(ctx, entry); - DMAR_CTX_UNLOCK(ctx); + dmar_gas_free_space(domain, entry); + DMAR_DOMAIN_UNLOCK(domain); if (free) - dmar_gas_free_entry(ctx, entry); + dmar_gas_free_entry(domain, entry); else entry->flags = 0; } void -dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free) +dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free) { struct dmar_unit *unit; - unit = entry->ctx->dmar; + unit = entry->domain->dmar; if (unit->qi_enabled) { DMAR_LOCK(unit); - dmar_qi_invalidate_locked(entry->ctx, entry->start, + dmar_qi_invalidate_locked(entry->domain, entry->start, entry->end - entry->start, &entry->gseq); if (!free) entry->flags |= DMAR_MAP_ENTRY_QI_NF; TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link); DMAR_UNLOCK(unit); } else { - ctx_flush_iotlb_sync(entry->ctx, entry->start, entry->end - - entry->start); - dmar_ctx_free_entry(entry, free); + domain_flush_iotlb_sync(entry->domain, entry->start, + entry->end - entry->start); + dmar_domain_free_entry(entry, free); } } void -dmar_ctx_unload(struct dmar_ctx *ctx, struct dmar_map_entries_tailq *entries, - bool cansleep) +dmar_domain_unload(struct dmar_domain *domain, + struct dmar_map_entries_tailq *entries, bool cansleep) { struct dmar_unit *unit; struct dmar_map_entry *entry, *entry1; struct dmar_qi_genseq gseq; int error; - unit = ctx->dmar; + unit = domain->dmar; TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) { KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0, - ("not mapped entry %p %p", ctx, entry)); - error = ctx_unmap_buf(ctx, entry->start, entry->end - + ("not mapped entry %p %p", domain, entry)); + error = domain_unmap_buf(domain, entry->start, entry->end - entry->start, cansleep ? DMAR_PGF_WAITOK : 0); - KASSERT(error == 0, ("unmap %p error %d", ctx, error)); + KASSERT(error == 0, ("unmap %p error %d", domain, error)); if (!unit->qi_enabled) { - ctx_flush_iotlb_sync(ctx, entry->start, + domain_flush_iotlb_sync(domain, entry->start, entry->end - entry->start); TAILQ_REMOVE(entries, entry, dmamap_link); - dmar_ctx_free_entry(entry, true); + dmar_domain_free_entry(entry, true); } } if (TAILQ_EMPTY(entries)) return; KASSERT(unit->qi_enabled, ("loaded entry left")); DMAR_LOCK(unit); TAILQ_FOREACH(entry, entries, dmamap_link) { entry->gseq.gen = 0; entry->gseq.seq = 0; - dmar_qi_invalidate_locked(ctx, entry->start, entry->end - + dmar_qi_invalidate_locked(domain, entry->start, entry->end - entry->start, TAILQ_NEXT(entry, dmamap_link) == NULL ? 
&gseq : NULL); } TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) { entry->gseq = gseq; TAILQ_REMOVE(entries, entry, dmamap_link); TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link); } DMAR_UNLOCK(unit); } static void -dmar_ctx_unload_task(void *arg, int pending) +dmar_domain_unload_task(void *arg, int pending) { - struct dmar_ctx *ctx; + struct dmar_domain *domain; struct dmar_map_entries_tailq entries; - ctx = arg; + domain = arg; TAILQ_INIT(&entries); for (;;) { - DMAR_CTX_LOCK(ctx); - TAILQ_SWAP(&ctx->unload_entries, &entries, dmar_map_entry, + DMAR_DOMAIN_LOCK(domain); + TAILQ_SWAP(&domain->unload_entries, &entries, dmar_map_entry, dmamap_link); - DMAR_CTX_UNLOCK(ctx); + DMAR_DOMAIN_UNLOCK(domain); if (TAILQ_EMPTY(&entries)) break; - dmar_ctx_unload(ctx, &entries, true); + dmar_domain_unload(domain, &entries, true); } } Index: head/sys/x86/iommu/intel_dmar.h =================================================================== --- head/sys/x86/iommu/intel_dmar.h (revision 284868) +++ head/sys/x86/iommu/intel_dmar.h (revision 284869) @@ -1,482 +1,521 @@ /*- * Copyright (c) 2013-2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __X86_IOMMU_INTEL_DMAR_H #define __X86_IOMMU_INTEL_DMAR_H /* Host or physical memory address, after translation. */ typedef uint64_t dmar_haddr_t; /* Guest or bus address, before translation. 
*/ typedef uint64_t dmar_gaddr_t; struct dmar_qi_genseq { u_int gen; uint32_t seq; }; struct dmar_map_entry { dmar_gaddr_t start; dmar_gaddr_t end; dmar_gaddr_t free_after; /* Free space after the entry */ dmar_gaddr_t free_down; /* Max free space below the current R/B tree node */ u_int flags; TAILQ_ENTRY(dmar_map_entry) dmamap_link; /* Link for dmamap entries */ - RB_ENTRY(dmar_map_entry) rb_entry; /* Links for ctx entries */ + RB_ENTRY(dmar_map_entry) rb_entry; /* Links for domain entries */ TAILQ_ENTRY(dmar_map_entry) unroll_link; /* Link for unroll after dmamap_load failure */ - struct dmar_ctx *ctx; + struct dmar_domain *domain; struct dmar_qi_genseq gseq; }; RB_HEAD(dmar_gas_entries_tree, dmar_map_entry); RB_PROTOTYPE(dmar_gas_entries_tree, dmar_map_entry, rb_entry, dmar_gas_cmp_entries); #define DMAR_MAP_ENTRY_PLACE 0x0001 /* Fake entry */ #define DMAR_MAP_ENTRY_RMRR 0x0002 /* Permanent, not linked by dmamap_link */ #define DMAR_MAP_ENTRY_MAP 0x0004 /* Busdma created, linked by dmamap_link */ #define DMAR_MAP_ENTRY_UNMAPPED 0x0010 /* No backing pages */ #define DMAR_MAP_ENTRY_QI_NF 0x0020 /* qi task, do not free entry */ #define DMAR_MAP_ENTRY_READ 0x1000 /* Read permitted */ #define DMAR_MAP_ENTRY_WRITE 0x2000 /* Write permitted */ #define DMAR_MAP_ENTRY_SNOOP 0x4000 /* Snoop */ #define DMAR_MAP_ENTRY_TM 0x8000 /* Transient */ +/* + * Locking annotations: + * (u) - Protected by dmar unit lock + * (d) - Protected by domain lock + * (c) - Immutable after initialization + */ + +/* + * The domain abstraction. Most non-constant members of the domain + * are locked by the owning dmar unit lock, not by the domain lock. + * Most important, dmar lock protects the contexts list. + * + * The domain lock protects the address map for the domain, and list + * of unload entries delayed. + * + * Page tables pages and pages content is protected by the vm object + * lock pgtbl_obj, which contains the page tables pages. 
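+ *
+ * A sketch of the resulting discipline, using names from
+ * intel_ctx.c and busdma_dmar.c:
+ *
+ *    DMAR_LOCK(domain->dmar);              unit lock: contexts list,
+ *    dmar_ctx_link(ctx);                   ctx_cnt, refs, flags
+ *    DMAR_UNLOCK(domain->dmar);
+ *
+ *    DMAR_DOMAIN_LOCK(domain);             domain lock: rb_root and
+ *    dmar_gas_free_space(domain, entry);   the unload_entries queue
+ *    DMAR_DOMAIN_UNLOCK(domain);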
+ */ +struct dmar_domain { + int domain; /* (c) DID, written in context entry */ + int mgaw; /* (c) Real max address width */ + int agaw; /* (c) Adjusted guest address width */ + int pglvl; /* (c) The pagelevel */ + int awlvl; /* (c) The pagelevel as the bitmask, + to set in context entry */ + dmar_gaddr_t end; /* (c) Highest address + 1 in + the guest AS */ + u_int ctx_cnt; /* (u) Number of contexts owned */ + u_int refs; /* (u) Refs, including ctx */ + struct dmar_unit *dmar; /* (c) */ + struct mtx lock; /* (c) */ + LIST_ENTRY(dmar_domain) link; /* (u) Member in the dmar list */ + LIST_HEAD(, dmar_ctx) contexts; /* (u) */ + vm_object_t pgtbl_obj; /* (c) Page table pages */ + u_int flags; /* (u) */ + u_int entries_cnt; /* (d) */ + struct dmar_gas_entries_tree rb_root; /* (d) */ + struct dmar_map_entries_tailq unload_entries; /* (d) Entries to + unload */ + struct dmar_map_entry *first_place, *last_place; /* (d) */ + struct task unload_task; /* (c) */ +}; + struct dmar_ctx { - uint16_t rid; /* pci RID */ - int domain; /* DID */ - int mgaw; /* Real max address width */ - int agaw; /* Adjusted guest address width */ - int pglvl; /* The pagelevel */ - int awlvl; /* The pagelevel as the bitmask, to set in - context entry */ - dmar_gaddr_t end;/* Highest address + 1 in the guest AS */ - u_int refs; /* References to the context, from tags */ - struct dmar_unit *dmar; - struct bus_dma_tag_dmar ctx_tag; /* Root tag */ - struct mtx lock; - LIST_ENTRY(dmar_ctx) link; /* Member in the dmar list */ - vm_object_t pgtbl_obj; /* Page table pages */ - u_int flags; /* Protected by dmar lock */ + struct bus_dma_tag_dmar ctx_tag; /* (c) Root tag */ + uint16_t rid; /* (c) pci RID */ uint64_t last_fault_rec[2]; /* Last fault reported */ - u_int entries_cnt; - u_long loads; - u_long unloads; - struct dmar_gas_entries_tree rb_root; - struct dmar_map_entries_tailq unload_entries; /* Entries to unload */ - struct dmar_map_entry *first_place, *last_place; - struct task unload_task; + struct dmar_domain *domain; /* (c) */ + LIST_ENTRY(dmar_ctx) link; /* (u) Member in the domain list */ + u_int refs; /* (u) References from tags */ + u_int flags; /* (u) */ + u_long loads; /* atomic updates, for stat only */ + u_long unloads; /* same */ }; +#define DMAR_DOMAIN_GAS_INITED 0x0001 +#define DMAR_DOMAIN_PGTBL_INITED 0x0002 +#define DMAR_DOMAIN_IDMAP 0x0010 /* Domain uses identity + page table */ +#define DMAR_DOMAIN_RMRR 0x0020 /* Domain contains RMRR entry, + cannot be turned off */ + /* struct dmar_ctx flags */ #define DMAR_CTX_FAULTED 0x0001 /* Fault was reported, last_fault_rec is valid */ -#define DMAR_CTX_IDMAP 0x0002 /* Context uses identity page table */ -#define DMAR_CTX_RMRR 0x0004 /* Context contains RMRR entry, - cannot be turned off */ -#define DMAR_CTX_DISABLED 0x0008 /* Device is disabled, the +#define DMAR_CTX_DISABLED 0x0002 /* Device is disabled, the ephemeral reference is kept to prevent context destruction */ -#define DMAR_CTX_PGLOCK(ctx) VM_OBJECT_WLOCK((ctx)->pgtbl_obj) -#define DMAR_CTX_PGTRYLOCK(ctx) VM_OBJECT_TRYWLOCK((ctx)->pgtbl_obj) -#define DMAR_CTX_PGUNLOCK(ctx) VM_OBJECT_WUNLOCK((ctx)->pgtbl_obj) -#define DMAR_CTX_ASSERT_PGLOCKED(ctx) \ - VM_OBJECT_ASSERT_WLOCKED((ctx)->pgtbl_obj) +#define DMAR_DOMAIN_PGLOCK(dom) VM_OBJECT_WLOCK((dom)->pgtbl_obj) +#define DMAR_DOMAIN_PGTRYLOCK(dom) VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj) +#define DMAR_DOMAIN_PGUNLOCK(dom) VM_OBJECT_WUNLOCK((dom)->pgtbl_obj) +#define DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \ + VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj) -#define 
DMAR_CTX_LOCK(ctx) mtx_lock(&(ctx)->lock) -#define DMAR_CTX_UNLOCK(ctx) mtx_unlock(&(ctx)->lock) -#define DMAR_CTX_ASSERT_LOCKED(ctx) mtx_assert(&(ctx)->lock, MA_OWNED) +#define DMAR_DOMAIN_LOCK(dom) mtx_lock(&(dom)->lock) +#define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock) +#define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED) struct dmar_msi_data { int irq; int irq_rid; struct resource *irq_res; void *intr_handle; int (*handler)(void *); int msi_data_reg; int msi_addr_reg; int msi_uaddr_reg; void (*enable_intr)(struct dmar_unit *); void (*disable_intr)(struct dmar_unit *); const char *name; }; #define DMAR_INTR_FAULT 0 #define DMAR_INTR_QI 1 #define DMAR_INTR_TOTAL 2 struct dmar_unit { device_t dev; int unit; uint16_t segment; uint64_t base; /* Resources */ int reg_rid; struct resource *regs; struct dmar_msi_data intrs[DMAR_INTR_TOTAL]; /* Hardware registers cache */ uint32_t hw_ver; uint64_t hw_cap; uint64_t hw_ecap; uint32_t hw_gcmd; /* Data for being a dmar */ struct mtx lock; - LIST_HEAD(, dmar_ctx) contexts; + LIST_HEAD(, dmar_domain) domains; struct unrhdr *domids; vm_object_t ctx_obj; u_int barrier_flags; /* Fault handler data */ struct mtx fault_lock; uint64_t *fault_log; int fault_log_head; int fault_log_tail; int fault_log_size; struct task fault_task; struct taskqueue *fault_taskqueue; /* QI */ int qi_enabled; vm_offset_t inv_queue; vm_size_t inv_queue_size; uint32_t inv_queue_avail; uint32_t inv_queue_tail; volatile uint32_t inv_waitd_seq_hw; /* hw writes there on wait descr completion */ uint64_t inv_waitd_seq_hw_phys; uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */ u_int inv_waitd_gen; /* seq number generation AKA seq overflows */ u_int inv_seq_waiters; /* count of waiters for seq */ u_int inv_queue_full; /* informational counter */ /* IR */ int ir_enabled; vm_paddr_t irt_phys; dmar_irte_t *irt; u_int irte_cnt; vmem_t *irtids; /* Delayed freeing of map entries queue processing */ struct dmar_map_entries_tailq tlb_flush_entries; struct task qi_task; struct taskqueue *qi_taskqueue; /* Busdma delayed map load */ struct task dmamap_load_task; TAILQ_HEAD(, bus_dmamap_dmar) delayed_maps; struct taskqueue *delayed_taskqueue; int dma_enabled; }; #define DMAR_LOCK(dmar) mtx_lock(&(dmar)->lock) #define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->lock) #define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->lock, MA_OWNED) #define DMAR_FAULT_LOCK(dmar) mtx_lock_spin(&(dmar)->fault_lock) #define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock) #define DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED) #define DMAR_IS_COHERENT(dmar) (((dmar)->hw_ecap & DMAR_ECAP_C) != 0) #define DMAR_HAS_QI(dmar) (((dmar)->hw_ecap & DMAR_ECAP_QI) != 0) #define DMAR_X2APIC(dmar) \ (x2apic_mode && ((dmar)->hw_ecap & DMAR_ECAP_EIM) != 0) /* Barrier ids */ #define DMAR_BARRIER_RMRR 0 #define DMAR_BARRIER_USEQ 1 struct dmar_unit *dmar_find(device_t dev); struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid); struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid); u_int dmar_nd2mask(u_int nd); bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl); -int ctx_set_agaw(struct dmar_ctx *ctx, int mgaw); -int dmar_maxaddr2mgaw(struct dmar_unit* unit, dmar_gaddr_t maxaddr, +int domain_set_agaw(struct dmar_domain *domain, int mgaw); +int dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr, bool allow_less); vm_pindex_t pglvl_max_pages(int pglvl); -int ctx_is_sp_lvl(struct dmar_ctx *ctx, int lvl); +int 
domain_is_sp_lvl(struct dmar_domain *domain, int lvl); dmar_gaddr_t pglvl_page_size(int total_pglvl, int lvl); -dmar_gaddr_t ctx_page_size(struct dmar_ctx *ctx, int lvl); +dmar_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl); int calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size, dmar_gaddr_t *isizep); struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags); void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags); void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags, struct sf_buf **sf); void dmar_unmap_pgtbl(struct sf_buf *sf); int dmar_load_root_entry_ptr(struct dmar_unit *unit); int dmar_inv_ctx_glob(struct dmar_unit *unit); int dmar_inv_iotlb_glob(struct dmar_unit *unit); int dmar_flush_write_bufs(struct dmar_unit *unit); void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst); void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst); void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst); int dmar_enable_translation(struct dmar_unit *unit); int dmar_disable_translation(struct dmar_unit *unit); int dmar_load_irt_ptr(struct dmar_unit *unit); int dmar_enable_ir(struct dmar_unit *unit); int dmar_disable_ir(struct dmar_unit *unit); bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id); void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id); int dmar_fault_intr(void *arg); void dmar_enable_fault_intr(struct dmar_unit *unit); void dmar_disable_fault_intr(struct dmar_unit *unit); int dmar_init_fault_log(struct dmar_unit *unit); void dmar_fini_fault_log(struct dmar_unit *unit); int dmar_qi_intr(void *arg); void dmar_enable_qi_intr(struct dmar_unit *unit); void dmar_disable_qi_intr(struct dmar_unit *unit); int dmar_init_qi(struct dmar_unit *unit); void dmar_fini_qi(struct dmar_unit *unit); -void dmar_qi_invalidate_locked(struct dmar_ctx *ctx, dmar_gaddr_t start, +void dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t start, dmar_gaddr_t size, struct dmar_qi_genseq *pseq); void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit); void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit); void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit); void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt); -vm_object_t ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr); +vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain, + dmar_gaddr_t maxaddr); void put_idmap_pgtbl(vm_object_t obj); -int ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size, - vm_page_t *ma, uint64_t pflags, int flags); -int ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size, - int flags); -void ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base, +int domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base, + dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags); +int domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base, + dmar_gaddr_t size, int flags); +void domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size); -int ctx_alloc_pgtbl(struct dmar_ctx *ctx); -void ctx_free_pgtbl(struct dmar_ctx *ctx); +int domain_alloc_pgtbl(struct dmar_domain *domain); +void domain_free_pgtbl(struct dmar_domain *domain); struct dmar_ctx *dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr); -struct dmar_ctx *dmar_get_ctx(struct dmar_unit *dmar, device_t dev, +struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit 
*dmar, device_t dev, uint16_t rid, bool id_mapped, bool rmrr_init); +int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx); void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx); void dmar_free_ctx(struct dmar_ctx *ctx); struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid); -void dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free); -void dmar_ctx_unload(struct dmar_ctx *ctx, +void dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free); +void dmar_domain_unload(struct dmar_domain *domain, struct dmar_map_entries_tailq *entries, bool cansleep); -void dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free); +void dmar_domain_free_entry(struct dmar_map_entry *entry, bool free); int dmar_init_busdma(struct dmar_unit *unit); void dmar_fini_busdma(struct dmar_unit *unit); device_t dmar_get_requester(device_t dev, uint16_t *rid); -void dmar_gas_init_ctx(struct dmar_ctx *ctx); -void dmar_gas_fini_ctx(struct dmar_ctx *ctx); -struct dmar_map_entry *dmar_gas_alloc_entry(struct dmar_ctx *ctx, u_int flags); -void dmar_gas_free_entry(struct dmar_ctx *ctx, struct dmar_map_entry *entry); -void dmar_gas_free_space(struct dmar_ctx *ctx, struct dmar_map_entry *entry); -int dmar_gas_map(struct dmar_ctx *ctx, const struct bus_dma_tag_common *common, - dmar_gaddr_t size, int offset, u_int eflags, u_int flags, vm_page_t *ma, - struct dmar_map_entry **res); -void dmar_gas_free_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry); -int dmar_gas_map_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry, - u_int eflags, u_int flags, vm_page_t *ma); -int dmar_gas_reserve_region(struct dmar_ctx *ctx, dmar_gaddr_t start, +void dmar_gas_init_domain(struct dmar_domain *domain); +void dmar_gas_fini_domain(struct dmar_domain *domain); +struct dmar_map_entry *dmar_gas_alloc_entry(struct dmar_domain *domain, + u_int flags); +void dmar_gas_free_entry(struct dmar_domain *domain, + struct dmar_map_entry *entry); +void dmar_gas_free_space(struct dmar_domain *domain, + struct dmar_map_entry *entry); +int dmar_gas_map(struct dmar_domain *domain, + const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset, + u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res); +void dmar_gas_free_region(struct dmar_domain *domain, + struct dmar_map_entry *entry); +int dmar_gas_map_region(struct dmar_domain *domain, + struct dmar_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma); +int dmar_gas_reserve_region(struct dmar_domain *domain, dmar_gaddr_t start, dmar_gaddr_t end); -void dmar_ctx_parse_rmrr(struct dmar_ctx *ctx, device_t dev, +void dmar_dev_parse_rmrr(struct dmar_domain *domain, device_t dev, struct dmar_map_entries_tailq *rmrr_entries); int dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar); void dmar_quirks_post_ident(struct dmar_unit *dmar); void dmar_quirks_pre_use(struct dmar_unit *dmar); int dmar_init_irt(struct dmar_unit *unit); void dmar_fini_irt(struct dmar_unit *unit); #define DMAR_GM_CANWAIT 0x0001 #define DMAR_GM_CANSPLIT 0x0002 #define DMAR_PGF_WAITOK 0x0001 #define DMAR_PGF_ZERO 0x0002 #define DMAR_PGF_ALLOC 0x0004 #define DMAR_PGF_NOALLOC 0x0008 #define DMAR_PGF_OBJL 0x0010 extern dmar_haddr_t dmar_high; extern int haw; extern int dmar_tbl_pagecnt; extern int dmar_match_verbose; extern int dmar_check_free; static inline uint32_t dmar_read4(const struct dmar_unit *unit, int reg) { return (bus_read_4(unit->regs, reg)); } static inline uint64_t dmar_read8(const struct dmar_unit *unit, int 
reg) { #ifdef __i386__ uint32_t high, low; low = bus_read_4(unit->regs, reg); high = bus_read_4(unit->regs, reg + 4); return (low | ((uint64_t)high << 32)); #else return (bus_read_8(unit->regs, reg)); #endif } static inline void dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val) { KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) == (unit->hw_gcmd & DMAR_GCMD_TE), ("dmar%d clearing TE 0x%08x 0x%08x", unit->unit, unit->hw_gcmd, val)); bus_write_4(unit->regs, reg, val); } static inline void dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val) { KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write")); #ifdef __i386__ uint32_t high, low; low = val; high = val >> 32; bus_write_4(unit->regs, reg, low); bus_write_4(unit->regs, reg + 4, high); #else bus_write_8(unit->regs, reg, val); #endif } /* * dmar_pte_store and dmar_pte_clear ensure that on i386, 32bit writes * are issued in the correct order. For store, the lower word, * containing the P or R and W bits, is set only after the high word * is written. For clear, the P bit is cleared first, then the high * word is cleared. * * dmar_pte_update updates the pte. For amd64, the update is atomic. * For i386, it first disables the entry by clearing the word * containing the P bit, and then defer to dmar_pte_store. The locked * cmpxchg8b is probably available on any machine having DMAR support, * but interrupt translation table may be mapped uncached. */ static inline void dmar_pte_store1(volatile uint64_t *dst, uint64_t val) { #ifdef __i386__ volatile uint32_t *p; uint32_t hi, lo; hi = val >> 32; lo = val; p = (volatile uint32_t *)dst; *(p + 1) = hi; *p = lo; #else *dst = val; #endif } static inline void dmar_pte_store(volatile uint64_t *dst, uint64_t val) { KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx", dst, (uintmax_t)*dst, (uintmax_t)val)); dmar_pte_store1(dst, val); } static inline void dmar_pte_update(volatile uint64_t *dst, uint64_t val) { #ifdef __i386__ volatile uint32_t *p; p = (volatile uint32_t *)dst; *p = 0; #endif dmar_pte_store1(dst, val); } static inline void dmar_pte_clear(volatile uint64_t *dst) { #ifdef __i386__ volatile uint32_t *p; p = (volatile uint32_t *)dst; *p = 0; *(p + 1) = 0; #else *dst = 0; #endif } static inline bool dmar_test_boundary(dmar_gaddr_t start, dmar_gaddr_t size, dmar_gaddr_t boundary) { if (boundary == 0) return (true); return (start + size <= ((start + boundary) & ~(boundary - 1))); } #ifdef INVARIANTS #define TD_PREP_PINNED_ASSERT \ int old_td_pinned; \ old_td_pinned = curthread->td_pinned #define TD_PINNED_ASSERT \ KASSERT(curthread->td_pinned == old_td_pinned, \ ("pin count leak: %d %d %s:%d", curthread->td_pinned, \ old_td_pinned, __FILE__, __LINE__)) #else #define TD_PREP_PINNED_ASSERT #define TD_PINNED_ASSERT #endif #endif Index: head/sys/x86/iommu/intel_drv.c =================================================================== --- head/sys/x86/iommu/intel_drv.c (revision 284868) +++ head/sys/x86/iommu/intel_drv.c (revision 284869) @@ -1,1265 +1,1292 @@ /*- * Copyright (c) 2013-2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #if defined(__amd64__) #define DEV_APIC #else #include "opt_apic.h" #endif #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_APIC #include "pcib_if.h" #endif #define DMAR_FAULT_IRQ_RID 0 #define DMAR_QI_IRQ_RID 1 #define DMAR_REG_RID 2 static devclass_t dmar_devclass; static device_t *dmar_devs; static int dmar_devcnt; typedef int (*dmar_iter_t)(ACPI_DMAR_HEADER *, void *); static void dmar_iterate_tbl(dmar_iter_t iter, void *arg) { ACPI_TABLE_DMAR *dmartbl; ACPI_DMAR_HEADER *dmarh; char *ptr, *ptrend; ACPI_STATUS status; status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl); if (ACPI_FAILURE(status)) return; ptr = (char *)dmartbl + sizeof(*dmartbl); ptrend = (char *)dmartbl + dmartbl->Header.Length; for (;;) { if (ptr >= ptrend) break; dmarh = (ACPI_DMAR_HEADER *)ptr; if (dmarh->Length <= 0) { printf("dmar_identify: corrupted DMAR table, l %d\n", dmarh->Length); break; } ptr += dmarh->Length; if (!iter(dmarh, arg)) break; } } struct find_iter_args { int i; ACPI_DMAR_HARDWARE_UNIT *res; }; static int dmar_find_iter(ACPI_DMAR_HEADER *dmarh, void *arg) { struct find_iter_args *fia; if (dmarh->Type != ACPI_DMAR_TYPE_HARDWARE_UNIT) return (1); fia = arg; if (fia->i == 0) { fia->res = (ACPI_DMAR_HARDWARE_UNIT *)dmarh; return (0); } fia->i--; return (1); } static ACPI_DMAR_HARDWARE_UNIT * dmar_find_by_index(int idx) { struct find_iter_args fia; fia.i = idx; fia.res = NULL; dmar_iterate_tbl(dmar_find_iter, &fia); return (fia.res); } static int dmar_count_iter(ACPI_DMAR_HEADER *dmarh, void *arg) { if (dmarh->Type == ACPI_DMAR_TYPE_HARDWARE_UNIT) dmar_devcnt++; return (1); } static int dmar_enable = 0; static void dmar_identify(driver_t *driver, device_t parent) { ACPI_TABLE_DMAR *dmartbl; ACPI_DMAR_HARDWARE_UNIT *dmarh; ACPI_STATUS status; int i, error; if (acpi_disabled("dmar")) return; TUNABLE_INT_FETCH("hw.dmar.enable", &dmar_enable); if (!dmar_enable) return; #ifdef INVARIANTS TUNABLE_INT_FETCH("hw.dmar.check_free", &dmar_check_free); #endif TUNABLE_INT_FETCH("hw.dmar.match_verbose", &dmar_match_verbose); status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl); if (ACPI_FAILURE(status)) return; haw = dmartbl->Width + 1; if ((1ULL << (haw + 1)) > BUS_SPACE_MAXADDR) dmar_high = BUS_SPACE_MAXADDR; else 
dmar_high = 1ULL << (haw + 1); if (bootverbose) { printf("DMAR HAW=%d flags=<%b>\n", dmartbl->Width, (unsigned)dmartbl->Flags, "\020\001INTR_REMAP\002X2APIC_OPT_OUT"); } dmar_iterate_tbl(dmar_count_iter, NULL); if (dmar_devcnt == 0) return; dmar_devs = malloc(sizeof(device_t) * dmar_devcnt, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < dmar_devcnt; i++) { dmarh = dmar_find_by_index(i); if (dmarh == NULL) { printf("dmar_identify: cannot find HWUNIT %d\n", i); continue; } dmar_devs[i] = BUS_ADD_CHILD(parent, 1, "dmar", i); if (dmar_devs[i] == NULL) { printf("dmar_identify: cannot create instance %d\n", i); continue; } error = bus_set_resource(dmar_devs[i], SYS_RES_MEMORY, DMAR_REG_RID, dmarh->Address, PAGE_SIZE); if (error != 0) { printf( "dmar%d: unable to alloc register window at 0x%08jx: error %d\n", i, (uintmax_t)dmarh->Address, error); device_delete_child(parent, dmar_devs[i]); dmar_devs[i] = NULL; } } } static int dmar_probe(device_t dev) { if (acpi_get_handle(dev) != NULL) return (ENXIO); device_set_desc(dev, "DMA remap"); return (BUS_PROBE_NOWILDCARD); } static void dmar_release_intr(device_t dev, struct dmar_unit *unit, int idx) { struct dmar_msi_data *dmd; dmd = &unit->intrs[idx]; if (dmd->irq == -1) return; bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle); bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res); bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid); PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)), dev, dmd->irq); dmd->irq = -1; } static void dmar_release_resources(device_t dev, struct dmar_unit *unit) { int i; dmar_fini_busdma(unit); dmar_fini_irt(unit); dmar_fini_qi(unit); dmar_fini_fault_log(unit); for (i = 0; i < DMAR_INTR_TOTAL; i++) dmar_release_intr(dev, unit, i); if (unit->regs != NULL) { bus_deactivate_resource(dev, SYS_RES_MEMORY, unit->reg_rid, unit->regs); bus_release_resource(dev, SYS_RES_MEMORY, unit->reg_rid, unit->regs); unit->regs = NULL; } if (unit->domids != NULL) { delete_unrhdr(unit->domids); unit->domids = NULL; } if (unit->ctx_obj != NULL) { vm_object_deallocate(unit->ctx_obj); unit->ctx_obj = NULL; } } static int dmar_alloc_irq(device_t dev, struct dmar_unit *unit, int idx) { device_t pcib; struct dmar_msi_data *dmd; uint64_t msi_addr; uint32_t msi_data; int error; dmd = &unit->intrs[idx]; pcib = device_get_parent(device_get_parent(dev)); /* Really not pcib */ error = PCIB_ALLOC_MSIX(pcib, dev, &dmd->irq); if (error != 0) { device_printf(dev, "cannot allocate %s interrupt, %d\n", dmd->name, error); goto err1; } error = bus_set_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq, 1); if (error != 0) { device_printf(dev, "cannot set %s interrupt resource, %d\n", dmd->name, error); goto err2; } dmd->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &dmd->irq_rid, RF_ACTIVE); if (dmd->irq_res == NULL) { device_printf(dev, "cannot allocate resource for %s interrupt\n", dmd->name); error = ENXIO; goto err3; } error = bus_setup_intr(dev, dmd->irq_res, INTR_TYPE_MISC, dmd->handler, NULL, unit, &dmd->intr_handle); if (error != 0) { device_printf(dev, "cannot setup %s interrupt, %d\n", dmd->name, error); goto err4; } bus_describe_intr(dev, dmd->irq_res, dmd->intr_handle, dmd->name); error = PCIB_MAP_MSI(pcib, dev, dmd->irq, &msi_addr, &msi_data); if (error != 0) { device_printf(dev, "cannot map %s interrupt, %d\n", dmd->name, error); goto err5; } dmar_write4(unit, dmd->msi_data_reg, msi_data); dmar_write4(unit, dmd->msi_addr_reg, msi_addr); /* Only for xAPIC mode */ dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32); 
return (0); err5: bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle); err4: bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res); err3: bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid); err2: PCIB_RELEASE_MSIX(pcib, dev, dmd->irq); dmd->irq = -1; err1: return (error); } #ifdef DEV_APIC static int dmar_remap_intr(device_t dev, device_t child, u_int irq) { struct dmar_unit *unit; struct dmar_msi_data *dmd; uint64_t msi_addr; uint32_t msi_data; int i, error; unit = device_get_softc(dev); for (i = 0; i < DMAR_INTR_TOTAL; i++) { dmd = &unit->intrs[i]; if (irq == dmd->irq) { error = PCIB_MAP_MSI(device_get_parent( device_get_parent(dev)), dev, irq, &msi_addr, &msi_data); if (error != 0) return (error); DMAR_LOCK(unit); (dmd->disable_intr)(unit); dmar_write4(unit, dmd->msi_data_reg, msi_data); dmar_write4(unit, dmd->msi_addr_reg, msi_addr); dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32); (dmd->enable_intr)(unit); DMAR_UNLOCK(unit); return (0); } } return (ENOENT); } #endif static void dmar_print_caps(device_t dev, struct dmar_unit *unit, ACPI_DMAR_HARDWARE_UNIT *dmaru) { uint32_t caphi, ecaphi; device_printf(dev, "regs@0x%08jx, ver=%d.%d, seg=%d, flags=<%b>\n", (uintmax_t)dmaru->Address, DMAR_MAJOR_VER(unit->hw_ver), DMAR_MINOR_VER(unit->hw_ver), dmaru->Segment, dmaru->Flags, "\020\001INCLUDE_ALL_PCI"); caphi = unit->hw_cap >> 32; device_printf(dev, "cap=%b,", (u_int)unit->hw_cap, "\020\004AFL\005WBF\006PLMR\007PHMR\010CM\027ZLR\030ISOCH"); printf("%b, ", caphi, "\020\010PSI\027DWD\030DRD\031FL1GP\034PSI"); printf("ndoms=%d, sagaw=%d, mgaw=%d, fro=%d, nfr=%d, superp=%d", DMAR_CAP_ND(unit->hw_cap), DMAR_CAP_SAGAW(unit->hw_cap), DMAR_CAP_MGAW(unit->hw_cap), DMAR_CAP_FRO(unit->hw_cap), DMAR_CAP_NFR(unit->hw_cap), DMAR_CAP_SPS(unit->hw_cap)); if ((unit->hw_cap & DMAR_CAP_PSI) != 0) printf(", mamv=%d", DMAR_CAP_MAMV(unit->hw_cap)); printf("\n"); ecaphi = unit->hw_ecap >> 32; device_printf(dev, "ecap=%b,", (u_int)unit->hw_ecap, "\020\001C\002QI\003DI\004IR\005EIM\007PT\010SC\031ECS\032MTS" "\033NEST\034DIS\035PASID\036PRS\037ERS\040SRS"); printf("%b, ", ecaphi, "\020\002NWFS\003EAFS"); printf("mhmw=%d, iro=%d\n", DMAR_ECAP_MHMV(unit->hw_ecap), DMAR_ECAP_IRO(unit->hw_ecap)); } static int dmar_attach(device_t dev) { struct dmar_unit *unit; ACPI_DMAR_HARDWARE_UNIT *dmaru; int i, error; unit = device_get_softc(dev); unit->dev = dev; unit->unit = device_get_unit(dev); dmaru = dmar_find_by_index(unit->unit); if (dmaru == NULL) return (EINVAL); unit->segment = dmaru->Segment; unit->base = dmaru->Address; unit->reg_rid = DMAR_REG_RID; unit->regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &unit->reg_rid, RF_ACTIVE); if (unit->regs == NULL) { device_printf(dev, "cannot allocate register window\n"); return (ENOMEM); } unit->hw_ver = dmar_read4(unit, DMAR_VER_REG); unit->hw_cap = dmar_read8(unit, DMAR_CAP_REG); unit->hw_ecap = dmar_read8(unit, DMAR_ECAP_REG); if (bootverbose) dmar_print_caps(dev, unit, dmaru); dmar_quirks_post_ident(unit); for (i = 0; i < DMAR_INTR_TOTAL; i++) unit->intrs[i].irq = -1; unit->intrs[DMAR_INTR_FAULT].name = "fault"; unit->intrs[DMAR_INTR_FAULT].irq_rid = DMAR_FAULT_IRQ_RID; unit->intrs[DMAR_INTR_FAULT].handler = dmar_fault_intr; unit->intrs[DMAR_INTR_FAULT].msi_data_reg = DMAR_FEDATA_REG; unit->intrs[DMAR_INTR_FAULT].msi_addr_reg = DMAR_FEADDR_REG; unit->intrs[DMAR_INTR_FAULT].msi_uaddr_reg = DMAR_FEUADDR_REG; unit->intrs[DMAR_INTR_FAULT].enable_intr = dmar_enable_fault_intr; unit->intrs[DMAR_INTR_FAULT].disable_intr = dmar_disable_fault_intr; error = 
dmar_alloc_irq(dev, unit, DMAR_INTR_FAULT); if (error != 0) { dmar_release_resources(dev, unit); return (error); } if (DMAR_HAS_QI(unit)) { unit->intrs[DMAR_INTR_QI].name = "qi"; unit->intrs[DMAR_INTR_QI].irq_rid = DMAR_QI_IRQ_RID; unit->intrs[DMAR_INTR_QI].handler = dmar_qi_intr; unit->intrs[DMAR_INTR_QI].msi_data_reg = DMAR_IEDATA_REG; unit->intrs[DMAR_INTR_QI].msi_addr_reg = DMAR_IEADDR_REG; unit->intrs[DMAR_INTR_QI].msi_uaddr_reg = DMAR_IEUADDR_REG; unit->intrs[DMAR_INTR_QI].enable_intr = dmar_enable_qi_intr; unit->intrs[DMAR_INTR_QI].disable_intr = dmar_disable_qi_intr; error = dmar_alloc_irq(dev, unit, DMAR_INTR_QI); if (error != 0) { dmar_release_resources(dev, unit); return (error); } } mtx_init(&unit->lock, "dmarhw", NULL, MTX_DEF); unit->domids = new_unrhdr(0, dmar_nd2mask(DMAR_CAP_ND(unit->hw_cap)), &unit->lock); + LIST_INIT(&unit->domains); /* * 9.2 "Context Entry": * When Caching Mode (CM) field is reported as Set, the * domain-id value of zero is architecturally reserved. * Software must not use domain-id value of zero * when CM is Set. */ if ((unit->hw_cap & DMAR_CAP_CM) != 0) alloc_unr_specific(unit->domids, 0); unit->ctx_obj = vm_pager_allocate(OBJT_PHYS, NULL, IDX_TO_OFF(1 + DMAR_CTX_CNT), 0, 0, NULL); /* * Allocate and load the root entry table pointer. Enable the * address translation after the required invalidations are * done. */ dmar_pgalloc(unit->ctx_obj, 0, DMAR_PGF_WAITOK | DMAR_PGF_ZERO); DMAR_LOCK(unit); error = dmar_load_root_entry_ptr(unit); if (error != 0) { DMAR_UNLOCK(unit); dmar_release_resources(dev, unit); return (error); } error = dmar_inv_ctx_glob(unit); if (error != 0) { DMAR_UNLOCK(unit); dmar_release_resources(dev, unit); return (error); } if ((unit->hw_ecap & DMAR_ECAP_DI) != 0) { error = dmar_inv_iotlb_glob(unit); if (error != 0) { DMAR_UNLOCK(unit); dmar_release_resources(dev, unit); return (error); } } DMAR_UNLOCK(unit); error = dmar_init_fault_log(unit); if (error != 0) { dmar_release_resources(dev, unit); return (error); } error = dmar_init_qi(unit); if (error != 0) { dmar_release_resources(dev, unit); return (error); } error = dmar_init_irt(unit); if (error != 0) { dmar_release_resources(dev, unit); return (error); } error = dmar_init_busdma(unit); if (error != 0) { dmar_release_resources(dev, unit); return (error); } #ifdef NOTYET DMAR_LOCK(unit); error = dmar_enable_translation(unit); if (error != 0) { DMAR_UNLOCK(unit); dmar_release_resources(dev, unit); return (error); } DMAR_UNLOCK(unit); #endif return (0); } static int dmar_detach(device_t dev) { return (EBUSY); } static int dmar_suspend(device_t dev) { return (0); } static int dmar_resume(device_t dev) { /* XXXKIB */ return (0); } static device_method_t dmar_methods[] = { DEVMETHOD(device_identify, dmar_identify), DEVMETHOD(device_probe, dmar_probe), DEVMETHOD(device_attach, dmar_attach), DEVMETHOD(device_detach, dmar_detach), DEVMETHOD(device_suspend, dmar_suspend), DEVMETHOD(device_resume, dmar_resume), #ifdef DEV_APIC DEVMETHOD(bus_remap_intr, dmar_remap_intr), #endif DEVMETHOD_END }; static driver_t dmar_driver = { "dmar", dmar_methods, sizeof(struct dmar_unit), }; DRIVER_MODULE(dmar, acpi, dmar_driver, dmar_devclass, 0, 0); MODULE_DEPEND(dmar, acpi, 1, 1, 1); static void dmar_print_path(device_t dev, const char *banner, int busno, int depth, const ACPI_DMAR_PCI_PATH *path) { int i; device_printf(dev, "%s [%d, ", banner, busno); for (i = 0; i < depth; i++) { if (i != 0) printf(", "); printf("(%d, %d)", path[i].Device, path[i].Function); } printf("]\n"); } static int 
dmar_dev_depth(device_t child) { devclass_t pci_class; device_t bus, pcib; int depth; pci_class = devclass_find("pci"); for (depth = 1; ; depth++) { bus = device_get_parent(child); pcib = device_get_parent(bus); if (device_get_devclass(device_get_parent(pcib)) != pci_class) return (depth); child = pcib; } } static void dmar_dev_path(device_t child, int *busno, ACPI_DMAR_PCI_PATH *path, int depth) { devclass_t pci_class; device_t bus, pcib; pci_class = devclass_find("pci"); for (depth--; depth != -1; depth--) { path[depth].Device = pci_get_slot(child); path[depth].Function = pci_get_function(child); bus = device_get_parent(child); pcib = device_get_parent(bus); if (device_get_devclass(device_get_parent(pcib)) != pci_class) { /* reached a host bridge */ *busno = pcib_get_bus(bus); return; } child = pcib; } panic("wrong depth"); } static int dmar_match_pathes(int busno1, const ACPI_DMAR_PCI_PATH *path1, int depth1, int busno2, const ACPI_DMAR_PCI_PATH *path2, int depth2, enum AcpiDmarScopeType scope_type) { int i, depth; if (busno1 != busno2) return (0); if (scope_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && depth1 != depth2) return (0); depth = depth1; if (depth2 < depth) depth = depth2; for (i = 0; i < depth; i++) { if (path1[i].Device != path2[i].Device || path1[i].Function != path2[i].Function) return (0); } return (1); } static int dmar_match_devscope(ACPI_DMAR_DEVICE_SCOPE *devscope, device_t dev, int dev_busno, const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len) { ACPI_DMAR_PCI_PATH *path; int path_len; if (devscope->Length < sizeof(*devscope)) { printf("dmar_find: corrupted DMAR table, dl %d\n", devscope->Length); return (-1); } if (devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_ENDPOINT && devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_BRIDGE) return (0); path_len = devscope->Length - sizeof(*devscope); if (path_len % 2 != 0) { printf("dmar_find_bsf: corrupted DMAR table, dl %d\n", devscope->Length); return (-1); } path_len /= 2; path = (ACPI_DMAR_PCI_PATH *)(devscope + 1); if (path_len == 0) { printf("dmar_find: corrupted DMAR table, dl %d\n", devscope->Length); return (-1); } if (dmar_match_verbose) dmar_print_path(dev, "DMAR", devscope->Bus, path_len, path); return (dmar_match_pathes(devscope->Bus, path, path_len, dev_busno, dev_path, dev_path_len, devscope->EntryType)); } struct dmar_unit * dmar_find(device_t dev) { device_t dmar_dev; ACPI_DMAR_HARDWARE_UNIT *dmarh; ACPI_DMAR_DEVICE_SCOPE *devscope; char *ptr, *ptrend; int i, match, dev_domain, dev_busno, dev_path_len; dmar_dev = NULL; dev_domain = pci_get_domain(dev); dev_path_len = dmar_dev_depth(dev); ACPI_DMAR_PCI_PATH dev_path[dev_path_len]; dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len); if (dmar_match_verbose) dmar_print_path(dev, "PCI", dev_busno, dev_path_len, dev_path); for (i = 0; i < dmar_devcnt; i++) { if (dmar_devs[i] == NULL) continue; dmarh = dmar_find_by_index(i); if (dmarh == NULL) continue; if (dmarh->Segment != dev_domain) continue; if ((dmarh->Flags & ACPI_DMAR_INCLUDE_ALL) != 0) { dmar_dev = dmar_devs[i]; if (dmar_match_verbose) { device_printf(dev, "pci%d:%d:%d:%d matched dmar%d INCLUDE_ALL\n", dev_domain, pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), ((struct dmar_unit *)device_get_softc( dmar_dev))->unit); } goto found; } ptr = (char *)dmarh + sizeof(*dmarh); ptrend = (char *)dmarh + dmarh->Header.Length; for (;;) { if (ptr >= ptrend) break; devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr; ptr += devscope->Length; if (dmar_match_verbose) { device_printf(dev, "pci%d:%d:%d:%d matching dmar%d\n", 
dev_domain, pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), ((struct dmar_unit *)device_get_softc( dmar_devs[i]))->unit); } match = dmar_match_devscope(devscope, dev, dev_busno, dev_path, dev_path_len); if (dmar_match_verbose) { if (match == -1) printf("table error\n"); else if (match == 0) printf("not matched\n"); else printf("matched\n"); } if (match == -1) return (NULL); else if (match == 1) { dmar_dev = dmar_devs[i]; goto found; } } } return (NULL); found: return (device_get_softc(dmar_dev)); } static struct dmar_unit * dmar_find_nonpci(u_int id, u_int entry_type, uint16_t *rid) { device_t dmar_dev; struct dmar_unit *unit; ACPI_DMAR_HARDWARE_UNIT *dmarh; ACPI_DMAR_DEVICE_SCOPE *devscope; ACPI_DMAR_PCI_PATH *path; char *ptr, *ptrend; int i; for (i = 0; i < dmar_devcnt; i++) { dmar_dev = dmar_devs[i]; if (dmar_dev == NULL) continue; unit = (struct dmar_unit *)device_get_softc(dmar_dev); dmarh = dmar_find_by_index(i); if (dmarh == NULL) continue; ptr = (char *)dmarh + sizeof(*dmarh); ptrend = (char *)dmarh + dmarh->Header.Length; for (;;) { if (ptr >= ptrend) break; devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr; ptr += devscope->Length; if (devscope->EntryType != entry_type) continue; if (devscope->EnumerationId != id) continue; if (devscope->Length - sizeof(ACPI_DMAR_DEVICE_SCOPE) == 2) { if (rid != NULL) { path = (ACPI_DMAR_PCI_PATH *) (devscope + 1); *rid = PCI_RID(devscope->Bus, path->Device, path->Function); } return (unit); } else { /* XXXKIB */ printf( "dmar_find_nonpci: id %d type %d path length != 2\n", id, entry_type); } } } return (NULL); } struct dmar_unit * dmar_find_hpet(device_t dev, uint16_t *rid) { ACPI_HANDLE handle; uint32_t hpet_id; handle = acpi_get_handle(dev); if (ACPI_FAILURE(acpi_GetInteger(handle, "_UID", &hpet_id))) return (NULL); return (dmar_find_nonpci(hpet_id, ACPI_DMAR_SCOPE_TYPE_HPET, rid)); } struct dmar_unit * dmar_find_ioapic(u_int apic_id, uint16_t *rid) { return (dmar_find_nonpci(apic_id, ACPI_DMAR_SCOPE_TYPE_IOAPIC, rid)); } struct rmrr_iter_args { - struct dmar_ctx *ctx; + struct dmar_domain *domain; device_t dev; int dev_domain; int dev_busno; ACPI_DMAR_PCI_PATH *dev_path; int dev_path_len; struct dmar_map_entries_tailq *rmrr_entries; }; static int dmar_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg) { struct rmrr_iter_args *ria; ACPI_DMAR_RESERVED_MEMORY *resmem; ACPI_DMAR_DEVICE_SCOPE *devscope; struct dmar_map_entry *entry; char *ptr, *ptrend; int match; if (dmarh->Type != ACPI_DMAR_TYPE_RESERVED_MEMORY) return (1); ria = arg; resmem = (ACPI_DMAR_RESERVED_MEMORY *)dmarh; if (dmar_match_verbose) { printf("RMRR [%jx,%jx] segment %d\n", (uintmax_t)resmem->BaseAddress, (uintmax_t)resmem->EndAddress, resmem->Segment); } if (resmem->Segment != ria->dev_domain) return (1); ptr = (char *)resmem + sizeof(*resmem); ptrend = (char *)resmem + resmem->Header.Length; for (;;) { if (ptr >= ptrend) break; devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr; ptr += devscope->Length; match = dmar_match_devscope(devscope, ria->dev, ria->dev_busno, ria->dev_path, ria->dev_path_len); if (match == 1) { if (dmar_match_verbose) printf("matched\n"); - entry = dmar_gas_alloc_entry(ria->ctx, DMAR_PGF_WAITOK); + entry = dmar_gas_alloc_entry(ria->domain, + DMAR_PGF_WAITOK); entry->start = resmem->BaseAddress; /* The RMRR entry end address is inclusive. 
*/ entry->end = resmem->EndAddress; TAILQ_INSERT_TAIL(ria->rmrr_entries, entry, unroll_link); } else if (dmar_match_verbose) { printf("not matched, err %d\n", match); } } return (1); } void -dmar_ctx_parse_rmrr(struct dmar_ctx *ctx, device_t dev, +dmar_dev_parse_rmrr(struct dmar_domain *domain, device_t dev, struct dmar_map_entries_tailq *rmrr_entries) { struct rmrr_iter_args ria; ria.dev_domain = pci_get_domain(dev); ria.dev_path_len = dmar_dev_depth(dev); ACPI_DMAR_PCI_PATH dev_path[ria.dev_path_len]; dmar_dev_path(dev, &ria.dev_busno, dev_path, ria.dev_path_len); if (dmar_match_verbose) { device_printf(dev, "parsing RMRR entries for "); dmar_print_path(dev, "PCI", ria.dev_busno, ria.dev_path_len, dev_path); } - ria.ctx = ctx; + ria.domain = domain; ria.dev = dev; ria.dev_path = dev_path; ria.rmrr_entries = rmrr_entries; dmar_iterate_tbl(dmar_rmrr_iter, &ria); } struct inst_rmrr_iter_args { struct dmar_unit *dmar; }; static device_t dmar_path_dev(int segment, int path_len, int busno, const ACPI_DMAR_PCI_PATH *path) { devclass_t pci_class; device_t bus, pcib, dev; int i; pci_class = devclass_find("pci"); dev = NULL; for (i = 0; i < path_len; i++, path++) { dev = pci_find_dbsf(segment, busno, path->Device, path->Function); if (dev == NULL) break; if (i != path_len - 1) { bus = device_get_parent(dev); pcib = device_get_parent(bus); if (device_get_devclass(device_get_parent(pcib)) != pci_class) return (NULL); } busno = pcib_get_bus(dev); } return (dev); } static int dmar_inst_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg) { const ACPI_DMAR_RESERVED_MEMORY *resmem; const ACPI_DMAR_DEVICE_SCOPE *devscope; struct inst_rmrr_iter_args *iria; const char *ptr, *ptrend; struct dmar_unit *dev_dmar; device_t dev; if (dmarh->Type != ACPI_DMAR_TYPE_RESERVED_MEMORY) return (1); iria = arg; resmem = (ACPI_DMAR_RESERVED_MEMORY *)dmarh; if (resmem->Segment != iria->dmar->segment) return (1); if (dmar_match_verbose) { printf("dmar%d: RMRR [%jx,%jx]\n", iria->dmar->unit, (uintmax_t)resmem->BaseAddress, (uintmax_t)resmem->EndAddress); } ptr = (const char *)resmem + sizeof(*resmem); ptrend = (const char *)resmem + resmem->Header.Length; for (;;) { if (ptr >= ptrend) break; devscope = (const ACPI_DMAR_DEVICE_SCOPE *)ptr; ptr += devscope->Length; /* XXXKIB bridge */ if (devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_ENDPOINT) continue; if (dmar_match_verbose) { dmar_print_path(iria->dmar->dev, "RMRR scope", devscope->Bus, (devscope->Length - sizeof(ACPI_DMAR_DEVICE_SCOPE)) / 2, (const ACPI_DMAR_PCI_PATH *)(devscope + 1)); } dev = dmar_path_dev(resmem->Segment, (devscope->Length - sizeof(ACPI_DMAR_DEVICE_SCOPE)) / 2, devscope->Bus, (const ACPI_DMAR_PCI_PATH *)(devscope + 1)); if (dev == NULL) { if (dmar_match_verbose) printf("null dev\n"); continue; } dev_dmar = dmar_find(dev); if (dev_dmar != iria->dmar) { if (dmar_match_verbose) { printf("dmar%d matched, skipping\n", dev_dmar->unit); } continue; } if (dmar_match_verbose) printf("matched, instantiating RMRR context\n"); dmar_instantiate_ctx(iria->dmar, dev, true); } return (1); } /* * Pre-create all contexts for the DMAR which have RMRR entries. 
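* The RMRR ranges must be mapped before translation is enabled, since the * devices covered by an RMRR may continue DMA to those ranges at any time; * translation is therefore only enabled below, after the RMRR contexts * have been instantiated.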
*/ int dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar) { struct inst_rmrr_iter_args iria; int error; if (!dmar_barrier_enter(dmar, DMAR_BARRIER_RMRR)) return (0); error = 0; iria.dmar = dmar; if (dmar_match_verbose) printf("dmar%d: instantiating RMRR contexts\n", dmar->unit); dmar_iterate_tbl(dmar_inst_rmrr_iter, &iria); DMAR_LOCK(dmar); - if (!LIST_EMPTY(&dmar->contexts)) { + if (!LIST_EMPTY(&dmar->domains)) { KASSERT((dmar->hw_gcmd & DMAR_GCMD_TE) == 0, ("dmar%d: RMRR not handled but translation is already enabled", dmar->unit)); error = dmar_enable_translation(dmar); } dmar_barrier_exit(dmar, DMAR_BARRIER_RMRR); return (error); } #ifdef DDB #include #include static void -dmar_print_ctx_entry(const struct dmar_map_entry *entry) +dmar_print_domain_entry(const struct dmar_map_entry *entry) { struct dmar_map_entry *l, *r; db_printf( " start %jx end %jx free_after %jx free_down %jx flags %x ", entry->start, entry->end, entry->free_after, entry->free_down, entry->flags); db_printf("left "); l = RB_LEFT(entry, rb_entry); if (l == NULL) db_printf("NULL "); else db_printf("%jx ", l->start); db_printf("right "); r = RB_RIGHT(entry, rb_entry); if (r == NULL) db_printf("NULL"); else db_printf("%jx", r->start); db_printf("\n"); } static void -dmar_print_ctx(struct dmar_ctx *ctx, bool show_mappings) +dmar_print_ctx(struct dmar_ctx *ctx) { - struct dmar_map_entry *entry; db_printf( - " @%p pci%d:%d:%d dom %d mgaw %d agaw %d pglvl %d end %jx\n" - " refs %d flags %x pgobj %p map_ents %u loads %lu unloads %lu\n", + " @%p pci%d:%d:%d refs %d flags %x loads %lu unloads %lu\n", ctx, pci_get_bus(ctx->ctx_tag.owner), pci_get_slot(ctx->ctx_tag.owner), - pci_get_function(ctx->ctx_tag.owner), ctx->domain, ctx->mgaw, - ctx->agaw, ctx->pglvl, (uintmax_t)ctx->end, ctx->refs, - ctx->flags, ctx->pgtbl_obj, ctx->entries_cnt, ctx->loads, - ctx->unloads); + pci_get_function(ctx->ctx_tag.owner), ctx->refs, ctx->flags, + ctx->loads, ctx->unloads); +} + +static void +dmar_print_domain(struct dmar_domain *domain, bool show_mappings) +{ + struct dmar_map_entry *entry; + struct dmar_ctx *ctx; + + db_printf( + " @%p dom %d mgaw %d agaw %d pglvl %d end %jx refs %d\n" + " ctx_cnt %d flags %x pgobj %p map_ents %u\n", + domain, domain->domain, domain->mgaw, domain->agaw, domain->pglvl, + (uintmax_t)domain->end, domain->refs, domain->ctx_cnt, + domain->flags, domain->pgtbl_obj, domain->entries_cnt); + if (!LIST_EMPTY(&domain->contexts)) { + db_printf(" Contexts:\n"); + LIST_FOREACH(ctx, &domain->contexts, link) + dmar_print_ctx(ctx); + } if (!show_mappings) return; db_printf(" mapped:\n"); - RB_FOREACH(entry, dmar_gas_entries_tree, &ctx->rb_root) { - dmar_print_ctx_entry(entry); + RB_FOREACH(entry, dmar_gas_entries_tree, &domain->rb_root) { + dmar_print_domain_entry(entry); if (db_pager_quit) break; } if (db_pager_quit) return; db_printf(" unloading:\n"); - TAILQ_FOREACH(entry, &ctx->unload_entries, dmamap_link) { - dmar_print_ctx_entry(entry); + TAILQ_FOREACH(entry, &domain->unload_entries, dmamap_link) { + dmar_print_domain_entry(entry); if (db_pager_quit) break; } } -DB_FUNC(dmar_ctx, db_dmar_print_ctx, db_show_table, CS_OWN, NULL) +DB_FUNC(dmar_domain, db_dmar_print_domain, db_show_table, CS_OWN, NULL) { struct dmar_unit *unit; + struct dmar_domain *domain; struct dmar_ctx *ctx; bool show_mappings, valid; - int domain, bus, device, function, i, t; + int pci_domain, bus, device, function, i, t; db_expr_t radix; valid = false; radix = db_radix; db_radix = 10; t = db_read_token(); if (t == tSLASH) { t = db_read_token(); if (t 
!= tIDENT) { db_printf("Bad modifier\n"); db_radix = radix; db_skip_to_eol(); return; } show_mappings = strchr(db_tok_string, 'm') != NULL; t = db_read_token(); } else { show_mappings = false; } if (t == tNUMBER) { - domain = db_tok_number; + pci_domain = db_tok_number; t = db_read_token(); if (t == tNUMBER) { bus = db_tok_number; t = db_read_token(); if (t == tNUMBER) { device = db_tok_number; t = db_read_token(); if (t == tNUMBER) { function = db_tok_number; valid = true; } } } } db_radix = radix; db_skip_to_eol(); if (!valid) { - db_printf("usage: show dmar_ctx [/m] " + db_printf("usage: show dmar_domain [/m] " " \n"); return; } for (i = 0; i < dmar_devcnt; i++) { unit = device_get_softc(dmar_devs[i]); - LIST_FOREACH(ctx, &unit->contexts, link) { - if (domain == unit->segment && - bus == pci_get_bus(ctx->ctx_tag.owner) && - device == pci_get_slot(ctx->ctx_tag.owner) && - function == pci_get_function(ctx->ctx_tag.owner)) { - dmar_print_ctx(ctx, show_mappings); - goto out; + LIST_FOREACH(domain, &unit->domains, link) { + LIST_FOREACH(ctx, &domain->contexts, link) { + if (pci_domain == unit->segment && + bus == pci_get_bus(ctx->ctx_tag.owner) && + device == + pci_get_slot(ctx->ctx_tag.owner) && + function == + pci_get_function(ctx->ctx_tag.owner)) { + dmar_print_domain(domain, + show_mappings); + goto out; + } } } } out:; } static void -dmar_print_one(int idx, bool show_ctxs, bool show_mappings) +dmar_print_one(int idx, bool show_domains, bool show_mappings) { struct dmar_unit *unit; - struct dmar_ctx *ctx; + struct dmar_domain *domain; int i, frir; unit = device_get_softc(dmar_devs[idx]); db_printf("dmar%d at %p, root at 0x%jx, ver 0x%x\n", unit->unit, unit, dmar_read8(unit, DMAR_RTADDR_REG), dmar_read4(unit, DMAR_VER_REG)); db_printf("cap 0x%jx ecap 0x%jx gsts 0x%x fsts 0x%x fectl 0x%x\n", (uintmax_t)dmar_read8(unit, DMAR_CAP_REG), (uintmax_t)dmar_read8(unit, DMAR_ECAP_REG), dmar_read4(unit, DMAR_GSTS_REG), dmar_read4(unit, DMAR_FSTS_REG), dmar_read4(unit, DMAR_FECTL_REG)); + if (unit->ir_enabled) { + db_printf("ir is enabled; IRT @%p phys 0x%jx maxcnt %d\n", + unit->irt, (uintmax_t)unit->irt_phys, unit->irte_cnt); + } db_printf("fed 0x%x fea 0x%x feua 0x%x\n", dmar_read4(unit, DMAR_FEDATA_REG), dmar_read4(unit, DMAR_FEADDR_REG), dmar_read4(unit, DMAR_FEUADDR_REG)); db_printf("primary fault log:\n"); for (i = 0; i < DMAR_CAP_NFR(unit->hw_cap); i++) { frir = (DMAR_CAP_FRO(unit->hw_cap) + i) * 16; db_printf(" %d at 0x%x: %jx %jx\n", i, frir, (uintmax_t)dmar_read8(unit, frir), (uintmax_t)dmar_read8(unit, frir + 8)); } if (DMAR_HAS_QI(unit)) { db_printf("ied 0x%x iea 0x%x ieua 0x%x\n", dmar_read4(unit, DMAR_IEDATA_REG), dmar_read4(unit, DMAR_IEADDR_REG), dmar_read4(unit, DMAR_IEUADDR_REG)); if (unit->qi_enabled) { db_printf("qi is enabled: queue @0x%jx (IQA 0x%jx) " "size 0x%jx\n" " head 0x%x tail 0x%x avail 0x%x status 0x%x ctrl 0x%x\n" " hw compl 0x%x@%p/phys@%jx next seq 0x%x gen 0x%x\n", (uintmax_t)unit->inv_queue, (uintmax_t)dmar_read8(unit, DMAR_IQA_REG), (uintmax_t)unit->inv_queue_size, dmar_read4(unit, DMAR_IQH_REG), dmar_read4(unit, DMAR_IQT_REG), unit->inv_queue_avail, dmar_read4(unit, DMAR_ICS_REG), dmar_read4(unit, DMAR_IECTL_REG), unit->inv_waitd_seq_hw, &unit->inv_waitd_seq_hw, (uintmax_t)unit->inv_waitd_seq_hw_phys, unit->inv_waitd_seq, unit->inv_waitd_gen); } else { db_printf("qi is disabled\n"); } } - if (show_ctxs) { - db_printf("contexts:\n"); - LIST_FOREACH(ctx, &unit->contexts, link) { - dmar_print_ctx(ctx, show_mappings); + if (show_domains) { + db_printf("domains:\n"); 
+ LIST_FOREACH(domain, &unit->domains, link) { + dmar_print_domain(domain, show_mappings); if (db_pager_quit) break; } } } DB_SHOW_COMMAND(dmar, db_dmar_print) { - bool show_ctxs, show_mappings; + bool show_domains, show_mappings; - show_ctxs = strchr(modif, 'c') != NULL; + show_domains = strchr(modif, 'd') != NULL; show_mappings = strchr(modif, 'm') != NULL; if (!have_addr) { - db_printf("usage: show dmar [/c] [/m] index\n"); + db_printf("usage: show dmar [/d] [/m] index\n"); return; } - dmar_print_one((int)addr, show_ctxs, show_mappings); + dmar_print_one((int)addr, show_domains, show_mappings); } DB_SHOW_ALL_COMMAND(dmars, db_show_all_dmars) { int i; - bool show_ctxs, show_mappings; + bool show_domains, show_mappings; - show_ctxs = strchr(modif, 'c') != NULL; + show_domains = strchr(modif, 'd') != NULL; show_mappings = strchr(modif, 'm') != NULL; for (i = 0; i < dmar_devcnt; i++) { - dmar_print_one(i, show_ctxs, show_mappings); + dmar_print_one(i, show_domains, show_mappings); if (db_pager_quit) break; } } #endif Index: head/sys/x86/iommu/intel_gas.c =================================================================== --- head/sys/x86/iommu/intel_gas.c (revision 284868) +++ head/sys/x86/iommu/intel_gas.c (revision 284869) @@ -1,733 +1,739 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #define RB_AUGMENT(entry) dmar_gas_augment_entry(entry) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Guest Address Space management. 
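* * Each domain's I/O virtual address space is tracked as a red-black tree * of dmar_map_entry structures, ordered by the entry end address. Every * entry records the free space immediately after it (free_after), and the * tree is augmented with the maximum free space below each node * (free_down), so a suitable hole can be found without scanning the whole * tree.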
*/ static uma_zone_t dmar_map_entry_zone; static void intel_gas_init(void) { dmar_map_entry_zone = uma_zcreate("DMAR_MAP_ENTRY", sizeof(struct dmar_map_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); } SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL); struct dmar_map_entry * -dmar_gas_alloc_entry(struct dmar_ctx *ctx, u_int flags) +dmar_gas_alloc_entry(struct dmar_domain *domain, u_int flags) { struct dmar_map_entry *res; KASSERT((flags & ~(DMAR_PGF_WAITOK)) == 0, ("unsupported flags %x", flags)); res = uma_zalloc(dmar_map_entry_zone, ((flags & DMAR_PGF_WAITOK) != 0 ? M_WAITOK : M_NOWAIT) | M_ZERO); if (res != NULL) { - res->ctx = ctx; - atomic_add_int(&ctx->entries_cnt, 1); + res->domain = domain; + atomic_add_int(&domain->entries_cnt, 1); } return (res); } void -dmar_gas_free_entry(struct dmar_ctx *ctx, struct dmar_map_entry *entry) +dmar_gas_free_entry(struct dmar_domain *domain, struct dmar_map_entry *entry) { - KASSERT(ctx == entry->ctx, - ("mismatched free ctx %p entry %p entry->ctx %p", ctx, - entry, entry->ctx)); - atomic_subtract_int(&ctx->entries_cnt, 1); + KASSERT(domain == entry->domain, + ("mismatched free domain %p entry %p entry->domain %p", domain, + entry, entry->domain)); + atomic_subtract_int(&domain->entries_cnt, 1); uma_zfree(dmar_map_entry_zone, entry); } static int dmar_gas_cmp_entries(struct dmar_map_entry *a, struct dmar_map_entry *b) { /* Last entry have zero size, so <= */ KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)", a, (uintmax_t)a->start, (uintmax_t)a->end)); KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)", b, (uintmax_t)b->start, (uintmax_t)b->end)); KASSERT(a->end <= b->start || b->end <= a->start || a->end == a->start || b->end == b->start, ("overlapping entries %p (%jx, %jx) %p (%jx, %jx)", a, (uintmax_t)a->start, (uintmax_t)a->end, b, (uintmax_t)b->start, (uintmax_t)b->end)); if (a->end < b->end) return (-1); else if (b->end < a->end) return (1); return (0); } static void dmar_gas_augment_entry(struct dmar_map_entry *entry) { struct dmar_map_entry *l, *r; for (; entry != NULL; entry = RB_PARENT(entry, rb_entry)) { l = RB_LEFT(entry, rb_entry); r = RB_RIGHT(entry, rb_entry); if (l == NULL && r == NULL) { entry->free_down = entry->free_after; } else if (l == NULL && r != NULL) { entry->free_down = MAX(entry->free_after, r->free_down); } else if (/*l != NULL && */ r == NULL) { entry->free_down = MAX(entry->free_after, l->free_down); } else /* if (l != NULL && r != NULL) */ { entry->free_down = MAX(entry->free_after, l->free_down); entry->free_down = MAX(entry->free_down, r->free_down); } } } RB_GENERATE(dmar_gas_entries_tree, dmar_map_entry, rb_entry, dmar_gas_cmp_entries); static void -dmar_gas_fix_free(struct dmar_ctx *ctx, struct dmar_map_entry *entry) +dmar_gas_fix_free(struct dmar_domain *domain, struct dmar_map_entry *entry) { struct dmar_map_entry *next; - next = RB_NEXT(dmar_gas_entries_tree, &ctx->rb_root, entry); - entry->free_after = (next != NULL ? next->start : ctx->end) - + next = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry); + entry->free_after = (next != NULL ? 
next->start : domain->end) - entry->end; dmar_gas_augment_entry(entry); } #ifdef INVARIANTS static void -dmar_gas_check_free(struct dmar_ctx *ctx) +dmar_gas_check_free(struct dmar_domain *domain) { struct dmar_map_entry *entry, *next, *l, *r; dmar_gaddr_t v; - RB_FOREACH(entry, dmar_gas_entries_tree, &ctx->rb_root) { - KASSERT(ctx == entry->ctx, - ("mismatched free ctx %p entry %p entry->ctx %p", ctx, - entry, entry->ctx)); - next = RB_NEXT(dmar_gas_entries_tree, &ctx->rb_root, entry); + RB_FOREACH(entry, dmar_gas_entries_tree, &domain->rb_root) { + KASSERT(domain == entry->domain, + ("mismatched free domain %p entry %p entry->domain %p", + domain, entry, entry->domain)); + next = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry); if (next == NULL) { - MPASS(entry->free_after == ctx->end - entry->end); + MPASS(entry->free_after == domain->end - entry->end); } else { MPASS(entry->free_after = next->start - entry->end); MPASS(entry->end <= next->start); } l = RB_LEFT(entry, rb_entry); r = RB_RIGHT(entry, rb_entry); if (l == NULL && r == NULL) { MPASS(entry->free_down == entry->free_after); } else if (l == NULL && r != NULL) { MPASS(entry->free_down = MAX(entry->free_after, r->free_down)); } else if (r == NULL) { MPASS(entry->free_down = MAX(entry->free_after, l->free_down)); } else { v = MAX(entry->free_after, l->free_down); v = MAX(entry->free_down, r->free_down); MPASS(entry->free_down == v); } } } #endif static bool -dmar_gas_rb_insert(struct dmar_ctx *ctx, struct dmar_map_entry *entry) +dmar_gas_rb_insert(struct dmar_domain *domain, struct dmar_map_entry *entry) { struct dmar_map_entry *prev, *found; - found = RB_INSERT(dmar_gas_entries_tree, &ctx->rb_root, entry); - dmar_gas_fix_free(ctx, entry); - prev = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, entry); + found = RB_INSERT(dmar_gas_entries_tree, &domain->rb_root, entry); + dmar_gas_fix_free(domain, entry); + prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry); if (prev != NULL) - dmar_gas_fix_free(ctx, prev); + dmar_gas_fix_free(domain, prev); return (found == NULL); } static void -dmar_gas_rb_remove(struct dmar_ctx *ctx, struct dmar_map_entry *entry) +dmar_gas_rb_remove(struct dmar_domain *domain, struct dmar_map_entry *entry) { struct dmar_map_entry *prev; - prev = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, entry); - RB_REMOVE(dmar_gas_entries_tree, &ctx->rb_root, entry); + prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry); + RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry); if (prev != NULL) - dmar_gas_fix_free(ctx, prev); + dmar_gas_fix_free(domain, prev); } void -dmar_gas_init_ctx(struct dmar_ctx *ctx) +dmar_gas_init_domain(struct dmar_domain *domain) { struct dmar_map_entry *begin, *end; - begin = dmar_gas_alloc_entry(ctx, DMAR_PGF_WAITOK); - end = dmar_gas_alloc_entry(ctx, DMAR_PGF_WAITOK); + begin = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK); + end = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK); - DMAR_CTX_LOCK(ctx); - KASSERT(ctx->entries_cnt == 2, ("dirty ctx %p", ctx)); - KASSERT(RB_EMPTY(&ctx->rb_root), ("non-empty entries %p", ctx)); + DMAR_DOMAIN_LOCK(domain); + KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain)); + KASSERT(RB_EMPTY(&domain->rb_root), ("non-empty entries %p", domain)); begin->start = 0; begin->end = DMAR_PAGE_SIZE; - begin->free_after = ctx->end - begin->end; + begin->free_after = domain->end - begin->end; begin->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED; - dmar_gas_rb_insert(ctx, begin); + dmar_gas_rb_insert(domain, begin); - 
end->start = ctx->end; - end->end = ctx->end; + end->start = domain->end; + end->end = domain->end; end->free_after = 0; end->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED; - dmar_gas_rb_insert(ctx, end); + dmar_gas_rb_insert(domain, end); - ctx->first_place = begin; - ctx->last_place = end; - DMAR_CTX_UNLOCK(ctx); + domain->first_place = begin; + domain->last_place = end; + domain->flags |= DMAR_DOMAIN_GAS_INITED; + DMAR_DOMAIN_UNLOCK(domain); } void -dmar_gas_fini_ctx(struct dmar_ctx *ctx) +dmar_gas_fini_domain(struct dmar_domain *domain) { struct dmar_map_entry *entry, *entry1; - DMAR_CTX_ASSERT_LOCKED(ctx); - KASSERT(ctx->entries_cnt == 2, ("ctx still in use %p", ctx)); + DMAR_DOMAIN_ASSERT_LOCKED(domain); + KASSERT(domain->entries_cnt == 2, ("domain still in use %p", domain)); - entry = RB_MIN(dmar_gas_entries_tree, &ctx->rb_root); - KASSERT(entry->start == 0, ("start entry start %p", ctx)); - KASSERT(entry->end == DMAR_PAGE_SIZE, ("start entry end %p", ctx)); + entry = RB_MIN(dmar_gas_entries_tree, &domain->rb_root); + KASSERT(entry->start == 0, ("start entry start %p", domain)); + KASSERT(entry->end == DMAR_PAGE_SIZE, ("start entry end %p", domain)); KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE, - ("start entry flags %p", ctx)); - RB_REMOVE(dmar_gas_entries_tree, &ctx->rb_root, entry); - dmar_gas_free_entry(ctx, entry); + ("start entry flags %p", domain)); + RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry); + dmar_gas_free_entry(domain, entry); - entry = RB_MAX(dmar_gas_entries_tree, &ctx->rb_root); - KASSERT(entry->start == ctx->end, ("end entry start %p", ctx)); - KASSERT(entry->end == ctx->end, ("end entry end %p", ctx)); - KASSERT(entry->free_after == 0, ("end entry free_after%p", ctx)); + entry = RB_MAX(dmar_gas_entries_tree, &domain->rb_root); + KASSERT(entry->start == domain->end, ("end entry start %p", domain)); + KASSERT(entry->end == domain->end, ("end entry end %p", domain)); + KASSERT(entry->free_after == 0, ("end entry free_after %p", domain)); KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE, - ("end entry flags %p", ctx)); - RB_REMOVE(dmar_gas_entries_tree, &ctx->rb_root, entry); - dmar_gas_free_entry(ctx, entry); + ("end entry flags %p", domain)); + RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry); + dmar_gas_free_entry(domain, entry); - RB_FOREACH_SAFE(entry, dmar_gas_entries_tree, &ctx->rb_root, entry1) { + RB_FOREACH_SAFE(entry, dmar_gas_entries_tree, &domain->rb_root, + entry1) { KASSERT((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0, - ("non-RMRR entry left %p", ctx)); - RB_REMOVE(dmar_gas_entries_tree, &ctx->rb_root, entry); - dmar_gas_free_entry(ctx, entry); + ("non-RMRR entry left %p", domain)); + RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry); + dmar_gas_free_entry(domain, entry); } } struct dmar_gas_match_args { - struct dmar_ctx *ctx; + struct dmar_domain *domain; dmar_gaddr_t size; int offset; const struct bus_dma_tag_common *common; u_int gas_flags; struct dmar_map_entry *entry; }; static bool dmar_gas_match_one(struct dmar_gas_match_args *a, struct dmar_map_entry *prev, dmar_gaddr_t end) { dmar_gaddr_t bs, start; if (a->entry->start + a->size > end) return (false); /* DMAR_PAGE_SIZE to create gap after new entry. */ if (a->entry->start < prev->end + DMAR_PAGE_SIZE || a->entry->start + a->size + a->offset + DMAR_PAGE_SIZE > prev->end + prev->free_after) return (false); /* No boundary crossing. 
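* A mapping must not span a boundary-aligned address: dmar_test_boundary() * accepts the range only when boundary is zero or [start, start + size) * ends at or before the next multiple of boundary above start. For * example, with boundary 0x10000, start 0xf000 and size 0x2000, the range * would cross 0x10000 and is rejected.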
*/ if (dmar_test_boundary(a->entry->start + a->offset, a->size, a->common->boundary)) return (true); /* * The start + offset to start + offset + size region crosses * the boundary. Check if there is enough space after the * next boundary after the prev->end. */ bs = (a->entry->start + a->offset + a->common->boundary) & ~(a->common->boundary - 1); start = roundup2(bs, a->common->alignment); /* DMAR_PAGE_SIZE to create gap after new entry. */ if (start + a->offset + a->size + DMAR_PAGE_SIZE <= prev->end + prev->free_after && start + a->offset + a->size <= end && dmar_test_boundary(start + a->offset, a->size, a->common->boundary)) { a->entry->start = start; return (true); } /* * Not enough space to align at the requested boundary, or * boundary is smaller than the size, but allowed to split. * We already checked that start + size does not overlap end. * * XXXKIB. It is possible that bs is exactly at the start of * the next entry, then we do not have gap. Ignore for now. */ if ((a->gas_flags & DMAR_GM_CANSPLIT) != 0) { a->size = bs - a->entry->start; return (true); } return (false); } static void dmar_gas_match_insert(struct dmar_gas_match_args *a, struct dmar_map_entry *prev) { struct dmar_map_entry *next; bool found; /* * The prev->end is always aligned on the page size, which * causes page alignment for the entry->start too. The size * is checked to be multiple of the page size. * * The page sized gap is created between consequent * allocations to ensure that out-of-bounds accesses fault. */ a->entry->end = a->entry->start + a->size; - next = RB_NEXT(dmar_gas_entries_tree, &a->ctx->rb_root, prev); + next = RB_NEXT(dmar_gas_entries_tree, &a->domain->rb_root, prev); KASSERT(next->start >= a->entry->end && next->start - a->entry->start >= a->size && prev->end <= a->entry->end, ("dmar_gas_match_insert hole failed %p prev (%jx, %jx) " - "free_after %jx next (%jx, %jx) entry (%jx, %jx)", a->ctx, + "free_after %jx next (%jx, %jx) entry (%jx, %jx)", a->domain, (uintmax_t)prev->start, (uintmax_t)prev->end, (uintmax_t)prev->free_after, (uintmax_t)next->start, (uintmax_t)next->end, (uintmax_t)a->entry->start, (uintmax_t)a->entry->end)); prev->free_after = a->entry->start - prev->end; a->entry->free_after = next->start - a->entry->end; - found = dmar_gas_rb_insert(a->ctx, a->entry); + found = dmar_gas_rb_insert(a->domain, a->entry); KASSERT(found, ("found dup %p start %jx size %jx", - a->ctx, (uintmax_t)a->entry->start, (uintmax_t)a->size)); + a->domain, (uintmax_t)a->entry->start, (uintmax_t)a->size)); a->entry->flags = DMAR_MAP_ENTRY_MAP; - KASSERT(RB_PREV(dmar_gas_entries_tree, &a->ctx->rb_root, + KASSERT(RB_PREV(dmar_gas_entries_tree, &a->domain->rb_root, a->entry) == prev, ("entry %p prev %p inserted prev %p", a->entry, prev, - RB_PREV(dmar_gas_entries_tree, &a->ctx->rb_root, a->entry))); - KASSERT(RB_NEXT(dmar_gas_entries_tree, &a->ctx->rb_root, + RB_PREV(dmar_gas_entries_tree, &a->domain->rb_root, a->entry))); + KASSERT(RB_NEXT(dmar_gas_entries_tree, &a->domain->rb_root, a->entry) == next, ("entry %p next %p inserted next %p", a->entry, next, - RB_NEXT(dmar_gas_entries_tree, &a->ctx->rb_root, a->entry))); + RB_NEXT(dmar_gas_entries_tree, &a->domain->rb_root, a->entry))); } static int dmar_gas_lowermatch(struct dmar_gas_match_args *a, struct dmar_map_entry *prev) { struct dmar_map_entry *l; int ret; if (prev->end < a->common->lowaddr) { a->entry->start = roundup2(prev->end + DMAR_PAGE_SIZE, a->common->alignment); if (dmar_gas_match_one(a, prev, a->common->lowaddr)) { dmar_gas_match_insert(a, 
prev); return (0); } } if (prev->free_down < a->size + a->offset + DMAR_PAGE_SIZE) return (ENOMEM); l = RB_LEFT(prev, rb_entry); if (l != NULL) { ret = dmar_gas_lowermatch(a, l); if (ret == 0) return (0); } l = RB_RIGHT(prev, rb_entry); if (l != NULL) return (dmar_gas_lowermatch(a, l)); return (ENOMEM); } static int dmar_gas_uppermatch(struct dmar_gas_match_args *a) { struct dmar_map_entry *next, *prev, find_entry; find_entry.start = a->common->highaddr; - next = RB_NFIND(dmar_gas_entries_tree, &a->ctx->rb_root, &find_entry); + next = RB_NFIND(dmar_gas_entries_tree, &a->domain->rb_root, + &find_entry); if (next == NULL) return (ENOMEM); - prev = RB_PREV(dmar_gas_entries_tree, &a->ctx->rb_root, next); - KASSERT(prev != NULL, ("no prev %p %jx", a->ctx, + prev = RB_PREV(dmar_gas_entries_tree, &a->domain->rb_root, next); + KASSERT(prev != NULL, ("no prev %p %jx", a->domain, (uintmax_t)find_entry.start)); for (;;) { a->entry->start = prev->start + DMAR_PAGE_SIZE; if (a->entry->start < a->common->highaddr) a->entry->start = a->common->highaddr; a->entry->start = roundup2(a->entry->start, a->common->alignment); - if (dmar_gas_match_one(a, prev, a->ctx->end)) { + if (dmar_gas_match_one(a, prev, a->domain->end)) { dmar_gas_match_insert(a, prev); return (0); } /* * XXXKIB. This falls back to linear iteration over * the free space in the high region. But high * regions are almost unused, the code should be * enough to cover the case, although in the * non-optimal way. */ prev = next; - next = RB_NEXT(dmar_gas_entries_tree, &a->ctx->rb_root, prev); - KASSERT(next != NULL, ("no next %p %jx", a->ctx, + next = RB_NEXT(dmar_gas_entries_tree, &a->domain->rb_root, + prev); + KASSERT(next != NULL, ("no next %p %jx", a->domain, (uintmax_t)find_entry.start)); - if (next->end >= a->ctx->end) + if (next->end >= a->domain->end) return (ENOMEM); } } static int -dmar_gas_find_space(struct dmar_ctx *ctx, +dmar_gas_find_space(struct dmar_domain *domain, const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset, u_int flags, struct dmar_map_entry *entry) { struct dmar_gas_match_args a; int error; - DMAR_CTX_ASSERT_LOCKED(ctx); - KASSERT(entry->flags == 0, ("dirty entry %p %p", ctx, entry)); + DMAR_DOMAIN_ASSERT_LOCKED(domain); + KASSERT(entry->flags == 0, ("dirty entry %p %p", domain, entry)); KASSERT((size & DMAR_PAGE_MASK) == 0, ("size %jx", (uintmax_t)size)); - a.ctx = ctx; + a.domain = domain; a.size = size; a.offset = offset; a.common = common; a.gas_flags = flags; a.entry = entry; /* Handle lower region. */ if (common->lowaddr > 0) { - error = dmar_gas_lowermatch(&a, RB_ROOT(&ctx->rb_root)); + error = dmar_gas_lowermatch(&a, RB_ROOT(&domain->rb_root)); if (error == 0) return (0); KASSERT(error == ENOMEM, ("error %d from dmar_gas_lowermatch", error)); } /* Handle upper region. 
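 * The upper region is the space at or above common->highaddr,
 * i.e. above the tag's exclusion window.  dmar_gas_uppermatch()
 * searches it by a linear walk over the map entries, which is
 * acceptable because allocations above highaddr are rare.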
*/ - if (common->highaddr >= ctx->end) + if (common->highaddr >= domain->end) return (ENOMEM); error = dmar_gas_uppermatch(&a); KASSERT(error == ENOMEM, ("error %d from dmar_gas_uppermatch", error)); return (error); } static int -dmar_gas_alloc_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry, +dmar_gas_alloc_region(struct dmar_domain *domain, struct dmar_map_entry *entry, u_int flags) { struct dmar_map_entry *next, *prev; bool found; - DMAR_CTX_ASSERT_LOCKED(ctx); + DMAR_DOMAIN_ASSERT_LOCKED(domain); if ((entry->start & DMAR_PAGE_MASK) != 0 || (entry->end & DMAR_PAGE_MASK) != 0) return (EINVAL); if (entry->start >= entry->end) return (EINVAL); - if (entry->end >= ctx->end) + if (entry->end >= domain->end) return (EINVAL); - next = RB_NFIND(dmar_gas_entries_tree, &ctx->rb_root, entry); - KASSERT(next != NULL, ("next must be non-null %p %jx", ctx, + next = RB_NFIND(dmar_gas_entries_tree, &domain->rb_root, entry); + KASSERT(next != NULL, ("next must be non-null %p %jx", domain, (uintmax_t)entry->start)); - prev = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, next); + prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, next); /* prev could be NULL */ /* * Adapt to broken BIOSes which specify overlapping RMRR * entries. * * XXXKIB: this does not handle a case when prev or next * entries are completely covered by the current one, which * extends both ways. */ if (prev != NULL && prev->end > entry->start && (prev->flags & DMAR_MAP_ENTRY_PLACE) == 0) { if ((prev->flags & DMAR_MAP_ENTRY_RMRR) == 0) return (EBUSY); entry->start = prev->end; } if (next != NULL && next->start < entry->end && (next->flags & DMAR_MAP_ENTRY_PLACE) == 0) { if ((next->flags & DMAR_MAP_ENTRY_RMRR) == 0) return (EBUSY); entry->end = next->start; } if (entry->end == entry->start) return (0); if (prev != NULL && prev->end > entry->start) { /* This assumes that prev is the placeholder entry. */ - dmar_gas_rb_remove(ctx, prev); + dmar_gas_rb_remove(domain, prev); prev = NULL; } if (next != NULL && next->start < entry->end) { - dmar_gas_rb_remove(ctx, next); + dmar_gas_rb_remove(domain, next); next = NULL; } - found = dmar_gas_rb_insert(ctx, entry); + found = dmar_gas_rb_insert(domain, entry); KASSERT(found, ("found RMRR dup %p start %jx end %jx", - ctx, (uintmax_t)entry->start, (uintmax_t)entry->end)); + domain, (uintmax_t)entry->start, (uintmax_t)entry->end)); entry->flags = DMAR_MAP_ENTRY_RMRR; #ifdef INVARIANTS struct dmar_map_entry *ip, *in; - ip = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, entry); - in = RB_NEXT(dmar_gas_entries_tree, &ctx->rb_root, entry); + ip = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry); + in = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry); KASSERT(prev == NULL || ip == prev, ("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)", entry, entry->start, entry->end, prev, prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end, ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end)); KASSERT(next == NULL || in == next, ("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)", entry, entry->start, entry->end, next, next == NULL ? 0 : next->start, next == NULL ? 0 : next->end, in, in == NULL ? 0 : in->start, in == NULL ? 
0 : in->end)); #endif return (0); } void -dmar_gas_free_space(struct dmar_ctx *ctx, struct dmar_map_entry *entry) +dmar_gas_free_space(struct dmar_domain *domain, struct dmar_map_entry *entry) { - DMAR_CTX_ASSERT_LOCKED(ctx); + DMAR_DOMAIN_ASSERT_LOCKED(domain); KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR | DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_MAP, - ("permanent entry %p %p", ctx, entry)); + ("permanent entry %p %p", domain, entry)); - dmar_gas_rb_remove(ctx, entry); + dmar_gas_rb_remove(domain, entry); entry->flags &= ~DMAR_MAP_ENTRY_MAP; #ifdef INVARIANTS if (dmar_check_free) - dmar_gas_check_free(ctx); + dmar_gas_check_free(domain); #endif } void -dmar_gas_free_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry) +dmar_gas_free_region(struct dmar_domain *domain, struct dmar_map_entry *entry) { struct dmar_map_entry *next, *prev; - DMAR_CTX_ASSERT_LOCKED(ctx); + DMAR_DOMAIN_ASSERT_LOCKED(domain); KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR | DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_RMRR, - ("non-RMRR entry %p %p", ctx, entry)); + ("non-RMRR entry %p %p", domain, entry)); - prev = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, entry); - next = RB_NEXT(dmar_gas_entries_tree, &ctx->rb_root, entry); - dmar_gas_rb_remove(ctx, entry); + prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry); + next = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry); + dmar_gas_rb_remove(domain, entry); entry->flags &= ~DMAR_MAP_ENTRY_RMRR; if (prev == NULL) - dmar_gas_rb_insert(ctx, ctx->first_place); + dmar_gas_rb_insert(domain, domain->first_place); if (next == NULL) - dmar_gas_rb_insert(ctx, ctx->last_place); + dmar_gas_rb_insert(domain, domain->last_place); } int -dmar_gas_map(struct dmar_ctx *ctx, const struct bus_dma_tag_common *common, - dmar_gaddr_t size, int offset, u_int eflags, u_int flags, vm_page_t *ma, - struct dmar_map_entry **res) +dmar_gas_map(struct dmar_domain *domain, + const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset, + u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res) { struct dmar_map_entry *entry; int error; KASSERT((flags & ~(DMAR_GM_CANWAIT | DMAR_GM_CANSPLIT)) == 0, ("invalid flags 0x%x", flags)); - entry = dmar_gas_alloc_entry(ctx, (flags & DMAR_GM_CANWAIT) != 0 ? + entry = dmar_gas_alloc_entry(domain, (flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0); if (entry == NULL) return (ENOMEM); - DMAR_CTX_LOCK(ctx); - error = dmar_gas_find_space(ctx, common, size, offset, flags, entry); + DMAR_DOMAIN_LOCK(domain); + error = dmar_gas_find_space(domain, common, size, offset, flags, + entry); if (error == ENOMEM) { - DMAR_CTX_UNLOCK(ctx); - dmar_gas_free_entry(ctx, entry); + DMAR_DOMAIN_UNLOCK(domain); + dmar_gas_free_entry(domain, entry); return (error); } #ifdef INVARIANTS if (dmar_check_free) - dmar_gas_check_free(ctx); + dmar_gas_check_free(domain); #endif KASSERT(error == 0, ("unexpected error %d from dmar_gas_find_entry", error)); - KASSERT(entry->end < ctx->end, ("allocated GPA %jx, max GPA %jx", - (uintmax_t)entry->end, (uintmax_t)ctx->end)); + KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx", + (uintmax_t)entry->end, (uintmax_t)domain->end)); entry->flags |= eflags; - DMAR_CTX_UNLOCK(ctx); + DMAR_DOMAIN_UNLOCK(domain); - error = ctx_map_buf(ctx, entry->start, entry->end - entry->start, ma, + error = domain_map_buf(domain, entry->start, entry->end - entry->start, + ma, ((eflags & DMAR_MAP_ENTRY_READ) != 0 ? 
DMAR_PTE_R : 0) | ((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) | ((eflags & DMAR_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) | ((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0), (flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0); if (error == ENOMEM) { - dmar_ctx_unload_entry(entry, true); + dmar_domain_unload_entry(entry, true); return (error); } KASSERT(error == 0, - ("unexpected error %d from ctx_map_buf", error)); + ("unexpected error %d from domain_map_buf", error)); *res = entry; return (0); } int -dmar_gas_map_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry, +dmar_gas_map_region(struct dmar_domain *domain, struct dmar_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma) { dmar_gaddr_t start; int error; - KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", ctx, + KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain, entry, entry->flags)); KASSERT((flags & ~(DMAR_GM_CANWAIT)) == 0, ("invalid flags 0x%x", flags)); start = entry->start; - DMAR_CTX_LOCK(ctx); - error = dmar_gas_alloc_region(ctx, entry, flags); + DMAR_DOMAIN_LOCK(domain); + error = dmar_gas_alloc_region(domain, entry, flags); if (error != 0) { - DMAR_CTX_UNLOCK(ctx); + DMAR_DOMAIN_UNLOCK(domain); return (error); } entry->flags |= eflags; - DMAR_CTX_UNLOCK(ctx); + DMAR_DOMAIN_UNLOCK(domain); if (entry->end == entry->start) return (0); - error = ctx_map_buf(ctx, entry->start, entry->end - entry->start, + error = domain_map_buf(domain, entry->start, entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start), ((eflags & DMAR_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) | ((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) | ((eflags & DMAR_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) | ((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0), (flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0); if (error == ENOMEM) { - dmar_ctx_unload_entry(entry, false); + dmar_domain_unload_entry(entry, false); return (error); } KASSERT(error == 0, - ("unexpected error %d from ctx_map_buf", error)); + ("unexpected error %d from domain_map_buf", error)); return (0); } int -dmar_gas_reserve_region(struct dmar_ctx *ctx, dmar_gaddr_t start, +dmar_gas_reserve_region(struct dmar_domain *domain, dmar_gaddr_t start, dmar_gaddr_t end) { struct dmar_map_entry *entry; int error; - entry = dmar_gas_alloc_entry(ctx, DMAR_PGF_WAITOK); + entry = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK); entry->start = start; entry->end = end; - DMAR_CTX_LOCK(ctx); - error = dmar_gas_alloc_region(ctx, entry, DMAR_GM_CANWAIT); + DMAR_DOMAIN_LOCK(domain); + error = dmar_gas_alloc_region(domain, entry, DMAR_GM_CANWAIT); if (error == 0) entry->flags |= DMAR_MAP_ENTRY_UNMAPPED; - DMAR_CTX_UNLOCK(ctx); + DMAR_DOMAIN_UNLOCK(domain); if (error != 0) - dmar_gas_free_entry(ctx, entry); + dmar_gas_free_entry(domain, entry); return (error); } Index: head/sys/x86/iommu/intel_idpgtbl.c =================================================================== --- head/sys/x86/iommu/intel_idpgtbl.c (revision 284868) +++ head/sys/x86/iommu/intel_idpgtbl.c (revision 284869) @@ -1,784 +1,799 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -static int ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, - dmar_gaddr_t size, int flags); +static int domain_unmap_buf_locked(struct dmar_domain *domain, + dmar_gaddr_t base, dmar_gaddr_t size, int flags); /* * The cache of the identity mapping page tables for the DMARs. Using * the cache saves significant amount of memory for page tables by * reusing the page tables, since usually DMARs are identical and have * the same capabilities. Still, cache records the information needed * to match DMAR capabilities and page table format, to correctly * handle different DMARs. */ struct idpgtbl { dmar_gaddr_t maxaddr; /* Page table covers the guest address range [0..maxaddr) */ int pglvl; /* Total page table levels ignoring superpages */ int leaf; /* The last materialized page table level, it is non-zero if superpages are supported */ vm_object_t pgtbl_obj; /* The page table pages */ LIST_ENTRY(idpgtbl) link; }; static struct sx idpgtbl_lock; SX_SYSINIT(idpgtbl, &idpgtbl_lock, "idpgtbl"); static LIST_HEAD(, idpgtbl) idpgtbls = LIST_HEAD_INITIALIZER(idpgtbls); static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl", "Intel DMAR Identity mappings cache elements"); /* * Build the next level of the page tables for the identity mapping. * - lvl is the level to build; * - idx is the index of the page table page in the pgtbl_obj, which is * being allocated filled now; * - addr is the starting address in the bus address space which is * mapped by the page table page. 
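 * The page table pages are stored in pgtbl_obj level by level:
 * pindex 0 is the root, and the children of the page at pindex idx
 * start at pindex idx * DMAR_NPTEPG + 1, so with the usual 512
 * entries per 4K page the root's children occupy pindexes 1..512
 * and the next level starts at 513.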
*/ static void -ctx_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx, +domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx, dmar_gaddr_t addr) { vm_page_t m1; dmar_pte_t *pte; struct sf_buf *sf; dmar_gaddr_t f, pg_sz; vm_pindex_t base; int i; VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj); if (addr >= tbl->maxaddr) return; (void)dmar_pgalloc(tbl->pgtbl_obj, idx, DMAR_PGF_OBJL | DMAR_PGF_WAITOK | DMAR_PGF_ZERO); base = idx * DMAR_NPTEPG + 1; /* Index of the first child page of idx */ pg_sz = pglvl_page_size(tbl->pglvl, lvl); if (lvl != tbl->leaf) { for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) - ctx_idmap_nextlvl(tbl, lvl + 1, base + i, f); + domain_idmap_nextlvl(tbl, lvl + 1, base + i, f); } VM_OBJECT_WUNLOCK(tbl->pgtbl_obj); pte = dmar_map_pgtbl(tbl->pgtbl_obj, idx, DMAR_PGF_WAITOK, &sf); if (lvl == tbl->leaf) { for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) { if (f >= tbl->maxaddr) break; pte[i].pte = (DMAR_PTE_ADDR_MASK & f) | DMAR_PTE_R | DMAR_PTE_W; } } else { for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) { if (f >= tbl->maxaddr) break; m1 = dmar_pgalloc(tbl->pgtbl_obj, base + i, DMAR_PGF_NOALLOC); KASSERT(m1 != NULL, ("lost page table page")); pte[i].pte = (DMAR_PTE_ADDR_MASK & VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W; } } - /* ctx_get_idmap_pgtbl flushes CPU cache if needed. */ + /* domain_get_idmap_pgtbl flushes CPU cache if needed. */ dmar_unmap_pgtbl(sf); VM_OBJECT_WLOCK(tbl->pgtbl_obj); } /* * Find a ready and compatible identity-mapping page table in the * cache. If not found, populate the identity-mapping page table for * the context, up to the maxaddr. The maxaddr byte is allowed to be * not mapped, which is aligned with the definition of Maxmem as the * highest usable physical address + 1. If superpages are used, the * maxaddr is typically mapped. */ vm_object_t -ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr) +domain_get_idmap_pgtbl(struct dmar_domain *domain, dmar_gaddr_t maxaddr) { struct dmar_unit *unit; struct idpgtbl *tbl; vm_object_t res; vm_page_t m; int leaf, i; leaf = 0; /* silence gcc */ /* * First, determine where to stop the paging structures. */ - for (i = 0; i < ctx->pglvl; i++) { - if (i == ctx->pglvl - 1 || ctx_is_sp_lvl(ctx, i)) { + for (i = 0; i < domain->pglvl; i++) { + if (i == domain->pglvl - 1 || domain_is_sp_lvl(domain, i)) { leaf = i; break; } } /* * Search the cache for a compatible page table. Qualified * page table must map up to maxaddr, its level must be * supported by the DMAR and leaf should be equal to the * calculated value. The later restriction could be lifted * but I believe it is currently impossible to have any * deviations for existing hardware. */ sx_slock(&idpgtbl_lock); LIST_FOREACH(tbl, &idpgtbls, link) { if (tbl->maxaddr >= maxaddr && - dmar_pglvl_supported(ctx->dmar, tbl->pglvl) && + dmar_pglvl_supported(domain->dmar, tbl->pglvl) && tbl->leaf == leaf) { res = tbl->pgtbl_obj; vm_object_reference(res); sx_sunlock(&idpgtbl_lock); - ctx->pglvl = tbl->pglvl; /* XXXKIB ? */ + domain->pglvl = tbl->pglvl; /* XXXKIB ? */ goto end; } } /* * Not found in cache, relock the cache into exclusive mode to * be able to add element, and recheck cache again after the * relock. 
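 * The shared lock is dropped before the exclusive lock is taken,
 * so another thread may slip in and insert a matching table in the
 * window between the two; the recheck under the exclusive lock
 * avoids creating a duplicate cache entry in that case.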
*/ sx_sunlock(&idpgtbl_lock); sx_xlock(&idpgtbl_lock); LIST_FOREACH(tbl, &idpgtbls, link) { if (tbl->maxaddr >= maxaddr && - dmar_pglvl_supported(ctx->dmar, tbl->pglvl) && + dmar_pglvl_supported(domain->dmar, tbl->pglvl) && tbl->leaf == leaf) { res = tbl->pgtbl_obj; vm_object_reference(res); sx_xunlock(&idpgtbl_lock); - ctx->pglvl = tbl->pglvl; /* XXXKIB ? */ + domain->pglvl = tbl->pglvl; /* XXXKIB ? */ return (res); } } /* * Still not found, create new page table. */ tbl = malloc(sizeof(*tbl), M_DMAR_IDPGTBL, M_WAITOK); - tbl->pglvl = ctx->pglvl; + tbl->pglvl = domain->pglvl; tbl->leaf = leaf; tbl->maxaddr = maxaddr; tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL); VM_OBJECT_WLOCK(tbl->pgtbl_obj); - ctx_idmap_nextlvl(tbl, 0, 0, 0); + domain_idmap_nextlvl(tbl, 0, 0, 0); VM_OBJECT_WUNLOCK(tbl->pgtbl_obj); LIST_INSERT_HEAD(&idpgtbls, tbl, link); res = tbl->pgtbl_obj; vm_object_reference(res); sx_xunlock(&idpgtbl_lock); end: /* * Table was found or created. * * If DMAR does not snoop paging structures accesses, flush * CPU cache to memory. Note that dmar_unmap_pgtbl() coherent * argument was possibly invalid at the time of the identity * page table creation, since DMAR which was passed at the * time of creation could be coherent, while current DMAR is * not. * * If DMAR cannot look into the chipset write buffer, flush it * as well. */ - unit = ctx->dmar; + unit = domain->dmar; if (!DMAR_IS_COHERENT(unit)) { VM_OBJECT_WLOCK(res); for (m = vm_page_lookup(res, 0); m != NULL; m = vm_page_next(m)) pmap_invalidate_cache_pages(&m, 1); VM_OBJECT_WUNLOCK(res); } if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) { DMAR_LOCK(unit); dmar_flush_write_bufs(unit); DMAR_UNLOCK(unit); } return (res); } /* * Return a reference to the identity mapping page table to the cache. */ void put_idmap_pgtbl(vm_object_t obj) { struct idpgtbl *tbl, *tbl1; vm_object_t rmobj; sx_slock(&idpgtbl_lock); KASSERT(obj->ref_count >= 2, ("lost cache reference")); vm_object_deallocate(obj); /* * Cache always owns one last reference on the page table object. * If there is an additional reference, object must stay. */ if (obj->ref_count > 1) { sx_sunlock(&idpgtbl_lock); return; } /* * Cache reference is the last, remove cache element and free * page table object, returning the page table pages to the * system. */ sx_sunlock(&idpgtbl_lock); sx_xlock(&idpgtbl_lock); LIST_FOREACH_SAFE(tbl, &idpgtbls, link, tbl1) { rmobj = tbl->pgtbl_obj; if (rmobj->ref_count == 1) { LIST_REMOVE(tbl, link); atomic_subtract_int(&dmar_tbl_pagecnt, rmobj->resident_page_count); vm_object_deallocate(rmobj); free(tbl, M_DMAR_IDPGTBL); } } sx_xunlock(&idpgtbl_lock); } /* * The core routines to map and unmap host pages at the given guest * address. Support superpages. */ /* * Index of the pte for the guest address base in the page table at * the level lvl. */ static int -ctx_pgtbl_pte_off(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl) +domain_pgtbl_pte_off(struct dmar_domain *domain, dmar_gaddr_t base, int lvl) { - base >>= DMAR_PAGE_SHIFT + (ctx->pglvl - lvl - 1) * DMAR_NPTEPGSHIFT; + base >>= DMAR_PAGE_SHIFT + (domain->pglvl - lvl - 1) * + DMAR_NPTEPGSHIFT; return (base & DMAR_PTEMASK); } /* * Returns the page index of the page table page in the page table * object, which maps the given address base at the page table level * lvl. 
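 * The pindex is computed by walking down from the root at pindex 0;
 * at each level the child pindex is the pte offset for base at that
 * level plus parent_pindex * DMAR_NPTEPG + 1, matching the layout
 * described above, so lvl 0 always resolves to the root page.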
*/ static vm_pindex_t -ctx_pgtbl_get_pindex(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl) +domain_pgtbl_get_pindex(struct dmar_domain *domain, dmar_gaddr_t base, int lvl) { vm_pindex_t idx, pidx; int i; - KASSERT(lvl >= 0 && lvl < ctx->pglvl, ("wrong lvl %p %d", ctx, lvl)); + KASSERT(lvl >= 0 && lvl < domain->pglvl, + ("wrong lvl %p %d", domain, lvl)); - for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx) - idx = ctx_pgtbl_pte_off(ctx, base, i) + pidx * DMAR_NPTEPG + 1; + for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx) { + idx = domain_pgtbl_pte_off(domain, base, i) + + pidx * DMAR_NPTEPG + 1; + } return (idx); } static dmar_pte_t * -ctx_pgtbl_map_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags, - vm_pindex_t *idxp, struct sf_buf **sf) +domain_pgtbl_map_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl, + int flags, vm_pindex_t *idxp, struct sf_buf **sf) { vm_page_t m; struct sf_buf *sfp; dmar_pte_t *pte, *ptep; vm_pindex_t idx, idx1; - DMAR_CTX_ASSERT_PGLOCKED(ctx); + DMAR_DOMAIN_ASSERT_PGLOCKED(domain); KASSERT((flags & DMAR_PGF_OBJL) != 0, ("lost PGF_OBJL")); - idx = ctx_pgtbl_get_pindex(ctx, base, lvl); + idx = domain_pgtbl_get_pindex(domain, base, lvl); if (*sf != NULL && idx == *idxp) { pte = (dmar_pte_t *)sf_buf_kva(*sf); } else { if (*sf != NULL) dmar_unmap_pgtbl(*sf); *idxp = idx; retry: - pte = dmar_map_pgtbl(ctx->pgtbl_obj, idx, flags, sf); + pte = dmar_map_pgtbl(domain->pgtbl_obj, idx, flags, sf); if (pte == NULL) { - KASSERT(lvl > 0, ("lost root page table page %p", ctx)); + KASSERT(lvl > 0, + ("lost root page table page %p", domain)); /* * Page table page does not exists, allocate * it and create pte in the up level. */ - m = dmar_pgalloc(ctx->pgtbl_obj, idx, flags | + m = dmar_pgalloc(domain->pgtbl_obj, idx, flags | DMAR_PGF_ZERO); if (m == NULL) return (NULL); /* * Prevent potential free while pgtbl_obj is * unlocked in the recursive call to - * ctx_pgtbl_map_pte(), if other thread did + * domain_pgtbl_map_pte(), if other thread did * pte write and clean while the lock if * dropped. */ m->wire_count++; sfp = NULL; - ptep = ctx_pgtbl_map_pte(ctx, base, lvl - 1, flags, - &idx1, &sfp); + ptep = domain_pgtbl_map_pte(domain, base, lvl - 1, + flags, &idx1, &sfp); if (ptep == NULL) { KASSERT(m->pindex != 0, - ("loosing root page %p", ctx)); + ("loosing root page %p", domain)); m->wire_count--; - dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags); + dmar_pgfree(domain->pgtbl_obj, m->pindex, + flags); return (NULL); } dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W | VM_PAGE_TO_PHYS(m)); - dmar_flush_pte_to_ram(ctx->dmar, ptep); + dmar_flush_pte_to_ram(domain->dmar, ptep); sf_buf_page(sfp)->wire_count += 1; m->wire_count--; dmar_unmap_pgtbl(sfp); /* Only executed once. 
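 * The page table page for idx has just been allocated and its pte
 * installed at the parent level, so the retried dmar_map_pgtbl()
 * must find it and cannot reach this branch a second time.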
*/ goto retry; } } - pte += ctx_pgtbl_pte_off(ctx, base, lvl); + pte += domain_pgtbl_pte_off(domain, base, lvl); return (pte); } static int -ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size, - vm_page_t *ma, uint64_t pflags, int flags) +domain_map_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base, + dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags) { dmar_pte_t *pte; struct sf_buf *sf; dmar_gaddr_t pg_sz, base1, size1; vm_pindex_t pi, c, idx, run_sz; int lvl; bool superpage; - DMAR_CTX_ASSERT_PGLOCKED(ctx); + DMAR_DOMAIN_ASSERT_PGLOCKED(domain); base1 = base; size1 = size; flags |= DMAR_PGF_OBJL; TD_PREP_PINNED_ASSERT; for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz, pi += run_sz) { for (lvl = 0, c = 0, superpage = false;; lvl++) { - pg_sz = ctx_page_size(ctx, lvl); + pg_sz = domain_page_size(domain, lvl); run_sz = pg_sz >> DMAR_PAGE_SHIFT; - if (lvl == ctx->pglvl - 1) + if (lvl == domain->pglvl - 1) break; /* * Check if the current base suitable for the * superpage mapping. First, verify the level. */ - if (!ctx_is_sp_lvl(ctx, lvl)) + if (!domain_is_sp_lvl(domain, lvl)) continue; /* * Next, look at the size of the mapping and * alignment of both guest and host addresses. */ if (size < pg_sz || (base & (pg_sz - 1)) != 0 || (VM_PAGE_TO_PHYS(ma[pi]) & (pg_sz - 1)) != 0) continue; /* All passed, check host pages contiguouty. */ if (c == 0) { for (c = 1; c < run_sz; c++) { if (VM_PAGE_TO_PHYS(ma[pi + c]) != VM_PAGE_TO_PHYS(ma[pi + c - 1]) + PAGE_SIZE) break; } } if (c >= run_sz) { superpage = true; break; } } KASSERT(size >= pg_sz, - ("mapping loop overflow %p %jx %jx %jx", ctx, + ("mapping loop overflow %p %jx %jx %jx", domain, (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz)); KASSERT(pg_sz > 0, ("pg_sz 0 lvl %d", lvl)); - pte = ctx_pgtbl_map_pte(ctx, base, lvl, flags, &idx, &sf); + pte = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf); if (pte == NULL) { KASSERT((flags & DMAR_PGF_WAITOK) == 0, - ("failed waitable pte alloc %p", ctx)); + ("failed waitable pte alloc %p", domain)); if (sf != NULL) dmar_unmap_pgtbl(sf); - ctx_unmap_buf_locked(ctx, base1, base - base1, flags); + domain_unmap_buf_locked(domain, base1, base - base1, + flags); TD_PINNED_ASSERT; return (ENOMEM); } dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags | (superpage ? 
DMAR_PTE_SP : 0)); - dmar_flush_pte_to_ram(ctx->dmar, pte); + dmar_flush_pte_to_ram(domain->dmar, pte); sf_buf_page(sf)->wire_count += 1; } if (sf != NULL) dmar_unmap_pgtbl(sf); TD_PINNED_ASSERT; return (0); } int -ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size, +domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags) { struct dmar_unit *unit; int error; - unit = ctx->dmar; + unit = domain->dmar; - KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0, - ("modifying idmap pagetable ctx %p", ctx)); + KASSERT((domain->flags & DMAR_DOMAIN_IDMAP) == 0, + ("modifying idmap pagetable domain %p", domain)); KASSERT((base & DMAR_PAGE_MASK) == 0, - ("non-aligned base %p %jx %jx", ctx, (uintmax_t)base, + ("non-aligned base %p %jx %jx", domain, (uintmax_t)base, (uintmax_t)size)); KASSERT((size & DMAR_PAGE_MASK) == 0, - ("non-aligned size %p %jx %jx", ctx, (uintmax_t)base, + ("non-aligned size %p %jx %jx", domain, (uintmax_t)base, (uintmax_t)size)); - KASSERT(size > 0, ("zero size %p %jx %jx", ctx, (uintmax_t)base, + KASSERT(size > 0, ("zero size %p %jx %jx", domain, (uintmax_t)base, (uintmax_t)size)); - KASSERT(base < (1ULL << ctx->agaw), - ("base too high %p %jx %jx agaw %d", ctx, (uintmax_t)base, - (uintmax_t)size, ctx->agaw)); - KASSERT(base + size < (1ULL << ctx->agaw), - ("end too high %p %jx %jx agaw %d", ctx, (uintmax_t)base, - (uintmax_t)size, ctx->agaw)); + KASSERT(base < (1ULL << domain->agaw), + ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base, + (uintmax_t)size, domain->agaw)); + KASSERT(base + size < (1ULL << domain->agaw), + ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base, + (uintmax_t)size, domain->agaw)); KASSERT(base + size > base, - ("size overflow %p %jx %jx", ctx, (uintmax_t)base, + ("size overflow %p %jx %jx", domain, (uintmax_t)base, (uintmax_t)size)); KASSERT((pflags & (DMAR_PTE_R | DMAR_PTE_W)) != 0, ("neither read nor write %jx", (uintmax_t)pflags)); KASSERT((pflags & ~(DMAR_PTE_R | DMAR_PTE_W | DMAR_PTE_SNP | DMAR_PTE_TM)) == 0, ("invalid pte flags %jx", (uintmax_t)pflags)); KASSERT((pflags & DMAR_PTE_SNP) == 0 || (unit->hw_ecap & DMAR_ECAP_SC) != 0, ("PTE_SNP for dmar without snoop control %p %jx", - ctx, (uintmax_t)pflags)); + domain, (uintmax_t)pflags)); KASSERT((pflags & DMAR_PTE_TM) == 0 || (unit->hw_ecap & DMAR_ECAP_DI) != 0, ("PTE_TM for dmar without DIOTLB %p %jx", - ctx, (uintmax_t)pflags)); + domain, (uintmax_t)pflags)); KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags)); - DMAR_CTX_PGLOCK(ctx); - error = ctx_map_buf_locked(ctx, base, size, ma, pflags, flags); - DMAR_CTX_PGUNLOCK(ctx); + DMAR_DOMAIN_PGLOCK(domain); + error = domain_map_buf_locked(domain, base, size, ma, pflags, flags); + DMAR_DOMAIN_PGUNLOCK(domain); if (error != 0) return (error); if ((unit->hw_cap & DMAR_CAP_CM) != 0) - ctx_flush_iotlb_sync(ctx, base, size); + domain_flush_iotlb_sync(domain, base, size); else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) { /* See 11.1 Write Buffer Flushing. 
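 * Implementations that report DMAR_CAP_RWBF buffer writes to the
 * memory-resident translation structures internally; the updated
 * ptes are only guaranteed to be visible to the unit after an
 * explicit write-buffer flush, which dmar_flush_write_bufs()
 * issues below.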
*/ DMAR_LOCK(unit); dmar_flush_write_bufs(unit); DMAR_UNLOCK(unit); } return (0); } -static void ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, - int lvl, int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_fs); +static void domain_unmap_clear_pte(struct dmar_domain *domain, + dmar_gaddr_t base, int lvl, int flags, dmar_pte_t *pte, + struct sf_buf **sf, bool free_fs); static void -ctx_free_pgtbl_pde(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags) +domain_free_pgtbl_pde(struct dmar_domain *domain, dmar_gaddr_t base, + int lvl, int flags) { struct sf_buf *sf; dmar_pte_t *pde; vm_pindex_t idx; sf = NULL; - pde = ctx_pgtbl_map_pte(ctx, base, lvl, flags, &idx, &sf); - ctx_unmap_clear_pte(ctx, base, lvl, flags, pde, &sf, true); + pde = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf); + domain_unmap_clear_pte(domain, base, lvl, flags, pde, &sf, true); } static void -ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, +domain_unmap_clear_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl, int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf) { vm_page_t m; dmar_pte_clear(&pte->pte); - dmar_flush_pte_to_ram(ctx->dmar, pte); + dmar_flush_pte_to_ram(domain->dmar, pte); m = sf_buf_page(*sf); if (free_sf) { dmar_unmap_pgtbl(*sf); *sf = NULL; } m->wire_count--; if (m->wire_count != 0) return; KASSERT(lvl != 0, - ("lost reference (lvl) on root pg ctx %p base %jx lvl %d", - ctx, (uintmax_t)base, lvl)); + ("lost reference (lvl) on root pg domain %p base %jx lvl %d", + domain, (uintmax_t)base, lvl)); KASSERT(m->pindex != 0, - ("lost reference (idx) on root pg ctx %p base %jx lvl %d", - ctx, (uintmax_t)base, lvl)); - dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags); - ctx_free_pgtbl_pde(ctx, base, lvl - 1, flags); + ("lost reference (idx) on root pg domain %p base %jx lvl %d", + domain, (uintmax_t)base, lvl)); + dmar_pgfree(domain->pgtbl_obj, m->pindex, flags); + domain_free_pgtbl_pde(domain, base, lvl - 1, flags); } /* * Assumes that the unmap is never partial. 
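 * That is, callers always unmap a region exactly as it was mapped,
 * so the loop below can step by the full page or superpage size it
 * finds mapped at each base address and never needs to split an
 * existing superpage.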
*/ static int -ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, +domain_unmap_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size, int flags) { dmar_pte_t *pte; struct sf_buf *sf; vm_pindex_t idx; dmar_gaddr_t pg_sz; int lvl; - DMAR_CTX_ASSERT_PGLOCKED(ctx); + DMAR_DOMAIN_ASSERT_PGLOCKED(domain); if (size == 0) return (0); - KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0, - ("modifying idmap pagetable ctx %p", ctx)); + KASSERT((domain->flags & DMAR_DOMAIN_IDMAP) == 0, + ("modifying idmap pagetable domain %p", domain)); KASSERT((base & DMAR_PAGE_MASK) == 0, - ("non-aligned base %p %jx %jx", ctx, (uintmax_t)base, + ("non-aligned base %p %jx %jx", domain, (uintmax_t)base, (uintmax_t)size)); KASSERT((size & DMAR_PAGE_MASK) == 0, - ("non-aligned size %p %jx %jx", ctx, (uintmax_t)base, + ("non-aligned size %p %jx %jx", domain, (uintmax_t)base, (uintmax_t)size)); - KASSERT(base < (1ULL << ctx->agaw), - ("base too high %p %jx %jx agaw %d", ctx, (uintmax_t)base, - (uintmax_t)size, ctx->agaw)); - KASSERT(base + size < (1ULL << ctx->agaw), - ("end too high %p %jx %jx agaw %d", ctx, (uintmax_t)base, - (uintmax_t)size, ctx->agaw)); + KASSERT(base < (1ULL << domain->agaw), + ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base, + (uintmax_t)size, domain->agaw)); + KASSERT(base + size < (1ULL << domain->agaw), + ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base, + (uintmax_t)size, domain->agaw)); KASSERT(base + size > base, - ("size overflow %p %jx %jx", ctx, (uintmax_t)base, + ("size overflow %p %jx %jx", domain, (uintmax_t)base, (uintmax_t)size)); KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags)); pg_sz = 0; /* silence gcc */ flags |= DMAR_PGF_OBJL; TD_PREP_PINNED_ASSERT; for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) { - for (lvl = 0; lvl < ctx->pglvl; lvl++) { - if (lvl != ctx->pglvl - 1 && !ctx_is_sp_lvl(ctx, lvl)) + for (lvl = 0; lvl < domain->pglvl; lvl++) { + if (lvl != domain->pglvl - 1 && + !domain_is_sp_lvl(domain, lvl)) continue; - pg_sz = ctx_page_size(ctx, lvl); + pg_sz = domain_page_size(domain, lvl); if (pg_sz > size) continue; - pte = ctx_pgtbl_map_pte(ctx, base, lvl, flags, + pte = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf); KASSERT(pte != NULL, ("sleeping or page missed %p %jx %d 0x%x", - ctx, (uintmax_t)base, lvl, flags)); + domain, (uintmax_t)base, lvl, flags)); if ((pte->pte & DMAR_PTE_SP) != 0 || - lvl == ctx->pglvl - 1) { - ctx_unmap_clear_pte(ctx, base, lvl, flags, - pte, &sf, false); + lvl == domain->pglvl - 1) { + domain_unmap_clear_pte(domain, base, lvl, + flags, pte, &sf, false); break; } } KASSERT(size >= pg_sz, - ("unmapping loop overflow %p %jx %jx %jx", ctx, + ("unmapping loop overflow %p %jx %jx %jx", domain, (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz)); } if (sf != NULL) dmar_unmap_pgtbl(sf); /* * See 11.1 Write Buffer Flushing for an explanation why RWBF * can be ignored there. 
*/ TD_PINNED_ASSERT; return (0); } int -ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size, - int flags) +domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base, + dmar_gaddr_t size, int flags) { int error; - DMAR_CTX_PGLOCK(ctx); - error = ctx_unmap_buf_locked(ctx, base, size, flags); - DMAR_CTX_PGUNLOCK(ctx); + DMAR_DOMAIN_PGLOCK(domain); + error = domain_unmap_buf_locked(domain, base, size, flags); + DMAR_DOMAIN_PGUNLOCK(domain); return (error); } int -ctx_alloc_pgtbl(struct dmar_ctx *ctx) +domain_alloc_pgtbl(struct dmar_domain *domain) { vm_page_t m; - KASSERT(ctx->pgtbl_obj == NULL, ("already initialized %p", ctx)); + KASSERT(domain->pgtbl_obj == NULL, + ("already initialized %p", domain)); - ctx->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, - IDX_TO_OFF(pglvl_max_pages(ctx->pglvl)), 0, 0, NULL); - DMAR_CTX_PGLOCK(ctx); - m = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_WAITOK | + domain->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, + IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL); + DMAR_DOMAIN_PGLOCK(domain); + m = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_WAITOK | DMAR_PGF_ZERO | DMAR_PGF_OBJL); /* No implicit free of the top level page table page. */ m->wire_count = 1; - DMAR_CTX_PGUNLOCK(ctx); + DMAR_DOMAIN_PGUNLOCK(domain); + DMAR_LOCK(domain->dmar); + domain->flags |= DMAR_DOMAIN_PGTBL_INITED; + DMAR_UNLOCK(domain->dmar); return (0); } void -ctx_free_pgtbl(struct dmar_ctx *ctx) +domain_free_pgtbl(struct dmar_domain *domain) { vm_object_t obj; vm_page_t m; - obj = ctx->pgtbl_obj; + obj = domain->pgtbl_obj; if (obj == NULL) { - KASSERT((ctx->dmar->hw_ecap & DMAR_ECAP_PT) != 0 && - (ctx->flags & DMAR_CTX_IDMAP) != 0, - ("lost pagetable object ctx %p", ctx)); + KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 && + (domain->flags & DMAR_DOMAIN_IDMAP) != 0, + ("lost pagetable object domain %p", domain)); return; } - DMAR_CTX_ASSERT_PGLOCKED(ctx); - ctx->pgtbl_obj = NULL; + DMAR_DOMAIN_ASSERT_PGLOCKED(domain); + domain->pgtbl_obj = NULL; - if ((ctx->flags & DMAR_CTX_IDMAP) != 0) { + if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0) { put_idmap_pgtbl(obj); - ctx->flags &= ~DMAR_CTX_IDMAP; + domain->flags &= ~DMAR_DOMAIN_IDMAP; return; } /* Obliterate wire_counts */ VM_OBJECT_ASSERT_WLOCKED(obj); for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m)) m->wire_count = 0; VM_OBJECT_WUNLOCK(obj); vm_object_deallocate(obj); } static inline uint64_t -ctx_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro) +domain_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro) { uint64_t iotlbr; dmar_write8(unit, iro + DMAR_IOTLB_REG_OFF, DMAR_IOTLB_IVT | DMAR_IOTLB_DR | DMAR_IOTLB_DW | wt); for (;;) { iotlbr = dmar_read8(unit, iro + DMAR_IOTLB_REG_OFF); if ((iotlbr & DMAR_IOTLB_IVT) == 0) break; cpu_spinwait(); } return (iotlbr); } void -ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size) +domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base, + dmar_gaddr_t size) { struct dmar_unit *unit; dmar_gaddr_t isize; uint64_t iotlbr; int am, iro; - unit = ctx->dmar; + unit = domain->dmar; KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call", unit->unit)); iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16; DMAR_LOCK(unit); if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) { - iotlbr = ctx_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM | - DMAR_IOTLB_DID(ctx->domain), iro); + iotlbr = domain_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM | + DMAR_IOTLB_DID(domain->domain), iro); KASSERT((iotlbr 
& DMAR_IOTLB_IAIG_MASK) != DMAR_IOTLB_IAIG_INVLD, ("dmar%d: invalidation failed %jx", unit->unit, (uintmax_t)iotlbr)); } else { for (; size > 0; base += isize, size -= isize) { am = calc_am(unit, base, size, &isize); dmar_write8(unit, iro, base | am); - iotlbr = ctx_wait_iotlb_flush(unit, - DMAR_IOTLB_IIRG_PAGE | DMAR_IOTLB_DID(ctx->domain), - iro); + iotlbr = domain_wait_iotlb_flush(unit, + DMAR_IOTLB_IIRG_PAGE | + DMAR_IOTLB_DID(domain->domain), iro); KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) != DMAR_IOTLB_IAIG_INVLD, ("dmar%d: PSI invalidation failed " "iotlbr 0x%jx base 0x%jx size 0x%jx am %d", unit->unit, (uintmax_t)iotlbr, (uintmax_t)base, (uintmax_t)size, am)); /* * Any non-page granularity covers whole guest * address space for the domain. */ if ((iotlbr & DMAR_IOTLB_IAIG_MASK) != DMAR_IOTLB_IAIG_PAGE) break; } } DMAR_UNLOCK(unit); } Index: head/sys/x86/iommu/intel_qi.c =================================================================== --- head/sys/x86/iommu/intel_qi.c (revision 284868) +++ head/sys/x86/iommu/intel_qi.c (revision 284869) @@ -1,471 +1,471 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static bool dmar_qi_seq_processed(const struct dmar_unit *unit, const struct dmar_qi_genseq *pseq) { return (pseq->gen < unit->inv_waitd_gen || (pseq->gen == unit->inv_waitd_gen && pseq->seq <= unit->inv_waitd_seq_hw)); } static int dmar_enable_qi(struct dmar_unit *unit) { DMAR_ASSERT_LOCKED(unit); unit->hw_gcmd |= DMAR_GCMD_QIE; dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd); /* XXXKIB should have a timeout */ while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES) == 0) cpu_spinwait(); return (0); } static int dmar_disable_qi(struct dmar_unit *unit) { DMAR_ASSERT_LOCKED(unit); unit->hw_gcmd &= ~DMAR_GCMD_QIE; dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd); /* XXXKIB should have a timeout */ while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES) != 0) cpu_spinwait(); return (0); } static void dmar_qi_advance_tail(struct dmar_unit *unit) { DMAR_ASSERT_LOCKED(unit); dmar_write4(unit, DMAR_IQT_REG, unit->inv_queue_tail); } static void dmar_qi_ensure(struct dmar_unit *unit, int descr_count) { uint32_t head; int bytes; DMAR_ASSERT_LOCKED(unit); bytes = descr_count << DMAR_IQ_DESCR_SZ_SHIFT; for (;;) { if (bytes <= unit->inv_queue_avail) break; /* refill */ head = dmar_read4(unit, DMAR_IQH_REG); head &= DMAR_IQH_MASK; unit->inv_queue_avail = head - unit->inv_queue_tail - DMAR_IQ_DESCR_SZ; if (head <= unit->inv_queue_tail) unit->inv_queue_avail += unit->inv_queue_size; if (bytes <= unit->inv_queue_avail) break; /* * No space in the queue, do busy wait. Hardware must * make a progress. But first advance the tail to * inform the descriptor streamer about entries we * might have already filled, otherwise they could * clog the whole queue.. */ dmar_qi_advance_tail(unit); unit->inv_queue_full++; cpu_spinwait(); } unit->inv_queue_avail -= bytes; } static void dmar_qi_emit(struct dmar_unit *unit, uint64_t data1, uint64_t data2) { DMAR_ASSERT_LOCKED(unit); *(volatile uint64_t *)(unit->inv_queue + unit->inv_queue_tail) = data1; unit->inv_queue_tail += DMAR_IQ_DESCR_SZ / 2; KASSERT(unit->inv_queue_tail <= unit->inv_queue_size, ("tail overflow 0x%x 0x%jx", unit->inv_queue_tail, (uintmax_t)unit->inv_queue_size)); unit->inv_queue_tail &= unit->inv_queue_size - 1; *(volatile uint64_t *)(unit->inv_queue + unit->inv_queue_tail) = data2; unit->inv_queue_tail += DMAR_IQ_DESCR_SZ / 2; KASSERT(unit->inv_queue_tail <= unit->inv_queue_size, ("tail overflow 0x%x 0x%jx", unit->inv_queue_tail, (uintmax_t)unit->inv_queue_size)); unit->inv_queue_tail &= unit->inv_queue_size - 1; } static void dmar_qi_emit_wait_descr(struct dmar_unit *unit, uint32_t seq, bool intr, bool memw, bool fence) { DMAR_ASSERT_LOCKED(unit); dmar_qi_emit(unit, DMAR_IQ_DESCR_WAIT_ID | (intr ? DMAR_IQ_DESCR_WAIT_IF : 0) | (memw ? DMAR_IQ_DESCR_WAIT_SW : 0) | (fence ? DMAR_IQ_DESCR_WAIT_FN : 0) | (memw ? DMAR_IQ_DESCR_WAIT_SD(seq) : 0), memw ? 
unit->inv_waitd_seq_hw_phys : 0); } static void dmar_qi_emit_wait_seq(struct dmar_unit *unit, struct dmar_qi_genseq *pseq) { struct dmar_qi_genseq gsec; uint32_t seq; KASSERT(pseq != NULL, ("wait descriptor with no place for seq")); DMAR_ASSERT_LOCKED(unit); if (unit->inv_waitd_seq == 0xffffffff) { gsec.gen = unit->inv_waitd_gen; gsec.seq = unit->inv_waitd_seq; dmar_qi_ensure(unit, 1); dmar_qi_emit_wait_descr(unit, gsec.seq, false, true, false); dmar_qi_advance_tail(unit); while (!dmar_qi_seq_processed(unit, &gsec)) cpu_spinwait(); unit->inv_waitd_gen++; unit->inv_waitd_seq = 1; } seq = unit->inv_waitd_seq++; pseq->gen = unit->inv_waitd_gen; pseq->seq = seq; dmar_qi_emit_wait_descr(unit, seq, true, true, false); } static void dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct dmar_qi_genseq *gseq, bool nowait) { DMAR_ASSERT_LOCKED(unit); unit->inv_seq_waiters++; while (!dmar_qi_seq_processed(unit, gseq)) { if (cold || nowait) { cpu_spinwait(); } else { msleep(&unit->inv_seq_waiters, &unit->lock, 0, "dmarse", hz); } } unit->inv_seq_waiters--; } void -dmar_qi_invalidate_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, +dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size, struct dmar_qi_genseq *pseq) { struct dmar_unit *unit; dmar_gaddr_t isize; int am; - unit = ctx->dmar; + unit = domain->dmar; DMAR_ASSERT_LOCKED(unit); for (; size > 0; base += isize, size -= isize) { am = calc_am(unit, base, size, &isize); dmar_qi_ensure(unit, 1); dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV | DMAR_IQ_DESCR_IOTLB_PAGE | DMAR_IQ_DESCR_IOTLB_DW | DMAR_IQ_DESCR_IOTLB_DR | - DMAR_IQ_DESCR_IOTLB_DID(ctx->domain), + DMAR_IQ_DESCR_IOTLB_DID(domain->domain), base | am); } if (pseq != NULL) { dmar_qi_ensure(unit, 1); dmar_qi_emit_wait_seq(unit, pseq); } dmar_qi_advance_tail(unit); } void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit) { struct dmar_qi_genseq gseq; DMAR_ASSERT_LOCKED(unit); dmar_qi_ensure(unit, 2); dmar_qi_emit(unit, DMAR_IQ_DESCR_CTX_INV | DMAR_IQ_DESCR_CTX_GLOB, 0); dmar_qi_emit_wait_seq(unit, &gseq); dmar_qi_advance_tail(unit); dmar_qi_wait_for_seq(unit, &gseq, false); } void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit) { struct dmar_qi_genseq gseq; DMAR_ASSERT_LOCKED(unit); dmar_qi_ensure(unit, 2); dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV | DMAR_IQ_DESCR_IOTLB_GLOB | DMAR_IQ_DESCR_IOTLB_DW | DMAR_IQ_DESCR_IOTLB_DR, 0); dmar_qi_emit_wait_seq(unit, &gseq); dmar_qi_advance_tail(unit); dmar_qi_wait_for_seq(unit, &gseq, false); } void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit) { struct dmar_qi_genseq gseq; DMAR_ASSERT_LOCKED(unit); dmar_qi_ensure(unit, 2); dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV, 0); dmar_qi_emit_wait_seq(unit, &gseq); dmar_qi_advance_tail(unit); dmar_qi_wait_for_seq(unit, &gseq, false); } void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt) { struct dmar_qi_genseq gseq; u_int c, l; DMAR_ASSERT_LOCKED(unit); KASSERT(start < unit->irte_cnt && start < start + cnt && start + cnt <= unit->irte_cnt, ("inv iec overflow %d %d %d", unit->irte_cnt, start, cnt)); for (; cnt > 0; cnt -= c, start += c) { l = ffs(start | cnt) - 1; c = 1 << l; dmar_qi_ensure(unit, 1); dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV | DMAR_IQ_DESCR_IEC_IDX | DMAR_IQ_DESCR_IEC_IIDX(start) | DMAR_IQ_DESCR_IEC_IM(l), 0); } dmar_qi_ensure(unit, 1); dmar_qi_emit_wait_seq(unit, &gseq); dmar_qi_advance_tail(unit); /* * The caller of the function, in particular, * dmar_ir_program_irte(), may be called from the context * where 
the sleeping is forbidden (in fact, the * intr_table_lock mutex may be held, locked from * intr_shuffle_irqs()). Wait for the invalidation completion * using the busy wait. * * The impact on the interrupt input setup code is small, the * expected overhead is comparable with the chipset register * read. It is more harmful for the parallel DMA operations, * since we own the dmar unit lock until whole invalidation * queue is processed, which includes requests possibly issued * before our request. */ dmar_qi_wait_for_seq(unit, &gseq, true); } int dmar_qi_intr(void *arg) { struct dmar_unit *unit; unit = arg; KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled", unit->unit)); taskqueue_enqueue_fast(unit->qi_taskqueue, &unit->qi_task); return (FILTER_HANDLED); } static void dmar_qi_task(void *arg, int pending __unused) { struct dmar_unit *unit; struct dmar_map_entry *entry; uint32_t ics; unit = arg; DMAR_LOCK(unit); for (;;) { entry = TAILQ_FIRST(&unit->tlb_flush_entries); if (entry == NULL) break; if ((entry->gseq.gen == 0 && entry->gseq.seq == 0) || !dmar_qi_seq_processed(unit, &entry->gseq)) break; TAILQ_REMOVE(&unit->tlb_flush_entries, entry, dmamap_link); DMAR_UNLOCK(unit); - dmar_ctx_free_entry(entry, (entry->flags & + dmar_domain_free_entry(entry, (entry->flags & DMAR_MAP_ENTRY_QI_NF) == 0); DMAR_LOCK(unit); } ics = dmar_read4(unit, DMAR_ICS_REG); if ((ics & DMAR_ICS_IWC) != 0) { ics = DMAR_ICS_IWC; dmar_write4(unit, DMAR_ICS_REG, ics); } if (unit->inv_seq_waiters > 0) wakeup(&unit->inv_seq_waiters); DMAR_UNLOCK(unit); } int dmar_init_qi(struct dmar_unit *unit) { uint64_t iqa; uint32_t ics; int qi_sz; if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0) return (0); unit->qi_enabled = 1; TUNABLE_INT_FETCH("hw.dmar.qi", &unit->qi_enabled); if (!unit->qi_enabled) return (0); TAILQ_INIT(&unit->tlb_flush_entries); TASK_INIT(&unit->qi_task, 0, dmar_qi_task, unit); unit->qi_taskqueue = taskqueue_create_fast("dmar", M_WAITOK, taskqueue_thread_enqueue, &unit->qi_taskqueue); taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV, "dmar%d qi taskq", unit->unit); unit->inv_waitd_gen = 0; unit->inv_waitd_seq = 1; qi_sz = DMAR_IQA_QS_DEF; TUNABLE_INT_FETCH("hw.dmar.qi_size", &qi_sz); if (qi_sz > DMAR_IQA_QS_MAX) qi_sz = DMAR_IQA_QS_MAX; unit->inv_queue_size = (1ULL << qi_sz) * PAGE_SIZE; /* Reserve one descriptor to prevent wraparound. */ unit->inv_queue_avail = unit->inv_queue_size - DMAR_IQ_DESCR_SZ; /* The invalidation queue reads by DMARs are always coherent. 
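 * The queue therefore lives in ordinary write-back memory,
 * allocated below with VM_MEMATTR_DEFAULT, and newly written
 * descriptors do not require the explicit CPU cache flushing that
 * the page-table pages may need on non-coherent units.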
*/ unit->inv_queue = kmem_alloc_contig(kernel_arena, unit->inv_queue_size, M_WAITOK | M_ZERO, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT); unit->inv_waitd_seq_hw_phys = pmap_kextract( (vm_offset_t)&unit->inv_waitd_seq_hw); DMAR_LOCK(unit); dmar_write8(unit, DMAR_IQT_REG, 0); iqa = pmap_kextract(unit->inv_queue); iqa |= qi_sz; dmar_write8(unit, DMAR_IQA_REG, iqa); dmar_enable_qi(unit); ics = dmar_read4(unit, DMAR_ICS_REG); if ((ics & DMAR_ICS_IWC) != 0) { ics = DMAR_ICS_IWC; dmar_write4(unit, DMAR_ICS_REG, ics); } dmar_enable_qi_intr(unit); DMAR_UNLOCK(unit); return (0); } void dmar_fini_qi(struct dmar_unit *unit) { struct dmar_qi_genseq gseq; if (unit->qi_enabled) return; taskqueue_drain(unit->qi_taskqueue, &unit->qi_task); taskqueue_free(unit->qi_taskqueue); unit->qi_taskqueue = NULL; DMAR_LOCK(unit); /* quisce */ dmar_qi_ensure(unit, 1); dmar_qi_emit_wait_seq(unit, &gseq); dmar_qi_advance_tail(unit); dmar_qi_wait_for_seq(unit, &gseq, false); /* only after the quisce, disable queue */ dmar_disable_qi_intr(unit); dmar_disable_qi(unit); KASSERT(unit->inv_seq_waiters == 0, ("dmar%d: waiters on disabled queue", unit->unit)); DMAR_UNLOCK(unit); kmem_free(kernel_arena, unit->inv_queue, unit->inv_queue_size); unit->inv_queue = 0; unit->inv_queue_size = 0; unit->qi_enabled = 0; } void dmar_enable_qi_intr(struct dmar_unit *unit) { uint32_t iectl; DMAR_ASSERT_LOCKED(unit); KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit)); iectl = dmar_read4(unit, DMAR_IECTL_REG); iectl &= ~DMAR_IECTL_IM; dmar_write4(unit, DMAR_IECTL_REG, iectl); } void dmar_disable_qi_intr(struct dmar_unit *unit) { uint32_t iectl; DMAR_ASSERT_LOCKED(unit); KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit)); iectl = dmar_read4(unit, DMAR_IECTL_REG); dmar_write4(unit, DMAR_IECTL_REG, iectl | DMAR_IECTL_IM); } Index: head/sys/x86/iommu/intel_utils.c =================================================================== --- head/sys/x86/iommu/intel_utils.c (revision 284868) +++ head/sys/x86/iommu/intel_utils.c (revision 284869) @@ -1,641 +1,635 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include u_int dmar_nd2mask(u_int nd) { static const u_int masks[] = { 0x000f, /* nd == 0 */ 0x002f, /* nd == 1 */ 0x00ff, /* nd == 2 */ 0x02ff, /* nd == 3 */ 0x0fff, /* nd == 4 */ 0x2fff, /* nd == 5 */ 0xffff, /* nd == 6 */ 0x0000, /* nd == 7 reserved */ }; KASSERT(nd <= 6, ("number of domains %d", nd)); return (masks[nd]); } static const struct sagaw_bits_tag { int agaw; int cap; int awlvl; int pglvl; } sagaw_bits[] = { {.agaw = 30, .cap = DMAR_CAP_SAGAW_2LVL, .awlvl = DMAR_CTX2_AW_2LVL, .pglvl = 2}, {.agaw = 39, .cap = DMAR_CAP_SAGAW_3LVL, .awlvl = DMAR_CTX2_AW_3LVL, .pglvl = 3}, {.agaw = 48, .cap = DMAR_CAP_SAGAW_4LVL, .awlvl = DMAR_CTX2_AW_4LVL, .pglvl = 4}, {.agaw = 57, .cap = DMAR_CAP_SAGAW_5LVL, .awlvl = DMAR_CTX2_AW_5LVL, .pglvl = 5}, {.agaw = 64, .cap = DMAR_CAP_SAGAW_6LVL, .awlvl = DMAR_CTX2_AW_6LVL, .pglvl = 6} }; -#define SIZEOF_SAGAW_BITS (sizeof(sagaw_bits) / sizeof(sagaw_bits[0])) bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl) { int i; - for (i = 0; i < SIZEOF_SAGAW_BITS; i++) { + for (i = 0; i < nitems(sagaw_bits); i++) { if (sagaw_bits[i].pglvl != pglvl) continue; if ((DMAR_CAP_SAGAW(unit->hw_cap) & sagaw_bits[i].cap) != 0) return (true); } return (false); } int -ctx_set_agaw(struct dmar_ctx *ctx, int mgaw) +domain_set_agaw(struct dmar_domain *domain, int mgaw) { int sagaw, i; - ctx->mgaw = mgaw; - sagaw = DMAR_CAP_SAGAW(ctx->dmar->hw_cap); - for (i = 0; i < SIZEOF_SAGAW_BITS; i++) { + domain->mgaw = mgaw; + sagaw = DMAR_CAP_SAGAW(domain->dmar->hw_cap); + for (i = 0; i < nitems(sagaw_bits); i++) { if (sagaw_bits[i].agaw >= mgaw) { - ctx->agaw = sagaw_bits[i].agaw; - ctx->pglvl = sagaw_bits[i].pglvl; - ctx->awlvl = sagaw_bits[i].awlvl; + domain->agaw = sagaw_bits[i].agaw; + domain->pglvl = sagaw_bits[i].pglvl; + domain->awlvl = sagaw_bits[i].awlvl; return (0); } } - device_printf(ctx->dmar->dev, - "context request mgaw %d for pci%d:%d:%d:%d, " - "no agaw found, sagaw %x\n", mgaw, ctx->dmar->segment, - pci_get_bus(ctx->ctx_tag.owner), - pci_get_slot(ctx->ctx_tag.owner), - pci_get_function(ctx->ctx_tag.owner), sagaw); + device_printf(domain->dmar->dev, + "context request mgaw %d: no agaw found, sagaw %x\n", + mgaw, sagaw); return (EINVAL); } /* * Find a best fit mgaw for the given maxaddr: * - if allow_less is false, must find sagaw which maps all requested * addresses (used by identity mappings); * - if allow_less is true, and no supported sagaw can map all requested * address space, accept the biggest sagaw, whatever is it. 
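 * For example, on a unit whose SAGAW field reports only the 39- and 48-bit widths, a maxaddr above 1ULL << 48 yields 48 when allow_less is true; with allow_less false no supported width fits, the KASSERT below fires under INVARIANTS, and -1 is returned.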
*/ int dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr, bool allow_less) { int i; - for (i = 0; i < SIZEOF_SAGAW_BITS; i++) { + for (i = 0; i < nitems(sagaw_bits); i++) { if ((1ULL << sagaw_bits[i].agaw) >= maxaddr && (DMAR_CAP_SAGAW(unit->hw_cap) & sagaw_bits[i].cap) != 0) break; } - if (allow_less && i == SIZEOF_SAGAW_BITS) { + if (allow_less && i == nitems(sagaw_bits)) { do { i--; } while ((DMAR_CAP_SAGAW(unit->hw_cap) & sagaw_bits[i].cap) == 0); } - if (i < SIZEOF_SAGAW_BITS) + if (i < nitems(sagaw_bits)) return (sagaw_bits[i].agaw); KASSERT(0, ("no mgaw for maxaddr %jx allow_less %d", (uintmax_t) maxaddr, allow_less)); return (-1); } /* * Calculate the total amount of page table pages needed to map the * whole bus address space on the context with the selected agaw. */ vm_pindex_t pglvl_max_pages(int pglvl) { vm_pindex_t res; int i; for (res = 0, i = pglvl; i > 0; i--) { res *= DMAR_NPTEPG; res++; } return (res); } /* * Return true if the page table level lvl supports the superpage for * the context ctx. */ int -ctx_is_sp_lvl(struct dmar_ctx *ctx, int lvl) +domain_is_sp_lvl(struct dmar_domain *domain, int lvl) { int alvl, cap_sps; static const int sagaw_sp[] = { DMAR_CAP_SPS_2M, DMAR_CAP_SPS_1G, DMAR_CAP_SPS_512G, DMAR_CAP_SPS_1T }; - alvl = ctx->pglvl - lvl - 1; - cap_sps = DMAR_CAP_SPS(ctx->dmar->hw_cap); - return (alvl < sizeof(sagaw_sp) / sizeof(sagaw_sp[0]) && - (sagaw_sp[alvl] & cap_sps) != 0); + alvl = domain->pglvl - lvl - 1; + cap_sps = DMAR_CAP_SPS(domain->dmar->hw_cap); + return (alvl < nitems(sagaw_sp) && (sagaw_sp[alvl] & cap_sps) != 0); } dmar_gaddr_t pglvl_page_size(int total_pglvl, int lvl) { int rlvl; static const dmar_gaddr_t pg_sz[] = { (dmar_gaddr_t)DMAR_PAGE_SIZE, (dmar_gaddr_t)DMAR_PAGE_SIZE << DMAR_NPTEPGSHIFT, (dmar_gaddr_t)DMAR_PAGE_SIZE << (2 * DMAR_NPTEPGSHIFT), (dmar_gaddr_t)DMAR_PAGE_SIZE << (3 * DMAR_NPTEPGSHIFT), (dmar_gaddr_t)DMAR_PAGE_SIZE << (4 * DMAR_NPTEPGSHIFT), (dmar_gaddr_t)DMAR_PAGE_SIZE << (5 * DMAR_NPTEPGSHIFT) }; KASSERT(lvl >= 0 && lvl < total_pglvl, ("total %d lvl %d", total_pglvl, lvl)); rlvl = total_pglvl - lvl - 1; - KASSERT(rlvl < sizeof(pg_sz) / sizeof(pg_sz[0]), - ("sizeof pg_sz lvl %d", lvl)); + KASSERT(rlvl < nitems(pg_sz), ("sizeof pg_sz lvl %d", lvl)); return (pg_sz[rlvl]); } dmar_gaddr_t -ctx_page_size(struct dmar_ctx *ctx, int lvl) +domain_page_size(struct dmar_domain *domain, int lvl) { - return (pglvl_page_size(ctx->pglvl, lvl)); + return (pglvl_page_size(domain->pglvl, lvl)); } int calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size, dmar_gaddr_t *isizep) { dmar_gaddr_t isize; int am; for (am = DMAR_CAP_MAMV(unit->hw_cap);; am--) { isize = 1ULL << (am + DMAR_PAGE_SHIFT); if ((base & (isize - 1)) == 0 && size >= isize) break; if (am == 0) break; } *isizep = isize; return (am); } dmar_haddr_t dmar_high; int haw; int dmar_tbl_pagecnt; vm_page_t dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags) { vm_page_t m; int zeroed; zeroed = (flags & DMAR_PGF_ZERO) != 0 ? 
VM_ALLOC_ZERO : 0; for (;;) { if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WLOCK(obj); m = vm_page_lookup(obj, idx); if ((flags & DMAR_PGF_NOALLOC) != 0 || m != NULL) { if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WUNLOCK(obj); break; } m = vm_page_alloc_contig(obj, idx, VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM | VM_ALLOC_NODUMP | zeroed, 1, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT); if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WUNLOCK(obj); if (m != NULL) { if (zeroed && (m->flags & PG_ZERO) == 0) pmap_zero_page(m); atomic_add_int(&dmar_tbl_pagecnt, 1); break; } if ((flags & DMAR_PGF_WAITOK) == 0) break; if ((flags & DMAR_PGF_OBJL) != 0) VM_OBJECT_WUNLOCK(obj); VM_WAIT; if ((flags & DMAR_PGF_OBJL) != 0) VM_OBJECT_WLOCK(obj); } return (m); } void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags) { vm_page_t m; if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WLOCK(obj); m = vm_page_lookup(obj, idx); if (m != NULL) { vm_page_free(m); atomic_subtract_int(&dmar_tbl_pagecnt, 1); } if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WUNLOCK(obj); } void * dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags, struct sf_buf **sf) { vm_page_t m; bool allocated; if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WLOCK(obj); m = vm_page_lookup(obj, idx); if (m == NULL && (flags & DMAR_PGF_ALLOC) != 0) { m = dmar_pgalloc(obj, idx, flags | DMAR_PGF_OBJL); allocated = true; } else allocated = false; if (m == NULL) { if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WUNLOCK(obj); return (NULL); } /* Sleepable allocations cannot fail. */ if ((flags & DMAR_PGF_WAITOK) != 0) VM_OBJECT_WUNLOCK(obj); sched_pin(); *sf = sf_buf_alloc(m, SFB_CPUPRIVATE | ((flags & DMAR_PGF_WAITOK) == 0 ? SFB_NOWAIT : 0)); if (*sf == NULL) { sched_unpin(); if (allocated) { VM_OBJECT_ASSERT_WLOCKED(obj); dmar_pgfree(obj, m->pindex, flags | DMAR_PGF_OBJL); } if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WUNLOCK(obj); return (NULL); } if ((flags & (DMAR_PGF_WAITOK | DMAR_PGF_OBJL)) == (DMAR_PGF_WAITOK | DMAR_PGF_OBJL)) VM_OBJECT_WLOCK(obj); else if ((flags & (DMAR_PGF_WAITOK | DMAR_PGF_OBJL)) == 0) VM_OBJECT_WUNLOCK(obj); return ((void *)sf_buf_kva(*sf)); } void dmar_unmap_pgtbl(struct sf_buf *sf) { sf_buf_free(sf); sched_unpin(); } static void dmar_flush_transl_to_ram(struct dmar_unit *unit, void *dst, size_t sz) { if (DMAR_IS_COHERENT(unit)) return; /* * If DMAR does not snoop paging structures accesses, flush * CPU cache to memory. */ pmap_invalidate_cache_range((uintptr_t)dst, (uintptr_t)dst + sz, TRUE); } void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst) { dmar_flush_transl_to_ram(unit, dst, sizeof(*dst)); } void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst) { dmar_flush_transl_to_ram(unit, dst, sizeof(*dst)); } void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst) { dmar_flush_transl_to_ram(unit, dst, sizeof(*dst)); } /* * Load the root entry pointer into the hardware, busily waiting for * the completion. */ int dmar_load_root_entry_ptr(struct dmar_unit *unit) { vm_page_t root_entry; /* * Access to the GCMD register must be serialized while the * command is submitted. 
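 * This is why DMAR_ASSERT_LOCKED() follows: the caller must hold the unit lock across both the GCMD_SRTP write and the GSTS_RTPS poll.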
*/ DMAR_ASSERT_LOCKED(unit); VM_OBJECT_RLOCK(unit->ctx_obj); root_entry = vm_page_lookup(unit->ctx_obj, 0); VM_OBJECT_RUNLOCK(unit->ctx_obj); dmar_write8(unit, DMAR_RTADDR_REG, VM_PAGE_TO_PHYS(root_entry)); dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd | DMAR_GCMD_SRTP); /* XXXKIB should have a timeout */ while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_RTPS) == 0) cpu_spinwait(); return (0); } /* * Globally invalidate the context entries cache, busily waiting for * the completion. */ int dmar_inv_ctx_glob(struct dmar_unit *unit) { /* * Access to the CCMD register must be serialized while the * command is submitted. */ DMAR_ASSERT_LOCKED(unit); KASSERT(!unit->qi_enabled, ("QI enabled")); /* * The DMAR_CCMD_ICC bit in the upper dword should be written * after the low dword write is completed. Amd64 * dmar_write8() does not have this issue, i386 dmar_write8() * writes the upper dword last. */ dmar_write8(unit, DMAR_CCMD_REG, DMAR_CCMD_ICC | DMAR_CCMD_CIRG_GLOB); /* XXXKIB should have a timeout */ while ((dmar_read4(unit, DMAR_CCMD_REG + 4) & DMAR_CCMD_ICC32) != 0) cpu_spinwait(); return (0); } /* * Globally invalidate the IOTLB, busily waiting for the completion. */ int dmar_inv_iotlb_glob(struct dmar_unit *unit) { int reg; DMAR_ASSERT_LOCKED(unit); KASSERT(!unit->qi_enabled, ("QI enabled")); reg = 16 * DMAR_ECAP_IRO(unit->hw_ecap); /* See a comment about DMAR_CCMD_ICC in dmar_inv_ctx_glob. */ dmar_write8(unit, reg + DMAR_IOTLB_REG_OFF, DMAR_IOTLB_IVT | DMAR_IOTLB_IIRG_GLB | DMAR_IOTLB_DR | DMAR_IOTLB_DW); /* XXXKIB should have a timeout */ while ((dmar_read4(unit, reg + DMAR_IOTLB_REG_OFF + 4) & DMAR_IOTLB_IVT32) != 0) cpu_spinwait(); return (0); } /* * Flush the chipset write buffers. See 11.1 "Write Buffer Flushing" * in the architecture specification. */ int dmar_flush_write_bufs(struct dmar_unit *unit) { DMAR_ASSERT_LOCKED(unit); /* * DMAR_GCMD_WBF is only valid when CAP_RWBF is reported. 
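 * Units that do not report RWBF keep their internal write buffers coherent without software help, so callers are expected to check DMAR_CAP_RWBF before requesting a flush; the KASSERT below enforces that.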
*/ KASSERT((unit->hw_cap & DMAR_CAP_RWBF) != 0, ("dmar%d: no RWBF", unit->unit)); dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd | DMAR_GCMD_WBF); /* XXXKIB should have a timeout */ while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_WBFS) == 0) cpu_spinwait(); return (0); } int dmar_enable_translation(struct dmar_unit *unit) { DMAR_ASSERT_LOCKED(unit); unit->hw_gcmd |= DMAR_GCMD_TE; dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd); /* XXXKIB should have a timeout */ while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_TES) == 0) cpu_spinwait(); return (0); } int dmar_disable_translation(struct dmar_unit *unit) { DMAR_ASSERT_LOCKED(unit); unit->hw_gcmd &= ~DMAR_GCMD_TE; dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd); /* XXXKIB should have a timeout */ while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_TES) != 0) cpu_spinwait(); return (0); } int dmar_load_irt_ptr(struct dmar_unit *unit) { uint64_t irta, s; DMAR_ASSERT_LOCKED(unit); irta = unit->irt_phys; if (DMAR_X2APIC(unit)) irta |= DMAR_IRTA_EIME; s = fls(unit->irte_cnt) - 2; KASSERT(unit->irte_cnt >= 2 && s <= DMAR_IRTA_S_MASK && powerof2(unit->irte_cnt), ("IRTA_REG_S overflow %x", unit->irte_cnt)); irta |= s; dmar_write8(unit, DMAR_IRTA_REG, irta); dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd | DMAR_GCMD_SIRTP); /* XXXKIB should have a timeout */ while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_IRTPS) == 0) cpu_spinwait(); return (0); } int dmar_enable_ir(struct dmar_unit *unit) { DMAR_ASSERT_LOCKED(unit); unit->hw_gcmd |= DMAR_GCMD_IRE; unit->hw_gcmd &= ~DMAR_GCMD_CFI; dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd); /* XXXKIB should have a timeout */ while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_IRES) == 0) cpu_spinwait(); return (0); } int dmar_disable_ir(struct dmar_unit *unit) { DMAR_ASSERT_LOCKED(unit); unit->hw_gcmd &= ~DMAR_GCMD_IRE; dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd); /* XXXKIB should have a timeout */ while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_IRES) != 0) cpu_spinwait(); return (0); } #define BARRIER_F \ u_int f_done, f_inproc, f_wakeup; \ \ f_done = 1 << (barrier_id * 3); \ f_inproc = 1 << (barrier_id * 3 + 1); \ f_wakeup = 1 << (barrier_id * 3 + 2) bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id) { BARRIER_F; DMAR_LOCK(dmar); if ((dmar->barrier_flags & f_done) != 0) { DMAR_UNLOCK(dmar); return (false); } if ((dmar->barrier_flags & f_inproc) != 0) { while ((dmar->barrier_flags & f_inproc) != 0) { dmar->barrier_flags |= f_wakeup; msleep(&dmar->barrier_flags, &dmar->lock, 0, "dmarb", 0); } KASSERT((dmar->barrier_flags & f_done) != 0, ("dmar%d barrier %d missing done", dmar->unit, barrier_id)); DMAR_UNLOCK(dmar); return (false); } dmar->barrier_flags |= f_inproc; DMAR_UNLOCK(dmar); return (true); } void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id) { BARRIER_F; DMAR_ASSERT_LOCKED(dmar); KASSERT((dmar->barrier_flags & (f_done | f_inproc)) == f_inproc, ("dmar%d barrier %d missed entry", dmar->unit, barrier_id)); dmar->barrier_flags |= f_done; if ((dmar->barrier_flags & f_wakeup) != 0) wakeup(&dmar->barrier_flags); dmar->barrier_flags &= ~(f_inproc | f_wakeup); DMAR_UNLOCK(dmar); } int dmar_match_verbose; static SYSCTL_NODE(_hw, OID_AUTO, dmar, CTLFLAG_RD, NULL, ""); SYSCTL_INT(_hw_dmar, OID_AUTO, tbl_pagecnt, CTLFLAG_RD, &dmar_tbl_pagecnt, 0, "Count of pages used for DMAR pagetables"); SYSCTL_INT(_hw_dmar, OID_AUTO, match_verbose, CTLFLAG_RWTUN, &dmar_match_verbose, 0, "Verbose matching of the PCI devices to DMAR paths"); #ifdef INVARIANTS int dmar_check_free; 
SYSCTL_INT(_hw_dmar, OID_AUTO, check_free, CTLFLAG_RWTUN, &dmar_check_free, 0, "Check the GPA RBtree for free_down and free_after validity"); #endif
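/*
 * Illustrative sketch (not part of this commit): the nitems() idiom that
 * this revision adopts in intel_utils.c in place of hand-rolled macros
 * such as SIZEOF_SAGAW_BITS.  The table and function names below are
 * made up for the example; on FreeBSD nitems() is provided by
 * <sys/param.h>, and a portable fallback is supplied so the fragment
 * compiles on its own.
 */
#include <sys/param.h>
#include <stddef.h>
#ifndef nitems
#define	nitems(x)	(sizeof((x)) / sizeof((x)[0]))
#endif

static const struct {
	int	agaw;	/* guest address width, in bits */
	int	pglvl;	/* corresponding page-table levels */
} example_bits[] = {
	{ .agaw = 39, .pglvl = 3 },
	{ .agaw = 48, .pglvl = 4 },
};

/*
 * Walk the table with nitems() as the loop bound, the same way
 * domain_set_agaw() and dmar_maxaddr2mgaw() do above.  Returns the
 * page-table level for agaw, or -1 if the width is not listed.
 */
static int
example_agaw_to_pglvl(int agaw)
{
	size_t i;

	for (i = 0; i < nitems(example_bits); i++) {
		if (example_bits[i].agaw == agaw)
			return (example_bits[i].pglvl);
	}
	return (-1);
}
/* Usage: example_agaw_to_pglvl(48) == 4, example_agaw_to_pglvl(57) == -1. */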