diff --git a/sys/x86/iommu/intel_ctx.c b/sys/x86/iommu/intel_ctx.c index 90c883b28ea6..65ca88b052ed 100644 --- a/sys/x86/iommu/intel_ctx.c +++ b/sys/x86/iommu/intel_ctx.c @@ -1,981 +1,984 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 The FreeBSD Foundation * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context"); static MALLOC_DEFINE(M_DMAR_DOMAIN, "dmar_dom", "Intel DMAR Domain"); static void dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain); static void dmar_domain_destroy(struct dmar_domain *domain); static void dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus) { struct sf_buf *sf; dmar_root_entry_t *re; vm_page_t ctxm; /* * Allocated context page must be linked. */ ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_NOALLOC); if (ctxm != NULL) return; /* * Page not present, allocate and link. Note that other * thread might execute this sequence in parallel. This * should be safe, because the context entries written by both * threads are equal. 
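	 * (Both racing threads obtain the same context page for the bus from
	 * ctx_obj, so they write identical values into the root entry.)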
*/ TD_PREP_PINNED_ASSERT; ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_ZERO | IOMMU_PGF_WAITOK); re = dmar_map_pgtbl(dmar->ctx_obj, 0, IOMMU_PGF_NOALLOC, &sf); re += bus; dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK & VM_PAGE_TO_PHYS(ctxm))); dmar_flush_root_to_ram(dmar, re); dmar_unmap_pgtbl(sf); TD_PINNED_ASSERT; } static dmar_ctx_entry_t * dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp) { struct dmar_unit *dmar; dmar_ctx_entry_t *ctxp; dmar = CTX2DMAR(ctx); ctxp = dmar_map_pgtbl(dmar->ctx_obj, 1 + PCI_RID2BUS(ctx->context.rid), IOMMU_PGF_NOALLOC | IOMMU_PGF_WAITOK, sfp); ctxp += ctx->context.rid & 0xff; return (ctxp); } static void device_tag_init(struct dmar_ctx *ctx, device_t dev) { struct dmar_domain *domain; bus_addr_t maxaddr; domain = CTX2DOM(ctx); maxaddr = MIN(domain->iodom.end, BUS_SPACE_MAXADDR); ctx->context.tag->common.ref_count = 1; /* Prevent free */ ctx->context.tag->common.impl = &bus_dma_iommu_impl; ctx->context.tag->common.boundary = 0; ctx->context.tag->common.lowaddr = maxaddr; ctx->context.tag->common.highaddr = maxaddr; ctx->context.tag->common.maxsize = maxaddr; ctx->context.tag->common.nsegments = BUS_SPACE_UNRESTRICTED; ctx->context.tag->common.maxsegsz = maxaddr; ctx->context.tag->ctx = CTX2IOCTX(ctx); ctx->context.tag->owner = dev; } static void ctx_id_entry_init_one(dmar_ctx_entry_t *ctxp, struct dmar_domain *domain, vm_page_t ctx_root) { /* * For update due to move, the store is not atomic. It is * possible that DMAR read upper doubleword, while low * doubleword is not yet updated. The domain id is stored in * the upper doubleword, while the table pointer in the lower. * * There is no good solution, for the same reason it is wrong * to clear P bit in the ctx entry for update. */ dmar_pte_store1(&ctxp->ctx2, DMAR_CTX2_DID(domain->domain) | domain->awlvl); if (ctx_root == NULL) { dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P); } else { dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_UNTR | (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) | DMAR_CTX1_P); } } static void ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move, int busno) { struct dmar_unit *unit; struct dmar_domain *domain; vm_page_t ctx_root; int i; domain = CTX2DOM(ctx); unit = DOM2DMAR(domain); KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0), ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx", unit->iommu.unit, busno, pci_get_slot(ctx->context.tag->owner), pci_get_function(ctx->context.tag->owner), ctxp->ctx1, ctxp->ctx2)); if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 && (unit->hw_ecap & DMAR_ECAP_PT) != 0) { KASSERT(domain->pgtbl_obj == NULL, ("ctx %p non-null pgtbl_obj", ctx)); ctx_root = NULL; } else { ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0, IOMMU_PGF_NOALLOC); } if (iommu_is_buswide_ctx(DMAR2IOMMU(unit), busno)) { MPASS(!move); for (i = 0; i <= PCI_BUSMAX; i++) { ctx_id_entry_init_one(&ctxp[i], domain, ctx_root); } } else { ctx_id_entry_init_one(ctxp, domain, ctx_root); } dmar_flush_ctx_to_ram(unit, ctxp); } static int dmar_flush_for_ctx_entry(struct dmar_unit *dmar, bool force) { int error; /* * If dmar declares Caching Mode as Set, follow 11.5 "Caching * Mode Consideration" and do the (global) invalidation of the * negative TLB entries. 
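	 * The force argument requests the invalidation even when CM is clear,
	 * e.g. when translation is about to be enabled for the first context
	 * or when a context is moved to another domain.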
*/ if ((dmar->hw_cap & DMAR_CAP_CM) == 0 && !force) return (0); if (dmar->qi_enabled) { dmar_qi_invalidate_ctx_glob_locked(dmar); if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force) dmar_qi_invalidate_iotlb_glob_locked(dmar); return (0); } error = dmar_inv_ctx_glob(dmar); if (error == 0 && ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force)) error = dmar_inv_iotlb_glob(dmar); return (error); } static int domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus, int slot, int func, int dev_domain, int dev_busno, const void *dev_path, int dev_path_len) { struct iommu_map_entries_tailq rmrr_entries; struct iommu_map_entry *entry, *entry1; vm_page_t *ma; iommu_gaddr_t start, end; vm_pindex_t size, i; int error, error1; + if (!dmar_rmrr_enable) + return (0); + error = 0; TAILQ_INIT(&rmrr_entries); dmar_dev_parse_rmrr(domain, dev_domain, dev_busno, dev_path, dev_path_len, &rmrr_entries); TAILQ_FOREACH_SAFE(entry, &rmrr_entries, dmamap_link, entry1) { /* * VT-d specification requires that the start of an * RMRR entry is 4k-aligned. Buggy BIOSes put * anything into the start and end fields. Truncate * and round as neccesary. * * We also allow the overlapping RMRR entries, see * iommu_gas_alloc_region(). */ start = entry->start; end = entry->end; if (bootverbose) printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n", domain->iodom.iommu->unit, bus, slot, func, (uintmax_t)start, (uintmax_t)end); entry->start = trunc_page(start); entry->end = round_page(end); if (entry->start == entry->end) { /* Workaround for some AMI (?) BIOSes */ if (bootverbose) { if (dev != NULL) device_printf(dev, ""); printf("pci%d:%d:%d ", bus, slot, func); printf("BIOS bug: dmar%d RMRR " "region (%jx, %jx) corrected\n", domain->iodom.iommu->unit, start, end); } entry->end += DMAR_PAGE_SIZE * 0x20; } size = OFF_TO_IDX(entry->end - entry->start); ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK); for (i = 0; i < size; i++) { ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i, VM_MEMATTR_DEFAULT); } error1 = iommu_gas_map_region(DOM2IODOM(domain), entry, IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE, IOMMU_MF_CANWAIT | IOMMU_MF_RMRR, ma); /* * Non-failed RMRR entries are owned by context rb * tree. Get rid of the failed entry, but do not stop * the loop. Rest of the parsed RMRR entries are * loaded and removed on the context destruction. */ if (error1 == 0 && entry->end != entry->start) { IOMMU_LOCK(domain->iodom.iommu); domain->refs++; /* XXXKIB prevent free */ domain->iodom.flags |= IOMMU_DOMAIN_RMRR; IOMMU_UNLOCK(domain->iodom.iommu); } else { if (error1 != 0) { if (dev != NULL) device_printf(dev, ""); printf("pci%d:%d:%d ", bus, slot, func); printf( "dmar%d failed to map RMRR region (%jx, %jx) %d\n", domain->iodom.iommu->unit, start, end, error1); error = error1; } TAILQ_REMOVE(&rmrr_entries, entry, dmamap_link); iommu_gas_free_entry(entry); } for (i = 0; i < size; i++) vm_page_putfake(ma[i]); free(ma, M_TEMP); } return (error); } /* * PCI memory address space is shared between memory-mapped devices (MMIO) and * host memory (which may be remapped by an IOMMU). Device accesses to an * address within a memory aperture in a PCIe root port will be treated as * peer-to-peer and not forwarded to an IOMMU. To avoid this, reserve the * address space of the root port's memory apertures in the address space used * by the IOMMU for remapping. 
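 * Both the non-prefetchable and the prefetchable memory windows of the
 * root port are reserved below.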
*/ static int dmar_reserve_pci_regions(struct dmar_domain *domain, device_t dev) { struct iommu_domain *iodom; device_t root; uint32_t val; uint64_t base, limit; int error; iodom = DOM2IODOM(domain); root = pci_find_pcie_root_port(dev); if (root == NULL) return (0); /* Disable downstream memory */ base = PCI_PPBMEMBASE(0, pci_read_config(root, PCIR_MEMBASE_1, 2)); limit = PCI_PPBMEMLIMIT(0, pci_read_config(root, PCIR_MEMLIMIT_1, 2)); error = iommu_gas_reserve_region_extend(iodom, base, limit + 1); if (bootverbose || error != 0) device_printf(dev, "DMAR reserve [%#jx-%#jx] (error %d)\n", base, limit + 1, error); if (error != 0) return (error); /* Disable downstream prefetchable memory */ val = pci_read_config(root, PCIR_PMBASEL_1, 2); if (val != 0 || pci_read_config(root, PCIR_PMLIMITL_1, 2) != 0) { if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) { base = PCI_PPBMEMBASE( pci_read_config(root, PCIR_PMBASEH_1, 4), val); limit = PCI_PPBMEMLIMIT( pci_read_config(root, PCIR_PMLIMITH_1, 4), pci_read_config(root, PCIR_PMLIMITL_1, 2)); } else { base = PCI_PPBMEMBASE(0, val); limit = PCI_PPBMEMLIMIT(0, pci_read_config(root, PCIR_PMLIMITL_1, 2)); } error = iommu_gas_reserve_region_extend(iodom, base, limit + 1); if (bootverbose || error != 0) device_printf(dev, "DMAR reserve [%#jx-%#jx] " "(error %d)\n", base, limit + 1, error); if (error != 0) return (error); } return (error); } static struct dmar_domain * dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped) { struct iommu_domain *iodom; struct iommu_unit *unit; struct dmar_domain *domain; int error, id, mgaw; id = alloc_unr(dmar->domids); if (id == -1) return (NULL); domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO); iodom = DOM2IODOM(domain); unit = DMAR2IOMMU(dmar); domain->domain = id; LIST_INIT(&domain->contexts); iommu_domain_init(unit, iodom, &dmar_domain_map_ops); domain->dmar = dmar; /* * For now, use the maximal usable physical address of the * installed memory to calculate the mgaw on id_mapped domain. * It is useful for the identity mapping, and less so for the * virtualized bus address space. */ domain->iodom.end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR; mgaw = dmar_maxaddr2mgaw(dmar, domain->iodom.end, !id_mapped); error = domain_set_agaw(domain, mgaw); if (error != 0) goto fail; if (!id_mapped) /* Use all supported address space for remapping. 
*/ domain->iodom.end = 1ULL << (domain->agaw - 1); iommu_gas_init_domain(DOM2IODOM(domain)); if (id_mapped) { if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) { domain->pgtbl_obj = domain_get_idmap_pgtbl(domain, domain->iodom.end); } domain->iodom.flags |= IOMMU_DOMAIN_IDMAP; } else { error = domain_alloc_pgtbl(domain); if (error != 0) goto fail; /* Disable local apic region access */ error = iommu_gas_reserve_region(iodom, 0xfee00000, 0xfeefffff + 1, &iodom->msi_entry); if (error != 0) goto fail; } return (domain); fail: dmar_domain_destroy(domain); return (NULL); } static struct dmar_ctx * dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid) { struct dmar_ctx *ctx; ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO); ctx->context.domain = DOM2IODOM(domain); ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu), M_DMAR_CTX, M_WAITOK | M_ZERO); ctx->context.rid = rid; ctx->refs = 1; return (ctx); } static void dmar_ctx_link(struct dmar_ctx *ctx) { struct dmar_domain *domain; domain = CTX2DOM(ctx); IOMMU_ASSERT_LOCKED(domain->iodom.iommu); KASSERT(domain->refs >= domain->ctx_cnt, ("dom %p ref underflow %d %d", domain, domain->refs, domain->ctx_cnt)); domain->refs++; domain->ctx_cnt++; LIST_INSERT_HEAD(&domain->contexts, ctx, link); } static void dmar_ctx_unlink(struct dmar_ctx *ctx) { struct dmar_domain *domain; domain = CTX2DOM(ctx); IOMMU_ASSERT_LOCKED(domain->iodom.iommu); KASSERT(domain->refs > 0, ("domain %p ctx dtr refs %d", domain, domain->refs)); KASSERT(domain->ctx_cnt >= domain->refs, ("domain %p ctx dtr refs %d ctx_cnt %d", domain, domain->refs, domain->ctx_cnt)); domain->refs--; domain->ctx_cnt--; LIST_REMOVE(ctx, link); } static void dmar_domain_destroy(struct dmar_domain *domain) { struct iommu_domain *iodom; struct dmar_unit *dmar; iodom = DOM2IODOM(domain); KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries), ("unfinished unloads %p", domain)); KASSERT(LIST_EMPTY(&domain->contexts), ("destroying dom %p with contexts", domain)); KASSERT(domain->ctx_cnt == 0, ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt)); KASSERT(domain->refs == 0, ("destroying dom %p with refs %d", domain, domain->refs)); if ((domain->iodom.flags & IOMMU_DOMAIN_GAS_INITED) != 0) { DMAR_DOMAIN_LOCK(domain); iommu_gas_fini_domain(iodom); DMAR_DOMAIN_UNLOCK(domain); } if ((domain->iodom.flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) { if (domain->pgtbl_obj != NULL) DMAR_DOMAIN_PGLOCK(domain); domain_free_pgtbl(domain); } iommu_domain_fini(iodom); dmar = DOM2DMAR(domain); free_unr(dmar->domids, domain->domain); free(domain, M_DMAR_DOMAIN); } static struct dmar_ctx * dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid, int dev_domain, int dev_busno, const void *dev_path, int dev_path_len, bool id_mapped, bool rmrr_init) { struct dmar_domain *domain, *domain1; struct dmar_ctx *ctx, *ctx1; struct iommu_unit *unit __diagused; dmar_ctx_entry_t *ctxp; struct sf_buf *sf; int bus, slot, func, error; bool enable; if (dev != NULL) { bus = pci_get_bus(dev); slot = pci_get_slot(dev); func = pci_get_function(dev); } else { bus = PCI_RID2BUS(rid); slot = PCI_RID2SLOT(rid); func = PCI_RID2FUNC(rid); } enable = false; TD_PREP_PINNED_ASSERT; unit = DMAR2IOMMU(dmar); DMAR_LOCK(dmar); KASSERT(!iommu_is_buswide_ctx(unit, bus) || (slot == 0 && func == 0), ("iommu%d pci%d:%d:%d get_ctx for buswide", dmar->iommu.unit, bus, slot, func)); ctx = dmar_find_ctx_locked(dmar, rid); error = 0; if (ctx == NULL) { /* * Perform the allocations which require sleep or have * higher chance to succeed if the 
sleep is allowed. */ DMAR_UNLOCK(dmar); dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid)); domain1 = dmar_domain_alloc(dmar, id_mapped); if (domain1 == NULL) { TD_PINNED_ASSERT; return (NULL); } if (!id_mapped) { error = domain_init_rmrr(domain1, dev, bus, slot, func, dev_domain, dev_busno, dev_path, dev_path_len); if (error == 0 && dev != NULL) error = dmar_reserve_pci_regions(domain1, dev); if (error != 0) { dmar_domain_destroy(domain1); TD_PINNED_ASSERT; return (NULL); } } ctx1 = dmar_ctx_alloc(domain1, rid); ctxp = dmar_map_ctx_entry(ctx1, &sf); DMAR_LOCK(dmar); /* * Recheck the contexts, other thread might have * already allocated needed one. */ ctx = dmar_find_ctx_locked(dmar, rid); if (ctx == NULL) { domain = domain1; ctx = ctx1; dmar_ctx_link(ctx); ctx->context.tag->owner = dev; device_tag_init(ctx, dev); /* * This is the first activated context for the * DMAR unit. Enable the translation after * everything is set up. */ if (LIST_EMPTY(&dmar->domains)) enable = true; LIST_INSERT_HEAD(&dmar->domains, domain, link); ctx_id_entry_init(ctx, ctxp, false, bus); if (dev != NULL) { device_printf(dev, "dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d " "agaw %d %s-mapped\n", dmar->iommu.unit, dmar->segment, bus, slot, func, rid, domain->domain, domain->mgaw, domain->agaw, id_mapped ? "id" : "re"); } dmar_unmap_pgtbl(sf); } else { dmar_unmap_pgtbl(sf); dmar_domain_destroy(domain1); /* Nothing needs to be done to destroy ctx1. */ free(ctx1, M_DMAR_CTX); domain = CTX2DOM(ctx); ctx->refs++; /* tag referenced us */ } } else { domain = CTX2DOM(ctx); if (ctx->context.tag->owner == NULL) ctx->context.tag->owner = dev; ctx->refs++; /* tag referenced us */ } error = dmar_flush_for_ctx_entry(dmar, enable); if (error != 0) { dmar_free_ctx_locked(dmar, ctx); TD_PINNED_ASSERT; return (NULL); } /* * The dmar lock was potentially dropped between check for the * empty context list and now. Recheck the state of GCMD_TE * to avoid unneeded command. 
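	 * Translation is likewise not enabled here when the context is being
	 * created as part of RMRR instantiation (rmrr_init is true).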
*/ if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) { error = dmar_disable_protected_regions(dmar); if (error != 0) printf("dmar%d: Failed to disable protected regions\n", dmar->iommu.unit); error = dmar_enable_translation(dmar); if (error == 0) { if (bootverbose) { printf("dmar%d: enabled translation\n", dmar->iommu.unit); } } else { printf("dmar%d: enabling translation failed, " "error %d\n", dmar->iommu.unit, error); dmar_free_ctx_locked(dmar, ctx); TD_PINNED_ASSERT; return (NULL); } } DMAR_UNLOCK(dmar); TD_PINNED_ASSERT; return (ctx); } struct dmar_ctx * dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid, bool id_mapped, bool rmrr_init) { int dev_domain, dev_path_len, dev_busno; dev_domain = pci_get_domain(dev); dev_path_len = dmar_dev_depth(dev); ACPI_DMAR_PCI_PATH dev_path[dev_path_len]; dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len); return (dmar_get_ctx_for_dev1(dmar, dev, rid, dev_domain, dev_busno, dev_path, dev_path_len, id_mapped, rmrr_init)); } struct dmar_ctx * dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid, int dev_domain, int dev_busno, const void *dev_path, int dev_path_len, bool id_mapped, bool rmrr_init) { return (dmar_get_ctx_for_dev1(dmar, NULL, rid, dev_domain, dev_busno, dev_path, dev_path_len, id_mapped, rmrr_init)); } int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx) { struct dmar_unit *dmar; struct dmar_domain *old_domain; dmar_ctx_entry_t *ctxp; struct sf_buf *sf; int error; dmar = domain->dmar; old_domain = CTX2DOM(ctx); if (domain == old_domain) return (0); KASSERT(old_domain->iodom.iommu == domain->iodom.iommu, ("domain %p %u moving between dmars %u %u", domain, domain->domain, old_domain->iodom.iommu->unit, domain->iodom.iommu->unit)); TD_PREP_PINNED_ASSERT; ctxp = dmar_map_ctx_entry(ctx, &sf); DMAR_LOCK(dmar); dmar_ctx_unlink(ctx); ctx->context.domain = &domain->iodom; dmar_ctx_link(ctx); ctx_id_entry_init(ctx, ctxp, true, PCI_BUSMAX + 100); dmar_unmap_pgtbl(sf); error = dmar_flush_for_ctx_entry(dmar, true); /* If flush failed, rolling back would not work as well. */ printf("dmar%d rid %x domain %d->%d %s-mapped\n", dmar->iommu.unit, ctx->context.rid, old_domain->domain, domain->domain, (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 ? "id" : "re"); dmar_unref_domain_locked(dmar, old_domain); TD_PINNED_ASSERT; return (error); } static void dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain) { DMAR_ASSERT_LOCKED(dmar); KASSERT(domain->refs >= 1, ("dmar %d domain %p refs %u", dmar->iommu.unit, domain, domain->refs)); KASSERT(domain->refs > domain->ctx_cnt, ("dmar %d domain %p refs %d ctx_cnt %d", dmar->iommu.unit, domain, domain->refs, domain->ctx_cnt)); if (domain->refs > 1) { domain->refs--; DMAR_UNLOCK(dmar); return; } KASSERT((domain->iodom.flags & IOMMU_DOMAIN_RMRR) == 0, ("lost ref on RMRR domain %p", domain)); LIST_REMOVE(domain, link); DMAR_UNLOCK(dmar); taskqueue_drain(dmar->iommu.delayed_taskqueue, &domain->iodom.unload_task); dmar_domain_destroy(domain); } void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx) { struct sf_buf *sf; dmar_ctx_entry_t *ctxp; struct dmar_domain *domain; DMAR_ASSERT_LOCKED(dmar); KASSERT(ctx->refs >= 1, ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs)); /* * If our reference is not last, only the dereference should * be performed. 
*/ if (ctx->refs > 1) { ctx->refs--; DMAR_UNLOCK(dmar); return; } KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0, ("lost ref on disabled ctx %p", ctx)); /* * Otherwise, the context entry must be cleared before the * page table is destroyed. The mapping of the context * entries page could require sleep, unlock the dmar. */ DMAR_UNLOCK(dmar); TD_PREP_PINNED_ASSERT; ctxp = dmar_map_ctx_entry(ctx, &sf); DMAR_LOCK(dmar); KASSERT(ctx->refs >= 1, ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs)); /* * Other thread might have referenced the context, in which * case again only the dereference should be performed. */ if (ctx->refs > 1) { ctx->refs--; DMAR_UNLOCK(dmar); dmar_unmap_pgtbl(sf); TD_PINNED_ASSERT; return; } KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0, ("lost ref on disabled ctx %p", ctx)); /* * Clear the context pointer and flush the caches. * XXXKIB: cannot do this if any RMRR entries are still present. */ dmar_pte_clear(&ctxp->ctx1); ctxp->ctx2 = 0; dmar_flush_ctx_to_ram(dmar, ctxp); dmar_inv_ctx_glob(dmar); if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) { if (dmar->qi_enabled) dmar_qi_invalidate_iotlb_glob_locked(dmar); else dmar_inv_iotlb_glob(dmar); } dmar_unmap_pgtbl(sf); domain = CTX2DOM(ctx); dmar_ctx_unlink(ctx); free(ctx->context.tag, M_DMAR_CTX); free(ctx, M_DMAR_CTX); dmar_unref_domain_locked(dmar, domain); TD_PINNED_ASSERT; } void dmar_free_ctx(struct dmar_ctx *ctx) { struct dmar_unit *dmar; dmar = CTX2DMAR(ctx); DMAR_LOCK(dmar); dmar_free_ctx_locked(dmar, ctx); } /* * Returns with the domain locked. */ struct dmar_ctx * dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid) { struct dmar_domain *domain; struct dmar_ctx *ctx; DMAR_ASSERT_LOCKED(dmar); LIST_FOREACH(domain, &dmar->domains, link) { LIST_FOREACH(ctx, &domain->contexts, link) { if (ctx->context.rid == rid) return (ctx); } } return (NULL); } void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free) { if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) iommu_gas_free_region(entry); else iommu_gas_free_space(entry); if (free) iommu_gas_free_entry(entry); else entry->flags = 0; } /* * If the given value for "free" is true, then the caller must not be using * the entry's dmamap_link field. */ void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free, bool cansleep) { struct dmar_domain *domain; struct dmar_unit *unit; domain = IODOM2DOM(entry->domain); unit = DOM2DMAR(domain); /* * If "free" is false, then the IOTLB invalidation must be performed * synchronously. Otherwise, the caller might free the entry before * dmar_qi_task() is finished processing it. 
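 * With free == true the entry is queued for the QI task, which frees it
 * once the invalidation has completed; with free == false the
 * invalidation is waited for here and only the entry's mapping is
 * released.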
*/ if (unit->qi_enabled) { if (free) { DMAR_LOCK(unit); dmar_qi_invalidate_locked(domain, entry, true); DMAR_UNLOCK(unit); } else { dmar_qi_invalidate_sync(domain, entry->start, entry->end - entry->start, cansleep); dmar_domain_free_entry(entry, false); } } else { domain_flush_iotlb_sync(domain, entry->start, entry->end - entry->start); dmar_domain_free_entry(entry, free); } } static bool dmar_domain_unload_emit_wait(struct dmar_domain *domain, struct iommu_map_entry *entry) { if (TAILQ_NEXT(entry, dmamap_link) == NULL) return (true); return (domain->batch_no++ % dmar_batch_coalesce == 0); } void iommu_domain_unload(struct iommu_domain *iodom, struct iommu_map_entries_tailq *entries, bool cansleep) { struct dmar_domain *domain; struct dmar_unit *unit; struct iommu_map_entry *entry, *entry1; int error __diagused; domain = IODOM2DOM(iodom); unit = DOM2DMAR(domain); TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) { KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0, ("not mapped entry %p %p", domain, entry)); error = iodom->ops->unmap(iodom, entry->start, entry->end - entry->start, cansleep ? IOMMU_PGF_WAITOK : 0); KASSERT(error == 0, ("unmap %p error %d", domain, error)); if (!unit->qi_enabled) { domain_flush_iotlb_sync(domain, entry->start, entry->end - entry->start); TAILQ_REMOVE(entries, entry, dmamap_link); dmar_domain_free_entry(entry, true); } } if (TAILQ_EMPTY(entries)) return; KASSERT(unit->qi_enabled, ("loaded entry left")); DMAR_LOCK(unit); while ((entry = TAILQ_FIRST(entries)) != NULL) { TAILQ_REMOVE(entries, entry, dmamap_link); dmar_qi_invalidate_locked(domain, entry, dmar_domain_unload_emit_wait(domain, entry)); } DMAR_UNLOCK(unit); } struct iommu_ctx * iommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid, bool id_mapped, bool rmrr_init) { struct dmar_unit *dmar; struct dmar_ctx *ret; dmar = IOMMU2DMAR(iommu); ret = dmar_get_ctx_for_dev(dmar, dev, rid, id_mapped, rmrr_init); return (CTX2IOCTX(ret)); } void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *context) { struct dmar_unit *dmar; struct dmar_ctx *ctx; dmar = IOMMU2DMAR(iommu); ctx = IOCTX2CTX(context); dmar_free_ctx_locked(dmar, ctx); } void iommu_free_ctx(struct iommu_ctx *context) { struct dmar_ctx *ctx; ctx = IOCTX2CTX(context); dmar_free_ctx(ctx); } diff --git a/sys/x86/iommu/intel_dmar.h b/sys/x86/iommu/intel_dmar.h index 1c428e60d96f..e20144094c80 100644 --- a/sys/x86/iommu/intel_dmar.h +++ b/sys/x86/iommu/intel_dmar.h @@ -1,480 +1,481 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013-2015 The FreeBSD Foundation * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef __X86_IOMMU_INTEL_DMAR_H #define __X86_IOMMU_INTEL_DMAR_H #include struct dmar_unit; /* * Locking annotations: * (u) - Protected by iommu unit lock * (d) - Protected by domain lock * (c) - Immutable after initialization */ /* * The domain abstraction. Most non-constant members of the domain * are protected by owning dmar unit lock, not by the domain lock. * Most important, the dmar lock protects the contexts list. * * The domain lock protects the address map for the domain, and list * of unload entries delayed. * * Page tables pages and pages content is protected by the vm object * lock pgtbl_obj, which contains the page tables pages. */ struct dmar_domain { struct iommu_domain iodom; int domain; /* (c) DID, written in context entry */ int mgaw; /* (c) Real max address width */ int agaw; /* (c) Adjusted guest address width */ int pglvl; /* (c) The pagelevel */ int awlvl; /* (c) The pagelevel as the bitmask, to set in context entry */ u_int ctx_cnt; /* (u) Number of contexts owned */ u_int refs; /* (u) Refs, including ctx */ struct dmar_unit *dmar; /* (c) */ LIST_ENTRY(dmar_domain) link; /* (u) Member in the dmar list */ LIST_HEAD(, dmar_ctx) contexts; /* (u) */ vm_object_t pgtbl_obj; /* (c) Page table pages */ u_int batch_no; }; struct dmar_ctx { struct iommu_ctx context; uint64_t last_fault_rec[2]; /* Last fault reported */ LIST_ENTRY(dmar_ctx) link; /* (u) Member in the domain list */ u_int refs; /* (u) References from tags */ }; #define DMAR_DOMAIN_PGLOCK(dom) VM_OBJECT_WLOCK((dom)->pgtbl_obj) #define DMAR_DOMAIN_PGTRYLOCK(dom) VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj) #define DMAR_DOMAIN_PGUNLOCK(dom) VM_OBJECT_WUNLOCK((dom)->pgtbl_obj) #define DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \ VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj) #define DMAR_DOMAIN_LOCK(dom) mtx_lock(&(dom)->iodom.lock) #define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->iodom.lock) #define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED) #define DMAR2IOMMU(dmar) &((dmar)->iommu) #define IOMMU2DMAR(dmar) \ __containerof((dmar), struct dmar_unit, iommu) #define DOM2IODOM(domain) &((domain)->iodom) #define IODOM2DOM(domain) \ __containerof((domain), struct dmar_domain, iodom) #define CTX2IOCTX(ctx) &((ctx)->context) #define IOCTX2CTX(ctx) \ __containerof((ctx), struct dmar_ctx, context) #define CTX2DOM(ctx) IODOM2DOM((ctx)->context.domain) #define CTX2DMAR(ctx) (CTX2DOM(ctx)->dmar) #define DOM2DMAR(domain) ((domain)->dmar) struct dmar_msi_data { int irq; int irq_rid; struct resource *irq_res; void *intr_handle; int (*handler)(void *); int msi_data_reg; int msi_addr_reg; int msi_uaddr_reg; void (*enable_intr)(struct dmar_unit *); void (*disable_intr)(struct dmar_unit *); const char *name; }; #define DMAR_INTR_FAULT 0 #define DMAR_INTR_QI 1 #define DMAR_INTR_TOTAL 2 struct dmar_unit { struct iommu_unit iommu; device_t dev; uint16_t segment; uint64_t base; /* Resources */ int reg_rid; struct resource *regs; struct dmar_msi_data intrs[DMAR_INTR_TOTAL]; /* Hardware registers cache */ uint32_t 
hw_ver; uint64_t hw_cap; uint64_t hw_ecap; uint32_t hw_gcmd; /* Data for being a dmar */ LIST_HEAD(, dmar_domain) domains; struct unrhdr *domids; vm_object_t ctx_obj; u_int barrier_flags; /* Fault handler data */ struct mtx fault_lock; uint64_t *fault_log; int fault_log_head; int fault_log_tail; int fault_log_size; struct task fault_task; struct taskqueue *fault_taskqueue; /* QI */ int qi_enabled; char *inv_queue; vm_size_t inv_queue_size; uint32_t inv_queue_avail; uint32_t inv_queue_tail; volatile uint32_t inv_waitd_seq_hw; /* hw writes there on wait descr completion */ uint64_t inv_waitd_seq_hw_phys; uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */ u_int inv_waitd_gen; /* seq number generation AKA seq overflows */ u_int inv_seq_waiters; /* count of waiters for seq */ u_int inv_queue_full; /* informational counter */ /* IR */ int ir_enabled; vm_paddr_t irt_phys; dmar_irte_t *irt; u_int irte_cnt; vmem_t *irtids; /* * Delayed freeing of map entries queue processing: * * tlb_flush_head and tlb_flush_tail are used to implement a FIFO * queue that supports concurrent dequeues and enqueues. However, * there can only be a single dequeuer (accessing tlb_flush_head) and * a single enqueuer (accessing tlb_flush_tail) at a time. Since the * unit's qi_task is the only dequeuer, it can access tlb_flush_head * without any locking. In contrast, there may be multiple enqueuers, * so the enqueuers acquire the iommu unit lock to serialize their * accesses to tlb_flush_tail. * * In this FIFO queue implementation, the key to enabling concurrent * dequeues and enqueues is that the dequeuer never needs to access * tlb_flush_tail and the enqueuer never needs to access * tlb_flush_head. In particular, tlb_flush_head and tlb_flush_tail * are never NULL, so neither a dequeuer nor an enqueuer ever needs to * update both. Instead, tlb_flush_head always points to a "zombie" * struct, which previously held the last dequeued item. Thus, the * zombie's next field actually points to the struct holding the first * item in the queue. When an item is dequeued, the current zombie is * finally freed, and the struct that held the just dequeued item * becomes the new zombie. When the queue is empty, tlb_flush_tail * also points to the zombie. 
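	 *
	 * A minimal sketch of the scheme described above (illustrative only;
	 * the names below are placeholders, the real code lives in the
	 * dmar_qi_* routines):
	 *
	 *	enqueue(e):                       // enqueuers hold the unit lock
	 *		e->next = NULL;
	 *		tlb_flush_tail->next = e;
	 *		tlb_flush_tail = e;
	 *
	 *	dequeue():                        // qi_task only, no lock needed
	 *		first = tlb_flush_head->next; // entry holding the first item
	 *		free(tlb_flush_head);         // retire the old zombie
	 *		tlb_flush_head = first;       // "first" becomes the new zombie
	 *		return (item that "first" held);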
*/ struct iommu_map_entry *tlb_flush_head; struct iommu_map_entry *tlb_flush_tail; struct task qi_task; struct taskqueue *qi_taskqueue; }; #define DMAR_LOCK(dmar) mtx_lock(&(dmar)->iommu.lock) #define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->iommu.lock) #define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED) #define DMAR_FAULT_LOCK(dmar) mtx_lock_spin(&(dmar)->fault_lock) #define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock) #define DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED) #define DMAR_IS_COHERENT(dmar) (((dmar)->hw_ecap & DMAR_ECAP_C) != 0) #define DMAR_HAS_QI(dmar) (((dmar)->hw_ecap & DMAR_ECAP_QI) != 0) #define DMAR_X2APIC(dmar) \ (x2apic_mode && ((dmar)->hw_ecap & DMAR_ECAP_EIM) != 0) /* Barrier ids */ #define DMAR_BARRIER_RMRR 0 #define DMAR_BARRIER_USEQ 1 struct dmar_unit *dmar_find(device_t dev, bool verbose); struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid); struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid); u_int dmar_nd2mask(u_int nd); bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl); int domain_set_agaw(struct dmar_domain *domain, int mgaw); int dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr, bool allow_less); vm_pindex_t pglvl_max_pages(int pglvl); int domain_is_sp_lvl(struct dmar_domain *domain, int lvl); iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl); iommu_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl); int calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size, iommu_gaddr_t *isizep); struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags); void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags); void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags, struct sf_buf **sf); void dmar_unmap_pgtbl(struct sf_buf *sf); int dmar_load_root_entry_ptr(struct dmar_unit *unit); int dmar_inv_ctx_glob(struct dmar_unit *unit); int dmar_inv_iotlb_glob(struct dmar_unit *unit); int dmar_flush_write_bufs(struct dmar_unit *unit); void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst); void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst); void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst); int dmar_disable_protected_regions(struct dmar_unit *unit); int dmar_enable_translation(struct dmar_unit *unit); int dmar_disable_translation(struct dmar_unit *unit); int dmar_load_irt_ptr(struct dmar_unit *unit); int dmar_enable_ir(struct dmar_unit *unit); int dmar_disable_ir(struct dmar_unit *unit); bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id); void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id); uint64_t dmar_get_timeout(void); void dmar_update_timeout(uint64_t newval); int dmar_fault_intr(void *arg); void dmar_enable_fault_intr(struct dmar_unit *unit); void dmar_disable_fault_intr(struct dmar_unit *unit); int dmar_init_fault_log(struct dmar_unit *unit); void dmar_fini_fault_log(struct dmar_unit *unit); int dmar_qi_intr(void *arg); void dmar_enable_qi_intr(struct dmar_unit *unit); void dmar_disable_qi_intr(struct dmar_unit *unit); int dmar_init_qi(struct dmar_unit *unit); void dmar_fini_qi(struct dmar_unit *unit); void dmar_qi_invalidate_locked(struct dmar_domain *domain, struct iommu_map_entry *entry, bool emit_wait); void dmar_qi_invalidate_sync(struct dmar_domain *domain, iommu_gaddr_t start, iommu_gaddr_t size, bool cansleep); void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit); void 
dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit); void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit); void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt); vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain, iommu_gaddr_t maxaddr); void put_idmap_pgtbl(vm_object_t obj); void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base, iommu_gaddr_t size); int domain_alloc_pgtbl(struct dmar_domain *domain); void domain_free_pgtbl(struct dmar_domain *domain); extern const struct iommu_domain_map_ops dmar_domain_map_ops; int dmar_dev_depth(device_t child); void dmar_dev_path(device_t child, int *busno, void *path1, int depth); struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid, bool id_mapped, bool rmrr_init); struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid, int dev_domain, int dev_busno, const void *dev_path, int dev_path_len, bool id_mapped, bool rmrr_init); int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx); void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx); void dmar_free_ctx(struct dmar_ctx *ctx); struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid); void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free); void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain, int dev_busno, const void *dev_path, int dev_path_len, struct iommu_map_entries_tailq *rmrr_entries); int dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar); void dmar_quirks_post_ident(struct dmar_unit *dmar); void dmar_quirks_pre_use(struct iommu_unit *dmar); int dmar_init_irt(struct dmar_unit *unit); void dmar_fini_irt(struct dmar_unit *unit); extern iommu_haddr_t dmar_high; extern int haw; extern int dmar_tbl_pagecnt; extern int dmar_batch_coalesce; +extern int dmar_rmrr_enable; static inline uint32_t dmar_read4(const struct dmar_unit *unit, int reg) { return (bus_read_4(unit->regs, reg)); } static inline uint64_t dmar_read8(const struct dmar_unit *unit, int reg) { #ifdef __i386__ uint32_t high, low; low = bus_read_4(unit->regs, reg); high = bus_read_4(unit->regs, reg + 4); return (low | ((uint64_t)high << 32)); #else return (bus_read_8(unit->regs, reg)); #endif } static inline void dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val) { KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) == (unit->hw_gcmd & DMAR_GCMD_TE), ("dmar%d clearing TE 0x%08x 0x%08x", unit->iommu.unit, unit->hw_gcmd, val)); bus_write_4(unit->regs, reg, val); } static inline void dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val) { KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write")); #ifdef __i386__ uint32_t high, low; low = val; high = val >> 32; bus_write_4(unit->regs, reg, low); bus_write_4(unit->regs, reg + 4, high); #else bus_write_8(unit->regs, reg, val); #endif } /* * dmar_pte_store and dmar_pte_clear ensure that on i386, 32bit writes * are issued in the correct order. For store, the lower word, * containing the P or R and W bits, is set only after the high word * is written. For clear, the P bit is cleared first, then the high * word is cleared. * * dmar_pte_update updates the pte. For amd64, the update is atomic. * For i386, it first disables the entry by clearing the word * containing the P bit, and then defer to dmar_pte_store. The locked * cmpxchg8b is probably available on any machine having DMAR support, * but interrupt translation table may be mapped uncached. 
*/ static inline void dmar_pte_store1(volatile uint64_t *dst, uint64_t val) { #ifdef __i386__ volatile uint32_t *p; uint32_t hi, lo; hi = val >> 32; lo = val; p = (volatile uint32_t *)dst; *(p + 1) = hi; *p = lo; #else *dst = val; #endif } static inline void dmar_pte_store(volatile uint64_t *dst, uint64_t val) { KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx", dst, (uintmax_t)*dst, (uintmax_t)val)); dmar_pte_store1(dst, val); } static inline void dmar_pte_update(volatile uint64_t *dst, uint64_t val) { #ifdef __i386__ volatile uint32_t *p; p = (volatile uint32_t *)dst; *p = 0; #endif dmar_pte_store1(dst, val); } static inline void dmar_pte_clear(volatile uint64_t *dst) { #ifdef __i386__ volatile uint32_t *p; p = (volatile uint32_t *)dst; *p = 0; *(p + 1) = 0; #else *dst = 0; #endif } extern struct timespec dmar_hw_timeout; #define DMAR_WAIT_UNTIL(cond) \ { \ struct timespec last, curr; \ bool forever; \ \ if (dmar_hw_timeout.tv_sec == 0 && \ dmar_hw_timeout.tv_nsec == 0) { \ forever = true; \ } else { \ forever = false; \ nanouptime(&curr); \ timespecadd(&curr, &dmar_hw_timeout, &last); \ } \ for (;;) { \ if (cond) { \ error = 0; \ break; \ } \ nanouptime(&curr); \ if (!forever && timespeccmp(&last, &curr, <)) { \ error = ETIMEDOUT; \ break; \ } \ cpu_spinwait(); \ } \ } #ifdef INVARIANTS #define TD_PREP_PINNED_ASSERT \ int old_td_pinned; \ old_td_pinned = curthread->td_pinned #define TD_PINNED_ASSERT \ KASSERT(curthread->td_pinned == old_td_pinned, \ ("pin count leak: %d %d %s:%d", curthread->td_pinned, \ old_td_pinned, __FILE__, __LINE__)) #else #define TD_PREP_PINNED_ASSERT #define TD_PINNED_ASSERT #endif #endif diff --git a/sys/x86/iommu/intel_drv.c b/sys/x86/iommu/intel_drv.c index 274ba566ad23..c4bb3a1745f4 100644 --- a/sys/x86/iommu/intel_drv.c +++ b/sys/x86/iommu/intel_drv.c @@ -1,1347 +1,1357 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013-2015 The FreeBSD Foundation * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opt_acpi.h" #if defined(__amd64__) #define DEV_APIC #else #include "opt_apic.h" #endif #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_APIC #include "pcib_if.h" #include #include #include #endif #define DMAR_FAULT_IRQ_RID 0 #define DMAR_QI_IRQ_RID 1 #define DMAR_REG_RID 2 static device_t *dmar_devs; static int dmar_devcnt; typedef int (*dmar_iter_t)(ACPI_DMAR_HEADER *, void *); static void dmar_iterate_tbl(dmar_iter_t iter, void *arg) { ACPI_TABLE_DMAR *dmartbl; ACPI_DMAR_HEADER *dmarh; char *ptr, *ptrend; ACPI_STATUS status; status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl); if (ACPI_FAILURE(status)) return; ptr = (char *)dmartbl + sizeof(*dmartbl); ptrend = (char *)dmartbl + dmartbl->Header.Length; for (;;) { if (ptr >= ptrend) break; dmarh = (ACPI_DMAR_HEADER *)ptr; if (dmarh->Length <= 0) { printf("dmar_identify: corrupted DMAR table, l %d\n", dmarh->Length); break; } ptr += dmarh->Length; if (!iter(dmarh, arg)) break; } AcpiPutTable((ACPI_TABLE_HEADER *)dmartbl); } struct find_iter_args { int i; ACPI_DMAR_HARDWARE_UNIT *res; }; static int dmar_find_iter(ACPI_DMAR_HEADER *dmarh, void *arg) { struct find_iter_args *fia; if (dmarh->Type != ACPI_DMAR_TYPE_HARDWARE_UNIT) return (1); fia = arg; if (fia->i == 0) { fia->res = (ACPI_DMAR_HARDWARE_UNIT *)dmarh; return (0); } fia->i--; return (1); } static ACPI_DMAR_HARDWARE_UNIT * dmar_find_by_index(int idx) { struct find_iter_args fia; fia.i = idx; fia.res = NULL; dmar_iterate_tbl(dmar_find_iter, &fia); return (fia.res); } static int dmar_count_iter(ACPI_DMAR_HEADER *dmarh, void *arg) { if (dmarh->Type == ACPI_DMAR_TYPE_HARDWARE_UNIT) dmar_devcnt++; return (1); } +int dmar_rmrr_enable = 1; + static int dmar_enable = 0; static void dmar_identify(driver_t *driver, device_t parent) { ACPI_TABLE_DMAR *dmartbl; ACPI_DMAR_HARDWARE_UNIT *dmarh; ACPI_STATUS status; int i, error; if (acpi_disabled("dmar")) return; TUNABLE_INT_FETCH("hw.dmar.enable", &dmar_enable); if (!dmar_enable) return; + TUNABLE_INT_FETCH("hw.dmar.rmrr_enable", &dmar_rmrr_enable); + status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl); if (ACPI_FAILURE(status)) return; haw = dmartbl->Width + 1; if ((1ULL << (haw + 1)) > BUS_SPACE_MAXADDR) dmar_high = BUS_SPACE_MAXADDR; else dmar_high = 1ULL << (haw + 1); if (bootverbose) { printf("DMAR HAW=%d flags=<%b>\n", dmartbl->Width, (unsigned)dmartbl->Flags, "\020\001INTR_REMAP\002X2APIC_OPT_OUT"); } AcpiPutTable((ACPI_TABLE_HEADER *)dmartbl); dmar_iterate_tbl(dmar_count_iter, NULL); if (dmar_devcnt == 0) return; dmar_devs = malloc(sizeof(device_t) * dmar_devcnt, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < dmar_devcnt; i++) { dmarh = dmar_find_by_index(i); if (dmarh == NULL) { printf("dmar_identify: cannot find HWUNIT %d\n", i); continue; } dmar_devs[i] = BUS_ADD_CHILD(parent, 1, "dmar", i); if (dmar_devs[i] == NULL) { printf("dmar_identify: cannot create instance %d\n", i); continue; } error = bus_set_resource(dmar_devs[i], SYS_RES_MEMORY, DMAR_REG_RID, dmarh->Address, PAGE_SIZE); if (error != 0) { printf( "dmar%d: unable to alloc register window at 0x%08jx: error %d\n", i, (uintmax_t)dmarh->Address, error); device_delete_child(parent, dmar_devs[i]); dmar_devs[i] = NULL; } } } static int dmar_probe(device_t dev) { if 
(acpi_get_handle(dev) != NULL) return (ENXIO); device_set_desc(dev, "DMA remap"); return (BUS_PROBE_NOWILDCARD); } static void dmar_release_intr(device_t dev, struct dmar_unit *unit, int idx) { struct dmar_msi_data *dmd; dmd = &unit->intrs[idx]; if (dmd->irq == -1) return; bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle); bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res); bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid); PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)), dev, dmd->irq); dmd->irq = -1; } static void dmar_release_resources(device_t dev, struct dmar_unit *unit) { int i; iommu_fini_busdma(&unit->iommu); dmar_fini_irt(unit); dmar_fini_qi(unit); dmar_fini_fault_log(unit); for (i = 0; i < DMAR_INTR_TOTAL; i++) dmar_release_intr(dev, unit, i); if (unit->regs != NULL) { bus_deactivate_resource(dev, SYS_RES_MEMORY, unit->reg_rid, unit->regs); bus_release_resource(dev, SYS_RES_MEMORY, unit->reg_rid, unit->regs); unit->regs = NULL; } if (unit->domids != NULL) { delete_unrhdr(unit->domids); unit->domids = NULL; } if (unit->ctx_obj != NULL) { vm_object_deallocate(unit->ctx_obj); unit->ctx_obj = NULL; } } static int dmar_alloc_irq(device_t dev, struct dmar_unit *unit, int idx) { device_t pcib; struct dmar_msi_data *dmd; uint64_t msi_addr; uint32_t msi_data; int error; dmd = &unit->intrs[idx]; pcib = device_get_parent(device_get_parent(dev)); /* Really not pcib */ error = PCIB_ALLOC_MSIX(pcib, dev, &dmd->irq); if (error != 0) { device_printf(dev, "cannot allocate %s interrupt, %d\n", dmd->name, error); goto err1; } error = bus_set_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq, 1); if (error != 0) { device_printf(dev, "cannot set %s interrupt resource, %d\n", dmd->name, error); goto err2; } dmd->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &dmd->irq_rid, RF_ACTIVE); if (dmd->irq_res == NULL) { device_printf(dev, "cannot allocate resource for %s interrupt\n", dmd->name); error = ENXIO; goto err3; } error = bus_setup_intr(dev, dmd->irq_res, INTR_TYPE_MISC, dmd->handler, NULL, unit, &dmd->intr_handle); if (error != 0) { device_printf(dev, "cannot setup %s interrupt, %d\n", dmd->name, error); goto err4; } bus_describe_intr(dev, dmd->irq_res, dmd->intr_handle, "%s", dmd->name); error = PCIB_MAP_MSI(pcib, dev, dmd->irq, &msi_addr, &msi_data); if (error != 0) { device_printf(dev, "cannot map %s interrupt, %d\n", dmd->name, error); goto err5; } dmar_write4(unit, dmd->msi_data_reg, msi_data); dmar_write4(unit, dmd->msi_addr_reg, msi_addr); /* Only for xAPIC mode */ dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32); return (0); err5: bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle); err4: bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res); err3: bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid); err2: PCIB_RELEASE_MSIX(pcib, dev, dmd->irq); dmd->irq = -1; err1: return (error); } #ifdef DEV_APIC static int dmar_remap_intr(device_t dev, device_t child, u_int irq) { struct dmar_unit *unit; struct dmar_msi_data *dmd; uint64_t msi_addr; uint32_t msi_data; int i, error; unit = device_get_softc(dev); for (i = 0; i < DMAR_INTR_TOTAL; i++) { dmd = &unit->intrs[i]; if (irq == dmd->irq) { error = PCIB_MAP_MSI(device_get_parent( device_get_parent(dev)), dev, irq, &msi_addr, &msi_data); if (error != 0) return (error); DMAR_LOCK(unit); (dmd->disable_intr)(unit); dmar_write4(unit, dmd->msi_data_reg, msi_data); dmar_write4(unit, dmd->msi_addr_reg, msi_addr); dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32); (dmd->enable_intr)(unit); 
DMAR_UNLOCK(unit); return (0); } } return (ENOENT); } #endif static void dmar_print_caps(device_t dev, struct dmar_unit *unit, ACPI_DMAR_HARDWARE_UNIT *dmaru) { uint32_t caphi, ecaphi; device_printf(dev, "regs@0x%08jx, ver=%d.%d, seg=%d, flags=<%b>\n", (uintmax_t)dmaru->Address, DMAR_MAJOR_VER(unit->hw_ver), DMAR_MINOR_VER(unit->hw_ver), dmaru->Segment, dmaru->Flags, "\020\001INCLUDE_ALL_PCI"); caphi = unit->hw_cap >> 32; device_printf(dev, "cap=%b,", (u_int)unit->hw_cap, "\020\004AFL\005WBF\006PLMR\007PHMR\010CM\027ZLR\030ISOCH"); printf("%b, ", caphi, "\020\010PSI\027DWD\030DRD\031FL1GP\034PSI"); printf("ndoms=%d, sagaw=%d, mgaw=%d, fro=%d, nfr=%d, superp=%d", DMAR_CAP_ND(unit->hw_cap), DMAR_CAP_SAGAW(unit->hw_cap), DMAR_CAP_MGAW(unit->hw_cap), DMAR_CAP_FRO(unit->hw_cap), DMAR_CAP_NFR(unit->hw_cap), DMAR_CAP_SPS(unit->hw_cap)); if ((unit->hw_cap & DMAR_CAP_PSI) != 0) printf(", mamv=%d", DMAR_CAP_MAMV(unit->hw_cap)); printf("\n"); ecaphi = unit->hw_ecap >> 32; device_printf(dev, "ecap=%b,", (u_int)unit->hw_ecap, "\020\001C\002QI\003DI\004IR\005EIM\007PT\010SC\031ECS\032MTS" "\033NEST\034DIS\035PASID\036PRS\037ERS\040SRS"); printf("%b, ", ecaphi, "\020\002NWFS\003EAFS"); printf("mhmw=%d, iro=%d\n", DMAR_ECAP_MHMV(unit->hw_ecap), DMAR_ECAP_IRO(unit->hw_ecap)); } static int dmar_attach(device_t dev) { struct dmar_unit *unit; ACPI_DMAR_HARDWARE_UNIT *dmaru; uint64_t timeout; int disable_pmr; int i, error; unit = device_get_softc(dev); unit->dev = dev; unit->iommu.unit = device_get_unit(dev); unit->iommu.dev = dev; dmaru = dmar_find_by_index(unit->iommu.unit); if (dmaru == NULL) return (EINVAL); unit->segment = dmaru->Segment; unit->base = dmaru->Address; unit->reg_rid = DMAR_REG_RID; unit->regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &unit->reg_rid, RF_ACTIVE); if (unit->regs == NULL) { device_printf(dev, "cannot allocate register window\n"); return (ENOMEM); } unit->hw_ver = dmar_read4(unit, DMAR_VER_REG); unit->hw_cap = dmar_read8(unit, DMAR_CAP_REG); unit->hw_ecap = dmar_read8(unit, DMAR_ECAP_REG); if (bootverbose) dmar_print_caps(dev, unit, dmaru); dmar_quirks_post_ident(unit); timeout = dmar_get_timeout(); TUNABLE_UINT64_FETCH("hw.iommu.dmar.timeout", &timeout); dmar_update_timeout(timeout); for (i = 0; i < DMAR_INTR_TOTAL; i++) unit->intrs[i].irq = -1; unit->intrs[DMAR_INTR_FAULT].name = "fault"; unit->intrs[DMAR_INTR_FAULT].irq_rid = DMAR_FAULT_IRQ_RID; unit->intrs[DMAR_INTR_FAULT].handler = dmar_fault_intr; unit->intrs[DMAR_INTR_FAULT].msi_data_reg = DMAR_FEDATA_REG; unit->intrs[DMAR_INTR_FAULT].msi_addr_reg = DMAR_FEADDR_REG; unit->intrs[DMAR_INTR_FAULT].msi_uaddr_reg = DMAR_FEUADDR_REG; unit->intrs[DMAR_INTR_FAULT].enable_intr = dmar_enable_fault_intr; unit->intrs[DMAR_INTR_FAULT].disable_intr = dmar_disable_fault_intr; error = dmar_alloc_irq(dev, unit, DMAR_INTR_FAULT); if (error != 0) { dmar_release_resources(dev, unit); return (error); } if (DMAR_HAS_QI(unit)) { unit->intrs[DMAR_INTR_QI].name = "qi"; unit->intrs[DMAR_INTR_QI].irq_rid = DMAR_QI_IRQ_RID; unit->intrs[DMAR_INTR_QI].handler = dmar_qi_intr; unit->intrs[DMAR_INTR_QI].msi_data_reg = DMAR_IEDATA_REG; unit->intrs[DMAR_INTR_QI].msi_addr_reg = DMAR_IEADDR_REG; unit->intrs[DMAR_INTR_QI].msi_uaddr_reg = DMAR_IEUADDR_REG; unit->intrs[DMAR_INTR_QI].enable_intr = dmar_enable_qi_intr; unit->intrs[DMAR_INTR_QI].disable_intr = dmar_disable_qi_intr; error = dmar_alloc_irq(dev, unit, DMAR_INTR_QI); if (error != 0) { dmar_release_resources(dev, unit); return (error); } } mtx_init(&unit->iommu.lock, "dmarhw", NULL, MTX_DEF); 
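	/*
	 * Domain IDs are allocated from the range supported by the
	 * hardware, as reported by the ND field of the capability register.
	 */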
unit->domids = new_unrhdr(0, dmar_nd2mask(DMAR_CAP_ND(unit->hw_cap)), &unit->iommu.lock); LIST_INIT(&unit->domains); /* * 9.2 "Context Entry": * When Caching Mode (CM) field is reported as Set, the * domain-id value of zero is architecturally reserved. * Software must not use domain-id value of zero * when CM is Set. */ if ((unit->hw_cap & DMAR_CAP_CM) != 0) alloc_unr_specific(unit->domids, 0); unit->ctx_obj = vm_pager_allocate(OBJT_PHYS, NULL, IDX_TO_OFF(1 + DMAR_CTX_CNT), 0, 0, NULL); /* * Allocate and load the root entry table pointer. Enable the * address translation after the required invalidations are * done. */ dmar_pgalloc(unit->ctx_obj, 0, IOMMU_PGF_WAITOK | IOMMU_PGF_ZERO); DMAR_LOCK(unit); error = dmar_load_root_entry_ptr(unit); if (error != 0) { DMAR_UNLOCK(unit); dmar_release_resources(dev, unit); return (error); } error = dmar_inv_ctx_glob(unit); if (error != 0) { DMAR_UNLOCK(unit); dmar_release_resources(dev, unit); return (error); } if ((unit->hw_ecap & DMAR_ECAP_DI) != 0) { error = dmar_inv_iotlb_glob(unit); if (error != 0) { DMAR_UNLOCK(unit); dmar_release_resources(dev, unit); return (error); } } DMAR_UNLOCK(unit); error = dmar_init_fault_log(unit); if (error != 0) { dmar_release_resources(dev, unit); return (error); } error = dmar_init_qi(unit); if (error != 0) { dmar_release_resources(dev, unit); return (error); } error = dmar_init_irt(unit); if (error != 0) { dmar_release_resources(dev, unit); return (error); } disable_pmr = 0; TUNABLE_INT_FETCH("hw.dmar.pmr.disable", &disable_pmr); if (disable_pmr) { error = dmar_disable_protected_regions(unit); if (error != 0) device_printf(dev, "Failed to disable protected regions\n"); } error = iommu_init_busdma(&unit->iommu); if (error != 0) { dmar_release_resources(dev, unit); return (error); } #ifdef NOTYET DMAR_LOCK(unit); error = dmar_enable_translation(unit); if (error != 0) { DMAR_UNLOCK(unit); dmar_release_resources(dev, unit); return (error); } DMAR_UNLOCK(unit); #endif return (0); } static int dmar_detach(device_t dev) { return (EBUSY); } static int dmar_suspend(device_t dev) { return (0); } static int dmar_resume(device_t dev) { /* XXXKIB */ return (0); } static device_method_t dmar_methods[] = { DEVMETHOD(device_identify, dmar_identify), DEVMETHOD(device_probe, dmar_probe), DEVMETHOD(device_attach, dmar_attach), DEVMETHOD(device_detach, dmar_detach), DEVMETHOD(device_suspend, dmar_suspend), DEVMETHOD(device_resume, dmar_resume), #ifdef DEV_APIC DEVMETHOD(bus_remap_intr, dmar_remap_intr), #endif DEVMETHOD_END }; static driver_t dmar_driver = { "dmar", dmar_methods, sizeof(struct dmar_unit), }; DRIVER_MODULE(dmar, acpi, dmar_driver, 0, 0); MODULE_DEPEND(dmar, acpi, 1, 1, 1); static void dmar_print_path(int busno, int depth, const ACPI_DMAR_PCI_PATH *path) { int i; printf("[%d, ", busno); for (i = 0; i < depth; i++) { if (i != 0) printf(", "); printf("(%d, %d)", path[i].Device, path[i].Function); } printf("]"); } int dmar_dev_depth(device_t child) { devclass_t pci_class; device_t bus, pcib; int depth; pci_class = devclass_find("pci"); for (depth = 1; ; depth++) { bus = device_get_parent(child); pcib = device_get_parent(bus); if (device_get_devclass(device_get_parent(pcib)) != pci_class) return (depth); child = pcib; } } void dmar_dev_path(device_t child, int *busno, void *path1, int depth) { devclass_t pci_class; device_t bus, pcib; ACPI_DMAR_PCI_PATH *path; pci_class = devclass_find("pci"); path = path1; for (depth--; depth != -1; depth--) { path[depth].Device = pci_get_slot(child); path[depth].Function = 
void
dmar_dev_path(device_t child, int *busno, void *path1, int depth)
{
	devclass_t pci_class;
	device_t bus, pcib;
	ACPI_DMAR_PCI_PATH *path;

	pci_class = devclass_find("pci");
	path = path1;
	for (depth--; depth != -1; depth--) {
		path[depth].Device = pci_get_slot(child);
		path[depth].Function = pci_get_function(child);
		bus = device_get_parent(child);
		pcib = device_get_parent(bus);
		if (device_get_devclass(device_get_parent(pcib)) !=
		    pci_class) {
			/* reached a host bridge */
			*busno = pcib_get_bus(bus);
			return;
		}
		child = pcib;
	}
	panic("wrong depth");
}

static int
dmar_match_pathes(int busno1, const ACPI_DMAR_PCI_PATH *path1, int depth1,
    int busno2, const ACPI_DMAR_PCI_PATH *path2, int depth2,
    enum AcpiDmarScopeType scope_type)
{
	int i, depth;

	if (busno1 != busno2)
		return (0);
	if (scope_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && depth1 != depth2)
		return (0);
	depth = depth1;
	if (depth2 < depth)
		depth = depth2;
	for (i = 0; i < depth; i++) {
		if (path1[i].Device != path2[i].Device ||
		    path1[i].Function != path2[i].Function)
			return (0);
	}
	return (1);
}

static int
dmar_match_devscope(ACPI_DMAR_DEVICE_SCOPE *devscope, int dev_busno,
    const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len)
{
	ACPI_DMAR_PCI_PATH *path;
	int path_len;

	if (devscope->Length < sizeof(*devscope)) {
		printf("dmar_match_devscope: corrupted DMAR table, dl %d\n",
		    devscope->Length);
		return (-1);
	}
	if (devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	    devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
		return (0);
	path_len = devscope->Length - sizeof(*devscope);
	if (path_len % 2 != 0) {
		printf("dmar_match_devscope: corrupted DMAR table, dl %d\n",
		    devscope->Length);
		return (-1);
	}
	path_len /= 2;
	path = (ACPI_DMAR_PCI_PATH *)(devscope + 1);
	if (path_len == 0) {
		printf("dmar_match_devscope: corrupted DMAR table, dl %d\n",
		    devscope->Length);
		return (-1);
	}

	return (dmar_match_pathes(devscope->Bus, path, path_len, dev_busno,
	    dev_path, dev_path_len, devscope->EntryType));
}

static bool
dmar_match_by_path(struct dmar_unit *unit, int dev_domain, int dev_busno,
    const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len,
    const char **banner)
{
	ACPI_DMAR_HARDWARE_UNIT *dmarh;
	ACPI_DMAR_DEVICE_SCOPE *devscope;
	char *ptr, *ptrend;
	int match;

	dmarh = dmar_find_by_index(unit->iommu.unit);
	if (dmarh == NULL)
		return (false);
	if (dmarh->Segment != dev_domain)
		return (false);
	if ((dmarh->Flags & ACPI_DMAR_INCLUDE_ALL) != 0) {
		if (banner != NULL)
			*banner = "INCLUDE_ALL";
		return (true);
	}
	ptr = (char *)dmarh + sizeof(*dmarh);
	ptrend = (char *)dmarh + dmarh->Header.Length;
	while (ptr < ptrend) {
		devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
		ptr += devscope->Length;
		match = dmar_match_devscope(devscope, dev_busno, dev_path,
		    dev_path_len);
		if (match == -1)
			return (false);
		if (match == 1) {
			if (banner != NULL)
				*banner = "specific match";
			return (true);
		}
	}
	return (false);
}

static struct dmar_unit *
dmar_find_by_scope(int dev_domain, int dev_busno,
    const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len)
{
	struct dmar_unit *unit;
	int i;

	for (i = 0; i < dmar_devcnt; i++) {
		if (dmar_devs[i] == NULL)
			continue;
		unit = device_get_softc(dmar_devs[i]);
		if (dmar_match_by_path(unit, dev_domain, dev_busno, dev_path,
		    dev_path_len, NULL))
			return (unit);
	}
	return (NULL);
}
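/*
 * Map a PCI device to the DMAR unit that translates it: build the
 * device's scope path, then scan each reported hardware unit for either
 * a specific device scope match or an INCLUDE_ALL entry in the same PCI
 * segment.
 */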
struct dmar_unit *
dmar_find(device_t dev, bool verbose)
{
	struct dmar_unit *unit;
	const char *banner;
	int i, dev_domain, dev_busno, dev_path_len;

	/*
	 * This function can only handle PCI(e) devices.
	 */
	if (device_get_devclass(device_get_parent(dev)) !=
	    devclass_find("pci"))
		return (NULL);

	dev_domain = pci_get_domain(dev);
	dev_path_len = dmar_dev_depth(dev);
	ACPI_DMAR_PCI_PATH dev_path[dev_path_len];
	dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len);
	banner = "";

	for (i = 0; i < dmar_devcnt; i++) {
		if (dmar_devs[i] == NULL)
			continue;
		unit = device_get_softc(dmar_devs[i]);
		if (dmar_match_by_path(unit, dev_domain, dev_busno,
		    dev_path, dev_path_len, &banner))
			break;
	}
	if (i == dmar_devcnt)
		return (NULL);

	if (verbose) {
		device_printf(dev, "pci%d:%d:%d:%d matched dmar%d by %s",
		    dev_domain, pci_get_bus(dev), pci_get_slot(dev),
		    pci_get_function(dev), unit->iommu.unit, banner);
		printf(" scope path ");
		dmar_print_path(dev_busno, dev_path_len, dev_path);
		printf("\n");
	}
	return (unit);
}

static struct dmar_unit *
dmar_find_nonpci(u_int id, u_int entry_type, uint16_t *rid)
{
	device_t dmar_dev;
	struct dmar_unit *unit;
	ACPI_DMAR_HARDWARE_UNIT *dmarh;
	ACPI_DMAR_DEVICE_SCOPE *devscope;
	ACPI_DMAR_PCI_PATH *path;
	char *ptr, *ptrend;
#ifdef DEV_APIC
	int error;
#endif
	int i;

	for (i = 0; i < dmar_devcnt; i++) {
		dmar_dev = dmar_devs[i];
		if (dmar_dev == NULL)
			continue;
		unit = (struct dmar_unit *)device_get_softc(dmar_dev);
		dmarh = dmar_find_by_index(i);
		if (dmarh == NULL)
			continue;
		ptr = (char *)dmarh + sizeof(*dmarh);
		ptrend = (char *)dmarh + dmarh->Header.Length;
		for (;;) {
			if (ptr >= ptrend)
				break;
			devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
			ptr += devscope->Length;
			if (devscope->EntryType != entry_type)
				continue;
			if (devscope->EnumerationId != id)
				continue;
#ifdef DEV_APIC
			if (entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
				error = ioapic_get_rid(id, rid);
				/*
				 * If our IOAPIC has PCI bindings then
				 * use the PCI device rid.
				 */
				if (error == 0)
					return (unit);
			}
#endif
			if (devscope->Length -
			    sizeof(ACPI_DMAR_DEVICE_SCOPE) == 2) {
				if (rid != NULL) {
					path = (ACPI_DMAR_PCI_PATH *)
					    (devscope + 1);
					*rid = PCI_RID(devscope->Bus,
					    path->Device, path->Function);
				}
				return (unit);
			}
			printf(
		    "dmar_find_nonpci: id %d type %d path length != 2\n",
			    id, entry_type);
			break;
		}
	}
	return (NULL);
}

struct dmar_unit *
dmar_find_hpet(device_t dev, uint16_t *rid)
{

	return (dmar_find_nonpci(hpet_get_uid(dev), ACPI_DMAR_SCOPE_TYPE_HPET,
	    rid));
}

struct dmar_unit *
dmar_find_ioapic(u_int apic_id, uint16_t *rid)
{

	return (dmar_find_nonpci(apic_id, ACPI_DMAR_SCOPE_TYPE_IOAPIC, rid));
}

struct rmrr_iter_args {
	struct dmar_domain *domain;
	int dev_domain;
	int dev_busno;
	const ACPI_DMAR_PCI_PATH *dev_path;
	int dev_path_len;
	struct iommu_map_entries_tailq *rmrr_entries;
};
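/*
 * DMAR table iterator: for each Reserved Memory Region (RMRR) entry in
 * the device's PCI segment whose device scope matches the supplied
 * device path, queue a map entry covering the region so the caller can
 * reserve and map it for the device before translation is enabled.
 */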
static int
dmar_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
{
	struct rmrr_iter_args *ria;
	ACPI_DMAR_RESERVED_MEMORY *resmem;
	ACPI_DMAR_DEVICE_SCOPE *devscope;
	struct iommu_map_entry *entry;
	char *ptr, *ptrend;
	int match;

+	if (!dmar_rmrr_enable)
+		return (1);
+
	if (dmarh->Type != ACPI_DMAR_TYPE_RESERVED_MEMORY)
		return (1);

	ria = arg;
	resmem = (ACPI_DMAR_RESERVED_MEMORY *)dmarh;
	if (resmem->Segment != ria->dev_domain)
		return (1);

	ptr = (char *)resmem + sizeof(*resmem);
	ptrend = (char *)resmem + resmem->Header.Length;
	for (;;) {
		if (ptr >= ptrend)
			break;
		devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
		ptr += devscope->Length;
		match = dmar_match_devscope(devscope, ria->dev_busno,
		    ria->dev_path, ria->dev_path_len);
		if (match == 1) {
			entry = iommu_gas_alloc_entry(DOM2IODOM(ria->domain),
			    IOMMU_PGF_WAITOK);
			entry->start = resmem->BaseAddress;
			/* The RMRR entry end address is inclusive. */
			entry->end = resmem->EndAddress;
			TAILQ_INSERT_TAIL(ria->rmrr_entries, entry,
			    dmamap_link);
		}
	}

	return (1);
}

void
dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
    int dev_busno, const void *dev_path, int dev_path_len,
    struct iommu_map_entries_tailq *rmrr_entries)
{
	struct rmrr_iter_args ria;

	ria.domain = domain;
	ria.dev_domain = dev_domain;
	ria.dev_busno = dev_busno;
	ria.dev_path = (const ACPI_DMAR_PCI_PATH *)dev_path;
	ria.dev_path_len = dev_path_len;
	ria.rmrr_entries = rmrr_entries;
	dmar_iterate_tbl(dmar_rmrr_iter, &ria);
}

struct inst_rmrr_iter_args {
	struct dmar_unit *dmar;
};

static device_t
dmar_path_dev(int segment, int path_len, int busno,
    const ACPI_DMAR_PCI_PATH *path, uint16_t *rid)
{
	device_t dev;
	int i;

	dev = NULL;
	for (i = 0; i < path_len; i++) {
		dev = pci_find_dbsf(segment, busno, path->Device,
		    path->Function);
		if (i != path_len - 1) {
			busno = pci_cfgregread(busno, path->Device,
			    path->Function, PCIR_SECBUS_1, 1);
			path++;
		}
	}
	*rid = PCI_RID(busno, path->Device, path->Function);
	return (dev);
}

static int
dmar_inst_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
{
	const ACPI_DMAR_RESERVED_MEMORY *resmem;
	const ACPI_DMAR_DEVICE_SCOPE *devscope;
	struct inst_rmrr_iter_args *iria;
	const char *ptr, *ptrend;
	device_t dev;
	struct dmar_unit *unit;
	int dev_path_len;
	uint16_t rid;

	iria = arg;

+	if (!dmar_rmrr_enable)
+		return (1);
+
	if (dmarh->Type != ACPI_DMAR_TYPE_RESERVED_MEMORY)
		return (1);

	resmem = (ACPI_DMAR_RESERVED_MEMORY *)dmarh;
	if (resmem->Segment != iria->dmar->segment)
		return (1);

	ptr = (const char *)resmem + sizeof(*resmem);
	ptrend = (const char *)resmem + resmem->Header.Length;
	for (;;) {
		if (ptr >= ptrend)
			break;
		devscope = (const ACPI_DMAR_DEVICE_SCOPE *)ptr;
		ptr += devscope->Length;
		/* XXXKIB bridge */
		if (devscope->EntryType != ACPI_DMAR_SCOPE_TYPE_ENDPOINT)
			continue;
		rid = 0;
		dev_path_len = (devscope->Length -
		    sizeof(ACPI_DMAR_DEVICE_SCOPE)) / 2;
		dev = dmar_path_dev(resmem->Segment, dev_path_len,
		    devscope->Bus,
		    (const ACPI_DMAR_PCI_PATH *)(devscope + 1), &rid);
		if (dev == NULL) {
			if (bootverbose) {
				printf("dmar%d no dev found for RMRR "
				    "[%#jx, %#jx] rid %#x scope path ",
				    iria->dmar->iommu.unit,
				    (uintmax_t)resmem->BaseAddress,
				    (uintmax_t)resmem->EndAddress, rid);
				dmar_print_path(devscope->Bus, dev_path_len,
				    (const ACPI_DMAR_PCI_PATH *)(devscope +
				    1));
				printf("\n");
			}
			unit = dmar_find_by_scope(resmem->Segment,
			    devscope->Bus,
			    (const ACPI_DMAR_PCI_PATH *)(devscope + 1),
			    dev_path_len);
			if (iria->dmar != unit)
				continue;
			dmar_get_ctx_for_devpath(iria->dmar, rid,
			    resmem->Segment, devscope->Bus,
			    (const ACPI_DMAR_PCI_PATH *)(devscope + 1),
			    dev_path_len, false, true);
		} else {
			unit = dmar_find(dev, false);
			if (iria->dmar != unit)
				continue;
			iommu_instantiate_ctx(&(iria)->dmar->iommu,
			    dev, true);
		}
	}

	return (1);
}
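/*
 * Translation for a unit is only switched on after all contexts for
 * devices with RMRR entries exist, so DMA to the firmware-reported
 * reserved regions is not cut off when the DMAR is enabled.
 */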
/*
 * Pre-create all contexts for the DMAR which have RMRR entries.
 */
int
dmar_instantiate_rmrr_ctxs(struct iommu_unit *unit)
{
	struct dmar_unit *dmar;
	struct inst_rmrr_iter_args iria;
	int error;

	dmar = IOMMU2DMAR(unit);

	if (!dmar_barrier_enter(dmar, DMAR_BARRIER_RMRR))
		return (0);

	error = 0;
	iria.dmar = dmar;
	dmar_iterate_tbl(dmar_inst_rmrr_iter, &iria);
	DMAR_LOCK(dmar);
	if (!LIST_EMPTY(&dmar->domains)) {
		KASSERT((dmar->hw_gcmd & DMAR_GCMD_TE) == 0,
	    ("dmar%d: RMRR not handled but translation is already enabled",
		    dmar->iommu.unit));
		error = dmar_disable_protected_regions(dmar);
		if (error != 0)
			printf("dmar%d: Failed to disable protected regions\n",
			    dmar->iommu.unit);
		error = dmar_enable_translation(dmar);
		if (bootverbose) {
			if (error == 0) {
				printf("dmar%d: enabled translation\n",
				    dmar->iommu.unit);
			} else {
				printf("dmar%d: enabling translation failed, "
				    "error %d\n", dmar->iommu.unit, error);
			}
		}
	}
	dmar_barrier_exit(dmar, DMAR_BARRIER_RMRR);
	return (error);
}

#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>

static void
dmar_print_domain_entry(const struct iommu_map_entry *entry)
{
	struct iommu_map_entry *l, *r;

	db_printf(
	    " start %jx end %jx first %jx last %jx free_down %jx flags %x ",
	    entry->start, entry->end, entry->first, entry->last,
	    entry->free_down, entry->flags);
	db_printf("left ");
	l = RB_LEFT(entry, rb_entry);
	if (l == NULL)
		db_printf("NULL ");
	else
		db_printf("%jx ", l->start);
	db_printf("right ");
	r = RB_RIGHT(entry, rb_entry);
	if (r == NULL)
		db_printf("NULL");
	else
		db_printf("%jx", r->start);
	db_printf("\n");
}

static void
dmar_print_ctx(struct dmar_ctx *ctx)
{

	db_printf(
	    " @%p pci%d:%d:%d refs %d flags %x loads %lu unloads %lu\n",
	    ctx, pci_get_bus(ctx->context.tag->owner),
	    pci_get_slot(ctx->context.tag->owner),
	    pci_get_function(ctx->context.tag->owner), ctx->refs,
	    ctx->context.flags, ctx->context.loads, ctx->context.unloads);
}

static void
dmar_print_domain(struct dmar_domain *domain, bool show_mappings)
{
	struct iommu_domain *iodom;
	struct iommu_map_entry *entry;
	struct dmar_ctx *ctx;

	iodom = DOM2IODOM(domain);

	db_printf(
	    " @%p dom %d mgaw %d agaw %d pglvl %d end %jx refs %d\n"
	    " ctx_cnt %d flags %x pgobj %p map_ents %u\n",
	    domain, domain->domain, domain->mgaw, domain->agaw,
	    domain->pglvl, (uintmax_t)domain->iodom.end, domain->refs,
	    domain->ctx_cnt, domain->iodom.flags, domain->pgtbl_obj,
	    domain->iodom.entries_cnt);
	if (!LIST_EMPTY(&domain->contexts)) {
		db_printf(" Contexts:\n");
		LIST_FOREACH(ctx, &domain->contexts, link)
			dmar_print_ctx(ctx);
	}
	if (!show_mappings)
		return;
	db_printf(" mapped:\n");
	RB_FOREACH(entry, iommu_gas_entries_tree, &iodom->rb_root) {
		dmar_print_domain_entry(entry);
		if (db_pager_quit)
			break;
	}
	if (db_pager_quit)
		return;
	db_printf(" unloading:\n");
	TAILQ_FOREACH(entry, &domain->iodom.unload_entries, dmamap_link) {
		dmar_print_domain_entry(entry);
		if (db_pager_quit)
			break;
	}
}
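/*
 * DDB command "show dmar_domain [/m] <domain> <bus> <device> <function>"
 * looks up the context owning the given PCI device and prints its
 * domain; the /m modifier also dumps the domain's mapped and unloading
 * entries.
 */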
DB_SHOW_COMMAND_FLAGS(dmar_domain, db_dmar_print_domain, CS_OWN)
{
	struct dmar_unit *unit;
	struct dmar_domain *domain;
	struct dmar_ctx *ctx;
	bool show_mappings, valid;
	int pci_domain, bus, device, function, i, t;
	db_expr_t radix;

	valid = false;
	radix = db_radix;
	db_radix = 10;
	t = db_read_token();
	if (t == tSLASH) {
		t = db_read_token();
		if (t != tIDENT) {
			db_printf("Bad modifier\n");
			db_radix = radix;
			db_skip_to_eol();
			return;
		}
		show_mappings = strchr(db_tok_string, 'm') != NULL;
		t = db_read_token();
	} else {
		show_mappings = false;
	}
	if (t == tNUMBER) {
		pci_domain = db_tok_number;
		t = db_read_token();
		if (t == tNUMBER) {
			bus = db_tok_number;
			t = db_read_token();
			if (t == tNUMBER) {
				device = db_tok_number;
				t = db_read_token();
				if (t == tNUMBER) {
					function = db_tok_number;
					valid = true;
				}
			}
		}
	}
	db_radix = radix;
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show dmar_domain [/m] "
		    "<domain> <bus> <device> <function>\n");
		return;
	}
	for (i = 0; i < dmar_devcnt; i++) {
		unit = device_get_softc(dmar_devs[i]);
		LIST_FOREACH(domain, &unit->domains, link) {
			LIST_FOREACH(ctx, &domain->contexts, link) {
				if (pci_domain == unit->segment &&
				    bus == pci_get_bus(ctx->context.tag->owner) &&
				    device ==
				    pci_get_slot(ctx->context.tag->owner) &&
				    function ==
				    pci_get_function(ctx->context.tag->owner)) {
					dmar_print_domain(domain,
					    show_mappings);
					goto out;
				}
			}
		}
	}
out:;
}

static void
dmar_print_one(int idx, bool show_domains, bool show_mappings)
{
	struct dmar_unit *unit;
	struct dmar_domain *domain;
	int i, frir;

	unit = device_get_softc(dmar_devs[idx]);
	db_printf("dmar%d at %p, root at 0x%jx, ver 0x%x\n", unit->iommu.unit,
	    unit, dmar_read8(unit, DMAR_RTADDR_REG),
	    dmar_read4(unit, DMAR_VER_REG));
	db_printf("cap 0x%jx ecap 0x%jx gsts 0x%x fsts 0x%x fectl 0x%x\n",
	    (uintmax_t)dmar_read8(unit, DMAR_CAP_REG),
	    (uintmax_t)dmar_read8(unit, DMAR_ECAP_REG),
	    dmar_read4(unit, DMAR_GSTS_REG),
	    dmar_read4(unit, DMAR_FSTS_REG),
	    dmar_read4(unit, DMAR_FECTL_REG));
	if (unit->ir_enabled) {
		db_printf("ir is enabled; IRT @%p phys 0x%jx maxcnt %d\n",
		    unit->irt, (uintmax_t)unit->irt_phys, unit->irte_cnt);
	}
	db_printf("fed 0x%x fea 0x%x feua 0x%x\n",
	    dmar_read4(unit, DMAR_FEDATA_REG),
	    dmar_read4(unit, DMAR_FEADDR_REG),
	    dmar_read4(unit, DMAR_FEUADDR_REG));
	db_printf("primary fault log:\n");
	for (i = 0; i < DMAR_CAP_NFR(unit->hw_cap); i++) {
		frir = (DMAR_CAP_FRO(unit->hw_cap) + i) * 16;
		db_printf(" %d at 0x%x: %jx %jx\n", i, frir,
		    (uintmax_t)dmar_read8(unit, frir),
		    (uintmax_t)dmar_read8(unit, frir + 8));
	}
	if (DMAR_HAS_QI(unit)) {
		db_printf("ied 0x%x iea 0x%x ieua 0x%x\n",
		    dmar_read4(unit, DMAR_IEDATA_REG),
		    dmar_read4(unit, DMAR_IEADDR_REG),
		    dmar_read4(unit, DMAR_IEUADDR_REG));
		if (unit->qi_enabled) {
			db_printf("qi is enabled: queue @0x%jx (IQA 0x%jx) "
			    "size 0x%jx\n"
			    " head 0x%x tail 0x%x avail 0x%x status 0x%x "
			    "ctrl 0x%x\n"
			    " hw compl 0x%x@%p/phys@%jx next seq 0x%x "
			    "gen 0x%x\n",
			    (uintmax_t)unit->inv_queue,
			    (uintmax_t)dmar_read8(unit, DMAR_IQA_REG),
			    (uintmax_t)unit->inv_queue_size,
			    dmar_read4(unit, DMAR_IQH_REG),
			    dmar_read4(unit, DMAR_IQT_REG),
			    unit->inv_queue_avail,
			    dmar_read4(unit, DMAR_ICS_REG),
			    dmar_read4(unit, DMAR_IECTL_REG),
			    unit->inv_waitd_seq_hw,
			    &unit->inv_waitd_seq_hw,
			    (uintmax_t)unit->inv_waitd_seq_hw_phys,
			    unit->inv_waitd_seq,
			    unit->inv_waitd_gen);
		} else {
			db_printf("qi is disabled\n");
		}
	}
	if (show_domains) {
		db_printf("domains:\n");
		LIST_FOREACH(domain, &unit->domains, link) {
			dmar_print_domain(domain, show_mappings);
			if (db_pager_quit)
				break;
		}
	}
}

DB_SHOW_COMMAND(dmar, db_dmar_print)
{
	bool show_domains, show_mappings;

	show_domains = strchr(modif, 'd') != NULL;
	show_mappings = strchr(modif, 'm') != NULL;
	if (!have_addr) {
		db_printf("usage: show dmar [/d] [/m] index\n");
		return;
	}
	dmar_print_one((int)addr, show_domains, show_mappings);
}

DB_SHOW_ALL_COMMAND(dmars, db_show_all_dmars)
{
	int i;
	bool show_domains, show_mappings;

	show_domains = strchr(modif, 'd') != NULL;
	show_mappings = strchr(modif, 'm') != NULL;

	for (i = 0; i < dmar_devcnt; i++) {
		dmar_print_one(i, show_domains, show_mappings);
		if (db_pager_quit)
			break;
	}
}
#endif

struct iommu_unit *
iommu_find(device_t dev, bool verbose)
{
	struct dmar_unit *dmar;

	dmar = dmar_find(dev, verbose);
	return (&dmar->iommu);
}