Index: sys/dev/iommu/iommu.h
===================================================================
--- sys/dev/iommu/iommu.h
+++ sys/dev/iommu/iommu.h
@@ -199,6 +199,8 @@
     struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
 int iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
     iommu_gaddr_t end, struct iommu_map_entry **entry0);
+int iommu_gas_reserve_region_extend(struct iommu_domain *domain,
+    iommu_gaddr_t start, iommu_gaddr_t end);
 void iommu_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
 bool iommu_is_buswide_ctx(struct iommu_unit *unit, u_int busno);
 
Index: sys/dev/iommu/iommu_gas.c
===================================================================
--- sys/dev/iommu/iommu_gas.c
+++ sys/dev/iommu/iommu_gas.c
@@ -699,6 +699,44 @@
 	return (error);
 }
 
+/*
+ * As in iommu_gas_reserve_region, reserve [start, end), but allow for existing
+ * entries.
+ */
+int
+iommu_gas_reserve_region_extend(struct iommu_domain *domain,
+    iommu_gaddr_t start, iommu_gaddr_t end)
+{
+	struct iommu_map_entry *next, *prev, key = {};
+	iommu_gaddr_t entry_start, entry_end;
+	int error;
+
+	end = ummin(end, domain->end);
+	while (start < end) {
+		key.start = key.end = start;
+		IOMMU_DOMAIN_LOCK(domain);
+		next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &key);
+		KASSERT(next != NULL, ("domain %p with end %#jx has no entry "
+		    "after %#jx", domain, (uintmax_t)domain->end,
+		    (uintmax_t)start));
+		entry_end = ummin(end, next->start);
+		prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
+		if (prev != NULL)
+			entry_start = ummax(start, prev->end);
+		else
+			entry_start = start;
+		IOMMU_DOMAIN_UNLOCK(domain);
+		if (entry_start != entry_end) {
+			error = iommu_gas_reserve_region(domain, entry_start,
+			    entry_end, NULL);
+			if (error != 0)
+				return (error);
+		}
+		start = next->end;
+	}
+	return (0);
+}
+
 struct iommu_map_entry *
 iommu_map_alloc_entry(struct iommu_domain *domain, u_int flags)
 {
Index: sys/x86/iommu/intel_ctx.c
===================================================================
--- sys/x86/iommu/intel_ctx.c
+++ sys/x86/iommu/intel_ctx.c
@@ -317,6 +317,58 @@
 	return (error);
 }
 
+static int
+dmar_reserve_regions(struct dmar_domain *domain, device_t dev)
+{
+	struct iommu_domain *iodom;
+	device_t root;
+	uint32_t val;
+	uint64_t base, limit;
+	int error;
+
+	iodom = DOM2IODOM(domain);
+
+	root = pci_find_pcie_root_port(dev);
+	if (root == NULL)
+		return (EINVAL);
+
+	/* Disable downstream memory */
+	base = PCI_PPBMEMBASE(0, pci_read_config(root, PCIR_MEMBASE_1, 2));
+	limit = PCI_PPBMEMLIMIT(0, pci_read_config(root, PCIR_MEMLIMIT_1, 2));
+	error = iommu_gas_reserve_region_extend(iodom, base, limit + 1);
+	if (bootverbose || error != 0)
+		device_printf(dev, "DMAR reserve [%#jx-%#jx] (error %d)\n",
+		    base, limit + 1, error);
+	if (error != 0)
+		return (error);
+
+	/* Disable downstream prefetchable memory */
+	val = pci_read_config(root, PCIR_PMBASEL_1, 2);
+	if (val != 0 || pci_read_config(root, PCIR_PMLIMITL_1, 2) != 0) {
+		if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) {
+			base = PCI_PPBMEMBASE(
+			    pci_read_config(root, PCIR_PMBASEH_1, 4),
+			    val);
+			limit = PCI_PPBMEMLIMIT(
+			    pci_read_config(root, PCIR_PMLIMITH_1, 4),
+			    pci_read_config(root, PCIR_PMLIMITL_1, 2));
+		} else {
+			base = PCI_PPBMEMBASE(0, val);
+			limit = PCI_PPBMEMLIMIT(0,
+			    pci_read_config(root, PCIR_PMLIMITL_1, 2));
+		}
+		error = iommu_gas_reserve_region_extend(iodom, base,
+		    limit + 1);
+		if (bootverbose || error != 0)
+			device_printf(dev, "DMAR reserve [%#jx-%#jx] "
+			    "(error %d)\n", base, limit + 1, error);
+		if (error != 0)
+			return (error);
+	}
+
+	return (error);
+}
+
 static struct dmar_domain *
 dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
 {
@@ -502,6 +554,8 @@
 		error = domain_init_rmrr(domain1, dev, bus, slot, func,
 		    dev_domain, dev_busno, dev_path, dev_path_len);
+		if (error == 0)
+			error = dmar_reserve_regions(domain1, dev);
 		if (error != 0) {
 			dmar_domain_destroy(domain1);
 			TD_PINNED_ASSERT;
 			return (NULL);