Index: head/sys/dev/iommu/iommu.h
===================================================================
--- head/sys/dev/iommu/iommu.h
+++ head/sys/dev/iommu/iommu.h
@@ -100,6 +100,13 @@
 	uint32_t buswide_ctxs[(PCI_BUSMAX + 1) / NBBY / sizeof(uint32_t)];
 };
 
+struct iommu_domain_map_ops {
+	int (*map)(struct iommu_domain *domain, iommu_gaddr_t base,
+	    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
+	int (*unmap)(struct iommu_domain *domain, iommu_gaddr_t base,
+	    iommu_gaddr_t size, int flags);
+};
+
 /*
  * Locking annotations:
  * (u) - Protected by iommu unit lock
@@ -109,6 +116,7 @@
 
 struct iommu_domain {
 	struct iommu_unit *iommu;	/* (c) */
+	const struct iommu_domain_map_ops *ops;
 	struct mtx lock;		/* (c) */
 	struct task unload_task;	/* (c) */
 	u_int entries_cnt;		/* (d) */
Index: head/sys/dev/iommu/iommu_gas.c
===================================================================
--- head/sys/dev/iommu/iommu_gas.c
+++ head/sys/dev/iommu/iommu_gas.c
@@ -66,10 +66,7 @@
 #include
 #include
 #if defined(__amd64__) || defined(__i386__)
-#include
-#include
 #include
-#include
 #endif
 #include
@@ -620,9 +617,9 @@
 	entry->flags |= eflags;
 	IOMMU_DOMAIN_UNLOCK(domain);
 
-	error = domain_map_buf(domain, entry->start, entry->end - entry->start,
-	    ma, eflags,
-	    ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
+	error = domain->ops->map(domain, entry->start,
+	    entry->end - entry->start, ma, eflags,
+	    ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
 	if (error == ENOMEM) {
 		iommu_domain_unload_entry(entry, true);
 		return (error);
@@ -658,9 +655,9 @@
 	if (entry->end == entry->start)
 		return (0);
 
-	error = domain_map_buf(domain, entry->start, entry->end - entry->start,
-	    ma + OFF_TO_IDX(start - entry->start), eflags,
-	    ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
+	error = domain->ops->map(domain, entry->start,
+	    entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start),
+	    eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
 	if (error == ENOMEM) {
 		iommu_domain_unload_entry(entry, false);
 		return (error);
Index: head/sys/x86/iommu/intel_ctx.c
===================================================================
--- head/sys/x86/iommu/intel_ctx.c
+++ head/sys/x86/iommu/intel_ctx.c
@@ -341,6 +341,7 @@
 	mtx_init(&domain->iodom.lock, "dmardom", NULL, MTX_DEF);
 	domain->dmar = dmar;
 	domain->iodom.iommu = &dmar->iommu;
+	domain_pgtbl_init(domain);
 
 	/*
 	 * For now, use the maximal usable physical address of the
@@ -842,15 +843,17 @@
     struct iommu_map_entries_tailq *entries, bool cansleep)
 {
 	struct dmar_unit *unit;
+	struct iommu_domain *iodom;
 	struct iommu_map_entry *entry, *entry1;
 	int error;
 
+	iodom = (struct iommu_domain *)domain;
 	unit = (struct dmar_unit *)domain->iodom.iommu;
 	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
 		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
 		    ("not mapped entry %p %p", domain, entry));
-		error = domain_unmap_buf(domain, entry->start, entry->end -
+		error = iodom->ops->unmap(iodom, entry->start, entry->end -
 		    entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
 		KASSERT(error == 0, ("unmap %p error %d", domain, error));
 		if (!unit->qi_enabled) {
Index: head/sys/x86/iommu/intel_dmar.h
===================================================================
--- head/sys/x86/iommu/intel_dmar.h
+++ head/sys/x86/iommu/intel_dmar.h
@@ -244,14 +244,11 @@
 vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
     iommu_gaddr_t maxaddr);
 void put_idmap_pgtbl(vm_object_t obj);
-int domain_map_buf(struct iommu_domain *domain, iommu_gaddr_t base,
-    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
-int domain_unmap_buf(struct dmar_domain *domain, iommu_gaddr_t base,
-    iommu_gaddr_t size, int flags);
 void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
     iommu_gaddr_t size);
 int domain_alloc_pgtbl(struct dmar_domain *domain);
 void domain_free_pgtbl(struct dmar_domain *domain);
+void domain_pgtbl_init(struct dmar_domain *domain);
 
 int dmar_dev_depth(device_t child);
 void dmar_dev_path(device_t child, int *busno, void *path1, int depth);
Index: head/sys/x86/iommu/intel_idpgtbl.c
===================================================================
--- head/sys/x86/iommu/intel_idpgtbl.c
+++ head/sys/x86/iommu/intel_idpgtbl.c
@@ -498,7 +498,7 @@
 	return (0);
 }
 
-int
+static int
 domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
     iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
 {
@@ -684,12 +684,15 @@
 	return (0);
 }
 
-int
-domain_unmap_buf(struct dmar_domain *domain, iommu_gaddr_t base,
+static int
+domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
     iommu_gaddr_t size, int flags)
 {
+	struct dmar_domain *domain;
 	int error;
 
+	domain = (struct dmar_domain *)iodom;
+
 	DMAR_DOMAIN_PGLOCK(domain);
 	error = domain_unmap_buf_locked(domain, base, size, flags);
 	DMAR_DOMAIN_PGUNLOCK(domain);
@@ -808,4 +811,18 @@
 		}
 	}
 	DMAR_UNLOCK(unit);
+}
+
+static const struct iommu_domain_map_ops dmar_domain_map_ops = {
+	.map = domain_map_buf,
+	.unmap = domain_unmap_buf,
+};
+
+void
+domain_pgtbl_init(struct dmar_domain *domain)
+{
+	struct iommu_domain *iodom;
+
+	iodom = (struct iommu_domain *)domain;
+	iodom->ops = &dmar_domain_map_ops;
 }
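
The change replaces direct calls to the DMAR-specific domain_map_buf()/domain_unmap_buf() in the machine-independent GAS code with indirect calls through a per-domain iommu_domain_map_ops table, installed by the backend at domain creation time (domain_pgtbl_init() from intel_ctx.c). The sketch below shows how another IOMMU backend could plug into the same interface. It is a minimal illustration only, not part of this change: the foo_* names are hypothetical, the comment bodies stand in for real page-table code, and additional kernel headers would be needed in a real driver. Only struct iommu_domain_map_ops and the iommu_domain ops pointer come from this diff.

/* Hypothetical backend hooking into the new map-ops interface. */
#include <sys/param.h>

#include <dev/iommu/iommu.h>	/* plus the sys/ and vm/ headers it expects */

struct foo_domain {
	struct iommu_domain iodom;	/* must be first for the casts below */
	/* backend-specific page-table state would follow */
};

static int
foo_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
{
	struct foo_domain *domain;

	domain = (struct foo_domain *)iodom;
	/* Install translations for [base, base + size) using ma[]. */
	(void)domain; (void)base; (void)size; (void)ma; (void)pflags; (void)flags;
	return (0);
}

static int
foo_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	struct foo_domain *domain;

	domain = (struct foo_domain *)iodom;
	/* Tear down translations for [base, base + size). */
	(void)domain; (void)base; (void)size; (void)flags;
	return (0);
}

static const struct iommu_domain_map_ops foo_domain_map_ops = {
	.map = foo_map_buf,
	.unmap = foo_unmap_buf,
};

/* Called from the backend's domain constructor, mirroring
   domain_pgtbl_init() in intel_idpgtbl.c. */
static void
foo_domain_pgtbl_init(struct foo_domain *domain)
{

	domain->iodom.ops = &foo_domain_map_ops;
}

With the ops table in place, iommu_gas.c no longer needs the x86-only prototypes from intel_dmar.h, so domain_map_buf()/domain_unmap_buf() can become static to intel_idpgtbl.c and the DMAR page-table details stay machine-dependent.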