D25574.id74169.diff
Index: sys/x86/iommu/busdma_dmar.h
===================================================================
--- sys/x86/iommu/busdma_dmar.h
+++ sys/x86/iommu/busdma_dmar.h
@@ -34,33 +34,49 @@
#ifndef __X86_IOMMU_BUSDMA_DMAR_H
#define __X86_IOMMU_BUSDMA_DMAR_H
-struct dmar_map_entry;
-TAILQ_HEAD(dmar_map_entries_tailq, dmar_map_entry);
+struct iommu_map_entry;
+TAILQ_HEAD(iommu_map_entries_tailq, iommu_map_entry);
-struct bus_dma_tag_dmar {
+struct iommu_unit {
+ struct mtx lock;
+ int unit;
+
+ int dma_enabled;
+
+ /* Busdma delayed map load */
+ struct task dmamap_load_task;
+ TAILQ_HEAD(, bus_dmamap_iommu) delayed_maps;
+ struct taskqueue *delayed_taskqueue;
+};
+
+struct bus_dma_tag_iommu {
struct bus_dma_tag_common common;
- struct dmar_ctx *ctx;
+ struct iommu_device *ctx;
device_t owner;
int map_count;
bus_dma_segment_t *segments;
};
-struct bus_dmamap_dmar {
- struct bus_dma_tag_dmar *tag;
+struct bus_dmamap_iommu {
+ struct bus_dma_tag_iommu *tag;
struct memdesc mem;
bus_dmamap_callback_t *callback;
void *callback_arg;
- struct dmar_map_entries_tailq map_entries;
- TAILQ_ENTRY(bus_dmamap_dmar) delay_link;
+ struct iommu_map_entries_tailq map_entries;
+ TAILQ_ENTRY(bus_dmamap_iommu) delay_link;
bool locked;
bool cansleep;
int flags;
};
-#define BUS_DMAMAP_DMAR_MALLOC 0x0001
-#define BUS_DMAMAP_DMAR_KMEM_ALLOC 0x0002
+#define BUS_DMAMAP_IOMMU_MALLOC 0x0001
+#define BUS_DMAMAP_IOMMU_KMEM_ALLOC 0x0002
+
+#define IOMMU_LOCK(unit) mtx_lock(&(unit)->lock)
+#define IOMMU_UNLOCK(unit) mtx_unlock(&(unit)->lock)
+#define IOMMU_ASSERT_LOCKED(unit) mtx_assert(&(unit)->lock, MA_OWNED)
-extern struct bus_dma_impl bus_dma_dmar_impl;
+extern struct bus_dma_impl bus_dma_iommu_impl;
bus_dma_tag_t acpi_iommu_get_dma_tag(device_t dev, device_t child);
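The renamed header leans on a container/embedding layout: struct dmar_unit (in intel_dmar.h below) now holds a struct iommu_unit as its first member, which is why the .c files can cast freely between the generic and the DMAR-specific view, e.g. dmar = (struct dmar_unit *)domain->iommu. A minimal standalone sketch of that pattern, using hypothetical names (generic_unit, hw_unit) rather than the patch's own:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Generic layer: all that the busdma code needs to see. */
struct generic_unit {
        int unit;               /* unit number */
        int dma_enabled;        /* tunable, like iommu_unit.dma_enabled */
};

/* The hardware driver embeds the generic view as its FIRST member,
 * so a pointer to either struct refers to the same address. */
struct hw_unit {
        struct generic_unit iommu;      /* must stay first */
        int reg_base;                   /* driver-private state */
};

/* Downcast, valid only because of the first-member layout. */
static struct hw_unit *
to_hw(struct generic_unit *u)
{
        assert(offsetof(struct hw_unit, iommu) == 0);
        return ((struct hw_unit *)u);
}

int
main(void)
{
        struct hw_unit hw = {
                .iommu = { .unit = 0, .dma_enabled = 1 },
                .reg_base = 0x1000,
        };
        struct generic_unit *u = &hw.iommu;     /* upcast is field access */

        printf("dmar%d base %#x\n", u->unit, to_hw(u)->reg_base);
        return (0);
}

The plain casts throughout this patch depend on that layout; moving the embedded iommu member off offset zero would silently break every (struct dmar_unit *) downcast.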
Index: sys/x86/iommu/busdma_dmar.c
===================================================================
--- sys/x86/iommu/busdma_dmar.c
+++ sys/x86/iommu/busdma_dmar.c
@@ -74,7 +74,7 @@
*/
static bool
-dmar_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
+iommu_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
{
char str[128], *env;
int default_bounce;
@@ -117,7 +117,7 @@
* bounce mapping.
*/
device_t
-dmar_get_requester(device_t dev, uint16_t *rid)
+iommu_get_requester(device_t dev, uint16_t *rid)
{
devclass_t pci_class;
device_t l, pci, pcib, pcip, pcibp, requester;
@@ -137,15 +137,15 @@
*/
for (;;) {
pci = device_get_parent(l);
- KASSERT(pci != NULL, ("dmar_get_requester(%s): NULL parent "
+ KASSERT(pci != NULL, ("iommu_get_requester(%s): NULL parent "
"for %s", device_get_name(dev), device_get_name(l)));
KASSERT(device_get_devclass(pci) == pci_class,
- ("dmar_get_requester(%s): non-pci parent %s for %s",
+ ("iommu_get_requester(%s): non-pci parent %s for %s",
device_get_name(dev), device_get_name(pci),
device_get_name(l)));
pcib = device_get_parent(pci);
- KASSERT(pcib != NULL, ("dmar_get_requester(%s): NULL bridge "
+ KASSERT(pcib != NULL, ("iommu_get_requester(%s): NULL bridge "
"for %s", device_get_name(dev), device_get_name(pci)));
/*
@@ -228,15 +228,15 @@
return (requester);
}
-struct dmar_ctx *
-dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr)
+struct iommu_device *
+iommu_instantiate_device(struct iommu_unit *unit, device_t dev, bool rmrr)
{
device_t requester;
- struct dmar_ctx *ctx;
+ struct iommu_device *ctx;
bool disabled;
uint16_t rid;
- requester = dmar_get_requester(dev, &rid);
+ requester = iommu_get_requester(dev, &rid);
/*
* If the user requested the IOMMU disabled for the device, we
@@ -245,10 +245,10 @@
* Instead provide the identity mapping for the device
* context.
*/
- disabled = dmar_bus_dma_is_dev_disabled(pci_get_domain(requester),
+ disabled = iommu_bus_dma_is_dev_disabled(pci_get_domain(requester),
pci_get_bus(requester), pci_get_slot(requester),
pci_get_function(requester));
- ctx = dmar_get_ctx_for_dev(dmar, requester, rid, disabled, rmrr);
+ ctx = dmar_get_ctx_for_dev(unit, requester, rid, disabled, rmrr);
if (ctx == NULL)
return (NULL);
if (disabled) {
@@ -256,12 +256,12 @@
* Keep the first reference on context, release the
* later refs.
*/
- DMAR_LOCK(dmar);
+ IOMMU_LOCK(unit);
if ((ctx->flags & DMAR_CTX_DISABLED) == 0) {
ctx->flags |= DMAR_CTX_DISABLED;
- DMAR_UNLOCK(dmar);
+ IOMMU_UNLOCK(unit);
} else {
- dmar_free_ctx_locked(dmar, ctx);
+ dmar_free_ctx_locked(unit, ctx);
}
ctx = NULL;
}
@@ -271,36 +271,36 @@
bus_dma_tag_t
acpi_iommu_get_dma_tag(device_t dev, device_t child)
{
- struct dmar_unit *dmar;
- struct dmar_ctx *ctx;
+ struct iommu_unit *unit;
+ struct iommu_device *ctx;
bus_dma_tag_t res;
- dmar = dmar_find(child, bootverbose);
+ unit = dmar_find(child, bootverbose);
/* Not in scope of any DMAR ? */
- if (dmar == NULL)
+ if (unit == NULL)
return (NULL);
- if (!dmar->dma_enabled)
+ if (!unit->dma_enabled)
return (NULL);
- dmar_quirks_pre_use(dmar);
- dmar_instantiate_rmrr_ctxs(dmar);
+ dmar_quirks_pre_use(unit);
+ dmar_instantiate_rmrr_ctxs(unit);
- ctx = dmar_instantiate_ctx(dmar, child, false);
- res = ctx == NULL ? NULL : (bus_dma_tag_t)&ctx->ctx_tag;
+ ctx = iommu_instantiate_device(unit, child, false);
+ res = ctx == NULL ? NULL : (bus_dma_tag_t)&ctx->device_tag;
return (res);
}
bool
bus_dma_dmar_set_buswide(device_t dev)
{
- struct dmar_unit *dmar;
+ struct iommu_unit *unit;
device_t parent;
u_int busno, slot, func;
parent = device_get_parent(dev);
if (device_get_devclass(parent) != devclass_find("pci"))
return (false);
- dmar = dmar_find(dev, bootverbose);
- if (dmar == NULL)
+ unit = dmar_find(dev, bootverbose);
+ if (unit == NULL)
return (false);
busno = pci_get_bus(dev);
slot = pci_get_slot(dev);
@@ -309,40 +309,40 @@
if (bootverbose) {
device_printf(dev,
"dmar%d pci%d:%d:%d requested buswide busdma\n",
- dmar->unit, busno, slot, func);
+ unit->unit, busno, slot, func);
}
return (false);
}
- dmar_set_buswide_ctx(dmar, busno);
+ dmar_set_buswide_ctx(unit, busno);
return (true);
}
static MALLOC_DEFINE(M_DMAR_DMAMAP, "dmar_dmamap", "Intel DMAR DMA Map");
-static void dmar_bus_schedule_dmamap(struct dmar_unit *unit,
- struct bus_dmamap_dmar *map);
+static void iommu_bus_schedule_dmamap(struct iommu_unit *unit,
+ struct bus_dmamap_iommu *map);
static int
-dmar_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+iommu_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
void *lockfuncarg, bus_dma_tag_t *dmat)
{
- struct bus_dma_tag_dmar *newtag, *oldtag;
+ struct bus_dma_tag_iommu *newtag, *oldtag;
int error;
*dmat = NULL;
error = common_bus_dma_tag_create(parent != NULL ?
- &((struct bus_dma_tag_dmar *)parent)->common : NULL, alignment,
+ &((struct bus_dma_tag_iommu *)parent)->common : NULL, alignment,
boundary, lowaddr, highaddr, filter, filterarg, maxsize,
nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
- sizeof(struct bus_dma_tag_dmar), (void **)&newtag);
+ sizeof(struct bus_dma_tag_iommu), (void **)&newtag);
if (error != 0)
goto out;
- oldtag = (struct bus_dma_tag_dmar *)parent;
- newtag->common.impl = &bus_dma_dmar_impl;
+ oldtag = (struct bus_dma_tag_iommu *)parent;
+ newtag->common.impl = &bus_dma_iommu_impl;
newtag->ctx = oldtag->ctx;
newtag->owner = oldtag->owner;
@@ -355,20 +355,20 @@
}
static int
-dmar_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
+iommu_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
{
return (0);
}
static int
-dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
+iommu_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
{
- struct bus_dma_tag_dmar *dmat, *dmat_copy, *parent;
+ struct bus_dma_tag_iommu *dmat, *dmat_copy, *parent;
int error;
error = 0;
- dmat_copy = dmat = (struct bus_dma_tag_dmar *)dmat1;
+ dmat_copy = dmat = (struct bus_dma_tag_iommu *)dmat1;
if (dmat != NULL) {
if (dmat->map_count != 0) {
@@ -376,10 +376,10 @@
goto out;
}
while (dmat != NULL) {
- parent = (struct bus_dma_tag_dmar *)dmat->common.parent;
+ parent = (struct bus_dma_tag_iommu *)dmat->common.parent;
if (atomic_fetchadd_int(&dmat->common.ref_count, -1) ==
1) {
- if (dmat == &dmat->ctx->ctx_tag)
+ if (dmat == &dmat->ctx->device_tag)
dmar_free_ctx(dmat->ctx);
free_domain(dmat->segments, M_DMAR_DMAMAP);
free(dmat, M_DEVBUF);
@@ -394,19 +394,19 @@
}
static bool
-dmar_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+iommu_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
{
return (false);
}
static int
-dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+iommu_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
- tag = (struct bus_dma_tag_dmar *)dmat;
+ tag = (struct bus_dma_tag_iommu *)dmat;
map = malloc_domainset(sizeof(*map), M_DMAR_DMAMAP,
DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO);
if (map == NULL) {
@@ -434,22 +434,22 @@
}
static int
-dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
+iommu_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- struct dmar_domain *domain;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ struct iommu_domain *domain;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
if (map != NULL) {
domain = tag->ctx->domain;
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
if (!TAILQ_EMPTY(&map->map_entries)) {
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
return (EBUSY);
}
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
free_domain(map, M_DMAR_DMAMAP);
}
tag->map_count--;
@@ -458,15 +458,15 @@
static int
-dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+iommu_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
bus_dmamap_t *mapp)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
int error, mflags;
vm_memattr_t attr;
- error = dmar_bus_dmamap_create(dmat, flags, mapp);
+ error = iommu_bus_dmamap_create(dmat, flags, mapp);
if (error != 0)
return (error);
@@ -475,23 +475,23 @@
attr = (flags & BUS_DMA_NOCACHE) != 0 ? VM_MEMATTR_UNCACHEABLE :
VM_MEMATTR_DEFAULT;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)*mapp;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)*mapp;
if (tag->common.maxsize < PAGE_SIZE &&
tag->common.alignment <= tag->common.maxsize &&
attr == VM_MEMATTR_DEFAULT) {
*vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF,
DOMAINSET_PREF(tag->common.domain), mflags);
- map->flags |= BUS_DMAMAP_DMAR_MALLOC;
+ map->flags |= BUS_DMAMAP_IOMMU_MALLOC;
} else {
*vaddr = (void *)kmem_alloc_attr_domainset(
DOMAINSET_PREF(tag->common.domain), tag->common.maxsize,
mflags, 0ul, BUS_SPACE_MAXADDR, attr);
- map->flags |= BUS_DMAMAP_DMAR_KMEM_ALLOC;
+ map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC;
}
if (*vaddr == NULL) {
- dmar_bus_dmamap_destroy(dmat, *mapp);
+ iommu_bus_dmamap_destroy(dmat, *mapp);
*mapp = NULL;
return (ENOMEM);
}
@@ -499,36 +499,36 @@
}
static void
-dmar_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
+iommu_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
- if ((map->flags & BUS_DMAMAP_DMAR_MALLOC) != 0) {
+ if ((map->flags & BUS_DMAMAP_IOMMU_MALLOC) != 0) {
free_domain(vaddr, M_DEVBUF);
- map->flags &= ~BUS_DMAMAP_DMAR_MALLOC;
+ map->flags &= ~BUS_DMAMAP_IOMMU_MALLOC;
} else {
- KASSERT((map->flags & BUS_DMAMAP_DMAR_KMEM_ALLOC) != 0,
- ("dmar_bus_dmamem_free for non alloced map %p", map));
+ KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0,
+ ("iommu_bus_dmamem_free for non alloced map %p", map));
kmem_free((vm_offset_t)vaddr, tag->common.maxsize);
- map->flags &= ~BUS_DMAMAP_DMAR_KMEM_ALLOC;
+ map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC;
}
- dmar_bus_dmamap_destroy(dmat, map1);
+ iommu_bus_dmamap_destroy(dmat, map1);
}
static int
-dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
- struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen,
+iommu_bus_dmamap_load_something1(struct bus_dma_tag_iommu *tag,
+ struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen,
int flags, bus_dma_segment_t *segs, int *segp,
- struct dmar_map_entries_tailq *unroll_list)
+ struct iommu_map_entries_tailq *unroll_list)
{
- struct dmar_ctx *ctx;
- struct dmar_domain *domain;
- struct dmar_map_entry *entry;
+ struct iommu_device *ctx;
+ struct iommu_domain *domain;
+ struct iommu_map_entry *entry;
dmar_gaddr_t size;
bus_size_t buflen1;
int error, idx, gas_flags, seg;
@@ -555,17 +555,17 @@
* (Too) optimistically allow split if there is more
* than one segment left.
*/
- gas_flags = map->cansleep ? DMAR_GM_CANWAIT : 0;
+ gas_flags = map->cansleep ? IOMMU_MF_CANWAIT : 0;
if (seg + 1 < tag->common.nsegments)
- gas_flags |= DMAR_GM_CANSPLIT;
+ gas_flags |= IOMMU_MF_CANSPLIT;
error = dmar_gas_map(domain, &tag->common, size, offset,
- DMAR_MAP_ENTRY_READ |
- ((flags & BUS_DMA_NOWRITE) == 0 ? DMAR_MAP_ENTRY_WRITE : 0),
+ IOMMU_MAP_ENTRY_READ |
+ ((flags & BUS_DMA_NOWRITE) == 0 ? IOMMU_MAP_ENTRY_WRITE : 0),
gas_flags, ma + idx, &entry);
if (error != 0)
break;
- if ((gas_flags & DMAR_GM_CANSPLIT) != 0) {
+ if ((gas_flags & IOMMU_MF_CANSPLIT) != 0) {
KASSERT(size >= entry->end - entry->start,
("split increased entry size %jx %jx %jx",
(uintmax_t)size, (uintmax_t)entry->start,
@@ -596,7 +596,7 @@
(uintmax_t)entry->start, (uintmax_t)entry->end,
(uintmax_t)tag->common.lowaddr,
(uintmax_t)tag->common.highaddr));
- KASSERT(dmar_test_boundary(entry->start + offset, buflen1,
+ KASSERT(iommu_test_boundary(entry->start + offset, buflen1,
tag->common.boundary),
("boundary failed: ctx %p start 0x%jx end 0x%jx "
"boundary 0x%jx", ctx, (uintmax_t)entry->start,
@@ -607,10 +607,10 @@
(uintmax_t)entry->start, (uintmax_t)entry->end,
(uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz));
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
- entry->flags |= DMAR_MAP_ENTRY_MAP;
- DMAR_DOMAIN_UNLOCK(domain);
+ entry->flags |= IOMMU_MAP_ENTRY_MAP;
+ IOMMU_DOMAIN_UNLOCK(domain);
TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link);
segs[seg].ds_addr = entry->start + offset;
@@ -627,14 +627,14 @@
}
static int
-dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
- struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen,
+iommu_bus_dmamap_load_something(struct bus_dma_tag_iommu *tag,
+ struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen,
int flags, bus_dma_segment_t *segs, int *segp)
{
- struct dmar_ctx *ctx;
- struct dmar_domain *domain;
- struct dmar_map_entry *entry, *entry1;
- struct dmar_map_entries_tailq unroll_list;
+ struct iommu_device *ctx;
+ struct iommu_domain *domain;
+ struct iommu_map_entry *entry, *entry1;
+ struct iommu_map_entries_tailq unroll_list;
int error;
ctx = tag->ctx;
@@ -642,7 +642,7 @@
atomic_add_long(&ctx->loads, 1);
TAILQ_INIT(&unroll_list);
- error = dmar_bus_dmamap_load_something1(tag, map, ma, offset,
+ error = iommu_bus_dmamap_load_something1(tag, map, ma, offset,
buflen, flags, segs, segp, &unroll_list);
if (error != 0) {
/*
@@ -650,7 +650,7 @@
* partial buffer load, so unfortunately we have to
* revert all work done.
*/
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link,
entry1) {
/*
@@ -664,8 +664,8 @@
TAILQ_INSERT_TAIL(&domain->unload_entries, entry,
dmamap_link);
}
- DMAR_DOMAIN_UNLOCK(domain);
- taskqueue_enqueue(domain->dmar->delayed_taskqueue,
+ IOMMU_DOMAIN_UNLOCK(domain);
+ taskqueue_enqueue(domain->iommu->delayed_taskqueue,
&domain->unload_task);
}
@@ -673,37 +673,37 @@
!map->cansleep)
error = EINPROGRESS;
if (error == EINPROGRESS)
- dmar_bus_schedule_dmamap(domain->dmar, map);
+ iommu_bus_schedule_dmamap(domain->iommu, map);
return (error);
}
static int
-dmar_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1,
+iommu_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1,
struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
bus_dma_segment_t *segs, int *segp)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
- return (dmar_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen,
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
+ return (iommu_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen,
flags, segs, segp));
}
static int
-dmar_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1,
+iommu_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1,
vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
int *segp)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
vm_page_t *ma, fma;
vm_paddr_t pstart, pend, paddr;
int error, i, ma_cnt, mflags, offset;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
pstart = trunc_page(buf);
pend = round_page(buf + buflen);
offset = buf & PAGE_MASK;
@@ -735,7 +735,7 @@
ma[i] = &fma[i];
}
}
- error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,
+ error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen,
flags, segs, segp);
free(fma, M_DEVBUF);
free(ma, M_DEVBUF);
@@ -743,18 +743,18 @@
}
static int
-dmar_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf,
+iommu_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf,
bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
int *segp)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
vm_page_t *ma, fma;
vm_paddr_t pstart, pend, paddr;
int error, i, ma_cnt, mflags, offset;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
pstart = trunc_page((vm_offset_t)buf);
pend = round_page((vm_offset_t)buf + buflen);
offset = (vm_offset_t)buf & PAGE_MASK;
@@ -788,7 +788,7 @@
ma[i] = &fma[i];
}
}
- error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,
+ error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen,
flags, segs, segp);
free(ma, M_DEVBUF);
free(fma, M_DEVBUF);
@@ -796,29 +796,29 @@
}
static void
-dmar_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1,
+iommu_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1,
struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
- struct bus_dmamap_dmar *map;
+ struct bus_dmamap_iommu *map;
if (map1 == NULL)
return;
- map = (struct bus_dmamap_dmar *)map1;
+ map = (struct bus_dmamap_iommu *)map1;
map->mem = *mem;
- map->tag = (struct bus_dma_tag_dmar *)dmat;
+ map->tag = (struct bus_dma_tag_iommu *)dmat;
map->callback = callback;
map->callback_arg = callback_arg;
}
static bus_dma_segment_t *
-dmar_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1,
+iommu_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1,
bus_dma_segment_t *segs, int nsegs, int error)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
if (!map->locked) {
KASSERT(map->cansleep,
@@ -848,76 +848,76 @@
* On amd64, we assume that sf allocation cannot fail.
*/
static void
-dmar_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
+iommu_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- struct dmar_ctx *ctx;
- struct dmar_domain *domain;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ struct iommu_device *ctx;
+ struct iommu_domain *domain;
#if defined(__amd64__)
- struct dmar_map_entries_tailq entries;
+ struct iommu_map_entries_tailq entries;
#endif
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
ctx = tag->ctx;
domain = ctx->domain;
atomic_add_long(&ctx->unloads, 1);
#if defined(__i386__)
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
TAILQ_CONCAT(&domain->unload_entries, &map->map_entries, dmamap_link);
- DMAR_DOMAIN_UNLOCK(domain);
- taskqueue_enqueue(domain->dmar->delayed_taskqueue,
+ IOMMU_DOMAIN_UNLOCK(domain);
+ taskqueue_enqueue(domain->iommu->delayed_taskqueue,
&domain->unload_task);
#else /* defined(__amd64__) */
TAILQ_INIT(&entries);
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
THREAD_NO_SLEEPING();
- dmar_domain_unload(domain, &entries, false);
+ iommu_domain_unload(domain, &entries, false);
THREAD_SLEEPING_OK();
- KASSERT(TAILQ_EMPTY(&entries), ("lazy dmar_ctx_unload %p", ctx));
+ KASSERT(TAILQ_EMPTY(&entries), ("lazy iommu_device_unload %p", ctx));
#endif
}
static void
-dmar_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
+iommu_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dmasync_op_t op)
{
}
-struct bus_dma_impl bus_dma_dmar_impl = {
- .tag_create = dmar_bus_dma_tag_create,
- .tag_destroy = dmar_bus_dma_tag_destroy,
- .tag_set_domain = dmar_bus_dma_tag_set_domain,
- .id_mapped = dmar_bus_dma_id_mapped,
- .map_create = dmar_bus_dmamap_create,
- .map_destroy = dmar_bus_dmamap_destroy,
- .mem_alloc = dmar_bus_dmamem_alloc,
- .mem_free = dmar_bus_dmamem_free,
- .load_phys = dmar_bus_dmamap_load_phys,
- .load_buffer = dmar_bus_dmamap_load_buffer,
- .load_ma = dmar_bus_dmamap_load_ma,
- .map_waitok = dmar_bus_dmamap_waitok,
- .map_complete = dmar_bus_dmamap_complete,
- .map_unload = dmar_bus_dmamap_unload,
- .map_sync = dmar_bus_dmamap_sync,
+struct bus_dma_impl bus_dma_iommu_impl = {
+ .tag_create = iommu_bus_dma_tag_create,
+ .tag_destroy = iommu_bus_dma_tag_destroy,
+ .tag_set_domain = iommu_bus_dma_tag_set_domain,
+ .id_mapped = iommu_bus_dma_id_mapped,
+ .map_create = iommu_bus_dmamap_create,
+ .map_destroy = iommu_bus_dmamap_destroy,
+ .mem_alloc = iommu_bus_dmamem_alloc,
+ .mem_free = iommu_bus_dmamem_free,
+ .load_phys = iommu_bus_dmamap_load_phys,
+ .load_buffer = iommu_bus_dmamap_load_buffer,
+ .load_ma = iommu_bus_dmamap_load_ma,
+ .map_waitok = iommu_bus_dmamap_waitok,
+ .map_complete = iommu_bus_dmamap_complete,
+ .map_unload = iommu_bus_dmamap_unload,
+ .map_sync = iommu_bus_dmamap_sync,
};
static void
-dmar_bus_task_dmamap(void *arg, int pending)
+iommu_bus_task_dmamap(void *arg, int pending)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- struct dmar_unit *unit;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ struct iommu_unit *unit;
unit = arg;
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) {
TAILQ_REMOVE(&unit->delayed_maps, map, delay_link);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
tag = map->tag;
map->cansleep = true;
map->locked = false;
@@ -931,30 +931,30 @@
} else
map->locked = true;
map->cansleep = false;
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
}
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
}
static void
-dmar_bus_schedule_dmamap(struct dmar_unit *unit, struct bus_dmamap_dmar *map)
+iommu_bus_schedule_dmamap(struct iommu_unit *unit, struct bus_dmamap_iommu *map)
{
map->locked = false;
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task);
}
int
-dmar_init_busdma(struct dmar_unit *unit)
+iommu_init_busdma(struct iommu_unit *unit)
{
unit->dma_enabled = 1;
TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
TAILQ_INIT(&unit->delayed_maps);
- TASK_INIT(&unit->dmamap_load_task, 0, dmar_bus_task_dmamap, unit);
+ TASK_INIT(&unit->dmamap_load_task, 0, iommu_bus_task_dmamap, unit);
unit->delayed_taskqueue = taskqueue_create("dmar", M_WAITOK,
taskqueue_thread_enqueue, &unit->delayed_taskqueue);
taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK,
@@ -963,7 +963,7 @@
}
void
-dmar_fini_busdma(struct dmar_unit *unit)
+iommu_fini_busdma(struct iommu_unit *unit)
{
if (unit->delayed_taskqueue == NULL)
@@ -979,11 +979,11 @@
vm_paddr_t start, vm_size_t length, int flags)
{
struct bus_dma_tag_common *tc;
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- struct dmar_ctx *ctx;
- struct dmar_domain *domain;
- struct dmar_map_entry *entry;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ struct iommu_device *ctx;
+ struct iommu_domain *domain;
+ struct iommu_map_entry *entry;
vm_page_t *ma;
vm_size_t i;
int error;
@@ -996,13 +996,13 @@
MPASS((flags & ~(BUS_DMA_NOWAIT | BUS_DMA_NOWRITE)) == 0);
tc = (struct bus_dma_tag_common *)dmat;
- if (tc->impl != &bus_dma_dmar_impl)
+ if (tc->impl != &bus_dma_iommu_impl)
return (0);
- tag = (struct bus_dma_tag_dmar *)dmat;
+ tag = (struct bus_dma_tag_iommu *)dmat;
ctx = tag->ctx;
domain = ctx->domain;
- map = (struct bus_dmamap_dmar *)map1;
+ map = (struct bus_dmamap_iommu *)map1;
waitok = (flags & BUS_DMA_NOWAIT) != 0;
entry = dmar_gas_alloc_entry(domain, waitok ? 0 : DMAR_PGF_WAITOK);
@@ -1020,16 +1020,16 @@
ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
VM_MEMATTR_DEFAULT);
}
- error = dmar_gas_map_region(domain, entry, DMAR_MAP_ENTRY_READ |
- ((flags & BUS_DMA_NOWRITE) ? 0 : DMAR_MAP_ENTRY_WRITE),
- waitok ? DMAR_GM_CANWAIT : 0, ma);
+ error = dmar_gas_map_region(domain, entry, IOMMU_MAP_ENTRY_READ |
+ ((flags & BUS_DMA_NOWRITE) ? 0 : IOMMU_MAP_ENTRY_WRITE),
+ waitok ? IOMMU_MF_CANWAIT : 0, ma);
if (error == 0) {
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
- entry->flags |= DMAR_MAP_ENTRY_MAP;
- DMAR_DOMAIN_UNLOCK(domain);
+ entry->flags |= IOMMU_MAP_ENTRY_MAP;
+ IOMMU_DOMAIN_UNLOCK(domain);
} else {
- dmar_domain_unload_entry(entry, true);
+ iommu_domain_unload_entry(entry, true);
}
for (i = 0; i < atop(length); i++)
vm_page_putfake(ma[i]);
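The load path in this file is transactional: iommu_bus_dmamap_load_something1 links every new entry onto both the map's map_entries list (via dmamap_link) and a caller-provided unroll_list (via unroll_link); on a partial failure, iommu_bus_dmamap_load_something walks the unroll list and moves each entry to the domain's deferred-unload queue instead of leaving a half-built mapping behind. A minimal userspace sketch of that unroll idiom, assuming a BSD-complete <sys/queue.h> and hypothetical names (load_one, load_all):

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
        int seg;
        TAILQ_ENTRY(entry) map_link;    /* lives on the map */
        TAILQ_ENTRY(entry) unroll_link; /* rollback bookkeeping */
};
TAILQ_HEAD(entryq, entry);

/* Map one segment; segment 3 fails to exercise the rollback. */
static int
load_one(int seg, struct entryq *map, struct entryq *unroll)
{
        struct entry *e;

        if (seg == 3)
                return (-1);    /* stands in for a dmar_gas_map() failure */
        e = calloc(1, sizeof(*e));
        e->seg = seg;
        TAILQ_INSERT_TAIL(map, e, map_link);
        TAILQ_INSERT_TAIL(unroll, e, unroll_link);
        return (0);
}

static int
load_all(struct entryq *map, struct entryq *unload)
{
        struct entryq unroll;
        struct entry *e, *e1;
        int seg;

        TAILQ_INIT(&unroll);
        for (seg = 0; seg < 5; seg++) {
                if (load_one(seg, map, &unroll) == 0)
                        continue;
                /* Partial load: retract every entry made so far and
                 * hand it to the deferred-unload queue, as the real
                 * code hands entries to domain->unload_entries. */
                TAILQ_FOREACH_SAFE(e, &unroll, unroll_link, e1) {
                        TAILQ_REMOVE(map, e, map_link);
                        TAILQ_INSERT_TAIL(unload, e, map_link);
                }
                return (-1);
        }
        return (0);
}

int
main(void)
{
        struct entryq map = TAILQ_HEAD_INITIALIZER(map);
        struct entryq unload = TAILQ_HEAD_INITIALIZER(unload);
        struct entry *e;

        if (load_all(&map, &unload) != 0)
                TAILQ_FOREACH(e, &unload, map_link)
                        printf("deferred unload of seg %d\n", e->seg);
        return (0);
}

Note the two link fields: the unroll list is only consumed on the failure path, so reusing map_link for the unload queue is safe once the entry has been removed from the map.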
Index: sys/x86/iommu/intel_ctx.c
===================================================================
--- sys/x86/iommu/intel_ctx.c
+++ sys/x86/iommu/intel_ctx.c
@@ -71,13 +71,13 @@
#include <x86/iommu/intel_dmar.h>
#include <dev/pci/pcivar.h>
-static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context");
-static MALLOC_DEFINE(M_DMAR_DOMAIN, "dmar_dom", "Intel DMAR Domain");
+static MALLOC_DEFINE(M_DMAR_CTX, "iommu_device", "Intel DMAR Context");
+static MALLOC_DEFINE(M_IOMMU_DOMAIN, "dmar_dom", "Intel DMAR Domain");
-static void dmar_domain_unload_task(void *arg, int pending);
+static void iommu_domain_unload_task(void *arg, int pending);
static void dmar_unref_domain_locked(struct dmar_unit *dmar,
- struct dmar_domain *domain);
-static void dmar_domain_destroy(struct dmar_domain *domain);
+ struct iommu_domain *domain);
+static void iommu_domain_destroy(struct iommu_domain *domain);
static void
dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
@@ -111,37 +111,40 @@
TD_PINNED_ASSERT;
}
-static dmar_ctx_entry_t *
-dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
+static iommu_device_entry_t *
+dmar_map_ctx_entry(struct iommu_device *ctx, struct sf_buf **sfp)
{
- dmar_ctx_entry_t *ctxp;
+ struct dmar_unit *dmar;
+ iommu_device_entry_t *ctxp;
+
+ dmar = (struct dmar_unit *)ctx->domain->iommu;
- ctxp = dmar_map_pgtbl(ctx->domain->dmar->ctx_obj, 1 +
+ ctxp = dmar_map_pgtbl(dmar->ctx_obj, 1 +
PCI_RID2BUS(ctx->rid), DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
ctxp += ctx->rid & 0xff;
return (ctxp);
}
static void
-ctx_tag_init(struct dmar_ctx *ctx, device_t dev)
+device_tag_init(struct iommu_device *ctx, device_t dev)
{
bus_addr_t maxaddr;
maxaddr = MIN(ctx->domain->end, BUS_SPACE_MAXADDR);
- ctx->ctx_tag.common.ref_count = 1; /* Prevent free */
- ctx->ctx_tag.common.impl = &bus_dma_dmar_impl;
- ctx->ctx_tag.common.boundary = 0;
- ctx->ctx_tag.common.lowaddr = maxaddr;
- ctx->ctx_tag.common.highaddr = maxaddr;
- ctx->ctx_tag.common.maxsize = maxaddr;
- ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED;
- ctx->ctx_tag.common.maxsegsz = maxaddr;
- ctx->ctx_tag.ctx = ctx;
- ctx->ctx_tag.owner = dev;
+ ctx->device_tag.common.ref_count = 1; /* Prevent free */
+ ctx->device_tag.common.impl = &bus_dma_iommu_impl;
+ ctx->device_tag.common.boundary = 0;
+ ctx->device_tag.common.lowaddr = maxaddr;
+ ctx->device_tag.common.highaddr = maxaddr;
+ ctx->device_tag.common.maxsize = maxaddr;
+ ctx->device_tag.common.nsegments = BUS_SPACE_UNRESTRICTED;
+ ctx->device_tag.common.maxsegsz = maxaddr;
+ ctx->device_tag.ctx = ctx;
+ ctx->device_tag.owner = dev;
}
static void
-ctx_id_entry_init_one(dmar_ctx_entry_t *ctxp, struct dmar_domain *domain,
+ctx_id_entry_init_one(iommu_device_entry_t *ctxp, struct iommu_domain *domain,
vm_page_t ctx_root)
{
/*
@@ -165,23 +168,23 @@
}
static void
-ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move,
+ctx_id_entry_init(struct iommu_device *ctx, iommu_device_entry_t *ctxp, bool move,
int busno)
{
struct dmar_unit *unit;
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
vm_page_t ctx_root;
int i;
domain = ctx->domain;
- unit = domain->dmar;
+ unit = (struct dmar_unit *)domain->iommu;
KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0),
("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
- unit->unit, busno, pci_get_slot(ctx->ctx_tag.owner),
- pci_get_function(ctx->ctx_tag.owner),
+ unit->iommu.unit, busno, pci_get_slot(ctx->device_tag.owner),
+ pci_get_function(ctx->device_tag.owner),
ctxp->ctx1, ctxp->ctx2));
- if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0 &&
+ if ((domain->flags & IOMMU_DOMAIN_IDMAP) != 0 &&
(unit->hw_ecap & DMAR_ECAP_PT) != 0) {
KASSERT(domain->pgtbl_obj == NULL,
("ctx %p non-null pgtbl_obj", ctx));
@@ -226,12 +229,12 @@
}
static int
-domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
+domain_init_rmrr(struct iommu_domain *domain, device_t dev, int bus,
int slot, int func, int dev_domain, int dev_busno,
const void *dev_path, int dev_path_len)
{
- struct dmar_map_entries_tailq rmrr_entries;
- struct dmar_map_entry *entry, *entry1;
+ struct iommu_map_entries_tailq rmrr_entries;
+ struct iommu_map_entry *entry, *entry1;
vm_page_t *ma;
dmar_gaddr_t start, end;
vm_pindex_t size, i;
@@ -255,7 +258,7 @@
end = entry->end;
if (bootverbose)
printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n",
- domain->dmar->unit, bus, slot, func,
+ domain->iommu->unit, bus, slot, func,
(uintmax_t)start, (uintmax_t)end);
entry->start = trunc_page(start);
entry->end = round_page(end);
@@ -267,7 +270,7 @@
printf("pci%d:%d:%d ", bus, slot, func);
printf("BIOS bug: dmar%d RMRR "
"region (%jx, %jx) corrected\n",
- domain->dmar->unit, start, end);
+ domain->iommu->unit, start, end);
}
entry->end += DMAR_PAGE_SIZE * 0x20;
}
@@ -278,8 +281,8 @@
VM_MEMATTR_DEFAULT);
}
error1 = dmar_gas_map_region(domain, entry,
- DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE,
- DMAR_GM_CANWAIT | DMAR_GM_RMRR, ma);
+ IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE,
+ IOMMU_MF_CANWAIT | IOMMU_MF_RMRR, ma);
/*
* Non-failed RMRR entries are owned by context rb
* tree. Get rid of the failed entry, but do not stop
@@ -287,10 +290,10 @@
* loaded and removed on the context destruction.
*/
if (error1 == 0 && entry->end != entry->start) {
- DMAR_LOCK(domain->dmar);
+ IOMMU_LOCK(domain->iommu);
domain->refs++; /* XXXKIB prevent free */
- domain->flags |= DMAR_DOMAIN_RMRR;
- DMAR_UNLOCK(domain->dmar);
+ domain->flags |= IOMMU_DOMAIN_RMRR;
+ IOMMU_UNLOCK(domain->iommu);
} else {
if (error1 != 0) {
if (dev != NULL)
@@ -298,7 +301,7 @@
printf("pci%d:%d:%d ", bus, slot, func);
printf(
"dmar%d failed to map RMRR region (%jx, %jx) %d\n",
- domain->dmar->unit, start, end,
+ domain->iommu->unit, start, end,
error1);
error = error1;
}
@@ -312,23 +315,23 @@
return (error);
}
-static struct dmar_domain *
-dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
+static struct iommu_domain *
+iommu_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
{
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
int error, id, mgaw;
id = alloc_unr(dmar->domids);
if (id == -1)
return (NULL);
- domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO);
+ domain = malloc(sizeof(*domain), M_IOMMU_DOMAIN, M_WAITOK | M_ZERO);
domain->domain = id;
LIST_INIT(&domain->contexts);
RB_INIT(&domain->rb_root);
TAILQ_INIT(&domain->unload_entries);
- TASK_INIT(&domain->unload_task, 0, dmar_domain_unload_task, domain);
+ TASK_INIT(&domain->unload_task, 0, iommu_domain_unload_task, domain);
mtx_init(&domain->lock, "dmardom", NULL, MTX_DEF);
- domain->dmar = dmar;
+ domain->iommu = &dmar->iommu;
/*
* For now, use the maximal usable physical address of the
@@ -352,7 +355,7 @@
domain->pgtbl_obj = domain_get_idmap_pgtbl(domain,
domain->end);
}
- domain->flags |= DMAR_DOMAIN_IDMAP;
+ domain->flags |= IOMMU_DOMAIN_IDMAP;
} else {
error = domain_alloc_pgtbl(domain);
if (error != 0)
@@ -366,14 +369,14 @@
return (domain);
fail:
- dmar_domain_destroy(domain);
+ iommu_domain_destroy(domain);
return (NULL);
}
-static struct dmar_ctx *
-dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid)
+static struct iommu_device *
+iommu_device_alloc(struct iommu_domain *domain, uint16_t rid)
{
- struct dmar_ctx *ctx;
+ struct iommu_device *ctx;
ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
ctx->domain = domain;
@@ -383,12 +386,12 @@
}
static void
-dmar_ctx_link(struct dmar_ctx *ctx)
+iommu_device_link(struct iommu_device *ctx)
{
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
domain = ctx->domain;
- DMAR_ASSERT_LOCKED(domain->dmar);
+ IOMMU_ASSERT_LOCKED(domain->iommu);
KASSERT(domain->refs >= domain->ctx_cnt,
("dom %p ref underflow %d %d", domain, domain->refs,
domain->ctx_cnt));
@@ -398,12 +401,12 @@
}
static void
-dmar_ctx_unlink(struct dmar_ctx *ctx)
+iommu_device_unlink(struct iommu_device *ctx)
{
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
domain = ctx->domain;
- DMAR_ASSERT_LOCKED(domain->dmar);
+ IOMMU_ASSERT_LOCKED(domain->iommu);
KASSERT(domain->refs > 0,
("domain %p ctx dtr refs %d", domain, domain->refs));
KASSERT(domain->ctx_cnt >= domain->refs,
@@ -415,8 +418,9 @@
}
static void
-dmar_domain_destroy(struct dmar_domain *domain)
+iommu_domain_destroy(struct iommu_domain *domain)
{
+ struct dmar_unit *dmar;
KASSERT(TAILQ_EMPTY(&domain->unload_entries),
("unfinished unloads %p", domain));
@@ -426,29 +430,30 @@
("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt));
KASSERT(domain->refs == 0,
("destroying dom %p with refs %d", domain, domain->refs));
- if ((domain->flags & DMAR_DOMAIN_GAS_INITED) != 0) {
- DMAR_DOMAIN_LOCK(domain);
+ if ((domain->flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
+ IOMMU_DOMAIN_LOCK(domain);
dmar_gas_fini_domain(domain);
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
}
- if ((domain->flags & DMAR_DOMAIN_PGTBL_INITED) != 0) {
+ if ((domain->flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) {
if (domain->pgtbl_obj != NULL)
- DMAR_DOMAIN_PGLOCK(domain);
+ IOMMU_DOMAIN_PGLOCK(domain);
domain_free_pgtbl(domain);
}
mtx_destroy(&domain->lock);
- free_unr(domain->dmar->domids, domain->domain);
- free(domain, M_DMAR_DOMAIN);
+ dmar = (struct dmar_unit *)domain->iommu;
+ free_unr(dmar->domids, domain->domain);
+ free(domain, M_IOMMU_DOMAIN);
}
-static struct dmar_ctx *
+static struct iommu_device *
dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
bool id_mapped, bool rmrr_init)
{
- struct dmar_domain *domain, *domain1;
- struct dmar_ctx *ctx, *ctx1;
- dmar_ctx_entry_t *ctxp;
+ struct iommu_domain *domain, *domain1;
+ struct iommu_device *ctx, *ctx1;
+ iommu_device_entry_t *ctxp;
struct sf_buf *sf;
int bus, slot, func, error;
bool enable;
@@ -466,7 +471,7 @@
TD_PREP_PINNED_ASSERT;
DMAR_LOCK(dmar);
KASSERT(!dmar_is_buswide_ctx(dmar, bus) || (slot == 0 && func == 0),
- ("dmar%d pci%d:%d:%d get_ctx for buswide", dmar->unit, bus,
+ ("dmar%d pci%d:%d:%d get_ctx for buswide", dmar->iommu.unit, bus,
slot, func));
ctx = dmar_find_ctx_locked(dmar, rid);
error = 0;
@@ -477,7 +482,7 @@
*/
DMAR_UNLOCK(dmar);
dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid));
- domain1 = dmar_domain_alloc(dmar, id_mapped);
+ domain1 = iommu_domain_alloc(dmar, id_mapped);
if (domain1 == NULL) {
TD_PINNED_ASSERT;
return (NULL);
@@ -487,12 +492,12 @@
slot, func, dev_domain, dev_busno, dev_path,
dev_path_len);
if (error != 0) {
- dmar_domain_destroy(domain1);
+ iommu_domain_destroy(domain1);
TD_PINNED_ASSERT;
return (NULL);
}
}
- ctx1 = dmar_ctx_alloc(domain1, rid);
+ ctx1 = iommu_device_alloc(domain1, rid);
ctxp = dmar_map_ctx_entry(ctx1, &sf);
DMAR_LOCK(dmar);
@@ -504,9 +509,9 @@
if (ctx == NULL) {
domain = domain1;
ctx = ctx1;
- dmar_ctx_link(ctx);
- ctx->ctx_tag.owner = dev;
- ctx_tag_init(ctx, dev);
+ iommu_device_link(ctx);
+ ctx->device_tag.owner = dev;
+ device_tag_init(ctx, dev);
/*
* This is the first activated context for the
@@ -521,14 +526,14 @@
device_printf(dev,
"dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d "
"agaw %d %s-mapped\n",
- dmar->unit, dmar->segment, bus, slot,
+ dmar->iommu.unit, dmar->segment, bus, slot,
func, rid, domain->domain, domain->mgaw,
domain->agaw, id_mapped ? "id" : "re");
}
dmar_unmap_pgtbl(sf);
} else {
dmar_unmap_pgtbl(sf);
- dmar_domain_destroy(domain1);
+ iommu_domain_destroy(domain1);
/* Nothing needs to be done to destroy ctx1. */
free(ctx1, M_DMAR_CTX);
domain = ctx->domain;
@@ -536,14 +541,14 @@
}
} else {
domain = ctx->domain;
- if (ctx->ctx_tag.owner == NULL)
- ctx->ctx_tag.owner = dev;
+ if (ctx->device_tag.owner == NULL)
+ ctx->device_tag.owner = dev;
ctx->refs++; /* tag referenced us */
}
error = dmar_flush_for_ctx_entry(dmar, enable);
if (error != 0) {
- dmar_free_ctx_locked(dmar, ctx);
+ dmar_free_ctx_locked(&dmar->iommu, ctx);
TD_PINNED_ASSERT;
return (NULL);
}
@@ -558,12 +563,12 @@
if (error == 0) {
if (bootverbose) {
printf("dmar%d: enabled translation\n",
- dmar->unit);
+ dmar->iommu.unit);
}
} else {
printf("dmar%d: enabling translation failed, "
- "error %d\n", dmar->unit, error);
- dmar_free_ctx_locked(dmar, ctx);
+ "error %d\n", dmar->iommu.unit, error);
+ dmar_free_ctx_locked(&dmar->iommu, ctx);
TD_PINNED_ASSERT;
return (NULL);
}
@@ -573,8 +578,8 @@
return (ctx);
}
-struct dmar_ctx *
-dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid,
+struct iommu_device *
+dmar_get_ctx_for_dev(struct iommu_unit *dmar, device_t dev, uint16_t rid,
bool id_mapped, bool rmrr_init)
{
int dev_domain, dev_path_len, dev_busno;
@@ -583,11 +588,12 @@
dev_path_len = dmar_dev_depth(dev);
ACPI_DMAR_PCI_PATH dev_path[dev_path_len];
dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len);
- return (dmar_get_ctx_for_dev1(dmar, dev, rid, dev_domain, dev_busno,
- dev_path, dev_path_len, id_mapped, rmrr_init));
+ return (dmar_get_ctx_for_dev1((struct dmar_unit *)dmar, dev, rid,
+ dev_domain, dev_busno, dev_path, dev_path_len, id_mapped,
+ rmrr_init));
}
-struct dmar_ctx *
+struct iommu_device *
dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
int dev_domain, int dev_busno,
const void *dev_path, int dev_path_len,
@@ -599,49 +605,51 @@
}
int
-dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx)
+dmar_move_ctx_to_domain(struct iommu_domain *domain, struct iommu_device *ctx)
{
struct dmar_unit *dmar;
- struct dmar_domain *old_domain;
- dmar_ctx_entry_t *ctxp;
+ struct iommu_domain *old_domain;
+ iommu_device_entry_t *ctxp;
struct sf_buf *sf;
int error;
- dmar = domain->dmar;
+ dmar = (struct dmar_unit *)domain->iommu;
old_domain = ctx->domain;
if (domain == old_domain)
return (0);
- KASSERT(old_domain->dmar == dmar,
+ KASSERT(old_domain->iommu == domain->iommu,
("domain %p %u moving between dmars %u %u", domain,
- domain->domain, old_domain->dmar->unit, domain->dmar->unit));
+ domain->domain, old_domain->iommu->unit,
+ domain->iommu->unit));
TD_PREP_PINNED_ASSERT;
ctxp = dmar_map_ctx_entry(ctx, &sf);
DMAR_LOCK(dmar);
- dmar_ctx_unlink(ctx);
+ iommu_device_unlink(ctx);
ctx->domain = domain;
- dmar_ctx_link(ctx);
+ iommu_device_link(ctx);
ctx_id_entry_init(ctx, ctxp, true, PCI_BUSMAX + 100);
dmar_unmap_pgtbl(sf);
error = dmar_flush_for_ctx_entry(dmar, true);
/* If flush failed, rolling back would not work as well. */
printf("dmar%d rid %x domain %d->%d %s-mapped\n",
- dmar->unit, ctx->rid, old_domain->domain, domain->domain,
- (domain->flags & DMAR_DOMAIN_IDMAP) != 0 ? "id" : "re");
+ dmar->iommu.unit, ctx->rid, old_domain->domain, domain->domain,
+ (domain->flags & IOMMU_DOMAIN_IDMAP) != 0 ? "id" : "re");
dmar_unref_domain_locked(dmar, old_domain);
TD_PINNED_ASSERT;
return (error);
}
static void
-dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
+dmar_unref_domain_locked(struct dmar_unit *dmar, struct iommu_domain *domain)
{
DMAR_ASSERT_LOCKED(dmar);
KASSERT(domain->refs >= 1,
- ("dmar %d domain %p refs %u", dmar->unit, domain, domain->refs));
+ ("dmar %d domain %p refs %u", dmar->iommu.unit, domain,
+ domain->refs));
KASSERT(domain->refs > domain->ctx_cnt,
- ("dmar %d domain %p refs %d ctx_cnt %d", dmar->unit, domain,
+ ("dmar %d domain %p refs %d ctx_cnt %d", dmar->iommu.unit, domain,
domain->refs, domain->ctx_cnt));
if (domain->refs > 1) {
@@ -650,22 +658,25 @@
return;
}
- KASSERT((domain->flags & DMAR_DOMAIN_RMRR) == 0,
+ KASSERT((domain->flags & IOMMU_DOMAIN_RMRR) == 0,
("lost ref on RMRR domain %p", domain));
LIST_REMOVE(domain, link);
DMAR_UNLOCK(dmar);
- taskqueue_drain(dmar->delayed_taskqueue, &domain->unload_task);
- dmar_domain_destroy(domain);
+ taskqueue_drain(dmar->iommu.delayed_taskqueue, &domain->unload_task);
+ iommu_domain_destroy(domain);
}
void
-dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
+dmar_free_ctx_locked(struct iommu_unit *unit, struct iommu_device *ctx)
{
+ struct dmar_unit *dmar;
struct sf_buf *sf;
- dmar_ctx_entry_t *ctxp;
- struct dmar_domain *domain;
+ iommu_device_entry_t *ctxp;
+ struct iommu_domain *domain;
+
+ dmar = (struct dmar_unit *)unit;
DMAR_ASSERT_LOCKED(dmar);
KASSERT(ctx->refs >= 1,
@@ -727,30 +738,30 @@
}
dmar_unmap_pgtbl(sf);
domain = ctx->domain;
- dmar_ctx_unlink(ctx);
+ iommu_device_unlink(ctx);
free(ctx, M_DMAR_CTX);
dmar_unref_domain_locked(dmar, domain);
TD_PINNED_ASSERT;
}
void
-dmar_free_ctx(struct dmar_ctx *ctx)
+dmar_free_ctx(struct iommu_device *ctx)
{
struct dmar_unit *dmar;
- dmar = ctx->domain->dmar;
+ dmar = (struct dmar_unit *)ctx->domain->iommu;
DMAR_LOCK(dmar);
- dmar_free_ctx_locked(dmar, ctx);
+ dmar_free_ctx_locked(&dmar->iommu, ctx);
}
/*
* Returns with the domain locked.
*/
-struct dmar_ctx *
+struct iommu_device *
dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid)
{
- struct dmar_domain *domain;
- struct dmar_ctx *ctx;
+ struct iommu_domain *domain;
+ struct iommu_device *ctx;
DMAR_ASSERT_LOCKED(dmar);
@@ -764,17 +775,17 @@
}
void
-dmar_domain_free_entry(struct dmar_map_entry *entry, bool free)
+iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
domain = entry->domain;
- DMAR_DOMAIN_LOCK(domain);
- if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
+ IOMMU_DOMAIN_LOCK(domain);
+ if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
dmar_gas_free_region(domain, entry);
else
dmar_gas_free_space(domain, entry);
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
if (free)
dmar_gas_free_entry(domain, entry);
else
@@ -782,29 +793,29 @@
}
void
-dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free)
+iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free)
{
struct dmar_unit *unit;
- unit = entry->domain->dmar;
+ unit = (struct dmar_unit *)entry->domain->iommu;
if (unit->qi_enabled) {
DMAR_LOCK(unit);
dmar_qi_invalidate_locked(entry->domain, entry->start,
entry->end - entry->start, &entry->gseq, true);
if (!free)
- entry->flags |= DMAR_MAP_ENTRY_QI_NF;
+ entry->flags |= IOMMU_MAP_ENTRY_QI_NF;
TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
DMAR_UNLOCK(unit);
} else {
domain_flush_iotlb_sync(entry->domain, entry->start,
entry->end - entry->start);
- dmar_domain_free_entry(entry, free);
+ iommu_domain_free_entry(entry, free);
}
}
static bool
-dmar_domain_unload_emit_wait(struct dmar_domain *domain,
- struct dmar_map_entry *entry)
+iommu_domain_unload_emit_wait(struct iommu_domain *domain,
+ struct iommu_map_entry *entry)
{
if (TAILQ_NEXT(entry, dmamap_link) == NULL)
@@ -813,17 +824,17 @@
}
void
-dmar_domain_unload(struct dmar_domain *domain,
- struct dmar_map_entries_tailq *entries, bool cansleep)
+iommu_domain_unload(struct iommu_domain *domain,
+ struct iommu_map_entries_tailq *entries, bool cansleep)
{
struct dmar_unit *unit;
- struct dmar_map_entry *entry, *entry1;
+ struct iommu_map_entry *entry, *entry1;
int error;
- unit = domain->dmar;
+ unit = (struct dmar_unit *)domain->iommu;
TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
- KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
+ KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
("not mapped entry %p %p", domain, entry));
error = domain_unmap_buf(domain, entry->start, entry->end -
entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
@@ -832,7 +843,7 @@
domain_flush_iotlb_sync(domain, entry->start,
entry->end - entry->start);
TAILQ_REMOVE(entries, entry, dmamap_link);
- dmar_domain_free_entry(entry, true);
+ iommu_domain_free_entry(entry, true);
}
}
if (TAILQ_EMPTY(entries))
@@ -843,28 +854,28 @@
TAILQ_FOREACH(entry, entries, dmamap_link) {
dmar_qi_invalidate_locked(domain, entry->start, entry->end -
entry->start, &entry->gseq,
- dmar_domain_unload_emit_wait(domain, entry));
+ iommu_domain_unload_emit_wait(domain, entry));
}
TAILQ_CONCAT(&unit->tlb_flush_entries, entries, dmamap_link);
DMAR_UNLOCK(unit);
}
static void
-dmar_domain_unload_task(void *arg, int pending)
+iommu_domain_unload_task(void *arg, int pending)
{
- struct dmar_domain *domain;
- struct dmar_map_entries_tailq entries;
+ struct iommu_domain *domain;
+ struct iommu_map_entries_tailq entries;
domain = arg;
TAILQ_INIT(&entries);
for (;;) {
- DMAR_DOMAIN_LOCK(domain);
- TAILQ_SWAP(&domain->unload_entries, &entries, dmar_map_entry,
+ IOMMU_DOMAIN_LOCK(domain);
+ TAILQ_SWAP(&domain->unload_entries, &entries, iommu_map_entry,
dmamap_link);
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
if (TAILQ_EMPTY(&entries))
break;
- dmar_domain_unload(domain, &entries, true);
+ iommu_domain_unload(domain, &entries, true);
}
}
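iommu_domain_unload_task above drains work with a swap-and-drain idiom: under the domain lock it exchanges the shared unload_entries queue with an empty local one, drops the lock, processes the snapshot, and loops until a swap comes back empty, so producers never wait behind slow unmap work. A minimal pthreads sketch of the idiom, assuming a BSD-style <sys/queue.h> that provides TAILQ_SWAP, with hypothetical names (drain_task, pending):

#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
        int id;
        TAILQ_ENTRY(item) link;
};
TAILQ_HEAD(itemq, item);

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static struct itemq pending = TAILQ_HEAD_INITIALIZER(pending);

/* Producer side: what the unload paths do when queueing entries. */
static void
enqueue(int id)
{
        struct item *it;

        it = malloc(sizeof(*it));
        it->id = id;
        pthread_mutex_lock(&qlock);
        TAILQ_INSERT_TAIL(&pending, it, link);
        pthread_mutex_unlock(&qlock);
}

/* Consumer side: snapshot under the lock, process unlocked, repeat. */
static void
drain_task(void)
{
        struct itemq batch;
        struct item *it, *it1;

        TAILQ_INIT(&batch);
        for (;;) {
                pthread_mutex_lock(&qlock);
                TAILQ_SWAP(&pending, &batch, item, link);
                pthread_mutex_unlock(&qlock);
                if (TAILQ_EMPTY(&batch))
                        break;
                TAILQ_FOREACH_SAFE(it, &batch, link, it1) {
                        TAILQ_REMOVE(&batch, it, link);
                        printf("unloading %d\n", it->id); /* the real work */
                        free(it);
                }
        }
}

int
main(void)
{
        enqueue(1);
        enqueue(2);
        drain_task();
        return (0);
}

The loop structure matters: entries queued while a batch is being processed are picked up by the next swap, which is why the task can safely sleep (the cansleep == true case) while unmapping.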
Index: sys/x86/iommu/intel_dmar.h
===================================================================
--- sys/x86/iommu/intel_dmar.h
+++ sys/x86/iommu/intel_dmar.h
@@ -39,12 +39,14 @@
/* Guest or bus address, before translation. */
typedef uint64_t dmar_gaddr_t;
+struct dmar_unit;
+
struct dmar_qi_genseq {
u_int gen;
uint32_t seq;
};
-struct dmar_map_entry {
+struct iommu_map_entry {
dmar_gaddr_t start;
dmar_gaddr_t end;
dmar_gaddr_t first; /* Least start in subtree */
@@ -52,29 +54,29 @@
dmar_gaddr_t free_down; /* Max free space below the
current R/B tree node */
u_int flags;
- TAILQ_ENTRY(dmar_map_entry) dmamap_link; /* Link for dmamap entries */
- RB_ENTRY(dmar_map_entry) rb_entry; /* Links for domain entries */
- TAILQ_ENTRY(dmar_map_entry) unroll_link; /* Link for unroll after
+ TAILQ_ENTRY(iommu_map_entry) dmamap_link; /* Link for dmamap entries */
+ RB_ENTRY(iommu_map_entry) rb_entry; /* Links for domain entries */
+ TAILQ_ENTRY(iommu_map_entry) unroll_link; /* Link for unroll after
dmamap_load failure */
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
struct dmar_qi_genseq gseq;
};
-RB_HEAD(dmar_gas_entries_tree, dmar_map_entry);
-RB_PROTOTYPE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
+RB_HEAD(dmar_gas_entries_tree, iommu_map_entry);
+RB_PROTOTYPE(dmar_gas_entries_tree, iommu_map_entry, rb_entry,
dmar_gas_cmp_entries);
-#define DMAR_MAP_ENTRY_PLACE 0x0001 /* Fake entry */
-#define DMAR_MAP_ENTRY_RMRR 0x0002 /* Permanent, not linked by
+#define IOMMU_MAP_ENTRY_PLACE 0x0001 /* Fake entry */
+#define IOMMU_MAP_ENTRY_RMRR 0x0002 /* Permanent, not linked by
dmamap_link */
-#define DMAR_MAP_ENTRY_MAP 0x0004 /* Busdma created, linked by
+#define IOMMU_MAP_ENTRY_MAP 0x0004 /* Busdma created, linked by
dmamap_link */
-#define DMAR_MAP_ENTRY_UNMAPPED 0x0010 /* No backing pages */
-#define DMAR_MAP_ENTRY_QI_NF 0x0020 /* qi task, do not free entry */
-#define DMAR_MAP_ENTRY_READ 0x1000 /* Read permitted */
-#define DMAR_MAP_ENTRY_WRITE 0x2000 /* Write permitted */
-#define DMAR_MAP_ENTRY_SNOOP 0x4000 /* Snoop */
-#define DMAR_MAP_ENTRY_TM 0x8000 /* Transient */
+#define IOMMU_MAP_ENTRY_UNMAPPED 0x0010 /* No backing pages */
+#define IOMMU_MAP_ENTRY_QI_NF 0x0020 /* qi task, do not free entry */
+#define IOMMU_MAP_ENTRY_READ 0x1000 /* Read permitted */
+#define IOMMU_MAP_ENTRY_WRITE 0x2000 /* Write permitted */
+#define IOMMU_MAP_ENTRY_SNOOP 0x4000 /* Snoop */
+#define IOMMU_MAP_ENTRY_TM 0x8000 /* Transient */
/*
* Locking annotations:
@@ -94,7 +96,7 @@
* Page tables pages and pages content is protected by the vm object
* lock pgtbl_obj, which contains the page tables pages.
*/
-struct dmar_domain {
+struct iommu_domain {
int domain; /* (c) DID, written in context entry */
int mgaw; /* (c) Real max address width */
int agaw; /* (c) Adjusted guest address width */
@@ -105,56 +107,56 @@
the guest AS */
u_int ctx_cnt; /* (u) Number of contexts owned */
u_int refs; /* (u) Refs, including ctx */
- struct dmar_unit *dmar; /* (c) */
+ struct iommu_unit *iommu; /* (c) */
struct mtx lock; /* (c) */
- LIST_ENTRY(dmar_domain) link; /* (u) Member in the dmar list */
- LIST_HEAD(, dmar_ctx) contexts; /* (u) */
+ LIST_ENTRY(iommu_domain) link; /* (u) Member in the dmar list */
+ LIST_HEAD(, iommu_device) contexts; /* (u) */
vm_object_t pgtbl_obj; /* (c) Page table pages */
u_int flags; /* (u) */
u_int entries_cnt; /* (d) */
struct dmar_gas_entries_tree rb_root; /* (d) */
- struct dmar_map_entries_tailq unload_entries; /* (d) Entries to
+ struct iommu_map_entries_tailq unload_entries; /* (d) Entries to
unload */
- struct dmar_map_entry *first_place, *last_place; /* (d) */
+ struct iommu_map_entry *first_place, *last_place; /* (d) */
struct task unload_task; /* (c) */
u_int batch_no;
};
-struct dmar_ctx {
- struct bus_dma_tag_dmar ctx_tag; /* (c) Root tag */
+struct iommu_device {
+ struct bus_dma_tag_iommu device_tag; /* (c) Root tag */
uint16_t rid; /* (c) pci RID */
uint64_t last_fault_rec[2]; /* Last fault reported */
- struct dmar_domain *domain; /* (c) */
- LIST_ENTRY(dmar_ctx) link; /* (u) Member in the domain list */
+ struct iommu_domain *domain; /* (c) */
+ LIST_ENTRY(iommu_device) link; /* (u) Member in the domain list */
u_int refs; /* (u) References from tags */
u_int flags; /* (u) */
u_long loads; /* atomic updates, for stat only */
u_long unloads; /* same */
};
-#define DMAR_DOMAIN_GAS_INITED 0x0001
-#define DMAR_DOMAIN_PGTBL_INITED 0x0002
-#define DMAR_DOMAIN_IDMAP 0x0010 /* Domain uses identity
+#define IOMMU_DOMAIN_GAS_INITED 0x0001
+#define IOMMU_DOMAIN_PGTBL_INITED 0x0002
+#define IOMMU_DOMAIN_IDMAP 0x0010 /* Domain uses identity
page table */
-#define DMAR_DOMAIN_RMRR 0x0020 /* Domain contains RMRR entry,
+#define IOMMU_DOMAIN_RMRR 0x0020 /* Domain contains RMRR entry,
cannot be turned off */
-/* struct dmar_ctx flags */
+/* struct iommu_device flags */
#define DMAR_CTX_FAULTED 0x0001 /* Fault was reported,
last_fault_rec is valid */
#define DMAR_CTX_DISABLED 0x0002 /* Device is disabled, the
ephemeral reference is kept
to prevent context destruction */
-#define DMAR_DOMAIN_PGLOCK(dom) VM_OBJECT_WLOCK((dom)->pgtbl_obj)
-#define DMAR_DOMAIN_PGTRYLOCK(dom) VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj)
-#define DMAR_DOMAIN_PGUNLOCK(dom) VM_OBJECT_WUNLOCK((dom)->pgtbl_obj)
-#define DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \
+#define IOMMU_DOMAIN_PGLOCK(dom) VM_OBJECT_WLOCK((dom)->pgtbl_obj)
+#define IOMMU_DOMAIN_PGTRYLOCK(dom) VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj)
+#define IOMMU_DOMAIN_PGUNLOCK(dom) VM_OBJECT_WUNLOCK((dom)->pgtbl_obj)
+#define IOMMU_DOMAIN_ASSERT_PGLOCKED(dom) \
VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)
-#define DMAR_DOMAIN_LOCK(dom) mtx_lock(&(dom)->lock)
-#define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock)
-#define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED)
+#define IOMMU_DOMAIN_LOCK(dom) mtx_lock(&(dom)->lock)
+#define IOMMU_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock)
+#define IOMMU_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED)
struct dmar_msi_data {
int irq;
@@ -175,8 +177,8 @@
#define DMAR_INTR_TOTAL 2
struct dmar_unit {
+ struct iommu_unit iommu;
device_t dev;
- int unit;
uint16_t segment;
uint64_t base;
@@ -193,8 +195,7 @@
uint32_t hw_gcmd;
/* Data for being a dmar */
- struct mtx lock;
- LIST_HEAD(, dmar_domain) domains;
+ LIST_HEAD(, iommu_domain) domains;
struct unrhdr *domids;
vm_object_t ctx_obj;
u_int barrier_flags;
@@ -230,17 +231,10 @@
vmem_t *irtids;
/* Delayed freeing of map entries queue processing */
- struct dmar_map_entries_tailq tlb_flush_entries;
+ struct iommu_map_entries_tailq tlb_flush_entries;
struct task qi_task;
struct taskqueue *qi_taskqueue;
- /* Busdma delayed map load */
- struct task dmamap_load_task;
- TAILQ_HEAD(, bus_dmamap_dmar) delayed_maps;
- struct taskqueue *delayed_taskqueue;
-
- int dma_enabled;
-
/*
* Bitmap of buses for which context must ignore slot:func,
* duplicating the page table pointer into all context table
@@ -251,9 +245,9 @@
};
-#define DMAR_LOCK(dmar) mtx_lock(&(dmar)->lock)
-#define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->lock)
-#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->lock, MA_OWNED)
+#define DMAR_LOCK(dmar) mtx_lock(&(dmar)->iommu.lock)
+#define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->iommu.lock)
+#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED)
#define DMAR_FAULT_LOCK(dmar) mtx_lock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock)
@@ -268,19 +262,19 @@
#define DMAR_BARRIER_RMRR 0
#define DMAR_BARRIER_USEQ 1
-struct dmar_unit *dmar_find(device_t dev, bool verbose);
+struct iommu_unit *dmar_find(device_t dev, bool verbose);
struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);
u_int dmar_nd2mask(u_int nd);
-bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
-int domain_set_agaw(struct dmar_domain *domain, int mgaw);
+bool dmar_pglvl_supported(struct iommu_unit *unit, int pglvl);
+int domain_set_agaw(struct iommu_domain *domain, int mgaw);
int dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr,
bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
-int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
+int domain_is_sp_lvl(struct iommu_domain *domain, int lvl);
dmar_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
-dmar_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
+dmar_gaddr_t domain_page_size(struct iommu_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
dmar_gaddr_t *isizep);
struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
@@ -293,7 +287,7 @@
int dmar_inv_iotlb_glob(struct dmar_unit *unit);
int dmar_flush_write_bufs(struct dmar_unit *unit);
void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst);
-void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
+void dmar_flush_ctx_to_ram(struct dmar_unit *unit, iommu_device_entry_t *dst);
void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
int dmar_enable_translation(struct dmar_unit *unit);
int dmar_disable_translation(struct dmar_unit *unit);
@@ -316,83 +310,84 @@
void dmar_disable_qi_intr(struct dmar_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
-void dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t start,
+void dmar_qi_invalidate_locked(struct iommu_domain *domain, dmar_gaddr_t start,
dmar_gaddr_t size, struct dmar_qi_genseq *psec, bool emit_wait);
void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);
-vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
+vm_object_t domain_get_idmap_pgtbl(struct iommu_domain *domain,
dmar_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
-int domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base,
+int domain_map_buf(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
-int domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base,
+int domain_unmap_buf(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, int flags);
-void domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
+void domain_flush_iotlb_sync(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size);
-int domain_alloc_pgtbl(struct dmar_domain *domain);
-void domain_free_pgtbl(struct dmar_domain *domain);
+int domain_alloc_pgtbl(struct iommu_domain *domain);
+void domain_free_pgtbl(struct iommu_domain *domain);
int dmar_dev_depth(device_t child);
void dmar_dev_path(device_t child, int *busno, void *path1, int depth);
-struct dmar_ctx *dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev,
- bool rmrr);
-struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev,
+struct iommu_device *iommu_instantiate_device(struct iommu_unit *dmar,
+ device_t dev, bool rmrr);
+struct iommu_device *dmar_get_ctx_for_dev(struct iommu_unit *dmar, device_t dev,
uint16_t rid, bool id_mapped, bool rmrr_init);
-struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
+struct iommu_device *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
bool id_mapped, bool rmrr_init);
-int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
-void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
-void dmar_free_ctx(struct dmar_ctx *ctx);
-struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
-void dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free);
-void dmar_domain_unload(struct dmar_domain *domain,
- struct dmar_map_entries_tailq *entries, bool cansleep);
-void dmar_domain_free_entry(struct dmar_map_entry *entry, bool free);
-
-int dmar_init_busdma(struct dmar_unit *unit);
-void dmar_fini_busdma(struct dmar_unit *unit);
-device_t dmar_get_requester(device_t dev, uint16_t *rid);
-
-void dmar_gas_init_domain(struct dmar_domain *domain);
-void dmar_gas_fini_domain(struct dmar_domain *domain);
-struct dmar_map_entry *dmar_gas_alloc_entry(struct dmar_domain *domain,
+int dmar_move_ctx_to_domain(struct iommu_domain *domain, struct iommu_device *ctx);
+void dmar_free_ctx_locked(struct iommu_unit *dmar, struct iommu_device *ctx);
+void dmar_free_ctx(struct iommu_device *ctx);
+struct iommu_device *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
+void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free);
+void iommu_domain_unload(struct iommu_domain *domain,
+ struct iommu_map_entries_tailq *entries, bool cansleep);
+void iommu_domain_free_entry(struct iommu_map_entry *entry, bool free);
+
+int iommu_init_busdma(struct iommu_unit *unit);
+void iommu_fini_busdma(struct iommu_unit *unit);
+device_t iommu_get_requester(device_t dev, uint16_t *rid);
+
+void dmar_gas_init_domain(struct iommu_domain *domain);
+void dmar_gas_fini_domain(struct iommu_domain *domain);
+struct iommu_map_entry *dmar_gas_alloc_entry(struct iommu_domain *domain,
u_int flags);
-void dmar_gas_free_entry(struct dmar_domain *domain,
- struct dmar_map_entry *entry);
-void dmar_gas_free_space(struct dmar_domain *domain,
- struct dmar_map_entry *entry);
-int dmar_gas_map(struct dmar_domain *domain,
+void dmar_gas_free_entry(struct iommu_domain *domain,
+ struct iommu_map_entry *entry);
+void dmar_gas_free_space(struct iommu_domain *domain,
+ struct iommu_map_entry *entry);
+int dmar_gas_map(struct iommu_domain *domain,
const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset,
- u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res);
-void dmar_gas_free_region(struct dmar_domain *domain,
- struct dmar_map_entry *entry);
-int dmar_gas_map_region(struct dmar_domain *domain,
- struct dmar_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
-int dmar_gas_reserve_region(struct dmar_domain *domain, dmar_gaddr_t start,
+ u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
+void dmar_gas_free_region(struct iommu_domain *domain,
+ struct iommu_map_entry *entry);
+int dmar_gas_map_region(struct iommu_domain *domain,
+ struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
+int dmar_gas_reserve_region(struct iommu_domain *domain, dmar_gaddr_t start,
dmar_gaddr_t end);
-void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
+void dmar_dev_parse_rmrr(struct iommu_domain *domain, int dev_domain,
int dev_busno, const void *dev_path, int dev_path_len,
- struct dmar_map_entries_tailq *rmrr_entries);
-int dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar);
+ struct iommu_map_entries_tailq *rmrr_entries);
+int dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar);
void dmar_quirks_post_ident(struct dmar_unit *dmar);
-void dmar_quirks_pre_use(struct dmar_unit *dmar);
+void dmar_quirks_pre_use(struct iommu_unit *dmar);
int dmar_init_irt(struct dmar_unit *unit);
void dmar_fini_irt(struct dmar_unit *unit);
-void dmar_set_buswide_ctx(struct dmar_unit *unit, u_int busno);
+void dmar_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
bool dmar_is_buswide_ctx(struct dmar_unit *unit, u_int busno);
-#define DMAR_GM_CANWAIT 0x0001
-#define DMAR_GM_CANSPLIT 0x0002
-#define DMAR_GM_RMRR 0x0004
+/* Map flags */
+#define IOMMU_MF_CANWAIT 0x0001
+#define IOMMU_MF_CANSPLIT 0x0002
+#define IOMMU_MF_RMRR 0x0004
#define DMAR_PGF_WAITOK 0x0001
#define DMAR_PGF_ZERO 0x0002
@@ -433,7 +428,7 @@
KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) ==
(unit->hw_gcmd & DMAR_GCMD_TE),
- ("dmar%d clearing TE 0x%08x 0x%08x", unit->unit,
+ ("dmar%d clearing TE 0x%08x 0x%08x", unit->iommu.unit,
unit->hw_gcmd, val));
bus_write_4(unit->regs, reg, val);
}
@@ -522,7 +517,7 @@
}
static inline bool
-dmar_test_boundary(dmar_gaddr_t start, dmar_gaddr_t size,
+iommu_test_boundary(dmar_gaddr_t start, dmar_gaddr_t size,
dmar_gaddr_t boundary)
{
Index: sys/x86/iommu/intel_drv.c
===================================================================
--- sys/x86/iommu/intel_drv.c
+++ sys/x86/iommu/intel_drv.c
@@ -252,7 +252,7 @@
{
int i;
- dmar_fini_busdma(unit);
+ iommu_fini_busdma(&unit->iommu);
dmar_fini_irt(unit);
dmar_fini_qi(unit);
dmar_fini_fault_log(unit);
@@ -413,8 +413,8 @@
unit = device_get_softc(dev);
unit->dev = dev;
- unit->unit = device_get_unit(dev);
- dmaru = dmar_find_by_index(unit->unit);
+ unit->iommu.unit = device_get_unit(dev);
+ dmaru = dmar_find_by_index(unit->iommu.unit);
if (dmaru == NULL)
return (EINVAL);
unit->segment = dmaru->Segment;
@@ -469,9 +469,9 @@
}
}
- mtx_init(&unit->lock, "dmarhw", NULL, MTX_DEF);
+ mtx_init(&unit->iommu.lock, "dmarhw", NULL, MTX_DEF);
unit->domids = new_unrhdr(0, dmar_nd2mask(DMAR_CAP_ND(unit->hw_cap)),
- &unit->lock);
+ &unit->iommu.lock);
LIST_INIT(&unit->domains);
/*
@@ -531,7 +531,7 @@
dmar_release_resources(dev, unit);
return (error);
}
- error = dmar_init_busdma(unit);
+ error = iommu_init_busdma(&unit->iommu);
if (error != 0) {
dmar_release_resources(dev, unit);
return (error);
@@ -596,14 +596,17 @@
MODULE_DEPEND(dmar, acpi, 1, 1, 1);
void
-dmar_set_buswide_ctx(struct dmar_unit *unit, u_int busno)
+dmar_set_buswide_ctx(struct iommu_unit *unit, u_int busno)
{
+ struct dmar_unit *dmar;
+
+ dmar = (struct dmar_unit *)unit;
MPASS(busno <= PCI_BUSMAX);
- DMAR_LOCK(unit);
- unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |=
+ DMAR_LOCK(dmar);
+ dmar->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |=
1 << (busno % (NBBY * sizeof(uint32_t)));
- DMAR_UNLOCK(unit);
+ DMAR_UNLOCK(dmar);
}
bool
@@ -736,7 +739,7 @@
char *ptr, *ptrend;
int match;
- dmarh = dmar_find_by_index(unit->unit);
+ dmarh = dmar_find_by_index(unit->iommu.unit);
if (dmarh == NULL)
return (false);
if (dmarh->Segment != dev_domain)
@@ -782,7 +785,7 @@
return (NULL);
}
-struct dmar_unit *
+struct iommu_unit *
dmar_find(device_t dev, bool verbose)
{
device_t dmar_dev;
@@ -818,12 +821,12 @@
if (verbose) {
device_printf(dev, "pci%d:%d:%d:%d matched dmar%d by %s",
dev_domain, pci_get_bus(dev), pci_get_slot(dev),
- pci_get_function(dev), unit->unit, banner);
+ pci_get_function(dev), unit->iommu.unit, banner);
printf(" scope path ");
dmar_print_path(dev_busno, dev_path_len, dev_path);
printf("\n");
}
- return (unit);
+ return (&unit->iommu);
}
static struct dmar_unit *
@@ -906,12 +909,12 @@
}
struct rmrr_iter_args {
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
int dev_domain;
int dev_busno;
const ACPI_DMAR_PCI_PATH *dev_path;
int dev_path_len;
- struct dmar_map_entries_tailq *rmrr_entries;
+ struct iommu_map_entries_tailq *rmrr_entries;
};
static int
@@ -920,7 +923,7 @@
struct rmrr_iter_args *ria;
ACPI_DMAR_RESERVED_MEMORY *resmem;
ACPI_DMAR_DEVICE_SCOPE *devscope;
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
char *ptr, *ptrend;
int match;
@@ -956,9 +959,9 @@
}
void
-dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain, int dev_busno,
+dmar_dev_parse_rmrr(struct iommu_domain *domain, int dev_domain, int dev_busno,
const void *dev_path, int dev_path_len,
- struct dmar_map_entries_tailq *rmrr_entries)
+ struct iommu_map_entries_tailq *rmrr_entries)
{
struct rmrr_iter_args ria;
@@ -1004,7 +1007,8 @@
struct inst_rmrr_iter_args *iria;
const char *ptr, *ptrend;
device_t dev;
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
+ struct dmar_unit *dmar;
int dev_path_len;
uint16_t rid;
@@ -1037,7 +1041,7 @@
if (bootverbose) {
printf("dmar%d no dev found for RMRR "
"[%#jx, %#jx] rid %#x scope path ",
- iria->dmar->unit,
+ iria->dmar->iommu.unit,
(uintmax_t)resmem->BaseAddress,
(uintmax_t)resmem->EndAddress,
rid);
@@ -1045,11 +1049,11 @@
(const ACPI_DMAR_PCI_PATH *)(devscope + 1));
printf("\n");
}
- unit = dmar_find_by_scope(resmem->Segment,
+ dmar = dmar_find_by_scope(resmem->Segment,
devscope->Bus,
(const ACPI_DMAR_PCI_PATH *)(devscope + 1),
dev_path_len);
- if (iria->dmar != unit)
+ if (iria->dmar != dmar)
continue;
dmar_get_ctx_for_devpath(iria->dmar, rid,
resmem->Segment, devscope->Bus,
@@ -1057,9 +1061,11 @@
dev_path_len, false, true);
} else {
unit = dmar_find(dev, false);
- if (iria->dmar != unit)
+ dmar = (struct dmar_unit *)unit;
+ if (iria->dmar != dmar)
continue;
- dmar_instantiate_ctx(iria->dmar, dev, true);
+ iommu_instantiate_device(&(iria)->dmar->iommu,
+ dev, true);
}
}
@@ -1071,11 +1077,14 @@
* Pre-create all contexts for the DMAR which have RMRR entries.
*/
int
-dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar)
+dmar_instantiate_rmrr_ctxs(struct iommu_unit *unit)
{
+ struct dmar_unit *dmar;
struct inst_rmrr_iter_args iria;
int error;
+ dmar = (struct dmar_unit *)unit;
+
if (!dmar_barrier_enter(dmar, DMAR_BARRIER_RMRR))
return (0);
@@ -1086,15 +1095,15 @@
if (!LIST_EMPTY(&dmar->domains)) {
KASSERT((dmar->hw_gcmd & DMAR_GCMD_TE) == 0,
("dmar%d: RMRR not handled but translation is already enabled",
- dmar->unit));
+ dmar->iommu.unit));
error = dmar_enable_translation(dmar);
if (bootverbose) {
if (error == 0) {
printf("dmar%d: enabled translation\n",
- dmar->unit);
+ dmar->iommu.unit);
} else {
printf("dmar%d: enabling translation failed, "
- "error %d\n", dmar->unit, error);
+ "error %d\n", dmar->iommu.unit, error);
}
}
}
@@ -1107,9 +1116,9 @@
#include <ddb/db_lex.h>
static void
-dmar_print_domain_entry(const struct dmar_map_entry *entry)
+dmar_print_domain_entry(const struct iommu_map_entry *entry)
{
- struct dmar_map_entry *l, *r;
+ struct iommu_map_entry *l, *r;
db_printf(
" start %jx end %jx first %jx last %jx free_down %jx flags %x ",
@@ -1131,22 +1140,22 @@
}
static void
-dmar_print_ctx(struct dmar_ctx *ctx)
+dmar_print_ctx(struct iommu_device *ctx)
{
db_printf(
" @%p pci%d:%d:%d refs %d flags %x loads %lu unloads %lu\n",
- ctx, pci_get_bus(ctx->ctx_tag.owner),
- pci_get_slot(ctx->ctx_tag.owner),
- pci_get_function(ctx->ctx_tag.owner), ctx->refs, ctx->flags,
+ ctx, pci_get_bus(ctx->device_tag.owner),
+ pci_get_slot(ctx->device_tag.owner),
+ pci_get_function(ctx->device_tag.owner), ctx->refs, ctx->flags,
ctx->loads, ctx->unloads);
}
static void
-dmar_print_domain(struct dmar_domain *domain, bool show_mappings)
+dmar_print_domain(struct iommu_domain *domain, bool show_mappings)
{
- struct dmar_map_entry *entry;
- struct dmar_ctx *ctx;
+ struct iommu_map_entry *entry;
+ struct iommu_device *ctx;
db_printf(
" @%p dom %d mgaw %d agaw %d pglvl %d end %jx refs %d\n"
@@ -1177,11 +1186,11 @@
}
}
-DB_FUNC(dmar_domain, db_dmar_print_domain, db_show_table, CS_OWN, NULL)
+DB_FUNC(iommu_domain, db_dmar_print_domain, db_show_table, CS_OWN, NULL)
{
struct dmar_unit *unit;
- struct dmar_domain *domain;
- struct dmar_ctx *ctx;
+ struct iommu_domain *domain;
+ struct iommu_device *ctx;
bool show_mappings, valid;
int pci_domain, bus, device, function, i, t;
db_expr_t radix;
@@ -1222,7 +1231,7 @@
db_radix = radix;
db_skip_to_eol();
if (!valid) {
- db_printf("usage: show dmar_domain [/m] "
+ db_printf("usage: show iommu_domain [/m] "
"<domain> <bus> <device> <func>\n");
return;
}
@@ -1231,11 +1240,11 @@
LIST_FOREACH(domain, &unit->domains, link) {
LIST_FOREACH(ctx, &domain->contexts, link) {
if (pci_domain == unit->segment &&
- bus == pci_get_bus(ctx->ctx_tag.owner) &&
+ bus == pci_get_bus(ctx->device_tag.owner) &&
device ==
- pci_get_slot(ctx->ctx_tag.owner) &&
+ pci_get_slot(ctx->device_tag.owner) &&
function ==
- pci_get_function(ctx->ctx_tag.owner)) {
+ pci_get_function(ctx->device_tag.owner)) {
dmar_print_domain(domain,
show_mappings);
goto out;
@@ -1250,12 +1259,13 @@
dmar_print_one(int idx, bool show_domains, bool show_mappings)
{
struct dmar_unit *unit;
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
int i, frir;
unit = device_get_softc(dmar_devs[idx]);
- db_printf("dmar%d at %p, root at 0x%jx, ver 0x%x\n", unit->unit, unit,
- dmar_read8(unit, DMAR_RTADDR_REG), dmar_read4(unit, DMAR_VER_REG));
+ db_printf("dmar%d at %p, root at 0x%jx, ver 0x%x\n", unit->iommu.unit,
+ unit, dmar_read8(unit, DMAR_RTADDR_REG),
+ dmar_read4(unit, DMAR_VER_REG));
db_printf("cap 0x%jx ecap 0x%jx gsts 0x%x fsts 0x%x fectl 0x%x\n",
(uintmax_t)dmar_read8(unit, DMAR_CAP_REG),
(uintmax_t)dmar_read8(unit, DMAR_ECAP_REG),
Index: sys/x86/iommu/intel_fault.c
===================================================================
--- sys/x86/iommu/intel_fault.c
+++ sys/x86/iommu/intel_fault.c
@@ -96,25 +96,25 @@
clear = 0;
if ((fsts & DMAR_FSTS_ITE) != 0) {
- printf("DMAR%d: Invalidation timed out\n", unit->unit);
+ printf("DMAR%d: Invalidation timed out\n", unit->iommu.unit);
clear |= DMAR_FSTS_ITE;
}
if ((fsts & DMAR_FSTS_ICE) != 0) {
printf("DMAR%d: Invalidation completion error\n",
- unit->unit);
+ unit->iommu.unit);
clear |= DMAR_FSTS_ICE;
}
if ((fsts & DMAR_FSTS_IQE) != 0) {
printf("DMAR%d: Invalidation queue error\n",
- unit->unit);
+ unit->iommu.unit);
clear |= DMAR_FSTS_IQE;
}
if ((fsts & DMAR_FSTS_APF) != 0) {
- printf("DMAR%d: Advanced pending fault\n", unit->unit);
+ printf("DMAR%d: Advanced pending fault\n", unit->iommu.unit);
clear |= DMAR_FSTS_APF;
}
if ((fsts & DMAR_FSTS_AFO) != 0) {
- printf("DMAR%d: Advanced fault overflow\n", unit->unit);
+ printf("DMAR%d: Advanced fault overflow\n", unit->iommu.unit);
clear |= DMAR_FSTS_AFO;
}
if (clear != 0)
@@ -176,7 +176,7 @@
*
*/
if ((fsts & DMAR_FSTS_PFO) != 0) {
- printf("DMAR%d: Fault Overflow\n", unit->unit);
+ printf("DMAR%d: Fault Overflow\n", unit->iommu.unit);
dmar_write4(unit, DMAR_FSTS_REG, DMAR_FSTS_PFO);
}
@@ -191,7 +191,7 @@
dmar_fault_task(void *arg, int pending __unused)
{
struct dmar_unit *unit;
- struct dmar_ctx *ctx;
+ struct iommu_device *ctx;
uint64_t fault_rec[2];
int sid, bus, slot, func, faultp;
@@ -208,7 +208,7 @@
DMAR_FAULT_UNLOCK(unit);
sid = DMAR_FRCD2_SID(fault_rec[1]);
- printf("DMAR%d: ", unit->unit);
+ printf("DMAR%d: ", unit->iommu.unit);
DMAR_LOCK(unit);
ctx = dmar_find_ctx_locked(unit, sid);
if (ctx == NULL) {
@@ -226,10 +226,10 @@
ctx->flags |= DMAR_CTX_FAULTED;
ctx->last_fault_rec[0] = fault_rec[0];
ctx->last_fault_rec[1] = fault_rec[1];
- device_print_prettyname(ctx->ctx_tag.owner);
- bus = pci_get_bus(ctx->ctx_tag.owner);
- slot = pci_get_slot(ctx->ctx_tag.owner);
- func = pci_get_function(ctx->ctx_tag.owner);
+ device_print_prettyname(ctx->device_tag.owner);
+ bus = pci_get_bus(ctx->device_tag.owner);
+ slot = pci_get_slot(ctx->device_tag.owner);
+ func = pci_get_function(ctx->device_tag.owner);
}
DMAR_UNLOCK(unit);
printf(
@@ -276,7 +276,7 @@
unit->fault_taskqueue = taskqueue_create_fast("dmarff", M_WAITOK,
taskqueue_thread_enqueue, &unit->fault_taskqueue);
taskqueue_start_threads(&unit->fault_taskqueue, 1, PI_AV,
- "dmar%d fault taskq", unit->unit);
+ "dmar%d fault taskq", unit->iommu.unit);
DMAR_LOCK(unit);
dmar_disable_fault_intr(unit);
Index: sys/x86/iommu/intel_gas.c
===================================================================
--- sys/x86/iommu/intel_gas.c
+++ sys/x86/iommu/intel_gas.c
@@ -74,27 +74,27 @@
* Guest Address Space management.
*/
-static uma_zone_t dmar_map_entry_zone;
+static uma_zone_t iommu_map_entry_zone;
static void
intel_gas_init(void)
{
- dmar_map_entry_zone = uma_zcreate("DMAR_MAP_ENTRY",
- sizeof(struct dmar_map_entry), NULL, NULL,
+ iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
+ sizeof(struct iommu_map_entry), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);
}
SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);
-struct dmar_map_entry *
-dmar_gas_alloc_entry(struct dmar_domain *domain, u_int flags)
+struct iommu_map_entry *
+dmar_gas_alloc_entry(struct iommu_domain *domain, u_int flags)
{
- struct dmar_map_entry *res;
+ struct iommu_map_entry *res;
KASSERT((flags & ~(DMAR_PGF_WAITOK)) == 0,
("unsupported flags %x", flags));
- res = uma_zalloc(dmar_map_entry_zone, ((flags & DMAR_PGF_WAITOK) !=
+ res = uma_zalloc(iommu_map_entry_zone, ((flags & DMAR_PGF_WAITOK) !=
0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
if (res != NULL) {
res->domain = domain;
@@ -104,18 +104,18 @@
}
void
-dmar_gas_free_entry(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_free_entry(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
KASSERT(domain == entry->domain,
("mismatched free domain %p entry %p entry->domain %p", domain,
entry, entry->domain));
atomic_subtract_int(&domain->entries_cnt, 1);
- uma_zfree(dmar_map_entry_zone, entry);
+ uma_zfree(iommu_map_entry_zone, entry);
}
static int
-dmar_gas_cmp_entries(struct dmar_map_entry *a, struct dmar_map_entry *b)
+dmar_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b)
{
/* The last entry has zero size, so <= */
@@ -137,9 +137,9 @@
}
static void
-dmar_gas_augment_entry(struct dmar_map_entry *entry)
+dmar_gas_augment_entry(struct iommu_map_entry *entry)
{
- struct dmar_map_entry *child;
+ struct iommu_map_entry *child;
dmar_gaddr_t free_down;
free_down = 0;
@@ -159,14 +159,14 @@
entry->free_down = free_down;
}
-RB_GENERATE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
+RB_GENERATE(dmar_gas_entries_tree, iommu_map_entry, rb_entry,
dmar_gas_cmp_entries);
#ifdef INVARIANTS
static void
-dmar_gas_check_free(struct dmar_domain *domain)
+dmar_gas_check_free(struct iommu_domain *domain)
{
- struct dmar_map_entry *entry, *l, *r;
+ struct iommu_map_entry *entry, *l, *r;
dmar_gaddr_t v;
RB_FOREACH(entry, dmar_gas_entries_tree, &domain->rb_root) {
@@ -190,61 +190,61 @@
#endif
static bool
-dmar_gas_rb_insert(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_rb_insert(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
- struct dmar_map_entry *found;
+ struct iommu_map_entry *found;
found = RB_INSERT(dmar_gas_entries_tree, &domain->rb_root, entry);
return (found == NULL);
}
static void
-dmar_gas_rb_remove(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
}
void
-dmar_gas_init_domain(struct dmar_domain *domain)
+dmar_gas_init_domain(struct iommu_domain *domain)
{
- struct dmar_map_entry *begin, *end;
+ struct iommu_map_entry *begin, *end;
begin = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
end = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain));
KASSERT(RB_EMPTY(&domain->rb_root), ("non-empty entries %p", domain));
begin->start = 0;
begin->end = DMAR_PAGE_SIZE;
- begin->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED;
+ begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
dmar_gas_rb_insert(domain, begin);
end->start = domain->end;
end->end = domain->end;
- end->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED;
+ end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
dmar_gas_rb_insert(domain, end);
domain->first_place = begin;
domain->last_place = end;
- domain->flags |= DMAR_DOMAIN_GAS_INITED;
- DMAR_DOMAIN_UNLOCK(domain);
+ domain->flags |= IOMMU_DOMAIN_GAS_INITED;
+ IOMMU_DOMAIN_UNLOCK(domain);
}
void
-dmar_gas_fini_domain(struct dmar_domain *domain)
+dmar_gas_fini_domain(struct iommu_domain *domain)
{
- struct dmar_map_entry *entry, *entry1;
+ struct iommu_map_entry *entry, *entry1;
- DMAR_DOMAIN_ASSERT_LOCKED(domain);
+ IOMMU_DOMAIN_ASSERT_LOCKED(domain);
KASSERT(domain->entries_cnt == 2, ("domain still in use %p", domain));
entry = RB_MIN(dmar_gas_entries_tree, &domain->rb_root);
KASSERT(entry->start == 0, ("start entry start %p", domain));
KASSERT(entry->end == DMAR_PAGE_SIZE, ("start entry end %p", domain));
- KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE,
+ KASSERT(entry->flags == IOMMU_MAP_ENTRY_PLACE,
("start entry flags %p", domain));
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
dmar_gas_free_entry(domain, entry);
@@ -252,14 +252,14 @@
entry = RB_MAX(dmar_gas_entries_tree, &domain->rb_root);
KASSERT(entry->start == domain->end, ("end entry start %p", domain));
KASSERT(entry->end == domain->end, ("end entry end %p", domain));
- KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE,
+ KASSERT(entry->flags == IOMMU_MAP_ENTRY_PLACE,
("end entry flags %p", domain));
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
dmar_gas_free_entry(domain, entry);
RB_FOREACH_SAFE(entry, dmar_gas_entries_tree, &domain->rb_root,
entry1) {
- KASSERT((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0,
+ KASSERT((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0,
("non-RMRR entry left %p", domain));
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
dmar_gas_free_entry(domain, entry);
@@ -267,12 +267,12 @@
}
struct dmar_gas_match_args {
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
dmar_gaddr_t size;
int offset;
const struct bus_dma_tag_common *common;
u_int gas_flags;
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
};
/*
@@ -298,7 +298,7 @@
return (false);
/* No boundary crossing. */
- if (dmar_test_boundary(a->entry->start + a->offset, a->size,
+ if (iommu_test_boundary(a->entry->start + a->offset, a->size,
a->common->boundary))
return (true);
@@ -313,7 +313,7 @@
/* DMAR_PAGE_SIZE to create gap after new entry. */
if (start + a->offset + a->size + DMAR_PAGE_SIZE <= end &&
start + a->offset + a->size <= maxaddr &&
- dmar_test_boundary(start + a->offset, a->size,
+ iommu_test_boundary(start + a->offset, a->size,
a->common->boundary)) {
a->entry->start = start;
return (true);
@@ -327,7 +327,7 @@
* XXXKIB. It is possible that bs is exactly at the start of
* the next entry, in which case we do not have a gap. Ignore for now.
*/
- if ((a->gas_flags & DMAR_GM_CANSPLIT) != 0) {
+ if ((a->gas_flags & IOMMU_MF_CANSPLIT) != 0) {
a->size = bs - a->entry->start;
return (true);
}
@@ -353,13 +353,13 @@
found = dmar_gas_rb_insert(a->domain, a->entry);
KASSERT(found, ("found dup %p start %jx size %jx",
a->domain, (uintmax_t)a->entry->start, (uintmax_t)a->size));
- a->entry->flags = DMAR_MAP_ENTRY_MAP;
+ a->entry->flags = IOMMU_MAP_ENTRY_MAP;
}
static int
-dmar_gas_lowermatch(struct dmar_gas_match_args *a, struct dmar_map_entry *entry)
+dmar_gas_lowermatch(struct dmar_gas_match_args *a, struct iommu_map_entry *entry)
{
- struct dmar_map_entry *child;
+ struct iommu_map_entry *child;
child = RB_RIGHT(entry, rb_entry);
if (child != NULL && entry->end < a->common->lowaddr &&
@@ -388,9 +388,9 @@
}
static int
-dmar_gas_uppermatch(struct dmar_gas_match_args *a, struct dmar_map_entry *entry)
+dmar_gas_uppermatch(struct dmar_gas_match_args *a, struct iommu_map_entry *entry)
{
- struct dmar_map_entry *child;
+ struct iommu_map_entry *child;
if (entry->free_down < a->size + a->offset + DMAR_PAGE_SIZE)
return (ENOMEM);
@@ -418,14 +418,14 @@
}
static int
-dmar_gas_find_space(struct dmar_domain *domain,
+dmar_gas_find_space(struct iommu_domain *domain,
const struct bus_dma_tag_common *common, dmar_gaddr_t size,
- int offset, u_int flags, struct dmar_map_entry *entry)
+ int offset, u_int flags, struct iommu_map_entry *entry)
{
struct dmar_gas_match_args a;
int error;
- DMAR_DOMAIN_ASSERT_LOCKED(domain);
+ IOMMU_DOMAIN_ASSERT_LOCKED(domain);
KASSERT(entry->flags == 0, ("dirty entry %p %p", domain, entry));
KASSERT((size & DMAR_PAGE_MASK) == 0, ("size %jx", (uintmax_t)size));
@@ -454,13 +454,13 @@
}
static int
-dmar_gas_alloc_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
+dmar_gas_alloc_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
u_int flags)
{
- struct dmar_map_entry *next, *prev;
+ struct iommu_map_entry *next, *prev;
bool found;
- DMAR_DOMAIN_ASSERT_LOCKED(domain);
+ IOMMU_DOMAIN_ASSERT_LOCKED(domain);
if ((entry->start & DMAR_PAGE_MASK) != 0 ||
(entry->end & DMAR_PAGE_MASK) != 0)
@@ -485,16 +485,16 @@
* extends both ways.
*/
if (prev != NULL && prev->end > entry->start &&
- (prev->flags & DMAR_MAP_ENTRY_PLACE) == 0) {
- if ((flags & DMAR_GM_RMRR) == 0 ||
- (prev->flags & DMAR_MAP_ENTRY_RMRR) == 0)
+ (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
+ if ((flags & IOMMU_MF_RMRR) == 0 ||
+ (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
return (EBUSY);
entry->start = prev->end;
}
if (next->start < entry->end &&
- (next->flags & DMAR_MAP_ENTRY_PLACE) == 0) {
- if ((flags & DMAR_GM_RMRR) == 0 ||
- (next->flags & DMAR_MAP_ENTRY_RMRR) == 0)
+ (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
+ if ((flags & IOMMU_MF_RMRR) == 0 ||
+ (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
return (EBUSY);
entry->end = next->start;
}
@@ -514,11 +514,11 @@
found = dmar_gas_rb_insert(domain, entry);
KASSERT(found, ("found RMRR dup %p start %jx end %jx",
domain, (uintmax_t)entry->start, (uintmax_t)entry->end));
- if ((flags & DMAR_GM_RMRR) != 0)
- entry->flags = DMAR_MAP_ENTRY_RMRR;
+ if ((flags & IOMMU_MF_RMRR) != 0)
+ entry->flags = IOMMU_MAP_ENTRY_RMRR;
#ifdef INVARIANTS
- struct dmar_map_entry *ip, *in;
+ struct iommu_map_entry *ip, *in;
ip = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry);
in = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry);
KASSERT(prev == NULL || ip == prev,
@@ -537,16 +537,16 @@
}
void
-dmar_gas_free_space(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_free_space(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
- DMAR_DOMAIN_ASSERT_LOCKED(domain);
- KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR |
- DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_MAP,
+ IOMMU_DOMAIN_ASSERT_LOCKED(domain);
+ KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
+ IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP,
("permanent entry %p %p", domain, entry));
dmar_gas_rb_remove(domain, entry);
- entry->flags &= ~DMAR_MAP_ENTRY_MAP;
+ entry->flags &= ~IOMMU_MAP_ENTRY_MAP;
#ifdef INVARIANTS
if (dmar_check_free)
dmar_gas_check_free(domain);
@@ -554,19 +554,19 @@
}
void
-dmar_gas_free_region(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_free_region(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
- struct dmar_map_entry *next, *prev;
+ struct iommu_map_entry *next, *prev;
- DMAR_DOMAIN_ASSERT_LOCKED(domain);
- KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR |
- DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_RMRR,
+ IOMMU_DOMAIN_ASSERT_LOCKED(domain);
+ KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
+ IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR,
("non-RMRR entry %p %p", domain, entry));
prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry);
next = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry);
dmar_gas_rb_remove(domain, entry);
- entry->flags &= ~DMAR_MAP_ENTRY_RMRR;
+ entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;
if (prev == NULL)
dmar_gas_rb_insert(domain, domain->first_place);
@@ -575,25 +575,25 @@
}
int
-dmar_gas_map(struct dmar_domain *domain,
+dmar_gas_map(struct iommu_domain *domain,
const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset,
- u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res)
+ u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
int error;
- KASSERT((flags & ~(DMAR_GM_CANWAIT | DMAR_GM_CANSPLIT)) == 0,
+ KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0,
("invalid flags 0x%x", flags));
- entry = dmar_gas_alloc_entry(domain, (flags & DMAR_GM_CANWAIT) != 0 ?
+ entry = dmar_gas_alloc_entry(domain, (flags & IOMMU_MF_CANWAIT) != 0 ?
DMAR_PGF_WAITOK : 0);
if (entry == NULL)
return (ENOMEM);
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
error = dmar_gas_find_space(domain, common, size, offset, flags,
entry);
if (error == ENOMEM) {
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
dmar_gas_free_entry(domain, entry);
return (error);
}
@@ -606,17 +606,17 @@
KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx",
(uintmax_t)entry->end, (uintmax_t)domain->end));
entry->flags |= eflags;
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
error = domain_map_buf(domain, entry->start, entry->end - entry->start,
ma,
- ((eflags & DMAR_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
- ((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
- ((eflags & DMAR_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
- ((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
- (flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
+ ((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
+ (flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
if (error == ENOMEM) {
- dmar_domain_unload_entry(entry, true);
+ iommu_domain_unload_entry(entry, true);
return (error);
}
KASSERT(error == 0,
@@ -627,7 +627,7 @@
}
int
-dmar_gas_map_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
+dmar_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
u_int eflags, u_int flags, vm_page_t *ma)
{
dmar_gaddr_t start;
@@ -635,30 +635,30 @@
KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
entry, entry->flags));
- KASSERT((flags & ~(DMAR_GM_CANWAIT | DMAR_GM_RMRR)) == 0,
+ KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0,
("invalid flags 0x%x", flags));
start = entry->start;
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
error = dmar_gas_alloc_region(domain, entry, flags);
if (error != 0) {
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
return (error);
}
entry->flags |= eflags;
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
if (entry->end == entry->start)
return (0);
error = domain_map_buf(domain, entry->start, entry->end - entry->start,
ma + OFF_TO_IDX(start - entry->start),
- ((eflags & DMAR_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
- ((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
- ((eflags & DMAR_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
- ((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
- (flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
+ ((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
+ (flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
if (error == ENOMEM) {
- dmar_domain_unload_entry(entry, false);
+ iommu_domain_unload_entry(entry, false);
return (error);
}
KASSERT(error == 0,
@@ -668,20 +668,20 @@
}
int
-dmar_gas_reserve_region(struct dmar_domain *domain, dmar_gaddr_t start,
+dmar_gas_reserve_region(struct iommu_domain *domain, dmar_gaddr_t start,
dmar_gaddr_t end)
{
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
int error;
entry = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
entry->start = start;
entry->end = end;
- DMAR_DOMAIN_LOCK(domain);
- error = dmar_gas_alloc_region(domain, entry, DMAR_GM_CANWAIT);
+ IOMMU_DOMAIN_LOCK(domain);
+ error = dmar_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
if (error == 0)
- entry->flags |= DMAR_MAP_ENTRY_UNMAPPED;
- DMAR_DOMAIN_UNLOCK(domain);
+ entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
+ IOMMU_DOMAIN_UNLOCK(domain);
if (error != 0)
dmar_gas_free_entry(domain, entry);
return (error);
Index: sys/x86/iommu/intel_idpgtbl.c
===================================================================
--- sys/x86/iommu/intel_idpgtbl.c
+++ sys/x86/iommu/intel_idpgtbl.c
@@ -69,7 +69,7 @@
#include <dev/pci/pcireg.h>
#include <x86/iommu/intel_dmar.h>
-static int domain_unmap_buf_locked(struct dmar_domain *domain,
+static int domain_unmap_buf_locked(struct iommu_domain *domain,
dmar_gaddr_t base, dmar_gaddr_t size, int flags);
/*
@@ -163,9 +163,9 @@
* maxaddr is typically mapped.
*/
vm_object_t
-domain_get_idmap_pgtbl(struct dmar_domain *domain, dmar_gaddr_t maxaddr)
+domain_get_idmap_pgtbl(struct iommu_domain *domain, dmar_gaddr_t maxaddr)
{
- struct dmar_unit *unit;
+ struct dmar_unit *dmar;
struct idpgtbl *tbl;
vm_object_t res;
vm_page_t m;
@@ -194,7 +194,7 @@
sx_slock(&idpgtbl_lock);
LIST_FOREACH(tbl, &idpgtbls, link) {
if (tbl->maxaddr >= maxaddr &&
- dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
+ dmar_pglvl_supported(domain->iommu, tbl->pglvl) &&
tbl->leaf == leaf) {
res = tbl->pgtbl_obj;
vm_object_reference(res);
@@ -213,7 +213,7 @@
sx_xlock(&idpgtbl_lock);
LIST_FOREACH(tbl, &idpgtbls, link) {
if (tbl->maxaddr >= maxaddr &&
- dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
+ dmar_pglvl_supported(domain->iommu, tbl->pglvl) &&
tbl->leaf == leaf) {
res = tbl->pgtbl_obj;
vm_object_reference(res);
@@ -254,18 +254,18 @@
* If DMAR cannot look into the chipset write buffer, flush it
* as well.
*/
- unit = domain->dmar;
- if (!DMAR_IS_COHERENT(unit)) {
+ dmar = (struct dmar_unit *)domain->iommu;
+ if (!DMAR_IS_COHERENT(dmar)) {
VM_OBJECT_WLOCK(res);
for (m = vm_page_lookup(res, 0); m != NULL;
m = vm_page_next(m))
pmap_invalidate_cache_pages(&m, 1);
VM_OBJECT_WUNLOCK(res);
}
- if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
- DMAR_LOCK(unit);
- dmar_flush_write_bufs(unit);
- DMAR_UNLOCK(unit);
+ if ((dmar->hw_cap & DMAR_CAP_RWBF) != 0) {
+ DMAR_LOCK(dmar);
+ dmar_flush_write_bufs(dmar);
+ DMAR_UNLOCK(dmar);
}
return (res);
@@ -323,7 +323,7 @@
* the level lvl.
*/
static int
-domain_pgtbl_pte_off(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
+domain_pgtbl_pte_off(struct iommu_domain *domain, dmar_gaddr_t base, int lvl)
{
base >>= DMAR_PAGE_SHIFT + (domain->pglvl - lvl - 1) *
@@ -337,7 +337,7 @@
* lvl.
*/
static vm_pindex_t
-domain_pgtbl_get_pindex(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
+domain_pgtbl_get_pindex(struct iommu_domain *domain, dmar_gaddr_t base, int lvl)
{
vm_pindex_t idx, pidx;
int i;
@@ -353,7 +353,7 @@
}
static dmar_pte_t *
-domain_pgtbl_map_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
+domain_pgtbl_map_pte(struct iommu_domain *domain, dmar_gaddr_t base, int lvl,
int flags, vm_pindex_t *idxp, struct sf_buf **sf)
{
vm_page_t m;
@@ -361,7 +361,7 @@
dmar_pte_t *pte, *ptep;
vm_pindex_t idx, idx1;
- DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
+ IOMMU_DOMAIN_ASSERT_PGLOCKED(domain);
KASSERT((flags & DMAR_PGF_OBJL) != 0, ("lost PGF_OBJL"));
idx = domain_pgtbl_get_pindex(domain, base, lvl);
@@ -408,7 +408,8 @@
}
dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
VM_PAGE_TO_PHYS(m));
- dmar_flush_pte_to_ram(domain->dmar, ptep);
+ dmar_flush_pte_to_ram((struct dmar_unit *)domain->iommu,
+ ptep);
sf_buf_page(sfp)->ref_count += 1;
m->ref_count--;
dmar_unmap_pgtbl(sfp);
@@ -421,7 +422,7 @@
}
static int
-domain_map_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
+domain_map_buf_locked(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
{
dmar_pte_t *pte;
@@ -431,7 +432,7 @@
int lvl;
bool superpage;
- DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
+ IOMMU_DOMAIN_ASSERT_PGLOCKED(domain);
base1 = base;
size1 = size;
@@ -489,7 +490,7 @@
}
dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
(superpage ? DMAR_PTE_SP : 0));
- dmar_flush_pte_to_ram(domain->dmar, pte);
+ dmar_flush_pte_to_ram((struct dmar_unit *)domain->iommu, pte);
sf_buf_page(sf)->ref_count += 1;
}
if (sf != NULL)
@@ -499,15 +500,15 @@
}
int
-domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size,
+domain_map_buf(struct iommu_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size,
vm_page_t *ma, uint64_t pflags, int flags)
{
- struct dmar_unit *unit;
+ struct dmar_unit *dmar;
int error;
- unit = domain->dmar;
+ dmar = (struct dmar_unit *)domain->iommu;
- KASSERT((domain->flags & DMAR_DOMAIN_IDMAP) == 0,
+ KASSERT((domain->flags & IOMMU_DOMAIN_IDMAP) == 0,
("modifying idmap pagetable domain %p", domain));
KASSERT((base & DMAR_PAGE_MASK) == 0,
("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
@@ -532,38 +533,38 @@
DMAR_PTE_TM)) == 0,
("invalid pte flags %jx", (uintmax_t)pflags));
KASSERT((pflags & DMAR_PTE_SNP) == 0 ||
- (unit->hw_ecap & DMAR_ECAP_SC) != 0,
+ (dmar->hw_ecap & DMAR_ECAP_SC) != 0,
("PTE_SNP for dmar without snoop control %p %jx",
domain, (uintmax_t)pflags));
KASSERT((pflags & DMAR_PTE_TM) == 0 ||
- (unit->hw_ecap & DMAR_ECAP_DI) != 0,
+ (dmar->hw_ecap & DMAR_ECAP_DI) != 0,
("PTE_TM for dmar without DIOTLB %p %jx",
domain, (uintmax_t)pflags));
KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));
- DMAR_DOMAIN_PGLOCK(domain);
+ IOMMU_DOMAIN_PGLOCK(domain);
error = domain_map_buf_locked(domain, base, size, ma, pflags, flags);
- DMAR_DOMAIN_PGUNLOCK(domain);
+ IOMMU_DOMAIN_PGUNLOCK(domain);
if (error != 0)
return (error);
- if ((unit->hw_cap & DMAR_CAP_CM) != 0)
+ if ((dmar->hw_cap & DMAR_CAP_CM) != 0)
domain_flush_iotlb_sync(domain, base, size);
- else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
+ else if ((dmar->hw_cap & DMAR_CAP_RWBF) != 0) {
/* See 11.1 Write Buffer Flushing. */
- DMAR_LOCK(unit);
- dmar_flush_write_bufs(unit);
- DMAR_UNLOCK(unit);
+ DMAR_LOCK(dmar);
+ dmar_flush_write_bufs(dmar);
+ DMAR_UNLOCK(dmar);
}
return (0);
}
-static void domain_unmap_clear_pte(struct dmar_domain *domain,
+static void domain_unmap_clear_pte(struct iommu_domain *domain,
dmar_gaddr_t base, int lvl, int flags, dmar_pte_t *pte,
struct sf_buf **sf, bool free_fs);
static void
-domain_free_pgtbl_pde(struct dmar_domain *domain, dmar_gaddr_t base,
+domain_free_pgtbl_pde(struct iommu_domain *domain, dmar_gaddr_t base,
int lvl, int flags)
{
struct sf_buf *sf;
@@ -576,13 +577,13 @@
}
static void
-domain_unmap_clear_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
+domain_unmap_clear_pte(struct iommu_domain *domain, dmar_gaddr_t base, int lvl,
int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf)
{
vm_page_t m;
dmar_pte_clear(&pte->pte);
- dmar_flush_pte_to_ram(domain->dmar, pte);
+ dmar_flush_pte_to_ram((struct dmar_unit *)domain->iommu, pte);
m = sf_buf_page(*sf);
if (free_sf) {
dmar_unmap_pgtbl(*sf);
@@ -605,7 +606,7 @@
* Assumes that the unmap is never partial.
*/
static int
-domain_unmap_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
+domain_unmap_buf_locked(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, int flags)
{
dmar_pte_t *pte;
@@ -614,11 +615,11 @@
dmar_gaddr_t pg_sz;
int lvl;
- DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
+ IOMMU_DOMAIN_ASSERT_PGLOCKED(domain);
if (size == 0)
return (0);
- KASSERT((domain->flags & DMAR_DOMAIN_IDMAP) == 0,
+ KASSERT((domain->flags & IOMMU_DOMAIN_IDMAP) == 0,
("modifying idmap pagetable domain %p", domain));
KASSERT((base & DMAR_PAGE_MASK) == 0,
("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
@@ -677,19 +678,19 @@
}
int
-domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base,
+domain_unmap_buf(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, int flags)
{
int error;
- DMAR_DOMAIN_PGLOCK(domain);
+ IOMMU_DOMAIN_PGLOCK(domain);
error = domain_unmap_buf_locked(domain, base, size, flags);
- DMAR_DOMAIN_PGUNLOCK(domain);
+ IOMMU_DOMAIN_PGUNLOCK(domain);
return (error);
}
int
-domain_alloc_pgtbl(struct dmar_domain *domain)
+domain_alloc_pgtbl(struct iommu_domain *domain)
{
vm_page_t m;
@@ -698,37 +699,40 @@
domain->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL);
- DMAR_DOMAIN_PGLOCK(domain);
+ IOMMU_DOMAIN_PGLOCK(domain);
m = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_WAITOK |
DMAR_PGF_ZERO | DMAR_PGF_OBJL);
/* No implicit free of the top level page table page. */
m->ref_count = 1;
- DMAR_DOMAIN_PGUNLOCK(domain);
- DMAR_LOCK(domain->dmar);
- domain->flags |= DMAR_DOMAIN_PGTBL_INITED;
- DMAR_UNLOCK(domain->dmar);
+ IOMMU_DOMAIN_PGUNLOCK(domain);
+ IOMMU_LOCK(domain->iommu);
+ domain->flags |= IOMMU_DOMAIN_PGTBL_INITED;
+ IOMMU_UNLOCK(domain->iommu);
return (0);
}
void
-domain_free_pgtbl(struct dmar_domain *domain)
+domain_free_pgtbl(struct iommu_domain *domain)
{
+ struct dmar_unit *dmar;
vm_object_t obj;
vm_page_t m;
+ dmar = (struct dmar_unit *)domain->iommu;
+
obj = domain->pgtbl_obj;
if (obj == NULL) {
- KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
- (domain->flags & DMAR_DOMAIN_IDMAP) != 0,
+ KASSERT((dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
+ (domain->flags & IOMMU_DOMAIN_IDMAP) != 0,
("lost pagetable object domain %p", domain));
return;
}
- DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
+ IOMMU_DOMAIN_ASSERT_PGLOCKED(domain);
domain->pgtbl_obj = NULL;
- if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0) {
+ if ((domain->flags & IOMMU_DOMAIN_IDMAP) != 0) {
put_idmap_pgtbl(obj);
- domain->flags &= ~DMAR_DOMAIN_IDMAP;
+ domain->flags &= ~IOMMU_DOMAIN_IDMAP;
return;
}
@@ -757,7 +761,7 @@
}
void
-domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
+domain_flush_iotlb_sync(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size)
{
struct dmar_unit *unit;
@@ -765,9 +769,9 @@
uint64_t iotlbr;
int am, iro;
- unit = domain->dmar;
+ unit = (struct dmar_unit *)domain->iommu;
KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
- unit->unit));
+ unit->iommu.unit));
iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
DMAR_LOCK(unit);
if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
@@ -775,7 +779,7 @@
DMAR_IOTLB_DID(domain->domain), iro);
KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
DMAR_IOTLB_IAIG_INVLD,
- ("dmar%d: invalidation failed %jx", unit->unit,
+ ("dmar%d: invalidation failed %jx", unit->iommu.unit,
(uintmax_t)iotlbr));
} else {
for (; size > 0; base += isize, size -= isize) {
@@ -788,7 +792,7 @@
DMAR_IOTLB_IAIG_INVLD,
("dmar%d: PSI invalidation failed "
"iotlbr 0x%jx base 0x%jx size 0x%jx am %d",
- unit->unit, (uintmax_t)iotlbr,
+ unit->iommu.unit, (uintmax_t)iotlbr,
(uintmax_t)base, (uintmax_t)size, am));
/*
* Any non-page granularity covers whole guest
Index: sys/x86/iommu/intel_intrmap.c
===================================================================
--- sys/x86/iommu/intel_intrmap.c
+++ sys/x86/iommu/intel_intrmap.c
@@ -234,7 +234,8 @@
dmar_ir_find(device_t src, uint16_t *rid, int *is_dmar)
{
devclass_t src_class;
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
+ struct dmar_unit *dmar;
/*
* We need to determine if the interrupt source generates FSB
@@ -247,17 +248,18 @@
*is_dmar = FALSE;
src_class = device_get_devclass(src);
if (src_class == devclass_find("dmar")) {
- unit = NULL;
+ dmar = NULL;
if (is_dmar != NULL)
*is_dmar = TRUE;
} else if (src_class == devclass_find("hpet")) {
- unit = dmar_find_hpet(src, rid);
+ dmar = dmar_find_hpet(src, rid);
} else {
unit = dmar_find(src, bootverbose);
+ dmar = (struct dmar_unit *)unit;
if (unit != NULL && rid != NULL)
- dmar_get_requester(src, rid);
+ iommu_get_requester(src, rid);
}
- return (unit);
+ return (dmar);
}
static void
Index: sys/x86/iommu/intel_qi.c
===================================================================
--- sys/x86/iommu/intel_qi.c
+++ sys/x86/iommu/intel_qi.c
@@ -213,7 +213,7 @@
if (cold || nowait) {
cpu_spinwait();
} else {
- msleep(&unit->inv_seq_waiters, &unit->lock, 0,
+ msleep(&unit->inv_seq_waiters, &unit->iommu.lock, 0,
"dmarse", hz);
}
}
@@ -221,14 +221,14 @@
}
void
-dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t base,
+dmar_qi_invalidate_locked(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, struct dmar_qi_genseq *pseq, bool emit_wait)
{
struct dmar_unit *unit;
dmar_gaddr_t isize;
int am;
- unit = domain->dmar;
+ unit = (struct dmar_unit *)domain->iommu;
DMAR_ASSERT_LOCKED(unit);
for (; size > 0; base += isize, size -= isize) {
am = calc_am(unit, base, size, &isize);
@@ -329,7 +329,8 @@
struct dmar_unit *unit;
unit = arg;
- KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled", unit->unit));
+ KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled",
+ unit->iommu.unit));
taskqueue_enqueue(unit->qi_taskqueue, &unit->qi_task);
return (FILTER_HANDLED);
}
@@ -338,7 +339,7 @@
dmar_qi_task(void *arg, int pending __unused)
{
struct dmar_unit *unit;
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
uint32_t ics;
unit = arg;
@@ -352,8 +353,8 @@
break;
TAILQ_REMOVE(&unit->tlb_flush_entries, entry, dmamap_link);
DMAR_UNLOCK(unit);
- dmar_domain_free_entry(entry, (entry->flags &
- DMAR_MAP_ENTRY_QI_NF) == 0);
+ iommu_domain_free_entry(entry, (entry->flags &
+ IOMMU_MAP_ENTRY_QI_NF) == 0);
DMAR_LOCK(unit);
}
ics = dmar_read4(unit, DMAR_ICS_REG);
@@ -385,7 +386,7 @@
unit->qi_taskqueue = taskqueue_create_fast("dmarqf", M_WAITOK,
taskqueue_thread_enqueue, &unit->qi_taskqueue);
taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV,
- "dmar%d qi taskq", unit->unit);
+ "dmar%d qi taskq", unit->iommu.unit);
unit->inv_waitd_gen = 0;
unit->inv_waitd_seq = 1;
@@ -442,7 +443,7 @@
dmar_disable_qi_intr(unit);
dmar_disable_qi(unit);
KASSERT(unit->inv_seq_waiters == 0,
- ("dmar%d: waiters on disabled queue", unit->unit));
+ ("dmar%d: waiters on disabled queue", unit->iommu.unit));
DMAR_UNLOCK(unit);
kmem_free(unit->inv_queue, unit->inv_queue_size);
@@ -457,7 +458,8 @@
uint32_t iectl;
DMAR_ASSERT_LOCKED(unit);
- KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit));
+ KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
+ unit->iommu.unit));
iectl = dmar_read4(unit, DMAR_IECTL_REG);
iectl &= ~DMAR_IECTL_IM;
dmar_write4(unit, DMAR_IECTL_REG, iectl);
@@ -469,7 +471,8 @@
uint32_t iectl;
DMAR_ASSERT_LOCKED(unit);
- KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit));
+ KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
+ unit->iommu.unit));
iectl = dmar_read4(unit, DMAR_IECTL_REG);
dmar_write4(unit, DMAR_IECTL_REG, iectl | DMAR_IECTL_IM);
}
Index: sys/x86/iommu/intel_quirks.c
===================================================================
--- sys/x86/iommu/intel_quirks.c
+++ sys/x86/iommu/intel_quirks.c
@@ -222,8 +222,11 @@
};
void
-dmar_quirks_pre_use(struct dmar_unit *dmar)
+dmar_quirks_pre_use(struct iommu_unit *unit)
{
+ struct dmar_unit *dmar;
+
+ dmar = (struct dmar_unit *)unit;
if (!dmar_barrier_enter(dmar, DMAR_BARRIER_USEQ))
return;
Index: sys/x86/iommu/intel_reg.h
===================================================================
--- sys/x86/iommu/intel_reg.h
+++ sys/x86/iommu/intel_reg.h
@@ -51,10 +51,10 @@
#define DMAR_CTX_CNT (DMAR_PAGE_SIZE / sizeof(dmar_root_entry_t))
-typedef struct dmar_ctx_entry {
+typedef struct iommu_device_entry {
uint64_t ctx1;
uint64_t ctx2;
-} dmar_ctx_entry_t;
+} iommu_device_entry_t;
#define DMAR_CTX1_P 1 /* Present */
#define DMAR_CTX1_FPD 2 /* Fault Processing Disable */
/* Translation Type: */
Index: sys/x86/iommu/intel_utils.c
===================================================================
--- sys/x86/iommu/intel_utils.c
+++ sys/x86/iommu/intel_utils.c
@@ -106,26 +106,32 @@
};
bool
-dmar_pglvl_supported(struct dmar_unit *unit, int pglvl)
+dmar_pglvl_supported(struct iommu_unit *unit, int pglvl)
{
+ struct dmar_unit *dmar;
int i;
+ dmar = (struct dmar_unit *)unit;
+
for (i = 0; i < nitems(sagaw_bits); i++) {
if (sagaw_bits[i].pglvl != pglvl)
continue;
- if ((DMAR_CAP_SAGAW(unit->hw_cap) & sagaw_bits[i].cap) != 0)
+ if ((DMAR_CAP_SAGAW(dmar->hw_cap) & sagaw_bits[i].cap) != 0)
return (true);
}
return (false);
}
int
-domain_set_agaw(struct dmar_domain *domain, int mgaw)
+domain_set_agaw(struct iommu_domain *domain, int mgaw)
{
+ struct dmar_unit *dmar;
int sagaw, i;
+ dmar = (struct dmar_unit *)domain->iommu;
+
domain->mgaw = mgaw;
- sagaw = DMAR_CAP_SAGAW(domain->dmar->hw_cap);
+ sagaw = DMAR_CAP_SAGAW(dmar->hw_cap);
for (i = 0; i < nitems(sagaw_bits); i++) {
if (sagaw_bits[i].agaw >= mgaw) {
domain->agaw = sagaw_bits[i].agaw;
@@ -134,7 +140,7 @@
return (0);
}
}
- device_printf(domain->dmar->dev,
+ device_printf(dmar->dev,
"context request mgaw %d: no agaw found, sagaw %x\n",
mgaw, sagaw);
return (EINVAL);
@@ -192,8 +198,9 @@
* the context ctx.
*/
int
-domain_is_sp_lvl(struct dmar_domain *domain, int lvl)
+domain_is_sp_lvl(struct iommu_domain *domain, int lvl)
{
+ struct dmar_unit *dmar;
int alvl, cap_sps;
static const int sagaw_sp[] = {
DMAR_CAP_SPS_2M,
@@ -203,7 +210,8 @@
};
alvl = domain->pglvl - lvl - 1;
- cap_sps = DMAR_CAP_SPS(domain->dmar->hw_cap);
+ dmar = (struct dmar_unit *)domain->iommu;
+ cap_sps = DMAR_CAP_SPS(dmar->hw_cap);
return (alvl < nitems(sagaw_sp) && (sagaw_sp[alvl] & cap_sps) != 0);
}
@@ -228,7 +236,7 @@
}
dmar_gaddr_t
-domain_page_size(struct dmar_domain *domain, int lvl)
+domain_page_size(struct iommu_domain *domain, int lvl)
{
return (pglvl_page_size(domain->pglvl, lvl));
@@ -380,7 +388,7 @@
}
void
-dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst)
+dmar_flush_ctx_to_ram(struct dmar_unit *unit, iommu_device_entry_t *dst)
{
dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
@@ -482,7 +490,7 @@
* DMAR_GCMD_WBF is only valid when CAP_RWBF is reported.
*/
KASSERT((unit->hw_cap & DMAR_CAP_RWBF) != 0,
- ("dmar%d: no RWBF", unit->unit));
+ ("dmar%d: no RWBF", unit->iommu.unit));
dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd | DMAR_GCMD_WBF);
DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_WBFS)
@@ -586,11 +594,12 @@
if ((dmar->barrier_flags & f_inproc) != 0) {
while ((dmar->barrier_flags & f_inproc) != 0) {
dmar->barrier_flags |= f_wakeup;
- msleep(&dmar->barrier_flags, &dmar->lock, 0,
+ msleep(&dmar->barrier_flags, &dmar->iommu.lock, 0,
"dmarb", 0);
}
KASSERT((dmar->barrier_flags & f_done) != 0,
- ("dmar%d barrier %d missing done", dmar->unit, barrier_id));
+ ("dmar%d barrier %d missing done", dmar->iommu.unit,
+ barrier_id));
DMAR_UNLOCK(dmar);
return (false);
}
@@ -607,7 +616,7 @@
DMAR_ASSERT_LOCKED(dmar);
KASSERT((dmar->barrier_flags & (f_done | f_inproc)) == f_inproc,
- ("dmar%d barrier %d missed entry", dmar->unit, barrier_id));
+ ("dmar%d barrier %d missed entry", dmar->iommu.unit, barrier_id));
dmar->barrier_flags |= f_done;
if ((dmar->barrier_flags & f_wakeup) != 0)
wakeup(&dmar->barrier_flags);
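
The whole rename hinges on one layout invariant, visible in the struct dmar_unit hunk above: the generic struct iommu_unit is embedded as the first member of the driver-specific softc. Generic busdma code can then operate on a struct iommu_unit *, and the Intel back end recovers its own state with a plain cast (dmar = (struct dmar_unit *)unit;), while macros such as DMAR_LOCK() reach the shared mutex through (dmar)->iommu.lock. The standalone C sketch below is illustrative only (simplified fields and hypothetical report helpers, not code from this patch); it shows why the cast is well defined: C guarantees that a pointer to a structure, suitably converted, points to its initial member, and vice versa (C11 6.7.2.1).

/*
 * Illustrative sketch only (simplified fields, hypothetical helpers);
 * demonstrates the first-member embedding pattern the patch relies on.
 */
#include <stdio.h>

struct iommu_unit {
	int unit;			/* generic unit number */
};

struct dmar_unit {
	struct iommu_unit iommu;	/* must stay the first member */
	unsigned long hw_cap;		/* Intel-specific capability bits */
};

/* Generic code sees only the embedded struct iommu_unit. */
static void
iommu_report(struct iommu_unit *unit)
{
	printf("iommu unit %d\n", unit->unit);
}

/* The back end downcasts to reach its own fields, as the patch does. */
static void
dmar_report(struct iommu_unit *unit)
{
	struct dmar_unit *dmar;

	dmar = (struct dmar_unit *)unit;	/* safe: iommu is first */
	printf("dmar%d cap 0x%lx\n", dmar->iommu.unit, dmar->hw_cap);
}

int
main(void)
{
	struct dmar_unit u = { .iommu = { .unit = 0 }, .hw_cap = 0x1 };

	iommu_report(&u.iommu);		/* upcast: address of the member */
	dmar_report(&u.iommu);		/* downcast inside the back end */
	return (0);
}

Keeping the generic state first makes the conversion free at run time; if the member ever moved, an offset-aware conversion such as FreeBSD's __containerof() would be needed instead.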