D25574.id74237.diff
Index: sys/sys/iommu.h
===================================================================
--- /dev/null
+++ sys/sys/iommu.h
@@ -0,0 +1,131 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_IOMMU_H_
+#define _SYS_IOMMU_H_
+
+#include <sys/taskqueue.h>
+#include <sys/tree.h>
+
+/* Host or physical memory address, after translation. */
+typedef uint64_t iommu_haddr_t;
+/* Guest or bus address, before translation. */
+typedef uint64_t iommu_gaddr_t;
+
+struct iommu_map_entry;
+TAILQ_HEAD(iommu_map_entries_tailq, iommu_map_entry);
+
+struct iommu_qi_genseq {
+ u_int gen;
+ uint32_t seq;
+};
+
+struct iommu_map_entry {
+ iommu_gaddr_t start;
+ iommu_gaddr_t end;
+ iommu_gaddr_t first; /* Least start in subtree */
+ iommu_gaddr_t last; /* Greatest end in subtree */
+ iommu_gaddr_t free_down; /* Max free space below the
+ current R/B tree node */
+ u_int flags;
+ TAILQ_ENTRY(iommu_map_entry) dmamap_link; /* Link for dmamap entries */
+ RB_ENTRY(iommu_map_entry) rb_entry; /* Links for domain entries */
+ TAILQ_ENTRY(iommu_map_entry) unroll_link; /* Link for unroll after
+ dmamap_load failure */
+ struct iommu_domain *domain;
+ struct iommu_qi_genseq gseq;
+};
+
+#define IOMMU_MAP_ENTRY_PLACE 0x0001 /* Fake entry */
+#define IOMMU_MAP_ENTRY_RMRR 0x0002 /* Permanent, not linked by
+ dmamap_link */
+#define IOMMU_MAP_ENTRY_MAP 0x0004 /* Busdma created, linked by
+ dmamap_link */
+#define IOMMU_MAP_ENTRY_UNMAPPED 0x0010 /* No backing pages */
+#define IOMMU_MAP_ENTRY_QI_NF 0x0020 /* qi task, do not free entry */
+#define IOMMU_MAP_ENTRY_READ 0x1000 /* Read permitted */
+#define IOMMU_MAP_ENTRY_WRITE 0x2000 /* Write permitted */
+#define IOMMU_MAP_ENTRY_SNOOP 0x4000 /* Snoop */
+#define IOMMU_MAP_ENTRY_TM 0x8000 /* Transient */
+
+struct iommu_unit {
+ struct mtx lock;
+ int unit;
+
+ int dma_enabled;
+
+ /* Busdma delayed map load */
+ struct task dmamap_load_task;
+ TAILQ_HEAD(, bus_dmamap_iommu) delayed_maps;
+ struct taskqueue *delayed_taskqueue;
+};
+
+/*
+ * Locking annotations:
+ * (u) - Protected by iommu unit lock
+ * (d) - Protected by domain lock
+ * (c) - Immutable after initialization
+ */
+
+struct iommu_domain {
+ struct iommu_unit *iommu; /* (c) */
+ struct mtx lock; /* (c) */
+ struct task unload_task; /* (c) */
+ struct iommu_map_entries_tailq unload_entries; /* (d) Entries to
+ unload */
+};
+
+struct iommu_ctx {
+ struct iommu_domain *domain; /* (c) */
+ struct bus_dma_tag_iommu *tag; /* (c) Root tag */
+ u_long loads; /* atomic updates, for stat only */
+ u_long unloads; /* same */
+ u_int flags; /* (u) */
+};
+
+#define IOMMU_LOCK(unit) mtx_lock(&(unit)->lock)
+#define IOMMU_UNLOCK(unit) mtx_unlock(&(unit)->lock)
+#define IOMMU_ASSERT_LOCKED(unit) mtx_assert(&(unit)->lock, MA_OWNED)
+
+#define IOMMU_DOMAIN_LOCK(dom) mtx_lock(&(dom)->lock)
+#define IOMMU_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock)
+#define IOMMU_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED)
+
+struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev,
+ uint16_t rid, bool id_mapped, bool rmrr_init);
+struct iommu_unit *iommu_find(device_t dev, bool verbose);
+void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free);
+void iommu_domain_unload(struct iommu_domain *domain,
+ struct iommu_map_entries_tailq *entries, bool cansleep);
+
+#endif /* !_SYS_IOMMU_H_ */
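
The header above is the core of the change: the map-entry, domain, and unit types that were private to the Intel DMAR driver become subsystem-neutral iommu_* types that busdma consumers can share. As a minimal illustration (not part of the diff, and assuming the usual kernel headers), a backend running in a sleepable context could drain a domain's pending unloads through this API as follows; the locking follows the (d) annotation on unload_entries:

/*
 * Hypothetical sketch only: drain a domain's deferred-unload queue.
 * Entries on unload_entries are linked through dmamap_link, as in the
 * busdma code below.
 */
static void
example_drain_unloads(struct iommu_domain *domain)
{
	struct iommu_map_entries_tailq entries;

	TAILQ_INIT(&entries);
	IOMMU_DOMAIN_LOCK(domain);
	TAILQ_CONCAT(&entries, &domain->unload_entries, dmamap_link);
	IOMMU_DOMAIN_UNLOCK(domain);

	/* cansleep == true: the caller is in a sleepable context. */
	iommu_domain_unload(domain, &entries, true);
}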
Index: sys/x86/iommu/busdma_dmar.h
===================================================================
--- sys/x86/iommu/busdma_dmar.h
+++ sys/x86/iommu/busdma_dmar.h
@@ -34,33 +34,32 @@
#ifndef __X86_IOMMU_BUSDMA_DMAR_H
#define __X86_IOMMU_BUSDMA_DMAR_H
-struct dmar_map_entry;
-TAILQ_HEAD(dmar_map_entries_tailq, dmar_map_entry);
+#include <sys/iommu.h>
-struct bus_dma_tag_dmar {
+struct bus_dma_tag_iommu {
struct bus_dma_tag_common common;
- struct dmar_ctx *ctx;
+ struct iommu_ctx *ctx;
device_t owner;
int map_count;
bus_dma_segment_t *segments;
};
-struct bus_dmamap_dmar {
- struct bus_dma_tag_dmar *tag;
+struct bus_dmamap_iommu {
+ struct bus_dma_tag_iommu *tag;
struct memdesc mem;
bus_dmamap_callback_t *callback;
void *callback_arg;
- struct dmar_map_entries_tailq map_entries;
- TAILQ_ENTRY(bus_dmamap_dmar) delay_link;
+ struct iommu_map_entries_tailq map_entries;
+ TAILQ_ENTRY(bus_dmamap_iommu) delay_link;
bool locked;
bool cansleep;
int flags;
};
-#define BUS_DMAMAP_DMAR_MALLOC 0x0001
-#define BUS_DMAMAP_DMAR_KMEM_ALLOC 0x0002
+#define BUS_DMAMAP_IOMMU_MALLOC 0x0001
+#define BUS_DMAMAP_IOMMU_KMEM_ALLOC 0x0002
-extern struct bus_dma_impl bus_dma_dmar_impl;
+extern struct bus_dma_impl bus_dma_iommu_impl;
bus_dma_tag_t acpi_iommu_get_dma_tag(device_t dev, device_t child);
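
After the rename, every consumer that used to cast an opaque bus_dma_tag_t to struct bus_dma_tag_dmar casts to struct bus_dma_tag_iommu instead. An illustrative helper (not in the diff) shows the downcast idiom the .c file relies on: the cast is only trusted after checking that common.impl points at bus_dma_iommu_impl.

/*
 * Hypothetical helper: recover the IOMMU-specific tag from a generic
 * bus_dma_tag_t, mirroring the impl check used in busdma_dmar.c.
 */
static struct bus_dma_tag_iommu *
example_tag_downcast(bus_dma_tag_t dmat)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	if (tc->impl != &bus_dma_iommu_impl)
		return (NULL);	/* Owned by another busdma backend. */
	return ((struct bus_dma_tag_iommu *)dmat);
}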
Index: sys/x86/iommu/busdma_dmar.c
===================================================================
--- sys/x86/iommu/busdma_dmar.c
+++ sys/x86/iommu/busdma_dmar.c
@@ -74,7 +74,7 @@
*/
static bool
-dmar_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
+iommu_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
{
char str[128], *env;
int default_bounce;
@@ -117,7 +117,7 @@
* bounce mapping.
*/
device_t
-dmar_get_requester(device_t dev, uint16_t *rid)
+iommu_get_requester(device_t dev, uint16_t *rid)
{
devclass_t pci_class;
device_t l, pci, pcib, pcip, pcibp, requester;
@@ -137,15 +137,15 @@
*/
for (;;) {
pci = device_get_parent(l);
- KASSERT(pci != NULL, ("dmar_get_requester(%s): NULL parent "
+ KASSERT(pci != NULL, ("iommu_get_requester(%s): NULL parent "
"for %s", device_get_name(dev), device_get_name(l)));
KASSERT(device_get_devclass(pci) == pci_class,
- ("dmar_get_requester(%s): non-pci parent %s for %s",
+ ("iommu_get_requester(%s): non-pci parent %s for %s",
device_get_name(dev), device_get_name(pci),
device_get_name(l)));
pcib = device_get_parent(pci);
- KASSERT(pcib != NULL, ("dmar_get_requester(%s): NULL bridge "
+ KASSERT(pcib != NULL, ("iommu_get_requester(%s): NULL bridge "
"for %s", device_get_name(dev), device_get_name(pci)));
/*
@@ -228,15 +228,15 @@
return (requester);
}
-struct dmar_ctx *
-dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr)
+struct iommu_ctx *
+iommu_instantiate_ctx(struct iommu_unit *unit, device_t dev, bool rmrr)
{
device_t requester;
- struct dmar_ctx *ctx;
+ struct iommu_ctx *ctx;
bool disabled;
uint16_t rid;
- requester = dmar_get_requester(dev, &rid);
+ requester = iommu_get_requester(dev, &rid);
/*
* If the user requested the IOMMU disabled for the device, we
@@ -245,10 +245,10 @@
* Instead provide the identity mapping for the device
* context.
*/
- disabled = dmar_bus_dma_is_dev_disabled(pci_get_domain(requester),
+ disabled = iommu_bus_dma_is_dev_disabled(pci_get_domain(requester),
pci_get_bus(requester), pci_get_slot(requester),
pci_get_function(requester));
- ctx = dmar_get_ctx_for_dev(dmar, requester, rid, disabled, rmrr);
+ ctx = iommu_get_ctx(unit, requester, rid, disabled, rmrr);
if (ctx == NULL)
return (NULL);
if (disabled) {
@@ -256,12 +256,12 @@
* Keep the first reference on context, release the
* later refs.
*/
- DMAR_LOCK(dmar);
+ IOMMU_LOCK(unit);
if ((ctx->flags & DMAR_CTX_DISABLED) == 0) {
ctx->flags |= DMAR_CTX_DISABLED;
- DMAR_UNLOCK(dmar);
+ IOMMU_UNLOCK(unit);
} else {
- dmar_free_ctx_locked(dmar, ctx);
+ dmar_free_ctx_locked(unit, ctx);
}
ctx = NULL;
}
@@ -271,36 +271,36 @@
bus_dma_tag_t
acpi_iommu_get_dma_tag(device_t dev, device_t child)
{
- struct dmar_unit *dmar;
- struct dmar_ctx *ctx;
+ struct iommu_unit *unit;
+ struct iommu_ctx *ctx;
bus_dma_tag_t res;
- dmar = dmar_find(child, bootverbose);
+ unit = iommu_find(child, bootverbose);
/* Not in scope of any DMAR ? */
- if (dmar == NULL)
+ if (unit == NULL)
return (NULL);
- if (!dmar->dma_enabled)
+ if (!unit->dma_enabled)
return (NULL);
- dmar_quirks_pre_use(dmar);
- dmar_instantiate_rmrr_ctxs(dmar);
+ dmar_quirks_pre_use(unit);
+ dmar_instantiate_rmrr_ctxs(unit);
- ctx = dmar_instantiate_ctx(dmar, child, false);
- res = ctx == NULL ? NULL : (bus_dma_tag_t)&ctx->ctx_tag;
+ ctx = iommu_instantiate_ctx(unit, child, false);
+ res = ctx == NULL ? NULL : (bus_dma_tag_t)ctx->tag;
return (res);
}
bool
bus_dma_dmar_set_buswide(device_t dev)
{
- struct dmar_unit *dmar;
+ struct iommu_unit *unit;
device_t parent;
u_int busno, slot, func;
parent = device_get_parent(dev);
if (device_get_devclass(parent) != devclass_find("pci"))
return (false);
- dmar = dmar_find(dev, bootverbose);
- if (dmar == NULL)
+ unit = iommu_find(dev, bootverbose);
+ if (unit == NULL)
return (false);
busno = pci_get_bus(dev);
slot = pci_get_slot(dev);
@@ -309,40 +309,40 @@
if (bootverbose) {
device_printf(dev,
"dmar%d pci%d:%d:%d requested buswide busdma\n",
- dmar->unit, busno, slot, func);
+ unit->unit, busno, slot, func);
}
return (false);
}
- dmar_set_buswide_ctx(dmar, busno);
+ dmar_set_buswide_ctx(unit, busno);
return (true);
}
static MALLOC_DEFINE(M_DMAR_DMAMAP, "dmar_dmamap", "Intel DMAR DMA Map");
-static void dmar_bus_schedule_dmamap(struct dmar_unit *unit,
- struct bus_dmamap_dmar *map);
+static void iommu_bus_schedule_dmamap(struct iommu_unit *unit,
+ struct bus_dmamap_iommu *map);
static int
-dmar_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+iommu_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
void *lockfuncarg, bus_dma_tag_t *dmat)
{
- struct bus_dma_tag_dmar *newtag, *oldtag;
+ struct bus_dma_tag_iommu *newtag, *oldtag;
int error;
*dmat = NULL;
error = common_bus_dma_tag_create(parent != NULL ?
- &((struct bus_dma_tag_dmar *)parent)->common : NULL, alignment,
+ &((struct bus_dma_tag_iommu *)parent)->common : NULL, alignment,
boundary, lowaddr, highaddr, filter, filterarg, maxsize,
nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
- sizeof(struct bus_dma_tag_dmar), (void **)&newtag);
+ sizeof(struct bus_dma_tag_iommu), (void **)&newtag);
if (error != 0)
goto out;
- oldtag = (struct bus_dma_tag_dmar *)parent;
- newtag->common.impl = &bus_dma_dmar_impl;
+ oldtag = (struct bus_dma_tag_iommu *)parent;
+ newtag->common.impl = &bus_dma_iommu_impl;
newtag->ctx = oldtag->ctx;
newtag->owner = oldtag->owner;
@@ -355,20 +355,20 @@
}
static int
-dmar_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
+iommu_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
{
return (0);
}
static int
-dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
+iommu_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
{
- struct bus_dma_tag_dmar *dmat, *dmat_copy, *parent;
+ struct bus_dma_tag_iommu *dmat, *dmat_copy, *parent;
int error;
error = 0;
- dmat_copy = dmat = (struct bus_dma_tag_dmar *)dmat1;
+ dmat_copy = dmat = (struct bus_dma_tag_iommu *)dmat1;
if (dmat != NULL) {
if (dmat->map_count != 0) {
@@ -376,10 +376,10 @@
goto out;
}
while (dmat != NULL) {
- parent = (struct bus_dma_tag_dmar *)dmat->common.parent;
+ parent = (struct bus_dma_tag_iommu *)dmat->common.parent;
if (atomic_fetchadd_int(&dmat->common.ref_count, -1) ==
1) {
- if (dmat == &dmat->ctx->ctx_tag)
+ if (dmat == dmat->ctx->tag)
dmar_free_ctx(dmat->ctx);
free_domain(dmat->segments, M_DMAR_DMAMAP);
free(dmat, M_DEVBUF);
@@ -394,19 +394,19 @@
}
static bool
-dmar_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+iommu_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
{
return (false);
}
static int
-dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+iommu_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
- tag = (struct bus_dma_tag_dmar *)dmat;
+ tag = (struct bus_dma_tag_iommu *)dmat;
map = malloc_domainset(sizeof(*map), M_DMAR_DMAMAP,
DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO);
if (map == NULL) {
@@ -434,22 +434,22 @@
}
static int
-dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
+iommu_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- struct dmar_domain *domain;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ struct iommu_domain *domain;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
if (map != NULL) {
domain = tag->ctx->domain;
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
if (!TAILQ_EMPTY(&map->map_entries)) {
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
return (EBUSY);
}
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
free_domain(map, M_DMAR_DMAMAP);
}
tag->map_count--;
@@ -458,15 +458,15 @@
static int
-dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+iommu_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
bus_dmamap_t *mapp)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
int error, mflags;
vm_memattr_t attr;
- error = dmar_bus_dmamap_create(dmat, flags, mapp);
+ error = iommu_bus_dmamap_create(dmat, flags, mapp);
if (error != 0)
return (error);
@@ -475,23 +475,23 @@
attr = (flags & BUS_DMA_NOCACHE) != 0 ? VM_MEMATTR_UNCACHEABLE :
VM_MEMATTR_DEFAULT;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)*mapp;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)*mapp;
if (tag->common.maxsize < PAGE_SIZE &&
tag->common.alignment <= tag->common.maxsize &&
attr == VM_MEMATTR_DEFAULT) {
*vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF,
DOMAINSET_PREF(tag->common.domain), mflags);
- map->flags |= BUS_DMAMAP_DMAR_MALLOC;
+ map->flags |= BUS_DMAMAP_IOMMU_MALLOC;
} else {
*vaddr = (void *)kmem_alloc_attr_domainset(
DOMAINSET_PREF(tag->common.domain), tag->common.maxsize,
mflags, 0ul, BUS_SPACE_MAXADDR, attr);
- map->flags |= BUS_DMAMAP_DMAR_KMEM_ALLOC;
+ map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC;
}
if (*vaddr == NULL) {
- dmar_bus_dmamap_destroy(dmat, *mapp);
+ iommu_bus_dmamap_destroy(dmat, *mapp);
*mapp = NULL;
return (ENOMEM);
}
@@ -499,37 +499,37 @@
}
static void
-dmar_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
+iommu_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
- if ((map->flags & BUS_DMAMAP_DMAR_MALLOC) != 0) {
+ if ((map->flags & BUS_DMAMAP_IOMMU_MALLOC) != 0) {
free_domain(vaddr, M_DEVBUF);
- map->flags &= ~BUS_DMAMAP_DMAR_MALLOC;
+ map->flags &= ~BUS_DMAMAP_IOMMU_MALLOC;
} else {
- KASSERT((map->flags & BUS_DMAMAP_DMAR_KMEM_ALLOC) != 0,
- ("dmar_bus_dmamem_free for non alloced map %p", map));
+ KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0,
+ ("iommu_bus_dmamem_free for non alloced map %p", map));
kmem_free((vm_offset_t)vaddr, tag->common.maxsize);
- map->flags &= ~BUS_DMAMAP_DMAR_KMEM_ALLOC;
+ map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC;
}
- dmar_bus_dmamap_destroy(dmat, map1);
+ iommu_bus_dmamap_destroy(dmat, map1);
}
static int
-dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
- struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen,
+iommu_bus_dmamap_load_something1(struct bus_dma_tag_iommu *tag,
+ struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen,
int flags, bus_dma_segment_t *segs, int *segp,
- struct dmar_map_entries_tailq *unroll_list)
+ struct iommu_map_entries_tailq *unroll_list)
{
- struct dmar_ctx *ctx;
- struct dmar_domain *domain;
- struct dmar_map_entry *entry;
- dmar_gaddr_t size;
+ struct iommu_ctx *ctx;
+ struct iommu_domain *domain;
+ struct iommu_map_entry *entry;
+ iommu_gaddr_t size;
bus_size_t buflen1;
int error, idx, gas_flags, seg;
@@ -555,17 +555,17 @@
 * (Too) optimistically allow split if there is more
 * than one segment left.
*/
- gas_flags = map->cansleep ? DMAR_GM_CANWAIT : 0;
+ gas_flags = map->cansleep ? IOMMU_MF_CANWAIT : 0;
if (seg + 1 < tag->common.nsegments)
- gas_flags |= DMAR_GM_CANSPLIT;
+ gas_flags |= IOMMU_MF_CANSPLIT;
error = dmar_gas_map(domain, &tag->common, size, offset,
- DMAR_MAP_ENTRY_READ |
- ((flags & BUS_DMA_NOWRITE) == 0 ? DMAR_MAP_ENTRY_WRITE : 0),
+ IOMMU_MAP_ENTRY_READ |
+ ((flags & BUS_DMA_NOWRITE) == 0 ? IOMMU_MAP_ENTRY_WRITE : 0),
gas_flags, ma + idx, &entry);
if (error != 0)
break;
- if ((gas_flags & DMAR_GM_CANSPLIT) != 0) {
+ if ((gas_flags & IOMMU_MF_CANSPLIT) != 0) {
KASSERT(size >= entry->end - entry->start,
("split increased entry size %jx %jx %jx",
(uintmax_t)size, (uintmax_t)entry->start,
@@ -596,7 +596,7 @@
(uintmax_t)entry->start, (uintmax_t)entry->end,
(uintmax_t)tag->common.lowaddr,
(uintmax_t)tag->common.highaddr));
- KASSERT(dmar_test_boundary(entry->start + offset, buflen1,
+ KASSERT(iommu_test_boundary(entry->start + offset, buflen1,
tag->common.boundary),
("boundary failed: ctx %p start 0x%jx end 0x%jx "
"boundary 0x%jx", ctx, (uintmax_t)entry->start,
@@ -607,10 +607,10 @@
(uintmax_t)entry->start, (uintmax_t)entry->end,
(uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz));
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
- entry->flags |= DMAR_MAP_ENTRY_MAP;
- DMAR_DOMAIN_UNLOCK(domain);
+ entry->flags |= IOMMU_MAP_ENTRY_MAP;
+ IOMMU_DOMAIN_UNLOCK(domain);
TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link);
segs[seg].ds_addr = entry->start + offset;
@@ -627,14 +627,14 @@
}
static int
-dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
- struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen,
+iommu_bus_dmamap_load_something(struct bus_dma_tag_iommu *tag,
+ struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen,
int flags, bus_dma_segment_t *segs, int *segp)
{
- struct dmar_ctx *ctx;
- struct dmar_domain *domain;
- struct dmar_map_entry *entry, *entry1;
- struct dmar_map_entries_tailq unroll_list;
+ struct iommu_ctx *ctx;
+ struct iommu_domain *domain;
+ struct iommu_map_entry *entry, *entry1;
+ struct iommu_map_entries_tailq unroll_list;
int error;
ctx = tag->ctx;
@@ -642,7 +642,7 @@
atomic_add_long(&ctx->loads, 1);
TAILQ_INIT(&unroll_list);
- error = dmar_bus_dmamap_load_something1(tag, map, ma, offset,
+ error = iommu_bus_dmamap_load_something1(tag, map, ma, offset,
buflen, flags, segs, segp, &unroll_list);
if (error != 0) {
/*
@@ -650,7 +650,7 @@
* partial buffer load, so unfortunately we have to
* revert all work done.
*/
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link,
entry1) {
/*
@@ -664,8 +664,8 @@
TAILQ_INSERT_TAIL(&domain->unload_entries, entry,
dmamap_link);
}
- DMAR_DOMAIN_UNLOCK(domain);
- taskqueue_enqueue(domain->dmar->delayed_taskqueue,
+ IOMMU_DOMAIN_UNLOCK(domain);
+ taskqueue_enqueue(domain->iommu->delayed_taskqueue,
&domain->unload_task);
}
@@ -673,37 +673,37 @@
!map->cansleep)
error = EINPROGRESS;
if (error == EINPROGRESS)
- dmar_bus_schedule_dmamap(domain->dmar, map);
+ iommu_bus_schedule_dmamap(domain->iommu, map);
return (error);
}
static int
-dmar_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1,
+iommu_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1,
struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
bus_dma_segment_t *segs, int *segp)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
- return (dmar_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen,
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
+ return (iommu_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen,
flags, segs, segp));
}
static int
-dmar_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1,
+iommu_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1,
vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
int *segp)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
vm_page_t *ma, fma;
vm_paddr_t pstart, pend, paddr;
int error, i, ma_cnt, mflags, offset;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
pstart = trunc_page(buf);
pend = round_page(buf + buflen);
offset = buf & PAGE_MASK;
@@ -735,7 +735,7 @@
ma[i] = &fma[i];
}
}
- error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,
+ error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen,
flags, segs, segp);
free(fma, M_DEVBUF);
free(ma, M_DEVBUF);
@@ -743,18 +743,18 @@
}
static int
-dmar_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf,
+iommu_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf,
bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
int *segp)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
vm_page_t *ma, fma;
vm_paddr_t pstart, pend, paddr;
int error, i, ma_cnt, mflags, offset;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
pstart = trunc_page((vm_offset_t)buf);
pend = round_page((vm_offset_t)buf + buflen);
offset = (vm_offset_t)buf & PAGE_MASK;
@@ -788,7 +788,7 @@
ma[i] = &fma[i];
}
}
- error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,
+ error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen,
flags, segs, segp);
free(ma, M_DEVBUF);
free(fma, M_DEVBUF);
@@ -796,29 +796,29 @@
}
static void
-dmar_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1,
+iommu_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1,
struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
- struct bus_dmamap_dmar *map;
+ struct bus_dmamap_iommu *map;
if (map1 == NULL)
return;
- map = (struct bus_dmamap_dmar *)map1;
+ map = (struct bus_dmamap_iommu *)map1;
map->mem = *mem;
- map->tag = (struct bus_dma_tag_dmar *)dmat;
+ map->tag = (struct bus_dma_tag_iommu *)dmat;
map->callback = callback;
map->callback_arg = callback_arg;
}
static bus_dma_segment_t *
-dmar_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1,
+iommu_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1,
bus_dma_segment_t *segs, int nsegs, int error)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
if (!map->locked) {
KASSERT(map->cansleep,
@@ -848,76 +848,76 @@
* On amd64, we assume that sf allocation cannot fail.
*/
static void
-dmar_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
+iommu_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- struct dmar_ctx *ctx;
- struct dmar_domain *domain;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ struct iommu_ctx *ctx;
+ struct iommu_domain *domain;
#if defined(__amd64__)
- struct dmar_map_entries_tailq entries;
+ struct iommu_map_entries_tailq entries;
#endif
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
ctx = tag->ctx;
domain = ctx->domain;
atomic_add_long(&ctx->unloads, 1);
#if defined(__i386__)
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
TAILQ_CONCAT(&domain->unload_entries, &map->map_entries, dmamap_link);
- DMAR_DOMAIN_UNLOCK(domain);
- taskqueue_enqueue(domain->dmar->delayed_taskqueue,
+ IOMMU_DOMAIN_UNLOCK(domain);
+ taskqueue_enqueue(domain->iommu->delayed_taskqueue,
&domain->unload_task);
#else /* defined(__amd64__) */
TAILQ_INIT(&entries);
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
THREAD_NO_SLEEPING();
- dmar_domain_unload(domain, &entries, false);
+ iommu_domain_unload(domain, &entries, false);
THREAD_SLEEPING_OK();
- KASSERT(TAILQ_EMPTY(&entries), ("lazy dmar_ctx_unload %p", ctx));
+ KASSERT(TAILQ_EMPTY(&entries), ("lazy iommu_ctx_unload %p", ctx));
#endif
}
static void
-dmar_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
+iommu_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dmasync_op_t op)
{
}
-struct bus_dma_impl bus_dma_dmar_impl = {
- .tag_create = dmar_bus_dma_tag_create,
- .tag_destroy = dmar_bus_dma_tag_destroy,
- .tag_set_domain = dmar_bus_dma_tag_set_domain,
- .id_mapped = dmar_bus_dma_id_mapped,
- .map_create = dmar_bus_dmamap_create,
- .map_destroy = dmar_bus_dmamap_destroy,
- .mem_alloc = dmar_bus_dmamem_alloc,
- .mem_free = dmar_bus_dmamem_free,
- .load_phys = dmar_bus_dmamap_load_phys,
- .load_buffer = dmar_bus_dmamap_load_buffer,
- .load_ma = dmar_bus_dmamap_load_ma,
- .map_waitok = dmar_bus_dmamap_waitok,
- .map_complete = dmar_bus_dmamap_complete,
- .map_unload = dmar_bus_dmamap_unload,
- .map_sync = dmar_bus_dmamap_sync,
+struct bus_dma_impl bus_dma_iommu_impl = {
+ .tag_create = iommu_bus_dma_tag_create,
+ .tag_destroy = iommu_bus_dma_tag_destroy,
+ .tag_set_domain = iommu_bus_dma_tag_set_domain,
+ .id_mapped = iommu_bus_dma_id_mapped,
+ .map_create = iommu_bus_dmamap_create,
+ .map_destroy = iommu_bus_dmamap_destroy,
+ .mem_alloc = iommu_bus_dmamem_alloc,
+ .mem_free = iommu_bus_dmamem_free,
+ .load_phys = iommu_bus_dmamap_load_phys,
+ .load_buffer = iommu_bus_dmamap_load_buffer,
+ .load_ma = iommu_bus_dmamap_load_ma,
+ .map_waitok = iommu_bus_dmamap_waitok,
+ .map_complete = iommu_bus_dmamap_complete,
+ .map_unload = iommu_bus_dmamap_unload,
+ .map_sync = iommu_bus_dmamap_sync,
};
static void
-dmar_bus_task_dmamap(void *arg, int pending)
+iommu_bus_task_dmamap(void *arg, int pending)
{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- struct dmar_unit *unit;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ struct iommu_unit *unit;
unit = arg;
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) {
TAILQ_REMOVE(&unit->delayed_maps, map, delay_link);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
tag = map->tag;
map->cansleep = true;
map->locked = false;
@@ -931,30 +931,30 @@
} else
map->locked = true;
map->cansleep = false;
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
}
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
}
static void
-dmar_bus_schedule_dmamap(struct dmar_unit *unit, struct bus_dmamap_dmar *map)
+iommu_bus_schedule_dmamap(struct iommu_unit *unit, struct bus_dmamap_iommu *map)
{
map->locked = false;
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task);
}
int
-dmar_init_busdma(struct dmar_unit *unit)
+iommu_init_busdma(struct iommu_unit *unit)
{
unit->dma_enabled = 1;
TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
TAILQ_INIT(&unit->delayed_maps);
- TASK_INIT(&unit->dmamap_load_task, 0, dmar_bus_task_dmamap, unit);
+ TASK_INIT(&unit->dmamap_load_task, 0, iommu_bus_task_dmamap, unit);
unit->delayed_taskqueue = taskqueue_create("dmar", M_WAITOK,
taskqueue_thread_enqueue, &unit->delayed_taskqueue);
taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK,
@@ -963,7 +963,7 @@
}
void
-dmar_fini_busdma(struct dmar_unit *unit)
+iommu_fini_busdma(struct iommu_unit *unit)
{
if (unit->delayed_taskqueue == NULL)
@@ -979,11 +979,11 @@
vm_paddr_t start, vm_size_t length, int flags)
{
struct bus_dma_tag_common *tc;
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- struct dmar_ctx *ctx;
- struct dmar_domain *domain;
- struct dmar_map_entry *entry;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ struct iommu_ctx *ctx;
+ struct iommu_domain *domain;
+ struct iommu_map_entry *entry;
vm_page_t *ma;
vm_size_t i;
int error;
@@ -996,13 +996,13 @@
MPASS((flags & ~(BUS_DMA_NOWAIT | BUS_DMA_NOWRITE)) == 0);
tc = (struct bus_dma_tag_common *)dmat;
- if (tc->impl != &bus_dma_dmar_impl)
+ if (tc->impl != &bus_dma_iommu_impl)
return (0);
- tag = (struct bus_dma_tag_dmar *)dmat;
+ tag = (struct bus_dma_tag_iommu *)dmat;
ctx = tag->ctx;
domain = ctx->domain;
- map = (struct bus_dmamap_dmar *)map1;
+ map = (struct bus_dmamap_iommu *)map1;
waitok = (flags & BUS_DMA_NOWAIT) != 0;
entry = dmar_gas_alloc_entry(domain, waitok ? 0 : DMAR_PGF_WAITOK);
@@ -1020,16 +1020,16 @@
ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
VM_MEMATTR_DEFAULT);
}
- error = dmar_gas_map_region(domain, entry, DMAR_MAP_ENTRY_READ |
- ((flags & BUS_DMA_NOWRITE) ? 0 : DMAR_MAP_ENTRY_WRITE),
- waitok ? DMAR_GM_CANWAIT : 0, ma);
+ error = dmar_gas_map_region(domain, entry, IOMMU_MAP_ENTRY_READ |
+ ((flags & BUS_DMA_NOWRITE) ? 0 : IOMMU_MAP_ENTRY_WRITE),
+ waitok ? IOMMU_MF_CANWAIT : 0, ma);
if (error == 0) {
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
- entry->flags |= DMAR_MAP_ENTRY_MAP;
- DMAR_DOMAIN_UNLOCK(domain);
+ entry->flags |= IOMMU_MAP_ENTRY_MAP;
+ IOMMU_DOMAIN_UNLOCK(domain);
} else {
- dmar_domain_unload_entry(entry, true);
+ iommu_domain_unload_entry(entry, true);
}
for (i = 0; i < atop(length); i++)
vm_page_putfake(ma[i]);
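
From a driver's point of view the renames above are invisible, but the deferred-load machinery is worth spelling out: when a load cannot complete immediately and the map may sleep, iommu_bus_schedule_dmamap() queues the map on the unit's delayed_maps list, bus_dmamap_load() returns EINPROGRESS, and iommu_bus_task_dmamap() later replays the load from the taskqueue and invokes the stored callback. A hypothetical driver callback (not in the diff) must therefore tolerate running in either context:

/* Illustration only: a driver-side callback for a possibly-deferred load. */
struct example_softc {
	bus_addr_t	paddr;
	int		error;
};

static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct example_softc *sc;

	sc = arg;
	sc->error = error;
	/* Runs synchronously on success, or later from the taskqueue. */
	sc->paddr = (error == 0 && nseg > 0) ? segs[0].ds_addr : 0;
}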
Index: sys/x86/iommu/intel_ctx.c
===================================================================
--- sys/x86/iommu/intel_ctx.c
+++ sys/x86/iommu/intel_ctx.c
@@ -114,30 +114,35 @@
static dmar_ctx_entry_t *
dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
{
+ struct dmar_unit *dmar;
dmar_ctx_entry_t *ctxp;
- ctxp = dmar_map_pgtbl(ctx->domain->dmar->ctx_obj, 1 +
+ dmar = (struct dmar_unit *)ctx->context.domain->iommu;
+
+ ctxp = dmar_map_pgtbl(dmar->ctx_obj, 1 +
PCI_RID2BUS(ctx->rid), DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
ctxp += ctx->rid & 0xff;
return (ctxp);
}
static void
-ctx_tag_init(struct dmar_ctx *ctx, device_t dev)
+device_tag_init(struct dmar_ctx *ctx, device_t dev)
{
+ struct dmar_domain *domain;
bus_addr_t maxaddr;
- maxaddr = MIN(ctx->domain->end, BUS_SPACE_MAXADDR);
- ctx->ctx_tag.common.ref_count = 1; /* Prevent free */
- ctx->ctx_tag.common.impl = &bus_dma_dmar_impl;
- ctx->ctx_tag.common.boundary = 0;
- ctx->ctx_tag.common.lowaddr = maxaddr;
- ctx->ctx_tag.common.highaddr = maxaddr;
- ctx->ctx_tag.common.maxsize = maxaddr;
- ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED;
- ctx->ctx_tag.common.maxsegsz = maxaddr;
- ctx->ctx_tag.ctx = ctx;
- ctx->ctx_tag.owner = dev;
+ domain = (struct dmar_domain *)ctx->context.domain;
+ maxaddr = MIN(domain->end, BUS_SPACE_MAXADDR);
+ ctx->context.tag->common.ref_count = 1; /* Prevent free */
+ ctx->context.tag->common.impl = &bus_dma_iommu_impl;
+ ctx->context.tag->common.boundary = 0;
+ ctx->context.tag->common.lowaddr = maxaddr;
+ ctx->context.tag->common.highaddr = maxaddr;
+ ctx->context.tag->common.maxsize = maxaddr;
+ ctx->context.tag->common.nsegments = BUS_SPACE_UNRESTRICTED;
+ ctx->context.tag->common.maxsegsz = maxaddr;
+ ctx->context.tag->ctx = (struct iommu_ctx *)ctx;
+ ctx->context.tag->owner = dev;
}
static void
@@ -173,12 +178,12 @@
vm_page_t ctx_root;
int i;
- domain = ctx->domain;
- unit = domain->dmar;
+ domain = (struct dmar_domain *)ctx->context.domain;
+ unit = (struct dmar_unit *)domain->iodom.iommu;
KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0),
("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
- unit->unit, busno, pci_get_slot(ctx->ctx_tag.owner),
- pci_get_function(ctx->ctx_tag.owner),
+ unit->iommu.unit, busno, pci_get_slot(ctx->context.tag->owner),
+ pci_get_function(ctx->context.tag->owner),
ctxp->ctx1, ctxp->ctx2));
if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0 &&
@@ -230,10 +235,10 @@
int slot, int func, int dev_domain, int dev_busno,
const void *dev_path, int dev_path_len)
{
- struct dmar_map_entries_tailq rmrr_entries;
- struct dmar_map_entry *entry, *entry1;
+ struct iommu_map_entries_tailq rmrr_entries;
+ struct iommu_map_entry *entry, *entry1;
vm_page_t *ma;
- dmar_gaddr_t start, end;
+ iommu_gaddr_t start, end;
vm_pindex_t size, i;
int error, error1;
@@ -255,7 +260,7 @@
end = entry->end;
if (bootverbose)
printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n",
- domain->dmar->unit, bus, slot, func,
+ domain->iodom.iommu->unit, bus, slot, func,
(uintmax_t)start, (uintmax_t)end);
entry->start = trunc_page(start);
entry->end = round_page(end);
@@ -267,7 +272,7 @@
printf("pci%d:%d:%d ", bus, slot, func);
printf("BIOS bug: dmar%d RMRR "
"region (%jx, %jx) corrected\n",
- domain->dmar->unit, start, end);
+ domain->iodom.iommu->unit, start, end);
}
entry->end += DMAR_PAGE_SIZE * 0x20;
}
@@ -277,9 +282,9 @@
ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
VM_MEMATTR_DEFAULT);
}
- error1 = dmar_gas_map_region(domain, entry,
- DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE,
- DMAR_GM_CANWAIT | DMAR_GM_RMRR, ma);
+ error1 = dmar_gas_map_region(&domain->iodom, entry,
+ IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE,
+ IOMMU_MF_CANWAIT | IOMMU_MF_RMRR, ma);
/*
* Non-failed RMRR entries are owned by context rb
* tree. Get rid of the failed entry, but do not stop
@@ -287,10 +292,10 @@
* loaded and removed on the context destruction.
*/
if (error1 == 0 && entry->end != entry->start) {
- DMAR_LOCK(domain->dmar);
+ IOMMU_LOCK(domain->iodom.iommu);
domain->refs++; /* XXXKIB prevent free */
domain->flags |= DMAR_DOMAIN_RMRR;
- DMAR_UNLOCK(domain->dmar);
+ IOMMU_UNLOCK(domain->iodom.iommu);
} else {
if (error1 != 0) {
if (dev != NULL)
@@ -298,12 +303,12 @@
printf("pci%d:%d:%d ", bus, slot, func);
printf(
"dmar%d failed to map RMRR region (%jx, %jx) %d\n",
- domain->dmar->unit, start, end,
+ domain->iodom.iommu->unit, start, end,
error1);
error = error1;
}
TAILQ_REMOVE(&rmrr_entries, entry, unroll_link);
- dmar_gas_free_entry(domain, entry);
+ dmar_gas_free_entry(&domain->iodom, entry);
}
for (i = 0; i < size; i++)
vm_page_putfake(ma[i]);
@@ -325,10 +330,12 @@
domain->domain = id;
LIST_INIT(&domain->contexts);
RB_INIT(&domain->rb_root);
- TAILQ_INIT(&domain->unload_entries);
- TASK_INIT(&domain->unload_task, 0, dmar_domain_unload_task, domain);
- mtx_init(&domain->lock, "dmardom", NULL, MTX_DEF);
+ TAILQ_INIT(&domain->iodom.unload_entries);
+ TASK_INIT(&domain->iodom.unload_task, 0, dmar_domain_unload_task,
+ domain);
+ mtx_init(&domain->iodom.lock, "dmardom", NULL, MTX_DEF);
domain->dmar = dmar;
+ domain->iodom.iommu = &dmar->iommu;
/*
* For now, use the maximal usable physical address of the
@@ -376,7 +383,9 @@
struct dmar_ctx *ctx;
ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
- ctx->domain = domain;
+ ctx->context.domain = (struct iommu_domain *)domain;
+ ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu),
+ M_DMAR_CTX, M_WAITOK | M_ZERO);
ctx->rid = rid;
ctx->refs = 1;
return (ctx);
@@ -387,8 +396,8 @@
{
struct dmar_domain *domain;
- domain = ctx->domain;
- DMAR_ASSERT_LOCKED(domain->dmar);
+ domain = (struct dmar_domain *)ctx->context.domain;
+ IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
KASSERT(domain->refs >= domain->ctx_cnt,
("dom %p ref underflow %d %d", domain, domain->refs,
domain->ctx_cnt));
@@ -402,8 +411,8 @@
{
struct dmar_domain *domain;
- domain = ctx->domain;
- DMAR_ASSERT_LOCKED(domain->dmar);
+ domain = (struct dmar_domain *)ctx->context.domain;
+ IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
KASSERT(domain->refs > 0,
("domain %p ctx dtr refs %d", domain, domain->refs));
KASSERT(domain->ctx_cnt >= domain->refs,
@@ -417,8 +426,9 @@
static void
dmar_domain_destroy(struct dmar_domain *domain)
{
+ struct dmar_unit *dmar;
- KASSERT(TAILQ_EMPTY(&domain->unload_entries),
+ KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries),
("unfinished unloads %p", domain));
KASSERT(LIST_EMPTY(&domain->contexts),
("destroying dom %p with contexts", domain));
@@ -436,8 +446,9 @@
DMAR_DOMAIN_PGLOCK(domain);
domain_free_pgtbl(domain);
}
- mtx_destroy(&domain->lock);
- free_unr(domain->dmar->domids, domain->domain);
+ mtx_destroy(&domain->iodom.lock);
+ dmar = (struct dmar_unit *)domain->iodom.iommu;
+ free_unr(dmar->domids, domain->domain);
free(domain, M_DMAR_DOMAIN);
}
@@ -466,7 +477,7 @@
TD_PREP_PINNED_ASSERT;
DMAR_LOCK(dmar);
KASSERT(!dmar_is_buswide_ctx(dmar, bus) || (slot == 0 && func == 0),
- ("dmar%d pci%d:%d:%d get_ctx for buswide", dmar->unit, bus,
+ ("dmar%d pci%d:%d:%d get_ctx for buswide", dmar->iommu.unit, bus,
slot, func));
ctx = dmar_find_ctx_locked(dmar, rid);
error = 0;
@@ -505,8 +516,8 @@
domain = domain1;
ctx = ctx1;
dmar_ctx_link(ctx);
- ctx->ctx_tag.owner = dev;
- ctx_tag_init(ctx, dev);
+ ctx->context.tag->owner = dev;
+ device_tag_init(ctx, dev);
/*
* This is the first activated context for the
@@ -521,7 +532,7 @@
device_printf(dev,
"dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d "
"agaw %d %s-mapped\n",
- dmar->unit, dmar->segment, bus, slot,
+ dmar->iommu.unit, dmar->segment, bus, slot,
func, rid, domain->domain, domain->mgaw,
domain->agaw, id_mapped ? "id" : "re");
}
@@ -531,19 +542,19 @@
dmar_domain_destroy(domain1);
/* Nothing needs to be done to destroy ctx1. */
free(ctx1, M_DMAR_CTX);
- domain = ctx->domain;
+ domain = (struct dmar_domain *)ctx->context.domain;
ctx->refs++; /* tag referenced us */
}
} else {
- domain = ctx->domain;
- if (ctx->ctx_tag.owner == NULL)
- ctx->ctx_tag.owner = dev;
+ domain = (struct dmar_domain *)ctx->context.domain;
+ if (ctx->context.tag->owner == NULL)
+ ctx->context.tag->owner = dev;
ctx->refs++; /* tag referenced us */
}
error = dmar_flush_for_ctx_entry(dmar, enable);
if (error != 0) {
- dmar_free_ctx_locked(dmar, ctx);
+ dmar_free_ctx_locked(&dmar->iommu, (struct iommu_ctx *)ctx);
TD_PINNED_ASSERT;
return (NULL);
}
@@ -558,12 +569,13 @@
if (error == 0) {
if (bootverbose) {
printf("dmar%d: enabled translation\n",
- dmar->unit);
+ dmar->iommu.unit);
}
} else {
printf("dmar%d: enabling translation failed, "
- "error %d\n", dmar->unit, error);
- dmar_free_ctx_locked(dmar, ctx);
+ "error %d\n", dmar->iommu.unit, error);
+ dmar_free_ctx_locked(&dmar->iommu,
+ (struct iommu_ctx *)ctx);
TD_PINNED_ASSERT;
return (NULL);
}
@@ -607,26 +619,27 @@
struct sf_buf *sf;
int error;
- dmar = domain->dmar;
- old_domain = ctx->domain;
+ dmar = (struct dmar_unit *)domain->iodom.iommu;
+ old_domain = (struct dmar_domain *)ctx->context.domain;
if (domain == old_domain)
return (0);
- KASSERT(old_domain->dmar == dmar,
+ KASSERT(old_domain->iodom.iommu == domain->iodom.iommu,
("domain %p %u moving between dmars %u %u", domain,
- domain->domain, old_domain->dmar->unit, domain->dmar->unit));
+ domain->domain, old_domain->iodom.iommu->unit,
+ domain->iodom.iommu->unit));
TD_PREP_PINNED_ASSERT;
ctxp = dmar_map_ctx_entry(ctx, &sf);
DMAR_LOCK(dmar);
dmar_ctx_unlink(ctx);
- ctx->domain = domain;
+ ctx->context.domain = &domain->iodom;
dmar_ctx_link(ctx);
ctx_id_entry_init(ctx, ctxp, true, PCI_BUSMAX + 100);
dmar_unmap_pgtbl(sf);
error = dmar_flush_for_ctx_entry(dmar, true);
/* If flush failed, rolling back would not work as well. */
printf("dmar%d rid %x domain %d->%d %s-mapped\n",
- dmar->unit, ctx->rid, old_domain->domain, domain->domain,
+ dmar->iommu.unit, ctx->rid, old_domain->domain, domain->domain,
(domain->flags & DMAR_DOMAIN_IDMAP) != 0 ? "id" : "re");
dmar_unref_domain_locked(dmar, old_domain);
TD_PINNED_ASSERT;
@@ -639,9 +652,10 @@
DMAR_ASSERT_LOCKED(dmar);
KASSERT(domain->refs >= 1,
- ("dmar %d domain %p refs %u", dmar->unit, domain, domain->refs));
+ ("dmar %d domain %p refs %u", dmar->iommu.unit, domain,
+ domain->refs));
KASSERT(domain->refs > domain->ctx_cnt,
- ("dmar %d domain %p refs %d ctx_cnt %d", dmar->unit, domain,
+ ("dmar %d domain %p refs %d ctx_cnt %d", dmar->iommu.unit, domain,
domain->refs, domain->ctx_cnt));
if (domain->refs > 1) {
@@ -656,17 +670,23 @@
LIST_REMOVE(domain, link);
DMAR_UNLOCK(dmar);
- taskqueue_drain(dmar->delayed_taskqueue, &domain->unload_task);
+ taskqueue_drain(dmar->iommu.delayed_taskqueue,
+ &domain->iodom.unload_task);
dmar_domain_destroy(domain);
}
void
-dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
+dmar_free_ctx_locked(struct iommu_unit *unit, struct iommu_ctx *context)
{
+ struct dmar_ctx *ctx;
+ struct dmar_unit *dmar;
struct sf_buf *sf;
dmar_ctx_entry_t *ctxp;
struct dmar_domain *domain;
+ ctx = (struct dmar_ctx *)context;
+ dmar = (struct dmar_unit *)unit;
+
DMAR_ASSERT_LOCKED(dmar);
KASSERT(ctx->refs >= 1,
("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));
@@ -681,7 +701,7 @@
return;
}
- KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
+ KASSERT((ctx->context.flags & DMAR_CTX_DISABLED) == 0,
("lost ref on disabled ctx %p", ctx));
/*
@@ -708,7 +728,7 @@
return;
}
- KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
+ KASSERT((ctx->context.flags & DMAR_CTX_DISABLED) == 0,
("lost ref on disabled ctx %p", ctx));
/*
@@ -726,21 +746,24 @@
dmar_inv_iotlb_glob(dmar);
}
dmar_unmap_pgtbl(sf);
- domain = ctx->domain;
+ domain = (struct dmar_domain *)ctx->context.domain;
dmar_ctx_unlink(ctx);
+ free(ctx->context.tag, M_DMAR_CTX);
free(ctx, M_DMAR_CTX);
dmar_unref_domain_locked(dmar, domain);
TD_PINNED_ASSERT;
}
void
-dmar_free_ctx(struct dmar_ctx *ctx)
+dmar_free_ctx(struct iommu_ctx *context)
{
+ struct dmar_ctx *ctx;
struct dmar_unit *dmar;
- dmar = ctx->domain->dmar;
+ ctx = (struct dmar_ctx *)context;
+ dmar = (struct dmar_unit *)ctx->context.domain->iommu;
DMAR_LOCK(dmar);
- dmar_free_ctx_locked(dmar, ctx);
+ dmar_free_ctx_locked(&dmar->iommu, (struct iommu_ctx *)ctx);
}
/*
@@ -764,47 +787,50 @@
}
void
-dmar_domain_free_entry(struct dmar_map_entry *entry, bool free)
+dmar_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
struct dmar_domain *domain;
- domain = entry->domain;
+ domain = (struct dmar_domain *)entry->domain;
DMAR_DOMAIN_LOCK(domain);
- if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
+ if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
dmar_gas_free_region(domain, entry);
else
dmar_gas_free_space(domain, entry);
DMAR_DOMAIN_UNLOCK(domain);
if (free)
- dmar_gas_free_entry(domain, entry);
+ dmar_gas_free_entry(&domain->iodom, entry);
else
entry->flags = 0;
}
void
-dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free)
+dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free)
{
+ struct dmar_domain *domain;
struct dmar_unit *unit;
- unit = entry->domain->dmar;
+ domain = (struct dmar_domain *)entry->domain;
+ unit = (struct dmar_unit *)domain->iodom.iommu;
if (unit->qi_enabled) {
DMAR_LOCK(unit);
- dmar_qi_invalidate_locked(entry->domain, entry->start,
- entry->end - entry->start, &entry->gseq, true);
+ dmar_qi_invalidate_locked((struct dmar_domain *)entry->domain,
+ entry->start, entry->end - entry->start, &entry->gseq,
+ true);
if (!free)
- entry->flags |= DMAR_MAP_ENTRY_QI_NF;
+ entry->flags |= IOMMU_MAP_ENTRY_QI_NF;
TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
DMAR_UNLOCK(unit);
} else {
- domain_flush_iotlb_sync(entry->domain, entry->start,
- entry->end - entry->start);
+ domain_flush_iotlb_sync((struct dmar_domain *)entry->domain,
+ entry->start, entry->end - entry->start);
dmar_domain_free_entry(entry, free);
}
}
static bool
dmar_domain_unload_emit_wait(struct dmar_domain *domain,
- struct dmar_map_entry *entry)
+ struct iommu_map_entry *entry)
{
if (TAILQ_NEXT(entry, dmamap_link) == NULL)
@@ -814,16 +840,16 @@
void
dmar_domain_unload(struct dmar_domain *domain,
- struct dmar_map_entries_tailq *entries, bool cansleep)
+ struct iommu_map_entries_tailq *entries, bool cansleep)
{
struct dmar_unit *unit;
- struct dmar_map_entry *entry, *entry1;
+ struct iommu_map_entry *entry, *entry1;
int error;
- unit = domain->dmar;
+ unit = (struct dmar_unit *)domain->iodom.iommu;
TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
- KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
+ KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
("not mapped entry %p %p", domain, entry));
error = domain_unmap_buf(domain, entry->start, entry->end -
entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
@@ -853,18 +879,50 @@
dmar_domain_unload_task(void *arg, int pending)
{
struct dmar_domain *domain;
- struct dmar_map_entries_tailq entries;
+ struct iommu_map_entries_tailq entries;
domain = arg;
TAILQ_INIT(&entries);
for (;;) {
DMAR_DOMAIN_LOCK(domain);
- TAILQ_SWAP(&domain->unload_entries, &entries, dmar_map_entry,
- dmamap_link);
+ TAILQ_SWAP(&domain->iodom.unload_entries, &entries,
+ iommu_map_entry, dmamap_link);
DMAR_DOMAIN_UNLOCK(domain);
if (TAILQ_EMPTY(&entries))
break;
dmar_domain_unload(domain, &entries, true);
}
}
+
+struct iommu_ctx *
+iommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid,
+ bool id_mapped, bool rmrr_init)
+{
+ struct dmar_unit *dmar;
+ struct dmar_ctx *ret;
+
+ dmar = (struct dmar_unit *)iommu;
+
+ ret = dmar_get_ctx_for_dev(dmar, dev, rid, id_mapped, rmrr_init);
+
+ return ((struct iommu_ctx *)ret);
+}
+
+void
+iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free)
+{
+
+ dmar_domain_unload_entry(entry, free);
+}
+
+void
+iommu_domain_unload(struct iommu_domain *iodom,
+ struct iommu_map_entries_tailq *entries, bool cansleep)
+{
+ struct dmar_domain *domain;
+
+ domain = (struct dmar_domain *)iodom;
+
+ dmar_domain_unload(domain, entries, cansleep);
+}
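
All of the conversions in this file, e.g. (struct dmar_domain *)ctx->context.domain and &dmar->iommu, are plain pointer casts. They are safe only because each generic structure is the first member of its Intel-specific container (see intel_dmar.h below). A hypothetical compile-time guard, not in the diff, would pin down that layout invariant:

/* Illustration only: assert the embed-at-offset-zero layout the casts rely on. */
_Static_assert(__offsetof(struct dmar_unit, iommu) == 0,
    "iommu_unit must be the first member of dmar_unit");
_Static_assert(__offsetof(struct dmar_domain, iodom) == 0,
    "iommu_domain must be the first member of dmar_domain");
_Static_assert(__offsetof(struct dmar_ctx, context) == 0,
    "iommu_ctx must be the first member of dmar_ctx");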
Index: sys/x86/iommu/intel_dmar.h
===================================================================
--- sys/x86/iommu/intel_dmar.h
+++ sys/x86/iommu/intel_dmar.h
@@ -34,51 +34,17 @@
#ifndef __X86_IOMMU_INTEL_DMAR_H
#define __X86_IOMMU_INTEL_DMAR_H
-/* Host or physical memory address, after translation. */
-typedef uint64_t dmar_haddr_t;
-/* Guest or bus address, before translation. */
-typedef uint64_t dmar_gaddr_t;
-
-struct dmar_qi_genseq {
- u_int gen;
- uint32_t seq;
-};
+#include <sys/iommu.h>
-struct dmar_map_entry {
- dmar_gaddr_t start;
- dmar_gaddr_t end;
- dmar_gaddr_t first; /* Least start in subtree */
- dmar_gaddr_t last; /* Greatest end in subtree */
- dmar_gaddr_t free_down; /* Max free space below the
- current R/B tree node */
- u_int flags;
- TAILQ_ENTRY(dmar_map_entry) dmamap_link; /* Link for dmamap entries */
- RB_ENTRY(dmar_map_entry) rb_entry; /* Links for domain entries */
- TAILQ_ENTRY(dmar_map_entry) unroll_link; /* Link for unroll after
- dmamap_load failure */
- struct dmar_domain *domain;
- struct dmar_qi_genseq gseq;
-};
+struct dmar_unit;
-RB_HEAD(dmar_gas_entries_tree, dmar_map_entry);
-RB_PROTOTYPE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
+RB_HEAD(dmar_gas_entries_tree, iommu_map_entry);
+RB_PROTOTYPE(dmar_gas_entries_tree, iommu_map_entry, rb_entry,
dmar_gas_cmp_entries);
-#define DMAR_MAP_ENTRY_PLACE 0x0001 /* Fake entry */
-#define DMAR_MAP_ENTRY_RMRR 0x0002 /* Permanent, not linked by
- dmamap_link */
-#define DMAR_MAP_ENTRY_MAP 0x0004 /* Busdma created, linked by
- dmamap_link */
-#define DMAR_MAP_ENTRY_UNMAPPED 0x0010 /* No backing pages */
-#define DMAR_MAP_ENTRY_QI_NF 0x0020 /* qi task, do not free entry */
-#define DMAR_MAP_ENTRY_READ 0x1000 /* Read permitted */
-#define DMAR_MAP_ENTRY_WRITE 0x2000 /* Write permitted */
-#define DMAR_MAP_ENTRY_SNOOP 0x4000 /* Snoop */
-#define DMAR_MAP_ENTRY_TM 0x8000 /* Transient */
-
/*
* Locking annotations:
- * (u) - Protected by dmar unit lock
+ * (u) - Protected by iommu unit lock
* (d) - Protected by domain lock
* (c) - Immutable after initialization
*/
@@ -95,41 +61,34 @@
* lock pgtbl_obj, which contains the page tables pages.
*/
struct dmar_domain {
+ struct iommu_domain iodom;
int domain; /* (c) DID, written in context entry */
int mgaw; /* (c) Real max address width */
int agaw; /* (c) Adjusted guest address width */
int pglvl; /* (c) The pagelevel */
int awlvl; /* (c) The pagelevel as the bitmask,
to set in context entry */
- dmar_gaddr_t end; /* (c) Highest address + 1 in
+ iommu_gaddr_t end; /* (c) Highest address + 1 in
the guest AS */
u_int ctx_cnt; /* (u) Number of contexts owned */
u_int refs; /* (u) Refs, including ctx */
struct dmar_unit *dmar; /* (c) */
- struct mtx lock; /* (c) */
LIST_ENTRY(dmar_domain) link; /* (u) Member in the dmar list */
LIST_HEAD(, dmar_ctx) contexts; /* (u) */
vm_object_t pgtbl_obj; /* (c) Page table pages */
u_int flags; /* (u) */
u_int entries_cnt; /* (d) */
struct dmar_gas_entries_tree rb_root; /* (d) */
- struct dmar_map_entries_tailq unload_entries; /* (d) Entries to
- unload */
- struct dmar_map_entry *first_place, *last_place; /* (d) */
- struct task unload_task; /* (c) */
+ struct iommu_map_entry *first_place, *last_place; /* (d) */
u_int batch_no;
};
struct dmar_ctx {
- struct bus_dma_tag_dmar ctx_tag; /* (c) Root tag */
+ struct iommu_ctx context;
uint16_t rid; /* (c) pci RID */
uint64_t last_fault_rec[2]; /* Last fault reported */
- struct dmar_domain *domain; /* (c) */
LIST_ENTRY(dmar_ctx) link; /* (u) Member in the domain list */
u_int refs; /* (u) References from tags */
- u_int flags; /* (u) */
- u_long loads; /* atomic updates, for stat only */
- u_long unloads; /* same */
};
#define DMAR_DOMAIN_GAS_INITED 0x0001
@@ -152,9 +111,9 @@
#define DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \
VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)
-#define DMAR_DOMAIN_LOCK(dom) mtx_lock(&(dom)->lock)
-#define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock)
-#define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED)
+#define DMAR_DOMAIN_LOCK(dom) mtx_lock(&(dom)->iodom.lock)
+#define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->iodom.lock)
+#define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED)
struct dmar_msi_data {
int irq;
@@ -175,8 +134,8 @@
#define DMAR_INTR_TOTAL 2
struct dmar_unit {
+ struct iommu_unit iommu;
device_t dev;
- int unit;
uint16_t segment;
uint64_t base;
@@ -193,7 +152,6 @@
uint32_t hw_gcmd;
/* Data for being a dmar */
- struct mtx lock;
LIST_HEAD(, dmar_domain) domains;
struct unrhdr *domids;
vm_object_t ctx_obj;
@@ -230,17 +188,10 @@
vmem_t *irtids;
/* Delayed freeing of map entries queue processing */
- struct dmar_map_entries_tailq tlb_flush_entries;
+ struct iommu_map_entries_tailq tlb_flush_entries;
struct task qi_task;
struct taskqueue *qi_taskqueue;
- /* Busdma delayed map load */
- struct task dmamap_load_task;
- TAILQ_HEAD(, bus_dmamap_dmar) delayed_maps;
- struct taskqueue *delayed_taskqueue;
-
- int dma_enabled;
-
/*
* Bitmap of buses for which context must ignore slot:func,
* duplicating the page table pointer into all context table
@@ -251,9 +202,9 @@
};
-#define DMAR_LOCK(dmar) mtx_lock(&(dmar)->lock)
-#define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->lock)
-#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->lock, MA_OWNED)
+#define DMAR_LOCK(dmar) mtx_lock(&(dmar)->iommu.lock)
+#define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->iommu.lock)
+#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED)
#define DMAR_FAULT_LOCK(dmar) mtx_lock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock)
@@ -275,14 +226,14 @@
u_int dmar_nd2mask(u_int nd);
bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
int domain_set_agaw(struct dmar_domain *domain, int mgaw);
-int dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr,
+int dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr,
bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
-dmar_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
-dmar_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
-int calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
- dmar_gaddr_t *isizep);
+iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
+iommu_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
+int calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size,
+ iommu_gaddr_t *isizep);
struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
@@ -316,83 +267,84 @@
void dmar_disable_qi_intr(struct dmar_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
-void dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t start,
- dmar_gaddr_t size, struct dmar_qi_genseq *psec, bool emit_wait);
+void dmar_qi_invalidate_locked(struct dmar_domain *domain, iommu_gaddr_t start,
+ iommu_gaddr_t size, struct iommu_qi_genseq *psec, bool emit_wait);
void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);
vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
- dmar_gaddr_t maxaddr);
+ iommu_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
-int domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base,
- dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
-int domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base,
- dmar_gaddr_t size, int flags);
-void domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
- dmar_gaddr_t size);
+int domain_map_buf(struct dmar_domain *domain, iommu_gaddr_t base,
+ iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
+int domain_unmap_buf(struct dmar_domain *domain, iommu_gaddr_t base,
+ iommu_gaddr_t size, int flags);
+void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
+ iommu_gaddr_t size);
int domain_alloc_pgtbl(struct dmar_domain *domain);
void domain_free_pgtbl(struct dmar_domain *domain);
int dmar_dev_depth(device_t child);
void dmar_dev_path(device_t child, int *busno, void *path1, int depth);
-struct dmar_ctx *dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev,
- bool rmrr);
+struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *dmar,
+ device_t dev, bool rmrr);
struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev,
uint16_t rid, bool id_mapped, bool rmrr_init);
struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
bool id_mapped, bool rmrr_init);
int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
-void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
-void dmar_free_ctx(struct dmar_ctx *ctx);
+void dmar_free_ctx_locked(struct iommu_unit *dmar, struct iommu_ctx *ctx);
+void dmar_free_ctx(struct iommu_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
-void dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free);
+void dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free);
void dmar_domain_unload(struct dmar_domain *domain,
- struct dmar_map_entries_tailq *entries, bool cansleep);
-void dmar_domain_free_entry(struct dmar_map_entry *entry, bool free);
+ struct iommu_map_entries_tailq *entries, bool cansleep);
+void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free);
-int dmar_init_busdma(struct dmar_unit *unit);
-void dmar_fini_busdma(struct dmar_unit *unit);
-device_t dmar_get_requester(device_t dev, uint16_t *rid);
+int iommu_init_busdma(struct iommu_unit *unit);
+void iommu_fini_busdma(struct iommu_unit *unit);
+device_t iommu_get_requester(device_t dev, uint16_t *rid);
void dmar_gas_init_domain(struct dmar_domain *domain);
void dmar_gas_fini_domain(struct dmar_domain *domain);
-struct dmar_map_entry *dmar_gas_alloc_entry(struct dmar_domain *domain,
+struct iommu_map_entry *dmar_gas_alloc_entry(struct iommu_domain *domain,
u_int flags);
-void dmar_gas_free_entry(struct dmar_domain *domain,
- struct dmar_map_entry *entry);
+void dmar_gas_free_entry(struct iommu_domain *domain,
+ struct iommu_map_entry *entry);
void dmar_gas_free_space(struct dmar_domain *domain,
- struct dmar_map_entry *entry);
-int dmar_gas_map(struct dmar_domain *domain,
- const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset,
- u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res);
+ struct iommu_map_entry *entry);
+int dmar_gas_map(struct iommu_domain *domain,
+ const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
+ u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
void dmar_gas_free_region(struct dmar_domain *domain,
- struct dmar_map_entry *entry);
-int dmar_gas_map_region(struct dmar_domain *domain,
- struct dmar_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
-int dmar_gas_reserve_region(struct dmar_domain *domain, dmar_gaddr_t start,
- dmar_gaddr_t end);
+ struct iommu_map_entry *entry);
+int dmar_gas_map_region(struct iommu_domain *domain,
+ struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
+int dmar_gas_reserve_region(struct dmar_domain *domain, iommu_gaddr_t start,
+ iommu_gaddr_t end);
void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
int dev_busno, const void *dev_path, int dev_path_len,
- struct dmar_map_entries_tailq *rmrr_entries);
-int dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar);
+ struct iommu_map_entries_tailq *rmrr_entries);
+int dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar);
void dmar_quirks_post_ident(struct dmar_unit *dmar);
-void dmar_quirks_pre_use(struct dmar_unit *dmar);
+void dmar_quirks_pre_use(struct iommu_unit *dmar);
int dmar_init_irt(struct dmar_unit *unit);
void dmar_fini_irt(struct dmar_unit *unit);
-void dmar_set_buswide_ctx(struct dmar_unit *unit, u_int busno);
+void dmar_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
bool dmar_is_buswide_ctx(struct dmar_unit *unit, u_int busno);
-#define DMAR_GM_CANWAIT 0x0001
-#define DMAR_GM_CANSPLIT 0x0002
-#define DMAR_GM_RMRR 0x0004
+/* Map flags */
+#define IOMMU_MF_CANWAIT 0x0001
+#define IOMMU_MF_CANSPLIT 0x0002
+#define IOMMU_MF_RMRR 0x0004
#define DMAR_PGF_WAITOK 0x0001
#define DMAR_PGF_ZERO 0x0002
@@ -400,7 +352,7 @@
#define DMAR_PGF_NOALLOC 0x0008
#define DMAR_PGF_OBJL 0x0010
-extern dmar_haddr_t dmar_high;
+extern iommu_haddr_t dmar_high;
extern int haw;
extern int dmar_tbl_pagecnt;
extern int dmar_batch_coalesce;
@@ -433,7 +385,7 @@
KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) ==
(unit->hw_gcmd & DMAR_GCMD_TE),
- ("dmar%d clearing TE 0x%08x 0x%08x", unit->unit,
+ ("dmar%d clearing TE 0x%08x 0x%08x", unit->iommu.unit,
unit->hw_gcmd, val));
bus_write_4(unit->regs, reg, val);
}
@@ -522,8 +474,8 @@
}
static inline bool
-dmar_test_boundary(dmar_gaddr_t start, dmar_gaddr_t size,
- dmar_gaddr_t boundary)
+iommu_test_boundary(iommu_gaddr_t start, iommu_gaddr_t size,
+ iommu_gaddr_t boundary)
{
if (boundary == 0)
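
The body of iommu_test_boundary() is cut off by the hunk above. For reference, here is a minimal standalone sketch of the check its callers in intel_gas.c rely on: a request of size bytes starting at start must not cross the next multiple of boundary (a power of two), with boundary == 0 meaning no restriction. The body below is a reconstruction and may differ in detail from the elided original.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t iommu_gaddr_t;

    static inline bool
    iommu_test_boundary(iommu_gaddr_t start, iommu_gaddr_t size,
        iommu_gaddr_t boundary)
    {
        if (boundary == 0)
            return (true);
        /*
         * Round start up to the next boundary multiple; the request
         * must end at or before it.
         */
        return (start + size <= ((start + boundary) & ~(boundary - 1)));
    }

    int
    main(void)
    {
        /* 0xff00 + 0x200 = 0x10100 crosses the 64 KB line: prints 0. */
        printf("%d\n", iommu_test_boundary(0xff00, 0x200, 0x10000));
        return (0);
    }
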
Index: sys/x86/iommu/intel_drv.c
===================================================================
--- sys/x86/iommu/intel_drv.c
+++ sys/x86/iommu/intel_drv.c
@@ -252,7 +252,7 @@
{
int i;
- dmar_fini_busdma(unit);
+ iommu_fini_busdma(&unit->iommu);
dmar_fini_irt(unit);
dmar_fini_qi(unit);
dmar_fini_fault_log(unit);
@@ -413,8 +413,8 @@
unit = device_get_softc(dev);
unit->dev = dev;
- unit->unit = device_get_unit(dev);
- dmaru = dmar_find_by_index(unit->unit);
+ unit->iommu.unit = device_get_unit(dev);
+ dmaru = dmar_find_by_index(unit->iommu.unit);
if (dmaru == NULL)
return (EINVAL);
unit->segment = dmaru->Segment;
@@ -469,9 +469,9 @@
}
}
- mtx_init(&unit->lock, "dmarhw", NULL, MTX_DEF);
+ mtx_init(&unit->iommu.lock, "dmarhw", NULL, MTX_DEF);
unit->domids = new_unrhdr(0, dmar_nd2mask(DMAR_CAP_ND(unit->hw_cap)),
- &unit->lock);
+ &unit->iommu.lock);
LIST_INIT(&unit->domains);
/*
@@ -531,7 +531,7 @@
dmar_release_resources(dev, unit);
return (error);
}
- error = dmar_init_busdma(unit);
+ error = iommu_init_busdma(&unit->iommu);
if (error != 0) {
dmar_release_resources(dev, unit);
return (error);
@@ -596,14 +596,17 @@
MODULE_DEPEND(dmar, acpi, 1, 1, 1);
void
-dmar_set_buswide_ctx(struct dmar_unit *unit, u_int busno)
+dmar_set_buswide_ctx(struct iommu_unit *unit, u_int busno)
{
+ struct dmar_unit *dmar;
+
+ dmar = (struct dmar_unit *)unit;
MPASS(busno <= PCI_BUSMAX);
- DMAR_LOCK(unit);
- unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |=
+ DMAR_LOCK(dmar);
+ dmar->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |=
1 << (busno % (NBBY * sizeof(uint32_t)));
- DMAR_UNLOCK(unit);
+ DMAR_UNLOCK(dmar);
}
bool
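
The bit manipulation in dmar_set_buswide_ctx() above packs one flag per PCI bus into an array of uint32_t words: busno / NBBY / sizeof(uint32_t) selects the word (integer division applies left to right, so this is effectively busno / 32), and busno % (NBBY * sizeof(uint32_t)) selects the bit within it. A userspace restatement for illustration, spelling NBBY as CHAR_BIT; the array and function names here are local stand-ins:

    #include <limits.h>   /* CHAR_BIT plays the kernel's NBBY */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PCI_BUSMAX 255

    /* One bit per bus: 256 bits packed into 8 uint32_t words. */
    static uint32_t buswide_ctxs[(PCI_BUSMAX + 1) /
        (CHAR_BIT * sizeof(uint32_t))];

    static void
    set_buswide(unsigned int busno)
    {
        buswide_ctxs[busno / (CHAR_BIT * sizeof(uint32_t))] |=
            1U << (busno % (CHAR_BIT * sizeof(uint32_t)));
    }

    static bool
    is_buswide(unsigned int busno)
    {
        return ((buswide_ctxs[busno / (CHAR_BIT * sizeof(uint32_t))] &
            (1U << (busno % (CHAR_BIT * sizeof(uint32_t))))) != 0);
    }

    int
    main(void)
    {
        set_buswide(0x1a);
        printf("%d %d\n", is_buswide(0x1a), is_buswide(0x1b));  /* 1 0 */
        return (0);
    }
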
@@ -736,7 +739,7 @@
char *ptr, *ptrend;
int match;
- dmarh = dmar_find_by_index(unit->unit);
+ dmarh = dmar_find_by_index(unit->iommu.unit);
if (dmarh == NULL)
return (false);
if (dmarh->Segment != dev_domain)
@@ -818,7 +821,7 @@
if (verbose) {
device_printf(dev, "pci%d:%d:%d:%d matched dmar%d by %s",
dev_domain, pci_get_bus(dev), pci_get_slot(dev),
- pci_get_function(dev), unit->unit, banner);
+ pci_get_function(dev), unit->iommu.unit, banner);
printf(" scope path ");
dmar_print_path(dev_busno, dev_path_len, dev_path);
printf("\n");
@@ -911,7 +914,7 @@
int dev_busno;
const ACPI_DMAR_PCI_PATH *dev_path;
int dev_path_len;
- struct dmar_map_entries_tailq *rmrr_entries;
+ struct iommu_map_entries_tailq *rmrr_entries;
};
static int
@@ -920,7 +923,7 @@
struct rmrr_iter_args *ria;
ACPI_DMAR_RESERVED_MEMORY *resmem;
ACPI_DMAR_DEVICE_SCOPE *devscope;
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
char *ptr, *ptrend;
int match;
@@ -942,7 +945,7 @@
match = dmar_match_devscope(devscope, ria->dev_busno,
ria->dev_path, ria->dev_path_len);
if (match == 1) {
- entry = dmar_gas_alloc_entry(ria->domain,
+ entry = dmar_gas_alloc_entry(&ria->domain->iodom,
DMAR_PGF_WAITOK);
entry->start = resmem->BaseAddress;
/* The RMRR entry end address is inclusive. */
@@ -958,7 +961,7 @@
void
dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain, int dev_busno,
const void *dev_path, int dev_path_len,
- struct dmar_map_entries_tailq *rmrr_entries)
+ struct iommu_map_entries_tailq *rmrr_entries)
{
struct rmrr_iter_args ria;
@@ -1037,7 +1040,7 @@
if (bootverbose) {
printf("dmar%d no dev found for RMRR "
"[%#jx, %#jx] rid %#x scope path ",
- iria->dmar->unit,
+ iria->dmar->iommu.unit,
(uintmax_t)resmem->BaseAddress,
(uintmax_t)resmem->EndAddress,
rid);
@@ -1059,7 +1062,8 @@
unit = dmar_find(dev, false);
if (iria->dmar != unit)
continue;
- dmar_instantiate_ctx(iria->dmar, dev, true);
+ iommu_instantiate_ctx(&iria->dmar->iommu,
+ dev, true);
}
}
@@ -1071,11 +1075,14 @@
* Pre-create all contexts for the DMAR which have RMRR entries.
*/
int
-dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar)
+dmar_instantiate_rmrr_ctxs(struct iommu_unit *unit)
{
+ struct dmar_unit *dmar;
struct inst_rmrr_iter_args iria;
int error;
+ dmar = (struct dmar_unit *)unit;
+
if (!dmar_barrier_enter(dmar, DMAR_BARRIER_RMRR))
return (0);
@@ -1086,15 +1093,15 @@
if (!LIST_EMPTY(&dmar->domains)) {
KASSERT((dmar->hw_gcmd & DMAR_GCMD_TE) == 0,
("dmar%d: RMRR not handled but translation is already enabled",
- dmar->unit));
+ dmar->iommu.unit));
error = dmar_enable_translation(dmar);
if (bootverbose) {
if (error == 0) {
printf("dmar%d: enabled translation\n",
- dmar->unit);
+ dmar->iommu.unit);
} else {
printf("dmar%d: enabling translation failed, "
- "error %d\n", dmar->unit, error);
+ "error %d\n", dmar->iommu.unit, error);
}
}
}
@@ -1107,9 +1114,9 @@
#include <ddb/db_lex.h>
static void
-dmar_print_domain_entry(const struct dmar_map_entry *entry)
+dmar_print_domain_entry(const struct iommu_map_entry *entry)
{
- struct dmar_map_entry *l, *r;
+ struct iommu_map_entry *l, *r;
db_printf(
" start %jx end %jx first %jx last %jx free_down %jx flags %x ",
@@ -1136,16 +1143,16 @@
db_printf(
" @%p pci%d:%d:%d refs %d flags %x loads %lu unloads %lu\n",
- ctx, pci_get_bus(ctx->ctx_tag.owner),
- pci_get_slot(ctx->ctx_tag.owner),
- pci_get_function(ctx->ctx_tag.owner), ctx->refs, ctx->flags,
- ctx->loads, ctx->unloads);
+ ctx, pci_get_bus(ctx->context.tag->owner),
+ pci_get_slot(ctx->context.tag->owner),
+ pci_get_function(ctx->context.tag->owner), ctx->refs,
+ ctx->context.flags, ctx->context.loads, ctx->context.unloads);
}
static void
dmar_print_domain(struct dmar_domain *domain, bool show_mappings)
{
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
struct dmar_ctx *ctx;
db_printf(
@@ -1170,7 +1177,7 @@
if (db_pager_quit)
return;
db_printf(" unloading:\n");
- TAILQ_FOREACH(entry, &domain->unload_entries, dmamap_link) {
+ TAILQ_FOREACH(entry, &domain->iodom.unload_entries, dmamap_link) {
dmar_print_domain_entry(entry);
if (db_pager_quit)
break;
@@ -1231,11 +1238,11 @@
LIST_FOREACH(domain, &unit->domains, link) {
LIST_FOREACH(ctx, &domain->contexts, link) {
if (pci_domain == unit->segment &&
- bus == pci_get_bus(ctx->ctx_tag.owner) &&
+ bus == pci_get_bus(ctx->context.tag->owner) &&
device ==
- pci_get_slot(ctx->ctx_tag.owner) &&
+ pci_get_slot(ctx->context.tag->owner) &&
function ==
- pci_get_function(ctx->ctx_tag.owner)) {
+ pci_get_function(ctx->context.tag->owner)) {
dmar_print_domain(domain,
show_mappings);
goto out;
@@ -1254,8 +1261,9 @@
int i, frir;
unit = device_get_softc(dmar_devs[idx]);
- db_printf("dmar%d at %p, root at 0x%jx, ver 0x%x\n", unit->unit, unit,
- dmar_read8(unit, DMAR_RTADDR_REG), dmar_read4(unit, DMAR_VER_REG));
+ db_printf("dmar%d at %p, root at 0x%jx, ver 0x%x\n", unit->iommu.unit,
+ unit, dmar_read8(unit, DMAR_RTADDR_REG),
+ dmar_read4(unit, DMAR_VER_REG));
db_printf("cap 0x%jx ecap 0x%jx gsts 0x%x fsts 0x%x fectl 0x%x\n",
(uintmax_t)dmar_read8(unit, DMAR_CAP_REG),
(uintmax_t)dmar_read8(unit, DMAR_ECAP_REG),
@@ -1342,3 +1350,13 @@
}
}
#endif
+
+struct iommu_unit *
+iommu_find(device_t dev, bool verbose)
+{
+ struct dmar_unit *dmar;
+
+ dmar = dmar_find(dev, verbose);
+
+ return (&dmar->iommu);
+}
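
The pattern repeated throughout this patch, for example dmar = (struct dmar_unit *)unit in dmar_set_buswide_ctx() and dmar_quirks_pre_use(), and &dmar->iommu here in iommu_find(), works because the generic struct iommu_unit is embedded in struct dmar_unit. Casting in both directions is valid only while that member sits at offset zero, i.e. comes first. A trimmed, hypothetical sketch of the layout these casts assume (the real structures live in intel_dmar.h, which this excerpt does not show; to_dmar_unit is a name invented here, the patch open-codes the cast):

    struct iommu_unit {
        /* generic state: lock, unit number, ... */
        int unit;
    };

    struct dmar_unit {
        struct iommu_unit iommu;   /* must stay the first member */
        int segment;
        /* ... Intel-specific hardware state ... */
    };

    /* Downcast from the generic to the DMAR-specific view. */
    static inline struct dmar_unit *
    to_dmar_unit(struct iommu_unit *unit)
    {
        return ((struct dmar_unit *)unit);
    }

Keeping iommu first also makes &dmar->iommu equal to (struct iommu_unit *)dmar, so the NULL returned by a failed dmar_find() round-trips through iommu_find() unchanged in practice, though strictly speaking that relies on the member being at offset zero.
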
Index: sys/x86/iommu/intel_fault.c
===================================================================
--- sys/x86/iommu/intel_fault.c
+++ sys/x86/iommu/intel_fault.c
@@ -96,25 +96,25 @@
clear = 0;
if ((fsts & DMAR_FSTS_ITE) != 0) {
- printf("DMAR%d: Invalidation timed out\n", unit->unit);
+ printf("DMAR%d: Invalidation timed out\n", unit->iommu.unit);
clear |= DMAR_FSTS_ITE;
}
if ((fsts & DMAR_FSTS_ICE) != 0) {
printf("DMAR%d: Invalidation completion error\n",
- unit->unit);
+ unit->iommu.unit);
clear |= DMAR_FSTS_ICE;
}
if ((fsts & DMAR_FSTS_IQE) != 0) {
printf("DMAR%d: Invalidation queue error\n",
- unit->unit);
+ unit->iommu.unit);
clear |= DMAR_FSTS_IQE;
}
if ((fsts & DMAR_FSTS_APF) != 0) {
- printf("DMAR%d: Advanced pending fault\n", unit->unit);
+ printf("DMAR%d: Advanced pending fault\n", unit->iommu.unit);
clear |= DMAR_FSTS_APF;
}
if ((fsts & DMAR_FSTS_AFO) != 0) {
- printf("DMAR%d: Advanced fault overflow\n", unit->unit);
+ printf("DMAR%d: Advanced fault overflow\n", unit->iommu.unit);
clear |= DMAR_FSTS_AFO;
}
if (clear != 0)
@@ -176,7 +176,7 @@
*
*/
if ((fsts & DMAR_FSTS_PFO) != 0) {
- printf("DMAR%d: Fault Overflow\n", unit->unit);
+ printf("DMAR%d: Fault Overflow\n", unit->iommu.unit);
dmar_write4(unit, DMAR_FSTS_REG, DMAR_FSTS_PFO);
}
@@ -208,7 +208,7 @@
DMAR_FAULT_UNLOCK(unit);
sid = DMAR_FRCD2_SID(fault_rec[1]);
- printf("DMAR%d: ", unit->unit);
+ printf("DMAR%d: ", unit->iommu.unit);
DMAR_LOCK(unit);
ctx = dmar_find_ctx_locked(unit, sid);
if (ctx == NULL) {
@@ -223,13 +223,13 @@
slot = PCI_RID2SLOT(sid);
func = PCI_RID2FUNC(sid);
} else {
- ctx->flags |= DMAR_CTX_FAULTED;
+ ctx->context.flags |= DMAR_CTX_FAULTED;
ctx->last_fault_rec[0] = fault_rec[0];
ctx->last_fault_rec[1] = fault_rec[1];
- device_print_prettyname(ctx->ctx_tag.owner);
- bus = pci_get_bus(ctx->ctx_tag.owner);
- slot = pci_get_slot(ctx->ctx_tag.owner);
- func = pci_get_function(ctx->ctx_tag.owner);
+ device_print_prettyname(ctx->context.tag->owner);
+ bus = pci_get_bus(ctx->context.tag->owner);
+ slot = pci_get_slot(ctx->context.tag->owner);
+ func = pci_get_function(ctx->context.tag->owner);
}
DMAR_UNLOCK(unit);
printf(
@@ -276,7 +276,7 @@
unit->fault_taskqueue = taskqueue_create_fast("dmarff", M_WAITOK,
taskqueue_thread_enqueue, &unit->fault_taskqueue);
taskqueue_start_threads(&unit->fault_taskqueue, 1, PI_AV,
- "dmar%d fault taskq", unit->unit);
+ "dmar%d fault taskq", unit->iommu.unit);
DMAR_LOCK(unit);
dmar_disable_fault_intr(unit);
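
When no context matches a fault's source-id, the handler above falls back to decoding the SID directly with PCI_RID2SLOT()/PCI_RID2FUNC() and their bus counterpart. A requester ID is the standard 16-bit PCI triple, bus[15:8] slot[7:3] func[2:0]; a self-contained sketch with local stand-in macros:

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for the PCI_RID2* macros used above. */
    #define RID2BUS(rid)  (((rid) >> 8) & 0xff)
    #define RID2SLOT(rid) (((rid) >> 3) & 0x1f)
    #define RID2FUNC(rid) ((rid) & 0x7)

    int
    main(void)
    {
        uint16_t sid = 0x0a10;  /* hypothetical source-id from a fault record */

        /* Prints: pci bus 10 slot 2 func 0 */
        printf("pci bus %d slot %d func %d\n",
            RID2BUS(sid), RID2SLOT(sid), RID2FUNC(sid));
        return (0);
    }
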
Index: sys/x86/iommu/intel_gas.c
===================================================================
--- sys/x86/iommu/intel_gas.c
+++ sys/x86/iommu/intel_gas.c
@@ -74,48 +74,54 @@
* Guest Address Space management.
*/
-static uma_zone_t dmar_map_entry_zone;
+static uma_zone_t iommu_map_entry_zone;
static void
intel_gas_init(void)
{
- dmar_map_entry_zone = uma_zcreate("DMAR_MAP_ENTRY",
- sizeof(struct dmar_map_entry), NULL, NULL,
+ iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
+ sizeof(struct iommu_map_entry), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);
}
SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);
-struct dmar_map_entry *
-dmar_gas_alloc_entry(struct dmar_domain *domain, u_int flags)
+struct iommu_map_entry *
+dmar_gas_alloc_entry(struct iommu_domain *iodom, u_int flags)
{
- struct dmar_map_entry *res;
+ struct dmar_domain *domain;
+ struct iommu_map_entry *res;
+
+ domain = (struct dmar_domain *)iodom;
KASSERT((flags & ~(DMAR_PGF_WAITOK)) == 0,
("unsupported flags %x", flags));
- res = uma_zalloc(dmar_map_entry_zone, ((flags & DMAR_PGF_WAITOK) !=
+ res = uma_zalloc(iommu_map_entry_zone, ((flags & DMAR_PGF_WAITOK) !=
0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
if (res != NULL) {
- res->domain = domain;
+ res->domain = (struct iommu_domain *)domain;
atomic_add_int(&domain->entries_cnt, 1);
}
return (res);
}
void
-dmar_gas_free_entry(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_free_entry(struct iommu_domain *iodom, struct iommu_map_entry *entry)
{
+ struct dmar_domain *domain;
+
+ domain = (struct dmar_domain *)iodom;
- KASSERT(domain == entry->domain,
+ KASSERT(domain == (struct dmar_domain *)entry->domain,
("mismatched free domain %p entry %p entry->domain %p", domain,
entry, entry->domain));
atomic_subtract_int(&domain->entries_cnt, 1);
- uma_zfree(dmar_map_entry_zone, entry);
+ uma_zfree(iommu_map_entry_zone, entry);
}
static int
-dmar_gas_cmp_entries(struct dmar_map_entry *a, struct dmar_map_entry *b)
+dmar_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b)
{
/* Last entry has zero size, so <= */
@@ -137,10 +143,10 @@
}
static void
-dmar_gas_augment_entry(struct dmar_map_entry *entry)
+dmar_gas_augment_entry(struct iommu_map_entry *entry)
{
- struct dmar_map_entry *child;
- dmar_gaddr_t free_down;
+ struct iommu_map_entry *child;
+ iommu_gaddr_t free_down;
free_down = 0;
if ((child = RB_LEFT(entry, rb_entry)) != NULL) {
@@ -159,18 +165,18 @@
entry->free_down = free_down;
}
-RB_GENERATE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
+RB_GENERATE(dmar_gas_entries_tree, iommu_map_entry, rb_entry,
dmar_gas_cmp_entries);
#ifdef INVARIANTS
static void
dmar_gas_check_free(struct dmar_domain *domain)
{
- struct dmar_map_entry *entry, *l, *r;
- dmar_gaddr_t v;
+ struct iommu_map_entry *entry, *l, *r;
+ iommu_gaddr_t v;
RB_FOREACH(entry, dmar_gas_entries_tree, &domain->rb_root) {
- KASSERT(domain == entry->domain,
+ KASSERT(domain == (struct dmar_domain *)entry->domain,
("mismatched free domain %p entry %p entry->domain %p",
domain, entry, entry->domain));
l = RB_LEFT(entry, rb_entry);
@@ -190,16 +196,16 @@
#endif
static bool
-dmar_gas_rb_insert(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_rb_insert(struct dmar_domain *domain, struct iommu_map_entry *entry)
{
- struct dmar_map_entry *found;
+ struct iommu_map_entry *found;
found = RB_INSERT(dmar_gas_entries_tree, &domain->rb_root, entry);
return (found == NULL);
}
static void
-dmar_gas_rb_remove(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_rb_remove(struct dmar_domain *domain, struct iommu_map_entry *entry)
{
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
@@ -208,10 +214,10 @@
void
dmar_gas_init_domain(struct dmar_domain *domain)
{
- struct dmar_map_entry *begin, *end;
+ struct iommu_map_entry *begin, *end;
- begin = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
- end = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
+ begin = dmar_gas_alloc_entry(&domain->iodom, DMAR_PGF_WAITOK);
+ end = dmar_gas_alloc_entry(&domain->iodom, DMAR_PGF_WAITOK);
DMAR_DOMAIN_LOCK(domain);
KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain));
@@ -219,12 +225,12 @@
begin->start = 0;
begin->end = DMAR_PAGE_SIZE;
- begin->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED;
+ begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
dmar_gas_rb_insert(domain, begin);
end->start = domain->end;
end->end = domain->end;
- end->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED;
+ end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
dmar_gas_rb_insert(domain, end);
domain->first_place = begin;
@@ -236,7 +242,7 @@
void
dmar_gas_fini_domain(struct dmar_domain *domain)
{
- struct dmar_map_entry *entry, *entry1;
+ struct iommu_map_entry *entry, *entry1;
DMAR_DOMAIN_ASSERT_LOCKED(domain);
KASSERT(domain->entries_cnt == 2, ("domain still in use %p", domain));
@@ -244,35 +250,35 @@
entry = RB_MIN(dmar_gas_entries_tree, &domain->rb_root);
KASSERT(entry->start == 0, ("start entry start %p", domain));
KASSERT(entry->end == DMAR_PAGE_SIZE, ("start entry end %p", domain));
- KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE,
+ KASSERT(entry->flags == IOMMU_MAP_ENTRY_PLACE,
("start entry flags %p", domain));
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
- dmar_gas_free_entry(domain, entry);
+ dmar_gas_free_entry(&domain->iodom, entry);
entry = RB_MAX(dmar_gas_entries_tree, &domain->rb_root);
KASSERT(entry->start == domain->end, ("end entry start %p", domain));
KASSERT(entry->end == domain->end, ("end entry end %p", domain));
- KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE,
+ KASSERT(entry->flags == IOMMU_MAP_ENTRY_PLACE,
("end entry flags %p", domain));
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
- dmar_gas_free_entry(domain, entry);
+ dmar_gas_free_entry(&domain->iodom, entry);
RB_FOREACH_SAFE(entry, dmar_gas_entries_tree, &domain->rb_root,
entry1) {
- KASSERT((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0,
+ KASSERT((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0,
("non-RMRR entry left %p", domain));
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
- dmar_gas_free_entry(domain, entry);
+ dmar_gas_free_entry(&domain->iodom, entry);
}
}
struct dmar_gas_match_args {
struct dmar_domain *domain;
- dmar_gaddr_t size;
+ iommu_gaddr_t size;
int offset;
const struct bus_dma_tag_common *common;
u_int gas_flags;
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
};
/*
@@ -282,10 +288,10 @@
* by a, and return 'true' if and only if the allocation attempt succeeds.
*/
static bool
-dmar_gas_match_one(struct dmar_gas_match_args *a, dmar_gaddr_t beg,
- dmar_gaddr_t end, dmar_gaddr_t maxaddr)
+dmar_gas_match_one(struct dmar_gas_match_args *a, iommu_gaddr_t beg,
+ iommu_gaddr_t end, iommu_gaddr_t maxaddr)
{
- dmar_gaddr_t bs, start;
+ iommu_gaddr_t bs, start;
a->entry->start = roundup2(beg + DMAR_PAGE_SIZE,
a->common->alignment);
@@ -298,7 +304,7 @@
return (false);
/* No boundary crossing. */
- if (dmar_test_boundary(a->entry->start + a->offset, a->size,
+ if (iommu_test_boundary(a->entry->start + a->offset, a->size,
a->common->boundary))
return (true);
@@ -313,7 +319,7 @@
/* DMAR_PAGE_SIZE to create gap after new entry. */
if (start + a->offset + a->size + DMAR_PAGE_SIZE <= end &&
start + a->offset + a->size <= maxaddr &&
- dmar_test_boundary(start + a->offset, a->size,
+ iommu_test_boundary(start + a->offset, a->size,
a->common->boundary)) {
a->entry->start = start;
return (true);
@@ -327,7 +333,7 @@
* XXXKIB. It is possible that bs is exactly at the start of
* the next entry, then we do not have a gap. Ignore for now.
*/
- if ((a->gas_flags & DMAR_GM_CANSPLIT) != 0) {
+ if ((a->gas_flags & IOMMU_MF_CANSPLIT) != 0) {
a->size = bs - a->entry->start;
return (true);
}
@@ -353,13 +359,13 @@
found = dmar_gas_rb_insert(a->domain, a->entry);
KASSERT(found, ("found dup %p start %jx size %jx",
a->domain, (uintmax_t)a->entry->start, (uintmax_t)a->size));
- a->entry->flags = DMAR_MAP_ENTRY_MAP;
+ a->entry->flags = IOMMU_MAP_ENTRY_MAP;
}
static int
-dmar_gas_lowermatch(struct dmar_gas_match_args *a, struct dmar_map_entry *entry)
+dmar_gas_lowermatch(struct dmar_gas_match_args *a, struct iommu_map_entry *entry)
{
- struct dmar_map_entry *child;
+ struct iommu_map_entry *child;
child = RB_RIGHT(entry, rb_entry);
if (child != NULL && entry->end < a->common->lowaddr &&
@@ -388,9 +394,9 @@
}
static int
-dmar_gas_uppermatch(struct dmar_gas_match_args *a, struct dmar_map_entry *entry)
+dmar_gas_uppermatch(struct dmar_gas_match_args *a, struct iommu_map_entry *entry)
{
- struct dmar_map_entry *child;
+ struct iommu_map_entry *child;
if (entry->free_down < a->size + a->offset + DMAR_PAGE_SIZE)
return (ENOMEM);
@@ -419,8 +425,8 @@
static int
dmar_gas_find_space(struct dmar_domain *domain,
- const struct bus_dma_tag_common *common, dmar_gaddr_t size,
- int offset, u_int flags, struct dmar_map_entry *entry)
+ const struct bus_dma_tag_common *common, iommu_gaddr_t size,
+ int offset, u_int flags, struct iommu_map_entry *entry)
{
struct dmar_gas_match_args a;
int error;
@@ -454,10 +460,10 @@
}
static int
-dmar_gas_alloc_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
+dmar_gas_alloc_region(struct dmar_domain *domain, struct iommu_map_entry *entry,
u_int flags)
{
- struct dmar_map_entry *next, *prev;
+ struct iommu_map_entry *next, *prev;
bool found;
DMAR_DOMAIN_ASSERT_LOCKED(domain);
@@ -485,16 +491,16 @@
* extends both ways.
*/
if (prev != NULL && prev->end > entry->start &&
- (prev->flags & DMAR_MAP_ENTRY_PLACE) == 0) {
- if ((flags & DMAR_GM_RMRR) == 0 ||
- (prev->flags & DMAR_MAP_ENTRY_RMRR) == 0)
+ (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
+ if ((flags & IOMMU_MF_RMRR) == 0 ||
+ (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
return (EBUSY);
entry->start = prev->end;
}
if (next->start < entry->end &&
- (next->flags & DMAR_MAP_ENTRY_PLACE) == 0) {
- if ((flags & DMAR_GM_RMRR) == 0 ||
- (next->flags & DMAR_MAP_ENTRY_RMRR) == 0)
+ (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
+ if ((flags & IOMMU_MF_RMRR) == 0 ||
+ (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
return (EBUSY);
entry->end = next->start;
}
@@ -514,11 +520,11 @@
found = dmar_gas_rb_insert(domain, entry);
KASSERT(found, ("found RMRR dup %p start %jx end %jx",
domain, (uintmax_t)entry->start, (uintmax_t)entry->end));
- if ((flags & DMAR_GM_RMRR) != 0)
- entry->flags = DMAR_MAP_ENTRY_RMRR;
+ if ((flags & IOMMU_MF_RMRR) != 0)
+ entry->flags = IOMMU_MAP_ENTRY_RMRR;
#ifdef INVARIANTS
- struct dmar_map_entry *ip, *in;
+ struct iommu_map_entry *ip, *in;
ip = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry);
in = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry);
KASSERT(prev == NULL || ip == prev,
@@ -537,16 +543,16 @@
}
void
-dmar_gas_free_space(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_free_space(struct dmar_domain *domain, struct iommu_map_entry *entry)
{
DMAR_DOMAIN_ASSERT_LOCKED(domain);
- KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR |
- DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_MAP,
+ KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
+ IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP,
("permanent entry %p %p", domain, entry));
dmar_gas_rb_remove(domain, entry);
- entry->flags &= ~DMAR_MAP_ENTRY_MAP;
+ entry->flags &= ~IOMMU_MAP_ENTRY_MAP;
#ifdef INVARIANTS
if (dmar_check_free)
dmar_gas_check_free(domain);
@@ -554,19 +560,19 @@
}
void
-dmar_gas_free_region(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_free_region(struct dmar_domain *domain, struct iommu_map_entry *entry)
{
- struct dmar_map_entry *next, *prev;
+ struct iommu_map_entry *next, *prev;
DMAR_DOMAIN_ASSERT_LOCKED(domain);
- KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR |
- DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_RMRR,
+ KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
+ IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR,
("non-RMRR entry %p %p", domain, entry));
prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry);
next = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry);
dmar_gas_rb_remove(domain, entry);
- entry->flags &= ~DMAR_MAP_ENTRY_RMRR;
+ entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;
if (prev == NULL)
dmar_gas_rb_insert(domain, domain->first_place);
@@ -575,18 +581,21 @@
}
int
-dmar_gas_map(struct dmar_domain *domain,
- const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset,
- u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res)
+dmar_gas_map(struct iommu_domain *iodom,
+ const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
+ u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
- struct dmar_map_entry *entry;
+ struct dmar_domain *domain;
+ struct iommu_map_entry *entry;
int error;
- KASSERT((flags & ~(DMAR_GM_CANWAIT | DMAR_GM_CANSPLIT)) == 0,
+ domain = (struct dmar_domain *)iodom;
+
+ KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0,
("invalid flags 0x%x", flags));
- entry = dmar_gas_alloc_entry(domain, (flags & DMAR_GM_CANWAIT) != 0 ?
- DMAR_PGF_WAITOK : 0);
+ entry = dmar_gas_alloc_entry(&domain->iodom,
+ (flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
if (entry == NULL)
return (ENOMEM);
DMAR_DOMAIN_LOCK(domain);
@@ -594,7 +603,7 @@
entry);
if (error == ENOMEM) {
DMAR_DOMAIN_UNLOCK(domain);
- dmar_gas_free_entry(domain, entry);
+ dmar_gas_free_entry(&domain->iodom, entry);
return (error);
}
#ifdef INVARIANTS
@@ -610,11 +619,11 @@
error = domain_map_buf(domain, entry->start, entry->end - entry->start,
ma,
- ((eflags & DMAR_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
- ((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
- ((eflags & DMAR_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
- ((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
- (flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
+ ((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
+ (flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
if (error == ENOMEM) {
dmar_domain_unload_entry(entry, true);
return (error);
@@ -627,15 +636,18 @@
}
int
-dmar_gas_map_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
+dmar_gas_map_region(struct iommu_domain *iodom, struct iommu_map_entry *entry,
u_int eflags, u_int flags, vm_page_t *ma)
{
- dmar_gaddr_t start;
+ struct dmar_domain *domain;
+ iommu_gaddr_t start;
int error;
+ domain = (struct dmar_domain *)iodom;
+
KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
entry, entry->flags));
- KASSERT((flags & ~(DMAR_GM_CANWAIT | DMAR_GM_RMRR)) == 0,
+ KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0,
("invalid flags 0x%x", flags));
start = entry->start;
@@ -652,11 +664,11 @@
error = domain_map_buf(domain, entry->start, entry->end - entry->start,
ma + OFF_TO_IDX(start - entry->start),
- ((eflags & DMAR_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
- ((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
- ((eflags & DMAR_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
- ((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
- (flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
+ ((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
+ (flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
if (error == ENOMEM) {
dmar_domain_unload_entry(entry, false);
return (error);
@@ -668,21 +680,21 @@
}
int
-dmar_gas_reserve_region(struct dmar_domain *domain, dmar_gaddr_t start,
- dmar_gaddr_t end)
+dmar_gas_reserve_region(struct dmar_domain *domain, iommu_gaddr_t start,
+ iommu_gaddr_t end)
{
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
int error;
- entry = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
+ entry = dmar_gas_alloc_entry(&domain->iodom, DMAR_PGF_WAITOK);
entry->start = start;
entry->end = end;
DMAR_DOMAIN_LOCK(domain);
- error = dmar_gas_alloc_region(domain, entry, DMAR_GM_CANWAIT);
+ error = dmar_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
if (error == 0)
- entry->flags |= DMAR_MAP_ENTRY_UNMAPPED;
+ entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
DMAR_DOMAIN_UNLOCK(domain);
if (error != 0)
- dmar_gas_free_entry(domain, entry);
+ dmar_gas_free_entry(&domain->iodom, entry);
return (error);
}
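
dmar_gas_augment_entry() above maintains the free_down field: for every tree node, the largest gap between neighbouring allocated ranges anywhere in that node's subtree. This lets dmar_gas_lowermatch()/dmar_gas_uppermatch() prune whole subtrees that cannot possibly hold a request, as in the entry->free_down < a->size + a->offset + DMAR_PAGE_SIZE test above. A simplified restatement with the RB-tree plumbing replaced by plain child pointers; the middle of the function is elided by the hunk above, so this reconstruction may differ in detail:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t iommu_gaddr_t;

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    struct node {
        iommu_gaddr_t start, end;   /* this entry's range */
        iommu_gaddr_t first, last;  /* least start / greatest end in subtree */
        iommu_gaddr_t free_down;    /* largest gap in subtree */
        struct node *left, *right;
    };

    /*
     * Bottom-up recomputation of the augmented fields from the
     * children, as RB_GENERATE re-runs it along rotation paths.
     */
    static void
    augment(struct node *n)
    {
        iommu_gaddr_t free_down = 0;

        if (n->left != NULL) {
            free_down = MAX(free_down, n->left->free_down);
            /* gap between the left subtree's last range and this one */
            free_down = MAX(free_down, n->start - n->left->last);
            n->first = n->left->first;
        } else
            n->first = n->start;
        if (n->right != NULL) {
            free_down = MAX(free_down, n->right->free_down);
            /* gap between this range and the right subtree's first */
            free_down = MAX(free_down, n->right->first - n->end);
            n->last = n->right->last;
        } else
            n->last = n->end;
        n->free_down = free_down;
    }

    int
    main(void)
    {
        struct node l = { 0x1000, 0x2000, 0, 0, 0, NULL, NULL };
        struct node r = { 0x8000, 0x9000, 0, 0, 0, NULL, NULL };
        struct node p = { 0x5000, 0x6000, 0, 0, 0, &l, &r };

        augment(&l);
        augment(&r);
        augment(&p);
        /* Gaps: 0x2000..0x5000 (0x3000) and 0x6000..0x8000 (0x2000). */
        printf("free_down 0x%llx\n", (unsigned long long)p.free_down);
        return (0);
    }
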
Index: sys/x86/iommu/intel_idpgtbl.c
===================================================================
--- sys/x86/iommu/intel_idpgtbl.c
+++ sys/x86/iommu/intel_idpgtbl.c
@@ -70,7 +70,7 @@
#include <x86/iommu/intel_dmar.h>
static int domain_unmap_buf_locked(struct dmar_domain *domain,
- dmar_gaddr_t base, dmar_gaddr_t size, int flags);
+ iommu_gaddr_t base, iommu_gaddr_t size, int flags);
/*
* The cache of the identity mapping page tables for the DMARs. Using
@@ -82,7 +82,7 @@
*/
struct idpgtbl {
- dmar_gaddr_t maxaddr; /* Page table covers the guest address
+ iommu_gaddr_t maxaddr; /* Page table covers the guest address
range [0..maxaddr) */
int pglvl; /* Total page table levels ignoring
superpages */
@@ -109,12 +109,12 @@
*/
static void
domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
- dmar_gaddr_t addr)
+ iommu_gaddr_t addr)
{
vm_page_t m1;
dmar_pte_t *pte;
struct sf_buf *sf;
- dmar_gaddr_t f, pg_sz;
+ iommu_gaddr_t f, pg_sz;
vm_pindex_t base;
int i;
@@ -163,7 +163,7 @@
* maxaddr is typically mapped.
*/
vm_object_t
-domain_get_idmap_pgtbl(struct dmar_domain *domain, dmar_gaddr_t maxaddr)
+domain_get_idmap_pgtbl(struct dmar_domain *domain, iommu_gaddr_t maxaddr)
{
struct dmar_unit *unit;
struct idpgtbl *tbl;
@@ -323,7 +323,7 @@
* the level lvl.
*/
static int
-domain_pgtbl_pte_off(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
+domain_pgtbl_pte_off(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
{
base >>= DMAR_PAGE_SHIFT + (domain->pglvl - lvl - 1) *
@@ -337,7 +337,7 @@
* lvl.
*/
static vm_pindex_t
-domain_pgtbl_get_pindex(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
+domain_pgtbl_get_pindex(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
{
vm_pindex_t idx, pidx;
int i;
@@ -353,7 +353,7 @@
}
static dmar_pte_t *
-domain_pgtbl_map_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
+domain_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
int flags, vm_pindex_t *idxp, struct sf_buf **sf)
{
vm_page_t m;
@@ -421,12 +421,12 @@
}
static int
-domain_map_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
- dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
+domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
+ iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
{
dmar_pte_t *pte;
struct sf_buf *sf;
- dmar_gaddr_t pg_sz, base1, size1;
+ iommu_gaddr_t pg_sz, base1, size1;
vm_pindex_t pi, c, idx, run_sz;
int lvl;
bool superpage;
@@ -499,7 +499,7 @@
}
int
-domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size,
+domain_map_buf(struct dmar_domain *domain, iommu_gaddr_t base, iommu_gaddr_t size,
vm_page_t *ma, uint64_t pflags, int flags)
{
struct dmar_unit *unit;
@@ -559,11 +559,11 @@
}
static void domain_unmap_clear_pte(struct dmar_domain *domain,
- dmar_gaddr_t base, int lvl, int flags, dmar_pte_t *pte,
+ iommu_gaddr_t base, int lvl, int flags, dmar_pte_t *pte,
struct sf_buf **sf, bool free_fs);
static void
-domain_free_pgtbl_pde(struct dmar_domain *domain, dmar_gaddr_t base,
+domain_free_pgtbl_pde(struct dmar_domain *domain, iommu_gaddr_t base,
int lvl, int flags)
{
struct sf_buf *sf;
@@ -576,7 +576,7 @@
}
static void
-domain_unmap_clear_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
+domain_unmap_clear_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf)
{
vm_page_t m;
@@ -605,13 +605,13 @@
* Assumes that the unmap is never partial.
*/
static int
-domain_unmap_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
- dmar_gaddr_t size, int flags)
+domain_unmap_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
+ iommu_gaddr_t size, int flags)
{
dmar_pte_t *pte;
struct sf_buf *sf;
vm_pindex_t idx;
- dmar_gaddr_t pg_sz;
+ iommu_gaddr_t pg_sz;
int lvl;
DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
@@ -677,8 +677,8 @@
}
int
-domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base,
- dmar_gaddr_t size, int flags)
+domain_unmap_buf(struct dmar_domain *domain, iommu_gaddr_t base,
+ iommu_gaddr_t size, int flags)
{
int error;
@@ -757,17 +757,17 @@
}
void
-domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
- dmar_gaddr_t size)
+domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
+ iommu_gaddr_t size)
{
struct dmar_unit *unit;
- dmar_gaddr_t isize;
+ iommu_gaddr_t isize;
uint64_t iotlbr;
int am, iro;
unit = domain->dmar;
KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
- unit->unit));
+ unit->iommu.unit));
iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
DMAR_LOCK(unit);
if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
@@ -775,7 +775,7 @@
DMAR_IOTLB_DID(domain->domain), iro);
KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
DMAR_IOTLB_IAIG_INVLD,
- ("dmar%d: invalidation failed %jx", unit->unit,
+ ("dmar%d: invalidation failed %jx", unit->iommu.unit,
(uintmax_t)iotlbr));
} else {
for (; size > 0; base += isize, size -= isize) {
@@ -788,7 +788,7 @@
DMAR_IOTLB_IAIG_INVLD,
("dmar%d: PSI invalidation failed "
"iotlbr 0x%jx base 0x%jx size 0x%jx am %d",
- unit->unit, (uintmax_t)iotlbr,
+ unit->iommu.unit, (uintmax_t)iotlbr,
(uintmax_t)base, (uintmax_t)size, am));
/*
* Any non-page granularity covers whole guest
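
domain_pgtbl_pte_off() in intel_idpgtbl.c above selects, for each page-table level, the 9-bit slice of the guest address that indexes into that level's page-table page. A standalone sketch with the usual x86 DMAR constants (4 KB pages, 512 64-bit PTEs per page-table page); PTEMASK is a local stand-in for DMAR_PTEMASK:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t iommu_gaddr_t;

    #define PAGE_SHIFT  12      /* 4 KB pages */
    #define NPTEPGSHIFT 9       /* 512 PTEs per page-table page */
    #define PTEMASK     0x1ff

    /* PTE index at level 'lvl' (0 = root) of a 'pglvl'-level table. */
    static int
    pte_off(int pglvl, iommu_gaddr_t base, int lvl)
    {
        return ((base >> (PAGE_SHIFT + (pglvl - lvl - 1) * NPTEPGSHIFT)) &
            PTEMASK);
    }

    int
    main(void)
    {
        iommu_gaddr_t addr = 0x12345678000ULL;
        int lvl;

        /* 4-level table: root indexes bits 47:39, leaf bits 20:12. */
        for (lvl = 0; lvl < 4; lvl++)
            printf("lvl %d -> index %d\n", lvl, pte_off(4, addr, lvl));
        return (0);
    }
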
Index: sys/x86/iommu/intel_intrmap.c
===================================================================
--- sys/x86/iommu/intel_intrmap.c
+++ sys/x86/iommu/intel_intrmap.c
@@ -255,7 +255,7 @@
} else {
unit = dmar_find(src, bootverbose);
if (unit != NULL && rid != NULL)
- dmar_get_requester(src, rid);
+ iommu_get_requester(src, rid);
}
return (unit);
}
Index: sys/x86/iommu/intel_qi.c
===================================================================
--- sys/x86/iommu/intel_qi.c
+++ sys/x86/iommu/intel_qi.c
@@ -63,7 +63,7 @@
static bool
dmar_qi_seq_processed(const struct dmar_unit *unit,
- const struct dmar_qi_genseq *pseq)
+ const struct iommu_qi_genseq *pseq)
{
return (pseq->gen < unit->inv_waitd_gen ||
@@ -174,10 +174,10 @@
}
static void
-dmar_qi_emit_wait_seq(struct dmar_unit *unit, struct dmar_qi_genseq *pseq,
+dmar_qi_emit_wait_seq(struct dmar_unit *unit, struct iommu_qi_genseq *pseq,
bool emit_wait)
{
- struct dmar_qi_genseq gsec;
+ struct iommu_qi_genseq gsec;
uint32_t seq;
KASSERT(pseq != NULL, ("wait descriptor with no place for seq"));
@@ -203,7 +203,7 @@
}
static void
-dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct dmar_qi_genseq *gseq,
+dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct iommu_qi_genseq *gseq,
bool nowait)
{
@@ -213,7 +213,7 @@
if (cold || nowait) {
cpu_spinwait();
} else {
- msleep(&unit->inv_seq_waiters, &unit->lock, 0,
+ msleep(&unit->inv_seq_waiters, &unit->iommu.lock, 0,
"dmarse", hz);
}
}
@@ -221,11 +221,11 @@
}
void
-dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t base,
- dmar_gaddr_t size, struct dmar_qi_genseq *pseq, bool emit_wait)
+dmar_qi_invalidate_locked(struct dmar_domain *domain, iommu_gaddr_t base,
+ iommu_gaddr_t size, struct iommu_qi_genseq *pseq, bool emit_wait)
{
struct dmar_unit *unit;
- dmar_gaddr_t isize;
+ iommu_gaddr_t isize;
int am;
unit = domain->dmar;
@@ -246,7 +246,7 @@
void
dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit)
{
- struct dmar_qi_genseq gseq;
+ struct iommu_qi_genseq gseq;
DMAR_ASSERT_LOCKED(unit);
dmar_qi_ensure(unit, 2);
@@ -259,7 +259,7 @@
void
dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit)
{
- struct dmar_qi_genseq gseq;
+ struct iommu_qi_genseq gseq;
DMAR_ASSERT_LOCKED(unit);
dmar_qi_ensure(unit, 2);
@@ -273,7 +273,7 @@
void
dmar_qi_invalidate_iec_glob(struct dmar_unit *unit)
{
- struct dmar_qi_genseq gseq;
+ struct iommu_qi_genseq gseq;
DMAR_ASSERT_LOCKED(unit);
dmar_qi_ensure(unit, 2);
@@ -286,7 +286,7 @@
void
dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt)
{
- struct dmar_qi_genseq gseq;
+ struct iommu_qi_genseq gseq;
u_int c, l;
DMAR_ASSERT_LOCKED(unit);
@@ -329,7 +329,8 @@
struct dmar_unit *unit;
unit = arg;
- KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled", unit->unit));
+ KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled",
+ unit->iommu.unit));
taskqueue_enqueue(unit->qi_taskqueue, &unit->qi_task);
return (FILTER_HANDLED);
}
@@ -338,7 +339,7 @@
dmar_qi_task(void *arg, int pending __unused)
{
struct dmar_unit *unit;
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
uint32_t ics;
unit = arg;
@@ -353,7 +354,7 @@
TAILQ_REMOVE(&unit->tlb_flush_entries, entry, dmamap_link);
DMAR_UNLOCK(unit);
dmar_domain_free_entry(entry, (entry->flags &
- DMAR_MAP_ENTRY_QI_NF) == 0);
+ IOMMU_MAP_ENTRY_QI_NF) == 0);
DMAR_LOCK(unit);
}
ics = dmar_read4(unit, DMAR_ICS_REG);
@@ -385,7 +386,7 @@
unit->qi_taskqueue = taskqueue_create_fast("dmarqf", M_WAITOK,
taskqueue_thread_enqueue, &unit->qi_taskqueue);
taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV,
- "dmar%d qi taskq", unit->unit);
+ "dmar%d qi taskq", unit->iommu.unit);
unit->inv_waitd_gen = 0;
unit->inv_waitd_seq = 1;
@@ -424,7 +425,7 @@
void
dmar_fini_qi(struct dmar_unit *unit)
{
- struct dmar_qi_genseq gseq;
+ struct iommu_qi_genseq gseq;
if (!unit->qi_enabled)
return;
@@ -442,7 +443,7 @@
dmar_disable_qi_intr(unit);
dmar_disable_qi(unit);
KASSERT(unit->inv_seq_waiters == 0,
- ("dmar%d: waiters on disabled queue", unit->unit));
+ ("dmar%d: waiters on disabled queue", unit->iommu.unit));
DMAR_UNLOCK(unit);
kmem_free(unit->inv_queue, unit->inv_queue_size);
@@ -457,7 +458,8 @@
uint32_t iectl;
DMAR_ASSERT_LOCKED(unit);
- KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit));
+ KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
+ unit->iommu.unit));
iectl = dmar_read4(unit, DMAR_IECTL_REG);
iectl &= ~DMAR_IECTL_IM;
dmar_write4(unit, DMAR_IECTL_REG, iectl);
@@ -469,7 +471,8 @@
uint32_t iectl;
DMAR_ASSERT_LOCKED(unit);
- KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit));
+ KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
+ unit->iommu.unit));
iectl = dmar_read4(unit, DMAR_IECTL_REG);
dmar_write4(unit, DMAR_IECTL_REG, iectl | DMAR_IECTL_IM);
}
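
The struct iommu_qi_genseq renames above exist because a wait descriptor's sequence number is only 32 bits wide and wraps; the (gen, seq) pair orders completions across wraps, so dmar_qi_seq_processed(), whose body is elided here, only has to compare lexicographically against the last completed pair. A restatement under hypothetical local names; the kernel version reads the completed pair from unit->inv_waitd_gen and unit->inv_waitd_seq:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors struct iommu_qi_genseq: gen counts wraps of seq. */
    struct genseq {
        unsigned int gen;
        uint32_t seq;
    };

    /*
     * True once the invalidation tagged with *pseq has completed:
     * either a whole generation has passed, or we are in the same
     * generation and the completed sequence has caught up.
     */
    static bool
    seq_processed(const struct genseq *completed, const struct genseq *pseq)
    {
        return (pseq->gen < completed->gen ||
            (pseq->gen == completed->gen && pseq->seq <= completed->seq));
    }

    int
    main(void)
    {
        struct genseq completed = { 2, 10 };
        struct genseq waiter = { 1, 0xfffffff0 };  /* previous generation */

        printf("%d\n", seq_processed(&completed, &waiter));  /* 1 */
        return (0);
    }
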
Index: sys/x86/iommu/intel_quirks.c
===================================================================
--- sys/x86/iommu/intel_quirks.c
+++ sys/x86/iommu/intel_quirks.c
@@ -222,8 +222,11 @@
};
void
-dmar_quirks_pre_use(struct dmar_unit *dmar)
+dmar_quirks_pre_use(struct iommu_unit *unit)
{
+ struct dmar_unit *dmar;
+
+ dmar = (struct dmar_unit *)unit;
if (!dmar_barrier_enter(dmar, DMAR_BARRIER_USEQ))
return;
Index: sys/x86/iommu/intel_utils.c
===================================================================
--- sys/x86/iommu/intel_utils.c
+++ sys/x86/iommu/intel_utils.c
@@ -148,7 +148,7 @@
* address space, accept the biggest sagaw, whatever it is.
*/
int
-dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr, bool allow_less)
+dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr, bool allow_less)
{
int i;
@@ -207,17 +207,17 @@
return (alvl < nitems(sagaw_sp) && (sagaw_sp[alvl] & cap_sps) != 0);
}
-dmar_gaddr_t
+iommu_gaddr_t
pglvl_page_size(int total_pglvl, int lvl)
{
int rlvl;
- static const dmar_gaddr_t pg_sz[] = {
- (dmar_gaddr_t)DMAR_PAGE_SIZE,
- (dmar_gaddr_t)DMAR_PAGE_SIZE << DMAR_NPTEPGSHIFT,
- (dmar_gaddr_t)DMAR_PAGE_SIZE << (2 * DMAR_NPTEPGSHIFT),
- (dmar_gaddr_t)DMAR_PAGE_SIZE << (3 * DMAR_NPTEPGSHIFT),
- (dmar_gaddr_t)DMAR_PAGE_SIZE << (4 * DMAR_NPTEPGSHIFT),
- (dmar_gaddr_t)DMAR_PAGE_SIZE << (5 * DMAR_NPTEPGSHIFT)
+ static const iommu_gaddr_t pg_sz[] = {
+ (iommu_gaddr_t)DMAR_PAGE_SIZE,
+ (iommu_gaddr_t)DMAR_PAGE_SIZE << DMAR_NPTEPGSHIFT,
+ (iommu_gaddr_t)DMAR_PAGE_SIZE << (2 * DMAR_NPTEPGSHIFT),
+ (iommu_gaddr_t)DMAR_PAGE_SIZE << (3 * DMAR_NPTEPGSHIFT),
+ (iommu_gaddr_t)DMAR_PAGE_SIZE << (4 * DMAR_NPTEPGSHIFT),
+ (iommu_gaddr_t)DMAR_PAGE_SIZE << (5 * DMAR_NPTEPGSHIFT)
};
KASSERT(lvl >= 0 && lvl < total_pglvl,
@@ -227,7 +227,7 @@
return (pg_sz[rlvl]);
}
-dmar_gaddr_t
+iommu_gaddr_t
domain_page_size(struct dmar_domain *domain, int lvl)
{
@@ -235,10 +235,10 @@
}
int
-calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
- dmar_gaddr_t *isizep)
+calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size,
+ iommu_gaddr_t *isizep)
{
- dmar_gaddr_t isize;
+ iommu_gaddr_t isize;
int am;
for (am = DMAR_CAP_MAMV(unit->hw_cap);; am--) {
@@ -252,7 +252,7 @@
return (am);
}
-dmar_haddr_t dmar_high;
+iommu_haddr_t dmar_high;
int haw;
int dmar_tbl_pagecnt;
@@ -482,7 +482,7 @@
* DMAR_GCMD_WBF is only valid when CAP_RWBF is reported.
*/
KASSERT((unit->hw_cap & DMAR_CAP_RWBF) != 0,
- ("dmar%d: no RWBF", unit->unit));
+ ("dmar%d: no RWBF", unit->iommu.unit));
dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd | DMAR_GCMD_WBF);
DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_WBFS)
@@ -586,11 +586,12 @@
if ((dmar->barrier_flags & f_inproc) != 0) {
while ((dmar->barrier_flags & f_inproc) != 0) {
dmar->barrier_flags |= f_wakeup;
- msleep(&dmar->barrier_flags, &dmar->lock, 0,
+ msleep(&dmar->barrier_flags, &dmar->iommu.lock, 0,
"dmarb", 0);
}
KASSERT((dmar->barrier_flags & f_done) != 0,
- ("dmar%d barrier %d missing done", dmar->unit, barrier_id));
+ ("dmar%d barrier %d missing done", dmar->iommu.unit,
+ barrier_id));
DMAR_UNLOCK(dmar);
return (false);
}
@@ -607,7 +608,7 @@
DMAR_ASSERT_LOCKED(dmar);
KASSERT((dmar->barrier_flags & (f_done | f_inproc)) == f_inproc,
- ("dmar%d barrier %d missed entry", dmar->unit, barrier_id));
+ ("dmar%d barrier %d missed entry", dmar->iommu.unit, barrier_id));
dmar->barrier_flags |= f_done;
if ((dmar->barrier_flags & f_wakeup) != 0)
wakeup(&dmar->barrier_flags);
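
calc_am() in intel_utils.c above computes the address-mask value for a page-selective IOTLB invalidation: the largest am, starting from the hardware limit DMAR_CAP_MAMV(unit->hw_cap), such that a naturally aligned block of 2^am pages at base fits within the requested size. Callers such as domain_flush_iotlb_sync() then loop, invalidating one such block at a time. A self-contained sketch; the loop body is reconstructed around the break conditions elided by the hunk above, so details may differ:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t iommu_gaddr_t;

    #define PAGE_SHIFT 12

    /*
     * Pick the invalidation address mask: the largest am <= max_am
     * (standing in for DMAR_CAP_MAMV(unit->hw_cap)) such that
     * [base, base + 2^(am + PAGE_SHIFT)) is naturally aligned and
     * does not overshoot size.  Stores the covered size in *isizep.
     */
    static int
    calc_am(int max_am, iommu_gaddr_t base, iommu_gaddr_t size,
        iommu_gaddr_t *isizep)
    {
        iommu_gaddr_t isize;
        int am;

        for (am = max_am;; am--) {
            isize = 1ULL << (am + PAGE_SHIFT);
            if ((base & (isize - 1)) == 0 && size >= isize)
                break;
            if (am == 0)
                break;
        }
        *isizep = isize;
        return (am);
    }

    int
    main(void)
    {
        iommu_gaddr_t isize;
        int am;

        /* 64 KB region at a 64 KB-aligned base: am = 4 (16 pages). */
        am = calc_am(6, 0x10000, 0x10000, &isize);
        printf("am %d covers 0x%llx bytes\n", am,
            (unsigned long long)isize);
        return (0);
    }
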