D25094.id72610.diff
Index: sys/conf/files.x86
===================================================================
--- sys/conf/files.x86
+++ sys/conf/files.x86
@@ -165,6 +165,7 @@
dev/imcsmb/imcsmb_pci.c optional imcsmb pci
dev/intel/spi.c optional intelspi
dev/io/iodev.c optional io
+dev/iommu/busdma_iommu.c optional acpi acpi_dmar pci
dev/ipmi/ipmi.c optional ipmi
dev/ipmi/ipmi_acpi.c optional ipmi acpi
dev/ipmi/ipmi_isa.c optional ipmi isa
Index: sys/dev/iommu/busdma_iommu.h
===================================================================
--- /dev/null
+++ sys/dev/iommu/busdma_iommu.h
@@ -0,0 +1,91 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_IOMMU_BUSDMA_IOMMU_H_
+#define _DEV_IOMMU_BUSDMA_IOMMU_H_
+
+struct iommu_map_entry;
+TAILQ_HEAD(iommu_map_entries_tailq, iommu_map_entry);
+
+struct bus_dma_tag_iommu {
+ struct bus_dma_tag_common common;
+ struct iommu_device *device;
+ device_t owner;
+ int map_count;
+ bus_dma_segment_t *segments;
+};
+
+struct bus_dmamap_iommu {
+ struct bus_dma_tag_iommu *device_tag;
+ struct memdesc mem;
+ bus_dmamap_callback_t *callback;
+ void *callback_arg;
+ struct iommu_map_entries_tailq map_entries;
+ TAILQ_ENTRY(bus_dmamap_iommu) delay_link;
+ bool locked;
+ bool cansleep;
+ int flags;
+};
+
+#define BUS_DMAMAP_IOMMU_MALLOC 0x0001
+#define BUS_DMAMAP_IOMMU_KMEM_ALLOC 0x0002
+
+extern struct bus_dma_impl bus_dma_iommu_impl;
+
+bus_dma_tag_t acpi_iommu_get_dma_tag(device_t dev, device_t child);
+
+struct iommu_device;
+struct iommu_domain;
+struct iommu_unit;
+
+int iommu_map(struct iommu_domain *domain,
+ const struct bus_dma_tag_common *common,
+ bus_size_t size, int offset,
+ int eflags, int iommu_flags,
+ vm_page_t *ma, struct iommu_map_entry **entry);
+int iommu_unmap(struct iommu_domain *domain,
+ struct iommu_map_entries_tailq *entries, bool free);
+
+struct iommu_device *iommu_get_device(struct iommu_unit *iommu,
+ device_t requester, uint16_t rid, bool disabled, bool rmrr);
+int iommu_free_device(struct iommu_device *device);
+int iommu_free_device_locked(struct iommu_unit *iommu,
+ struct iommu_device *device);
+
+struct iommu_unit *iommu_find(device_t dev, bool verbose);
+int iommu_init_busdma(struct iommu_unit *unit);
+void iommu_fini_busdma(struct iommu_unit *unit);
+struct iommu_device *iommu_instantiate_device(struct iommu_unit *iommu,
+ device_t dev, bool rmrr);
+
+#endif /* !_DEV_IOMMU_BUSDMA_IOMMU_H_ */
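
The acpi_iommu_get_dma_tag() hook declared above is what lets a bus driver hand IOMMU-backed DMA tags to its children. A minimal sketch of a consumer, assuming the usual BUS_GET_DMA_TAG method pattern (example_bus_get_dma_tag is an invented name; bus_generic_get_dma_tag() is the stock fallback):

/*
 * Sketch only: a bus_get_dma_tag method that prefers the IOMMU-backed
 * tag when the child sits behind an IOMMU unit, and falls back to the
 * generic parent tag otherwise.
 */
static bus_dma_tag_t
example_bus_get_dma_tag(device_t bus, device_t child)
{
	bus_dma_tag_t tag;

	tag = acpi_iommu_get_dma_tag(bus, child);
	if (tag == NULL)
		tag = bus_generic_get_dma_tag(bus, child);
	return (tag);
}
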
Index: sys/dev/iommu/busdma_iommu.c
===================================================================
--- /dev/null
+++ sys/dev/iommu/busdma_iommu.c
@@ -0,0 +1,957 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/domainset.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/interrupt.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <sys/memdesc.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+#include <sys/rman.h>
+#include <sys/taskqueue.h>
+#include <sys/tree.h>
+#include <sys/uio.h>
+#include <sys/vmem.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/md_var.h>
+
+#if defined(__amd64__)
+#include <x86/iommu/intel_reg.h>
+#include <x86/busdma_impl.h>
+#include <dev/iommu/busdma_iommu.h>
+#include <x86/iommu/intel_dmar.h>
+#else
+#include <machine/bus_dma_impl.h>
+#include <dev/iommu/busdma_iommu.h>
+#include <arm64/iommu/iommu.h>
+#endif
+
+/*
+ * busdma_iommu.c, the implementation of the busdma(9) interface using
+ * IOMMU units.
+ */
+
+static bool
+iommu_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
+{
+ char str[128], *env;
+ int default_bounce;
+ bool ret;
+ static const char bounce_str[] = "bounce";
+ static const char iommu_str[] = "iommu";
+
+ default_bounce = 0;
+ env = kern_getenv("hw.busdma.default");
+ if (env != NULL) {
+ if (strcmp(env, bounce_str) == 0)
+ default_bounce = 1;
+ else if (strcmp(env, iommu_str) == 0)
+ default_bounce = 0;
+ freeenv(env);
+ }
+
+ snprintf(str, sizeof(str), "hw.busdma.pci%d.%d.%d.%d",
+ domain, bus, slot, func);
+ env = kern_getenv(str);
+ if (env == NULL)
+ return (default_bounce != 0);
+ if (strcmp(env, bounce_str) == 0)
+ ret = true;
+ else if (strcmp(env, iommu_str) == 0)
+ ret = false;
+ else
+ ret = default_bounce != 0;
+ freeenv(env);
+ return (ret);
+}
+
+/*
+ * Given the original device, find the requester ID that will be seen by
+ * the IOMMU unit and used for page table lookup. PCI bridges may take
+ * ownership of transactions from downstream devices, so it may not be
+ * the same as the BSF of the target device. In those cases, all
+ * devices downstream of the bridge must share a single mapping
+ * domain, and must collectively be assigned to use either IOMMU or
+ * bounce mapping.
+ */
+static device_t
+iommu_get_requester(device_t dev, uint16_t *rid)
+{
+ devclass_t pci_class;
+ device_t l, pci, pcib, pcip, pcibp, requester;
+ int cap_offset;
+ uint16_t pcie_flags;
+ bool bridge_is_pcie;
+
+ pci_class = devclass_find("pci");
+ l = requester = dev;
+
+ *rid = pci_get_rid(dev);
+
+ /*
+ * Walk the bridge hierarchy from the target device to the
+ * host port to find the translating bridge nearest the IOMMU
+ * unit.
+ */
+ for (;;) {
+ pci = device_get_parent(l);
+ KASSERT(pci != NULL, ("iommu_get_requester(%s): NULL parent "
+ "for %s", device_get_name(dev), device_get_name(l)));
+ KASSERT(device_get_devclass(pci) == pci_class,
+ ("iommu_get_requester(%s): non-pci parent %s for %s",
+ device_get_name(dev), device_get_name(pci),
+ device_get_name(l)));
+
+ pcib = device_get_parent(pci);
+ KASSERT(pcib != NULL, ("iommu_get_requester(%s): NULL bridge "
+ "for %s", device_get_name(dev), device_get_name(pci)));
+
+ /*
+ * The parent of our "bridge" isn't another PCI bus,
+ * so pcib isn't a PCI->PCI bridge but rather a host
+ * port, and the requester ID won't be translated
+ * further.
+ */
+ pcip = device_get_parent(pcib);
+ if (device_get_devclass(pcip) != pci_class)
+ break;
+ pcibp = device_get_parent(pcip);
+
+ if (pci_find_cap(l, PCIY_EXPRESS, &cap_offset) == 0) {
+ /*
+ * Do not stop the loop even if the target
+ * device is PCIe, because it is possible (but
+ * unlikely) to have a PCI->PCIe bridge
+ * somewhere in the hierarchy.
+ */
+ l = pcib;
+ } else {
+ /*
+ * Device is not PCIe, it cannot be seen as a
+ * requester by IOMMU unit. Check whether the
+ * bridge is PCIe.
+ */
+ bridge_is_pcie = pci_find_cap(pcib, PCIY_EXPRESS,
+ &cap_offset) == 0;
+ requester = pcib;
+
+ /*
+ * Check for a buggy PCIe/PCI bridge that
+ * doesn't report the express capability. If
+ * the bridge above it is express but isn't a
+ * PCI bridge, then we know pcib is actually a
+ * PCIe/PCI bridge.
+ */
+ if (!bridge_is_pcie && pci_find_cap(pcibp,
+ PCIY_EXPRESS, &cap_offset) == 0) {
+ pcie_flags = pci_read_config(pcibp,
+ cap_offset + PCIER_FLAGS, 2);
+ if ((pcie_flags & PCIEM_FLAGS_TYPE) !=
+ PCIEM_TYPE_PCI_BRIDGE)
+ bridge_is_pcie = true;
+ }
+
+ if (bridge_is_pcie) {
+ /*
+ * The current device is not PCIe, but
+ * the bridge above it is. This is a
+ * PCIe->PCI bridge. Assume that the
+ * requester ID will be the secondary
+ * bus number with slot and function
+ * set to zero.
+ *
+ * XXX: Doesn't handle the case where
+ * the bridge is PCIe->PCI-X, and the
+ * bridge will only take ownership of
+ * requests in some cases. We should
+ * provide context entries with the
+ * same page tables for taken and
+ * non-taken transactions.
+ */
+ *rid = PCI_RID(pci_get_bus(l), 0, 0);
+ l = pcibp;
+ } else {
+ /*
+ * Neither the device nor the bridge
+ * above it are PCIe. This is a
+ * conventional PCI->PCI bridge, which
+ * will use the bridge's BSF as the
+ * requester ID.
+ */
+ *rid = pci_get_rid(pcib);
+ l = pcib;
+ }
+ }
+ }
+ return (requester);
+}
+
+struct iommu_device *
+iommu_instantiate_device(struct iommu_unit *iommu, device_t dev, bool rmrr)
+{
+ device_t requester;
+ struct iommu_device *device;
+ bool disabled;
+ uint16_t rid;
+
+ requester = iommu_get_requester(dev, &rid);
+
+ /*
+	 * If the user requested that the IOMMU be disabled for the
+	 * device, we cannot disable the whole unit, due to the
+	 * possibility of other devices on the same IOMMU still
+	 * requiring translation.  Instead, provide the identity
+	 * mapping for the device context.
+ */
+ disabled = iommu_bus_dma_is_dev_disabled(pci_get_domain(requester),
+ pci_get_bus(requester), pci_get_slot(requester),
+ pci_get_function(requester));
+ device = iommu_get_device(iommu, requester, rid, disabled, rmrr);
+ if (device == NULL)
+ return (NULL);
+ if (disabled) {
+ /*
+ * Keep the first reference on context, release the
+		 * Keep the first reference on the context; release
+		 * the later references.
+ IOMMU_LOCK(iommu);
+ if ((device->flags & IOMMU_DEVICE_DISABLED) == 0) {
+ device->flags |= IOMMU_DEVICE_DISABLED;
+ IOMMU_UNLOCK(iommu);
+ } else {
+ iommu_free_device_locked(iommu, device);
+ }
+ device = NULL;
+ }
+ return (device);
+}
+
+bus_dma_tag_t
+acpi_iommu_get_dma_tag(device_t dev, device_t child)
+{
+ struct iommu_unit *iommu;
+ struct iommu_device *device;
+ bus_dma_tag_t res;
+
+ iommu = iommu_find(child, bootverbose);
+	/* Not in the scope of any IOMMU? */
+ if (iommu == NULL)
+ return (NULL);
+ if (!iommu->dma_enabled)
+ return (NULL);
+#if defined(__amd64__)
+ dmar_quirks_pre_use(iommu);
+ dmar_instantiate_rmrr_ctxs(iommu);
+#endif
+
+ device = iommu_instantiate_device(iommu, child, false);
+ res = device == NULL ? NULL : (bus_dma_tag_t)&device->device_tag;
+ return (res);
+}
+
+static MALLOC_DEFINE(M_IOMMU_DMAMAP, "iommu_dmamap", "IOMMU DMA Map");
+
+static void iommu_bus_schedule_dmamap(struct iommu_unit *unit,
+ struct bus_dmamap_iommu *map);
+
+static int
+iommu_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+ bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+ int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat)
+{
+ struct bus_dma_tag_iommu *newtag, *oldtag;
+ int error;
+
+ *dmat = NULL;
+ error = common_bus_dma_tag_create(parent != NULL ?
+ &((struct bus_dma_tag_iommu *)parent)->common : NULL, alignment,
+ boundary, lowaddr, highaddr, filter, filterarg, maxsize,
+ nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
+ sizeof(struct bus_dma_tag_iommu), (void **)&newtag);
+ if (error != 0)
+ goto out;
+
+ oldtag = (struct bus_dma_tag_iommu *)parent;
+ newtag->common.impl = &bus_dma_iommu_impl;
+ newtag->device = oldtag->device;
+ newtag->owner = oldtag->owner;
+
+ *dmat = (bus_dma_tag_t)newtag;
+out:
+ CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
+ __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
+ error);
+ return (error);
+}
+
+static int
+iommu_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
+{
+
+ return (0);
+}
+
+static int
+iommu_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
+{
+ struct bus_dma_tag_iommu *dmat, *dmat_copy, *parent;
+ int error;
+
+ error = 0;
+ dmat_copy = dmat = (struct bus_dma_tag_iommu *)dmat1;
+
+ if (dmat != NULL) {
+ if (dmat->map_count != 0) {
+ error = EBUSY;
+ goto out;
+ }
+ while (dmat != NULL) {
+ parent = (struct bus_dma_tag_iommu *)
+ dmat->common.parent;
+ if (atomic_fetchadd_int(&dmat->common.ref_count, -1) ==
+ 1) {
+ if (dmat == &dmat->device->device_tag)
+ iommu_free_device(dmat->device);
+ free_domain(dmat->segments, M_IOMMU_DMAMAP);
+ free(dmat, M_DEVBUF);
+ dmat = parent;
+ } else
+ dmat = NULL;
+ }
+ }
+out:
+ CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
+ return (error);
+}
+
+static bool
+iommu_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+{
+
+ return (false);
+}
+
+static int
+iommu_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = malloc_domainset(sizeof(*map), M_IOMMU_DMAMAP,
+ DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO);
+ if (map == NULL) {
+ *mapp = NULL;
+ return (ENOMEM);
+ }
+ if (tag->segments == NULL) {
+ tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) *
+ tag->common.nsegments, M_IOMMU_DMAMAP,
+ DOMAINSET_PREF(tag->common.domain), M_NOWAIT);
+ if (tag->segments == NULL) {
+ free_domain(map, M_IOMMU_DMAMAP);
+ *mapp = NULL;
+ return (ENOMEM);
+ }
+ }
+ TAILQ_INIT(&map->map_entries);
+ map->device_tag = tag;
+ map->locked = true;
+ map->cansleep = false;
+ tag->map_count++;
+ *mapp = (bus_dmamap_t)map;
+
+ return (0);
+}
+
+static int
+iommu_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
+{
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ struct iommu_domain *domain;
+
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
+ if (map != NULL) {
+ domain = tag->device->domain;
+ IOMMU_DOMAIN_LOCK(domain);
+ if (!TAILQ_EMPTY(&map->map_entries)) {
+ IOMMU_DOMAIN_UNLOCK(domain);
+ return (EBUSY);
+ }
+ IOMMU_DOMAIN_UNLOCK(domain);
+ free_domain(map, M_IOMMU_DMAMAP);
+ }
+ tag->map_count--;
+ return (0);
+}
+
+
+static int
+iommu_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp)
+{
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ int error, mflags;
+ vm_memattr_t attr;
+
+ error = iommu_bus_dmamap_create(dmat, flags, mapp);
+ if (error != 0)
+ return (error);
+
+ mflags = (flags & BUS_DMA_NOWAIT) != 0 ? M_NOWAIT : M_WAITOK;
+ mflags |= (flags & BUS_DMA_ZERO) != 0 ? M_ZERO : 0;
+ attr = (flags & BUS_DMA_NOCACHE) != 0 ? VM_MEMATTR_UNCACHEABLE :
+ VM_MEMATTR_DEFAULT;
+
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)*mapp;
+
+ if (tag->common.maxsize < PAGE_SIZE &&
+ tag->common.alignment <= tag->common.maxsize &&
+ attr == VM_MEMATTR_DEFAULT) {
+ *vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF,
+ DOMAINSET_PREF(tag->common.domain), mflags);
+ map->flags |= BUS_DMAMAP_IOMMU_MALLOC;
+ } else {
+ *vaddr = (void *)kmem_alloc_attr_domainset(
+ DOMAINSET_PREF(tag->common.domain), tag->common.maxsize,
+ mflags, 0ul, BUS_SPACE_MAXADDR, attr);
+ map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC;
+ }
+ if (*vaddr == NULL) {
+ iommu_bus_dmamap_destroy(dmat, *mapp);
+ *mapp = NULL;
+ return (ENOMEM);
+ }
+ return (0);
+}
+
+static void
+iommu_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
+{
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
+
+ if ((map->flags & BUS_DMAMAP_IOMMU_MALLOC) != 0) {
+ free_domain(vaddr, M_DEVBUF);
+ map->flags &= ~BUS_DMAMAP_IOMMU_MALLOC;
+ } else {
+ KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0,
+		    ("iommu_bus_dmamem_free for non-allocated map %p", map));
+ kmem_free((vm_offset_t)vaddr, tag->common.maxsize);
+ map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC;
+ }
+
+ iommu_bus_dmamap_destroy(dmat, map1);
+}
+
+static int
+iommu_bus_dmamap_load_something1(struct bus_dma_tag_iommu *tag,
+ struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen,
+ int flags, bus_dma_segment_t *segs, int *segp,
+ struct iommu_map_entries_tailq *unroll_list)
+{
+ struct iommu_device *device;
+ struct iommu_domain *domain;
+ struct iommu_map_entry *entry;
+ bus_size_t size;
+ bus_size_t buflen1;
+ int error, idx, iommu_flags, seg;
+
+ KASSERT(offset < IOMMU_PAGE_SIZE, ("offset %d", offset));
+ if (segs == NULL)
+ segs = tag->segments;
+ device = tag->device;
+ domain = device->domain;
+ seg = *segp;
+ error = 0;
+ idx = 0;
+ while (buflen > 0) {
+ seg++;
+ if (seg >= tag->common.nsegments) {
+ error = EFBIG;
+ break;
+ }
+ buflen1 = buflen > tag->common.maxsegsz ?
+ tag->common.maxsegsz : buflen;
+ size = round_page(offset + buflen1);
+
+ /*
+		 * (Too) optimistically allow split if there is more
+		 * than one segment left.
+ */
+ iommu_flags = map->cansleep ? IOMMU_MF_CANWAIT : 0;
+ if (seg + 1 < tag->common.nsegments)
+ iommu_flags |= IOMMU_MF_CANSPLIT;
+
+ error = iommu_map(domain, &tag->common, size, offset,
+ IOMMU_MAP_ENTRY_READ |
+ ((flags & BUS_DMA_NOWRITE) == 0 ? IOMMU_MAP_ENTRY_WRITE: 0),
+ iommu_flags, ma + idx, &entry);
+ if (error != 0)
+ break;
+ if ((iommu_flags & IOMMU_MF_CANSPLIT) != 0) {
+ KASSERT(size >= entry->end - entry->start,
+ ("split increased entry size %jx %jx %jx",
+ (uintmax_t)size, (uintmax_t)entry->start,
+ (uintmax_t)entry->end));
+ size = entry->end - entry->start;
+ if (buflen1 > size)
+ buflen1 = size;
+ } else {
+ KASSERT(entry->end - entry->start == size,
+ ("no split allowed %jx %jx %jx",
+ (uintmax_t)size, (uintmax_t)entry->start,
+ (uintmax_t)entry->end));
+ }
+ if (offset + buflen1 > size)
+ buflen1 = size - offset;
+ if (buflen1 > tag->common.maxsegsz)
+ buflen1 = tag->common.maxsegsz;
+
+ KASSERT(((entry->start + offset) & (tag->common.alignment - 1))
+ == 0,
+ ("alignment failed: device %p start 0x%jx offset %x "
+ "align 0x%jx", device, (uintmax_t)entry->start, offset,
+ (uintmax_t)tag->common.alignment));
+ KASSERT(entry->end <= tag->common.lowaddr ||
+ entry->start >= tag->common.highaddr,
+ ("entry placement failed: device %p start 0x%jx end 0x%jx "
+ "lowaddr 0x%jx highaddr 0x%jx", device,
+ (uintmax_t)entry->start, (uintmax_t)entry->end,
+ (uintmax_t)tag->common.lowaddr,
+ (uintmax_t)tag->common.highaddr));
+ KASSERT(iommu_test_boundary(entry->start + offset, buflen1,
+ tag->common.boundary),
+ ("boundary failed: device %p start 0x%jx end 0x%jx "
+ "boundary 0x%jx", device, (uintmax_t)entry->start,
+ (uintmax_t)entry->end, (uintmax_t)tag->common.boundary));
+ KASSERT(buflen1 <= tag->common.maxsegsz,
+ ("segment too large: device %p start 0x%jx end 0x%jx "
+ "buflen1 0x%jx maxsegsz 0x%jx", device,
+ (uintmax_t)entry->start, (uintmax_t)entry->end,
+ (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz));
+
+ IOMMU_DOMAIN_LOCK(domain);
+ TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
+ entry->flags |= IOMMU_MAP_ENTRY_MAP;
+ IOMMU_DOMAIN_UNLOCK(domain);
+ TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link);
+
+ segs[seg].ds_addr = entry->start + offset;
+ segs[seg].ds_len = buflen1;
+
+ idx += OFF_TO_IDX(trunc_page(offset + buflen1));
+ offset += buflen1;
+ offset &= IOMMU_PAGE_MASK;
+ buflen -= buflen1;
+ }
+ if (error == 0)
+ *segp = seg;
+ return (error);
+}
+
+static int
+iommu_bus_dmamap_load_something(struct bus_dma_tag_iommu *tag,
+ struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen,
+ int flags, bus_dma_segment_t *segs, int *segp)
+{
+ struct iommu_device *device;
+ struct iommu_domain *domain;
+ struct iommu_map_entry *entry, *entry1;
+ struct iommu_map_entries_tailq unroll_list;
+ int error;
+
+ device = tag->device;
+ domain = device->domain;
+ atomic_add_long(&device->loads, 1);
+
+ TAILQ_INIT(&unroll_list);
+ error = iommu_bus_dmamap_load_something1(tag, map, ma, offset,
+ buflen, flags, segs, segp, &unroll_list);
+ if (error != 0) {
+ /*
+ * The busdma interface does not allow us to report
+ * partial buffer load, so unfortunately we have to
+ * revert all work done.
+ */
+ IOMMU_DOMAIN_LOCK(domain);
+ TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link,
+ entry1) {
+ /*
+			 * No entries other than the ones we created
+			 * during the failed run can have been
+			 * inserted in the meantime, since we own the
+			 * device pglock.
+ */
+ TAILQ_REMOVE(&map->map_entries, entry, dmamap_link);
+ TAILQ_REMOVE(&unroll_list, entry, unroll_link);
+ TAILQ_INSERT_TAIL(&domain->unload_entries, entry,
+ dmamap_link);
+ }
+ IOMMU_DOMAIN_UNLOCK(domain);
+ taskqueue_enqueue(domain->iommu->delayed_taskqueue,
+ &domain->unload_task);
+ }
+
+ if (error == ENOMEM && (flags & BUS_DMA_NOWAIT) == 0 &&
+ !map->cansleep)
+ error = EINPROGRESS;
+ if (error == EINPROGRESS)
+ iommu_bus_schedule_dmamap(domain->iommu, map);
+ return (error);
+}
+
+static int
+iommu_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1,
+ struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
+ bus_dma_segment_t *segs, int *segp)
+{
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
+ return (iommu_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen,
+ flags, segs, segp));
+}
+
+static int
+iommu_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1,
+ vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ vm_page_t *ma, fma;
+ vm_paddr_t pstart, pend, paddr;
+ int error, i, ma_cnt, mflags, offset;
+
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
+ pstart = trunc_page(buf);
+ pend = round_page(buf + buflen);
+ offset = buf & PAGE_MASK;
+ ma_cnt = OFF_TO_IDX(pend - pstart);
+ mflags = map->cansleep ? M_WAITOK : M_NOWAIT;
+ ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, mflags);
+ if (ma == NULL)
+ return (ENOMEM);
+ fma = NULL;
+ for (i = 0; i < ma_cnt; i++) {
+ paddr = pstart + ptoa(i);
+ ma[i] = PHYS_TO_VM_PAGE(paddr);
+ if (ma[i] == NULL || VM_PAGE_TO_PHYS(ma[i]) != paddr) {
+ /*
+ * If PHYS_TO_VM_PAGE() returned NULL or the
+			 * vm_page was not initialized, we'll use a
+ * fake page.
+ */
+ if (fma == NULL) {
+ fma = malloc(sizeof(struct vm_page) * ma_cnt,
+ M_DEVBUF, M_ZERO | mflags);
+ if (fma == NULL) {
+ free(ma, M_DEVBUF);
+ return (ENOMEM);
+ }
+ }
+ vm_page_initfake(&fma[i], pstart + ptoa(i),
+ VM_MEMATTR_DEFAULT);
+ ma[i] = &fma[i];
+ }
+ }
+ error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen,
+ flags, segs, segp);
+ free(fma, M_DEVBUF);
+ free(ma, M_DEVBUF);
+ return (error);
+}
+
+static int
+iommu_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf,
+ bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ vm_page_t *ma, fma;
+ vm_paddr_t pstart, pend, paddr;
+ int error, i, ma_cnt, mflags, offset;
+
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
+ pstart = trunc_page((vm_offset_t)buf);
+ pend = round_page((vm_offset_t)buf + buflen);
+ offset = (vm_offset_t)buf & PAGE_MASK;
+ ma_cnt = OFF_TO_IDX(pend - pstart);
+ mflags = map->cansleep ? M_WAITOK : M_NOWAIT;
+ ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, mflags);
+ if (ma == NULL)
+ return (ENOMEM);
+ fma = NULL;
+ for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) {
+ if (pmap == kernel_pmap)
+ paddr = pmap_kextract(pstart);
+ else
+ paddr = pmap_extract(pmap, pstart);
+ ma[i] = PHYS_TO_VM_PAGE(paddr);
+ if (ma[i] == NULL || VM_PAGE_TO_PHYS(ma[i]) != paddr) {
+ /*
+ * If PHYS_TO_VM_PAGE() returned NULL or the
+			 * vm_page was not initialized, we'll use a
+ * fake page.
+ */
+ if (fma == NULL) {
+ fma = malloc(sizeof(struct vm_page) * ma_cnt,
+ M_DEVBUF, M_ZERO | mflags);
+ if (fma == NULL) {
+ free(ma, M_DEVBUF);
+ return (ENOMEM);
+ }
+ }
+ vm_page_initfake(&fma[i], paddr, VM_MEMATTR_DEFAULT);
+ ma[i] = &fma[i];
+ }
+ }
+ error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen,
+ flags, segs, segp);
+ free(ma, M_DEVBUF);
+ free(fma, M_DEVBUF);
+ return (error);
+}
+
+static void
+iommu_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1,
+ struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
+{
+ struct bus_dmamap_iommu *map;
+
+ if (map1 == NULL)
+ return;
+ map = (struct bus_dmamap_iommu *)map1;
+ map->mem = *mem;
+ map->device_tag = (struct bus_dma_tag_iommu *)dmat;
+ map->callback = callback;
+ map->callback_arg = callback_arg;
+}
+
+static bus_dma_segment_t *
+iommu_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1,
+ bus_dma_segment_t *segs, int nsegs, int error)
+{
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
+
+ if (!map->locked) {
+ KASSERT(map->cansleep,
+ ("map not locked and not sleepable context %p", map));
+
+ /*
+ * We are called from the delayed context. Relock the
+ * driver.
+ */
+ (tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_LOCK);
+ map->locked = true;
+ }
+
+ if (segs == NULL)
+ segs = tag->segments;
+ return (segs);
+}
+
+/*
+ * The limitations of the busdma KPI force the IOMMU code to perform
+ * the actual unload, consisting of the unmapping of the map entries'
+ * page tables, from a delayed context on i386, since mapping a page
+ * table page might require a sleep to be successful.  The unfortunate
+ * consequence is that DMA requests can still be served for some time
+ * after the bus_dmamap_unload() call has returned.
+ *
+ * On amd64 and aarch64, we assume that sf allocation cannot fail.
+ */
+static void
+iommu_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
+{
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ struct iommu_device *device;
+ struct iommu_domain *domain;
+#if defined(__amd64__) || defined(__aarch64__)
+ struct iommu_map_entries_tailq entries;
+#endif
+
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ map = (struct bus_dmamap_iommu *)map1;
+ device = tag->device;
+ domain = device->domain;
+ atomic_add_long(&device->unloads, 1);
+
+#if defined(__i386__)
+ IOMMU_DOMAIN_LOCK(domain);
+ TAILQ_CONCAT(&domain->unload_entries, &map->map_entries, dmamap_link);
+ IOMMU_DOMAIN_UNLOCK(domain);
+ taskqueue_enqueue(domain->iommu->delayed_taskqueue,
+ &domain->unload_task);
+#else /* defined(__amd64__) || defined(__aarch64__) */
+ TAILQ_INIT(&entries);
+ IOMMU_DOMAIN_LOCK(domain);
+ TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
+ IOMMU_DOMAIN_UNLOCK(domain);
+ THREAD_NO_SLEEPING();
+ iommu_unmap(domain, &entries, false);
+ THREAD_SLEEPING_OK();
+ KASSERT(TAILQ_EMPTY(&entries), ("lazy iommu_device_unload %p", device));
+#endif
+}
+
+static void
+iommu_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dmasync_op_t op)
+{
+}
+
+struct bus_dma_impl bus_dma_iommu_impl = {
+ .tag_create = iommu_bus_dma_tag_create,
+ .tag_destroy = iommu_bus_dma_tag_destroy,
+ .tag_set_domain = iommu_bus_dma_tag_set_domain,
+ .id_mapped = iommu_bus_dma_id_mapped,
+ .map_create = iommu_bus_dmamap_create,
+ .map_destroy = iommu_bus_dmamap_destroy,
+ .mem_alloc = iommu_bus_dmamem_alloc,
+ .mem_free = iommu_bus_dmamem_free,
+ .load_phys = iommu_bus_dmamap_load_phys,
+ .load_buffer = iommu_bus_dmamap_load_buffer,
+ .load_ma = iommu_bus_dmamap_load_ma,
+ .map_waitok = iommu_bus_dmamap_waitok,
+ .map_complete = iommu_bus_dmamap_complete,
+ .map_unload = iommu_bus_dmamap_unload,
+ .map_sync = iommu_bus_dmamap_sync,
+};
+
+static void
+iommu_bus_task_dmamap(void *arg, int pending)
+{
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ struct iommu_unit *unit;
+
+ unit = arg;
+ IOMMU_LOCK(unit);
+ while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) {
+ TAILQ_REMOVE(&unit->delayed_maps, map, delay_link);
+ IOMMU_UNLOCK(unit);
+ tag = map->device_tag;
+ map->cansleep = true;
+ map->locked = false;
+ bus_dmamap_load_mem((bus_dma_tag_t)tag, (bus_dmamap_t)map,
+ &map->mem, map->callback, map->callback_arg,
+ BUS_DMA_WAITOK);
+ map->cansleep = false;
+ if (map->locked) {
+ (tag->common.lockfunc)(tag->common.lockfuncarg,
+ BUS_DMA_UNLOCK);
+ } else
+ map->locked = true;
+ map->cansleep = false;
+ IOMMU_LOCK(unit);
+ }
+ IOMMU_UNLOCK(unit);
+}
+
+static void
+iommu_bus_schedule_dmamap(struct iommu_unit *unit, struct bus_dmamap_iommu *map)
+{
+
+ map->locked = false;
+ IOMMU_LOCK(unit);
+ TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);
+ IOMMU_UNLOCK(unit);
+ taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task);
+}
+
+int
+iommu_init_busdma(struct iommu_unit *unit)
+{
+
+ unit->dma_enabled = 1;
+ TUNABLE_INT_FETCH("hw.iommu.dma", &unit->dma_enabled);
+ TAILQ_INIT(&unit->delayed_maps);
+ TASK_INIT(&unit->dmamap_load_task, 0, iommu_bus_task_dmamap, unit);
+ unit->delayed_taskqueue = taskqueue_create("iommu", M_WAITOK,
+ taskqueue_thread_enqueue, &unit->delayed_taskqueue);
+ taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK,
+ "iommu%d busdma taskq", unit->unit);
+ return (0);
+}
+
+void
+iommu_fini_busdma(struct iommu_unit *unit)
+{
+
+ if (unit->delayed_taskqueue == NULL)
+ return;
+
+ taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task);
+ taskqueue_free(unit->delayed_taskqueue);
+ unit->delayed_taskqueue = NULL;
+}
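
None of the above changes the driver-visible busdma(9) KPI: once a device's tag chain is rooted in an IOMMU tag, the ordinary calls are dispatched through bus_dma_iommu_impl. A hedged sketch of that driver-side usage (the alignment, sizes, and output parameters are illustrative, not taken from this review):

/*
 * Illustrative only: standard busdma(9) usage that, with this backend
 * installed, ends up in iommu_bus_dma_tag_create() and
 * iommu_bus_dmamem_alloc() above.
 */
static int
example_dma_setup(device_t dev, bus_dma_tag_t *tagp, bus_dmamap_t *mapp,
    void **ringp)
{
	int error;

	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* inherit tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    PAGE_SIZE, 1, PAGE_SIZE,	/* maxsize, nsegments, maxsegsz */
	    0, NULL, NULL, tagp);
	if (error != 0)
		return (error);
	return (bus_dmamem_alloc(*tagp, ringp,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, mapp));
}
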
Index: sys/x86/iommu/busdma_dmar.h
===================================================================
--- sys/x86/iommu/busdma_dmar.h
+++ sys/x86/iommu/busdma_dmar.h
@@ -34,34 +34,6 @@
#ifndef __X86_IOMMU_BUSDMA_DMAR_H
#define __X86_IOMMU_BUSDMA_DMAR_H
-struct dmar_map_entry;
-TAILQ_HEAD(dmar_map_entries_tailq, dmar_map_entry);
-
-struct bus_dma_tag_dmar {
- struct bus_dma_tag_common common;
- struct dmar_ctx *ctx;
- device_t owner;
- int map_count;
- bus_dma_segment_t *segments;
-};
-
-struct bus_dmamap_dmar {
- struct bus_dma_tag_dmar *tag;
- struct memdesc mem;
- bus_dmamap_callback_t *callback;
- void *callback_arg;
- struct dmar_map_entries_tailq map_entries;
- TAILQ_ENTRY(bus_dmamap_dmar) delay_link;
- bool locked;
- bool cansleep;
- int flags;
-};
-
-#define BUS_DMAMAP_DMAR_MALLOC 0x0001
-#define BUS_DMAMAP_DMAR_KMEM_ALLOC 0x0002
-
-extern struct bus_dma_impl bus_dma_dmar_impl;
-
-bus_dma_tag_t acpi_iommu_get_dma_tag(device_t dev, device_t child);
+#include <dev/iommu/busdma_iommu.h>
#endif
Index: sys/x86/iommu/busdma_dmar.c
===================================================================
--- sys/x86/iommu/busdma_dmar.c
+++ sys/x86/iommu/busdma_dmar.c
@@ -68,45 +68,6 @@
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>
-/*
- * busdma_dmar.c, the implementation of the busdma(9) interface using
- * DMAR units from Intel VT-d.
- */
-
-static bool
-dmar_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
-{
- char str[128], *env;
- int default_bounce;
- bool ret;
- static const char bounce_str[] = "bounce";
- static const char dmar_str[] = "dmar";
-
- default_bounce = 0;
- env = kern_getenv("hw.busdma.default");
- if (env != NULL) {
- if (strcmp(env, bounce_str) == 0)
- default_bounce = 1;
- else if (strcmp(env, dmar_str) == 0)
- default_bounce = 0;
- freeenv(env);
- }
-
- snprintf(str, sizeof(str), "hw.busdma.pci%d.%d.%d.%d",
- domain, bus, slot, func);
- env = kern_getenv(str);
- if (env == NULL)
- return (default_bounce != 0);
- if (strcmp(env, bounce_str) == 0)
- ret = true;
- else if (strcmp(env, dmar_str) == 0)
- ret = false;
- else
- ret = default_bounce != 0;
- freeenv(env);
- return (ret);
-}
-
/*
* Given original device, find the requester ID that will be seen by
* the DMAR unit and used for page table lookup. PCI bridges may take
@@ -228,71 +189,10 @@
return (requester);
}
-struct dmar_ctx *
-dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr)
-{
- device_t requester;
- struct dmar_ctx *ctx;
- bool disabled;
- uint16_t rid;
-
- requester = dmar_get_requester(dev, &rid);
-
- /*
- * If the user requested the IOMMU disabled for the device, we
- * cannot disable the DMAR, due to possibility of other
- * devices on the same DMAR still requiring translation.
- * Instead provide the identity mapping for the device
- * context.
- */
- disabled = dmar_bus_dma_is_dev_disabled(pci_get_domain(requester),
- pci_get_bus(requester), pci_get_slot(requester),
- pci_get_function(requester));
- ctx = dmar_get_ctx_for_dev(dmar, requester, rid, disabled, rmrr);
- if (ctx == NULL)
- return (NULL);
- if (disabled) {
- /*
- * Keep the first reference on context, release the
- * later refs.
- */
- DMAR_LOCK(dmar);
- if ((ctx->flags & DMAR_CTX_DISABLED) == 0) {
- ctx->flags |= DMAR_CTX_DISABLED;
- DMAR_UNLOCK(dmar);
- } else {
- dmar_free_ctx_locked(dmar, ctx);
- }
- ctx = NULL;
- }
- return (ctx);
-}
-
-bus_dma_tag_t
-acpi_iommu_get_dma_tag(device_t dev, device_t child)
-{
- struct dmar_unit *dmar;
- struct dmar_ctx *ctx;
- bus_dma_tag_t res;
-
- dmar = dmar_find(child, bootverbose);
- /* Not in scope of any DMAR ? */
- if (dmar == NULL)
- return (NULL);
- if (!dmar->dma_enabled)
- return (NULL);
- dmar_quirks_pre_use(dmar);
- dmar_instantiate_rmrr_ctxs(dmar);
-
- ctx = dmar_instantiate_ctx(dmar, child, false);
- res = ctx == NULL ? NULL : (bus_dma_tag_t)&ctx->ctx_tag;
- return (res);
-}
-
bool
bus_dma_dmar_set_buswide(device_t dev)
{
- struct dmar_unit *dmar;
+ struct iommu_unit *dmar;
device_t parent;
u_int busno, slot, func;
@@ -317,673 +217,16 @@
return (true);
}
-static MALLOC_DEFINE(M_DMAR_DMAMAP, "dmar_dmamap", "Intel DMAR DMA Map");
-
-static void dmar_bus_schedule_dmamap(struct dmar_unit *unit,
- struct bus_dmamap_dmar *map);
-
-static int
-dmar_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
- bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
- bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
- int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
- void *lockfuncarg, bus_dma_tag_t *dmat)
-{
- struct bus_dma_tag_dmar *newtag, *oldtag;
- int error;
-
- *dmat = NULL;
- error = common_bus_dma_tag_create(parent != NULL ?
- &((struct bus_dma_tag_dmar *)parent)->common : NULL, alignment,
- boundary, lowaddr, highaddr, filter, filterarg, maxsize,
- nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
- sizeof(struct bus_dma_tag_dmar), (void **)&newtag);
- if (error != 0)
- goto out;
-
- oldtag = (struct bus_dma_tag_dmar *)parent;
- newtag->common.impl = &bus_dma_dmar_impl;
- newtag->ctx = oldtag->ctx;
- newtag->owner = oldtag->owner;
-
- *dmat = (bus_dma_tag_t)newtag;
-out:
- CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
- __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
- error);
- return (error);
-}
-
-static int
-dmar_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
-{
-
- return (0);
-}
-
-static int
-dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
-{
- struct bus_dma_tag_dmar *dmat, *dmat_copy, *parent;
- int error;
-
- error = 0;
- dmat_copy = dmat = (struct bus_dma_tag_dmar *)dmat1;
-
- if (dmat != NULL) {
- if (dmat->map_count != 0) {
- error = EBUSY;
- goto out;
- }
- while (dmat != NULL) {
- parent = (struct bus_dma_tag_dmar *)dmat->common.parent;
- if (atomic_fetchadd_int(&dmat->common.ref_count, -1) ==
- 1) {
- if (dmat == &dmat->ctx->ctx_tag)
- dmar_free_ctx(dmat->ctx);
- free_domain(dmat->segments, M_DMAR_DMAMAP);
- free(dmat, M_DEVBUF);
- dmat = parent;
- } else
- dmat = NULL;
- }
- }
-out:
- CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
- return (error);
-}
-
-static bool
-dmar_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
-{
-
- return (false);
-}
-
-static int
-dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
-{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
-
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = malloc_domainset(sizeof(*map), M_DMAR_DMAMAP,
- DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO);
- if (map == NULL) {
- *mapp = NULL;
- return (ENOMEM);
- }
- if (tag->segments == NULL) {
- tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) *
- tag->common.nsegments, M_DMAR_DMAMAP,
- DOMAINSET_PREF(tag->common.domain), M_NOWAIT);
- if (tag->segments == NULL) {
- free_domain(map, M_DMAR_DMAMAP);
- *mapp = NULL;
- return (ENOMEM);
- }
- }
- TAILQ_INIT(&map->map_entries);
- map->tag = tag;
- map->locked = true;
- map->cansleep = false;
- tag->map_count++;
- *mapp = (bus_dmamap_t)map;
-
- return (0);
-}
-
-static int
-dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
-{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- struct dmar_domain *domain;
-
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
- if (map != NULL) {
- domain = tag->ctx->domain;
- DMAR_DOMAIN_LOCK(domain);
- if (!TAILQ_EMPTY(&map->map_entries)) {
- DMAR_DOMAIN_UNLOCK(domain);
- return (EBUSY);
- }
- DMAR_DOMAIN_UNLOCK(domain);
- free_domain(map, M_DMAR_DMAMAP);
- }
- tag->map_count--;
- return (0);
-}
-
-
-static int
-dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
- bus_dmamap_t *mapp)
-{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- int error, mflags;
- vm_memattr_t attr;
-
- error = dmar_bus_dmamap_create(dmat, flags, mapp);
- if (error != 0)
- return (error);
-
- mflags = (flags & BUS_DMA_NOWAIT) != 0 ? M_NOWAIT : M_WAITOK;
- mflags |= (flags & BUS_DMA_ZERO) != 0 ? M_ZERO : 0;
- attr = (flags & BUS_DMA_NOCACHE) != 0 ? VM_MEMATTR_UNCACHEABLE :
- VM_MEMATTR_DEFAULT;
-
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)*mapp;
-
- if (tag->common.maxsize < PAGE_SIZE &&
- tag->common.alignment <= tag->common.maxsize &&
- attr == VM_MEMATTR_DEFAULT) {
- *vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF,
- DOMAINSET_PREF(tag->common.domain), mflags);
- map->flags |= BUS_DMAMAP_DMAR_MALLOC;
- } else {
- *vaddr = (void *)kmem_alloc_attr_domainset(
- DOMAINSET_PREF(tag->common.domain), tag->common.maxsize,
- mflags, 0ul, BUS_SPACE_MAXADDR, attr);
- map->flags |= BUS_DMAMAP_DMAR_KMEM_ALLOC;
- }
- if (*vaddr == NULL) {
- dmar_bus_dmamap_destroy(dmat, *mapp);
- *mapp = NULL;
- return (ENOMEM);
- }
- return (0);
-}
-
-static void
-dmar_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
-{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
-
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
-
- if ((map->flags & BUS_DMAMAP_DMAR_MALLOC) != 0) {
- free_domain(vaddr, M_DEVBUF);
- map->flags &= ~BUS_DMAMAP_DMAR_MALLOC;
- } else {
- KASSERT((map->flags & BUS_DMAMAP_DMAR_KMEM_ALLOC) != 0,
- ("dmar_bus_dmamem_free for non alloced map %p", map));
- kmem_free((vm_offset_t)vaddr, tag->common.maxsize);
- map->flags &= ~BUS_DMAMAP_DMAR_KMEM_ALLOC;
- }
-
- dmar_bus_dmamap_destroy(dmat, map1);
-}
-
-static int
-dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
- struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen,
- int flags, bus_dma_segment_t *segs, int *segp,
- struct dmar_map_entries_tailq *unroll_list)
-{
- struct dmar_ctx *ctx;
- struct dmar_domain *domain;
- struct dmar_map_entry *entry;
- dmar_gaddr_t size;
- bus_size_t buflen1;
- int error, idx, gas_flags, seg;
-
- KASSERT(offset < DMAR_PAGE_SIZE, ("offset %d", offset));
- if (segs == NULL)
- segs = tag->segments;
- ctx = tag->ctx;
- domain = ctx->domain;
- seg = *segp;
- error = 0;
- idx = 0;
- while (buflen > 0) {
- seg++;
- if (seg >= tag->common.nsegments) {
- error = EFBIG;
- break;
- }
- buflen1 = buflen > tag->common.maxsegsz ?
- tag->common.maxsegsz : buflen;
- size = round_page(offset + buflen1);
-
- /*
- * (Too) optimistically allow split if there are more
- * then one segments left.
- */
- gas_flags = map->cansleep ? DMAR_GM_CANWAIT : 0;
- if (seg + 1 < tag->common.nsegments)
- gas_flags |= DMAR_GM_CANSPLIT;
-
- error = dmar_gas_map(domain, &tag->common, size, offset,
- DMAR_MAP_ENTRY_READ |
- ((flags & BUS_DMA_NOWRITE) == 0 ? DMAR_MAP_ENTRY_WRITE : 0),
- gas_flags, ma + idx, &entry);
- if (error != 0)
- break;
- if ((gas_flags & DMAR_GM_CANSPLIT) != 0) {
- KASSERT(size >= entry->end - entry->start,
- ("split increased entry size %jx %jx %jx",
- (uintmax_t)size, (uintmax_t)entry->start,
- (uintmax_t)entry->end));
- size = entry->end - entry->start;
- if (buflen1 > size)
- buflen1 = size;
- } else {
- KASSERT(entry->end - entry->start == size,
- ("no split allowed %jx %jx %jx",
- (uintmax_t)size, (uintmax_t)entry->start,
- (uintmax_t)entry->end));
- }
- if (offset + buflen1 > size)
- buflen1 = size - offset;
- if (buflen1 > tag->common.maxsegsz)
- buflen1 = tag->common.maxsegsz;
-
- KASSERT(((entry->start + offset) & (tag->common.alignment - 1))
- == 0,
- ("alignment failed: ctx %p start 0x%jx offset %x "
- "align 0x%jx", ctx, (uintmax_t)entry->start, offset,
- (uintmax_t)tag->common.alignment));
- KASSERT(entry->end <= tag->common.lowaddr ||
- entry->start >= tag->common.highaddr,
- ("entry placement failed: ctx %p start 0x%jx end 0x%jx "
- "lowaddr 0x%jx highaddr 0x%jx", ctx,
- (uintmax_t)entry->start, (uintmax_t)entry->end,
- (uintmax_t)tag->common.lowaddr,
- (uintmax_t)tag->common.highaddr));
- KASSERT(dmar_test_boundary(entry->start + offset, buflen1,
- tag->common.boundary),
- ("boundary failed: ctx %p start 0x%jx end 0x%jx "
- "boundary 0x%jx", ctx, (uintmax_t)entry->start,
- (uintmax_t)entry->end, (uintmax_t)tag->common.boundary));
- KASSERT(buflen1 <= tag->common.maxsegsz,
- ("segment too large: ctx %p start 0x%jx end 0x%jx "
- "buflen1 0x%jx maxsegsz 0x%jx", ctx,
- (uintmax_t)entry->start, (uintmax_t)entry->end,
- (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz));
-
- DMAR_DOMAIN_LOCK(domain);
- TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
- entry->flags |= DMAR_MAP_ENTRY_MAP;
- DMAR_DOMAIN_UNLOCK(domain);
- TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link);
-
- segs[seg].ds_addr = entry->start + offset;
- segs[seg].ds_len = buflen1;
-
- idx += OFF_TO_IDX(trunc_page(offset + buflen1));
- offset += buflen1;
- offset &= DMAR_PAGE_MASK;
- buflen -= buflen1;
- }
- if (error == 0)
- *segp = seg;
- return (error);
-}
-
-static int
-dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
- struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen,
- int flags, bus_dma_segment_t *segs, int *segp)
-{
- struct dmar_ctx *ctx;
- struct dmar_domain *domain;
- struct dmar_map_entry *entry, *entry1;
- struct dmar_map_entries_tailq unroll_list;
- int error;
-
- ctx = tag->ctx;
- domain = ctx->domain;
- atomic_add_long(&ctx->loads, 1);
-
- TAILQ_INIT(&unroll_list);
- error = dmar_bus_dmamap_load_something1(tag, map, ma, offset,
- buflen, flags, segs, segp, &unroll_list);
- if (error != 0) {
- /*
- * The busdma interface does not allow us to report
- * partial buffer load, so unfortunately we have to
- * revert all work done.
- */
- DMAR_DOMAIN_LOCK(domain);
- TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link,
- entry1) {
- /*
- * No entries other than what we have created
- * during the failed run might have been
- * inserted there in between, since we own ctx
- * pglock.
- */
- TAILQ_REMOVE(&map->map_entries, entry, dmamap_link);
- TAILQ_REMOVE(&unroll_list, entry, unroll_link);
- TAILQ_INSERT_TAIL(&domain->unload_entries, entry,
- dmamap_link);
- }
- DMAR_DOMAIN_UNLOCK(domain);
- taskqueue_enqueue(domain->dmar->delayed_taskqueue,
- &domain->unload_task);
- }
-
- if (error == ENOMEM && (flags & BUS_DMA_NOWAIT) == 0 &&
- !map->cansleep)
- error = EINPROGRESS;
- if (error == EINPROGRESS)
- dmar_bus_schedule_dmamap(domain->dmar, map);
- return (error);
-}
-
-static int
-dmar_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1,
- struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
- bus_dma_segment_t *segs, int *segp)
-{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
-
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
- return (dmar_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen,
- flags, segs, segp));
-}
-
-static int
-dmar_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1,
- vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
- int *segp)
-{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- vm_page_t *ma, fma;
- vm_paddr_t pstart, pend, paddr;
- int error, i, ma_cnt, mflags, offset;
-
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
- pstart = trunc_page(buf);
- pend = round_page(buf + buflen);
- offset = buf & PAGE_MASK;
- ma_cnt = OFF_TO_IDX(pend - pstart);
- mflags = map->cansleep ? M_WAITOK : M_NOWAIT;
- ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, mflags);
- if (ma == NULL)
- return (ENOMEM);
- fma = NULL;
- for (i = 0; i < ma_cnt; i++) {
- paddr = pstart + ptoa(i);
- ma[i] = PHYS_TO_VM_PAGE(paddr);
- if (ma[i] == NULL || VM_PAGE_TO_PHYS(ma[i]) != paddr) {
- /*
- * If PHYS_TO_VM_PAGE() returned NULL or the
- * vm_page was not initialized we'll use a
- * fake page.
- */
- if (fma == NULL) {
- fma = malloc(sizeof(struct vm_page) * ma_cnt,
- M_DEVBUF, M_ZERO | mflags);
- if (fma == NULL) {
- free(ma, M_DEVBUF);
- return (ENOMEM);
- }
- }
- vm_page_initfake(&fma[i], pstart + ptoa(i),
- VM_MEMATTR_DEFAULT);
- ma[i] = &fma[i];
- }
- }
- error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,
- flags, segs, segp);
- free(fma, M_DEVBUF);
- free(ma, M_DEVBUF);
- return (error);
-}
-
-static int
-dmar_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf,
- bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
- int *segp)
-{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- vm_page_t *ma, fma;
- vm_paddr_t pstart, pend, paddr;
- int error, i, ma_cnt, mflags, offset;
-
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
- pstart = trunc_page((vm_offset_t)buf);
- pend = round_page((vm_offset_t)buf + buflen);
- offset = (vm_offset_t)buf & PAGE_MASK;
- ma_cnt = OFF_TO_IDX(pend - pstart);
- mflags = map->cansleep ? M_WAITOK : M_NOWAIT;
- ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, mflags);
- if (ma == NULL)
- return (ENOMEM);
- fma = NULL;
- for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) {
- if (pmap == kernel_pmap)
- paddr = pmap_kextract(pstart);
- else
- paddr = pmap_extract(pmap, pstart);
- ma[i] = PHYS_TO_VM_PAGE(paddr);
- if (ma[i] == NULL || VM_PAGE_TO_PHYS(ma[i]) != paddr) {
- /*
- * If PHYS_TO_VM_PAGE() returned NULL or the
- * vm_page was not initialized we'll use a
- * fake page.
- */
- if (fma == NULL) {
- fma = malloc(sizeof(struct vm_page) * ma_cnt,
- M_DEVBUF, M_ZERO | mflags);
- if (fma == NULL) {
- free(ma, M_DEVBUF);
- return (ENOMEM);
- }
- }
- vm_page_initfake(&fma[i], paddr, VM_MEMATTR_DEFAULT);
- ma[i] = &fma[i];
- }
- }
- error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,
- flags, segs, segp);
- free(ma, M_DEVBUF);
- free(fma, M_DEVBUF);
- return (error);
-}
-
-static void
-dmar_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1,
- struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
-{
- struct bus_dmamap_dmar *map;
-
- if (map1 == NULL)
- return;
- map = (struct bus_dmamap_dmar *)map1;
- map->mem = *mem;
- map->tag = (struct bus_dma_tag_dmar *)dmat;
- map->callback = callback;
- map->callback_arg = callback_arg;
-}
-
-static bus_dma_segment_t *
-dmar_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1,
- bus_dma_segment_t *segs, int nsegs, int error)
-{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
-
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
-
- if (!map->locked) {
- KASSERT(map->cansleep,
- ("map not locked and not sleepable context %p", map));
-
- /*
- * We are called from the delayed context. Relock the
- * driver.
- */
- (tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_LOCK);
- map->locked = true;
- }
-
- if (segs == NULL)
- segs = tag->segments;
- return (segs);
-}
-
-/*
- * The limitations of busdma KPI forces the dmar to perform the actual
- * unload, consisting of the unmapping of the map entries page tables,
- * from the delayed context on i386, since page table page mapping
- * might require a sleep to be successfull. The unfortunate
- * consequence is that the DMA requests can be served some time after
- * the bus_dmamap_unload() call returned.
- *
- * On amd64, we assume that sf allocation cannot fail.
- */
-static void
-dmar_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
-{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- struct dmar_ctx *ctx;
- struct dmar_domain *domain;
-#if defined(__amd64__)
- struct dmar_map_entries_tailq entries;
-#endif
-
- tag = (struct bus_dma_tag_dmar *)dmat;
- map = (struct bus_dmamap_dmar *)map1;
- ctx = tag->ctx;
- domain = ctx->domain;
- atomic_add_long(&ctx->unloads, 1);
-
-#if defined(__i386__)
- DMAR_DOMAIN_LOCK(domain);
- TAILQ_CONCAT(&domain->unload_entries, &map->map_entries, dmamap_link);
- DMAR_DOMAIN_UNLOCK(domain);
- taskqueue_enqueue(domain->dmar->delayed_taskqueue,
- &domain->unload_task);
-#else /* defined(__amd64__) */
- TAILQ_INIT(&entries);
- DMAR_DOMAIN_LOCK(domain);
- TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
- DMAR_DOMAIN_UNLOCK(domain);
- THREAD_NO_SLEEPING();
- dmar_domain_unload(domain, &entries, false);
- THREAD_SLEEPING_OK();
- KASSERT(TAILQ_EMPTY(&entries), ("lazy dmar_ctx_unload %p", ctx));
-#endif
-}
-
-static void
-dmar_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
- bus_dmasync_op_t op)
-{
-}
-
-struct bus_dma_impl bus_dma_dmar_impl = {
- .tag_create = dmar_bus_dma_tag_create,
- .tag_destroy = dmar_bus_dma_tag_destroy,
- .tag_set_domain = dmar_bus_dma_tag_set_domain,
- .id_mapped = dmar_bus_dma_id_mapped,
- .map_create = dmar_bus_dmamap_create,
- .map_destroy = dmar_bus_dmamap_destroy,
- .mem_alloc = dmar_bus_dmamem_alloc,
- .mem_free = dmar_bus_dmamem_free,
- .load_phys = dmar_bus_dmamap_load_phys,
- .load_buffer = dmar_bus_dmamap_load_buffer,
- .load_ma = dmar_bus_dmamap_load_ma,
- .map_waitok = dmar_bus_dmamap_waitok,
- .map_complete = dmar_bus_dmamap_complete,
- .map_unload = dmar_bus_dmamap_unload,
- .map_sync = dmar_bus_dmamap_sync,
-};
-
-static void
-dmar_bus_task_dmamap(void *arg, int pending)
-{
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- struct dmar_unit *unit;
-
- unit = arg;
- DMAR_LOCK(unit);
- while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) {
- TAILQ_REMOVE(&unit->delayed_maps, map, delay_link);
- DMAR_UNLOCK(unit);
- tag = map->tag;
- map->cansleep = true;
- map->locked = false;
- bus_dmamap_load_mem((bus_dma_tag_t)tag, (bus_dmamap_t)map,
- &map->mem, map->callback, map->callback_arg,
- BUS_DMA_WAITOK);
- map->cansleep = false;
- if (map->locked) {
- (tag->common.lockfunc)(tag->common.lockfuncarg,
- BUS_DMA_UNLOCK);
- } else
- map->locked = true;
- map->cansleep = false;
- DMAR_LOCK(unit);
- }
- DMAR_UNLOCK(unit);
-}
-
-static void
-dmar_bus_schedule_dmamap(struct dmar_unit *unit, struct bus_dmamap_dmar *map)
-{
-
- map->locked = false;
- DMAR_LOCK(unit);
- TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);
- DMAR_UNLOCK(unit);
- taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task);
-}
-
-int
-dmar_init_busdma(struct dmar_unit *unit)
-{
-
- unit->dma_enabled = 1;
- TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
- TAILQ_INIT(&unit->delayed_maps);
- TASK_INIT(&unit->dmamap_load_task, 0, dmar_bus_task_dmamap, unit);
- unit->delayed_taskqueue = taskqueue_create("dmar", M_WAITOK,
- taskqueue_thread_enqueue, &unit->delayed_taskqueue);
- taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK,
- "dmar%d busdma taskq", unit->unit);
- return (0);
-}
-
-void
-dmar_fini_busdma(struct dmar_unit *unit)
-{
-
- if (unit->delayed_taskqueue == NULL)
- return;
-
- taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task);
- taskqueue_free(unit->delayed_taskqueue);
- unit->delayed_taskqueue = NULL;
-}
-
int
bus_dma_dmar_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map1,
vm_paddr_t start, vm_size_t length, int flags)
{
struct bus_dma_tag_common *tc;
- struct bus_dma_tag_dmar *tag;
- struct bus_dmamap_dmar *map;
- struct dmar_ctx *ctx;
- struct dmar_domain *domain;
- struct dmar_map_entry *entry;
+ struct bus_dma_tag_iommu *tag;
+ struct bus_dmamap_iommu *map;
+ struct iommu_device *ctx;
+ struct iommu_domain *domain;
+ struct iommu_map_entry *entry;
vm_page_t *ma;
vm_size_t i;
int error;
@@ -996,13 +239,13 @@
MPASS((flags & ~(BUS_DMA_NOWAIT | BUS_DMA_NOWRITE)) == 0);
tc = (struct bus_dma_tag_common *)dmat;
- if (tc->impl != &bus_dma_dmar_impl)
+ if (tc->impl != &bus_dma_iommu_impl)
return (0);
- tag = (struct bus_dma_tag_dmar *)dmat;
- ctx = tag->ctx;
+ tag = (struct bus_dma_tag_iommu *)dmat;
+ ctx = tag->device;
domain = ctx->domain;
- map = (struct bus_dmamap_dmar *)map1;
+ map = (struct bus_dmamap_iommu *)map1;
waitok = (flags & BUS_DMA_NOWAIT) != 0;
entry = dmar_gas_alloc_entry(domain, waitok ? 0 : DMAR_PGF_WAITOK);
@@ -1020,14 +263,14 @@
ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
VM_MEMATTR_DEFAULT);
}
- error = dmar_gas_map_region(domain, entry, DMAR_MAP_ENTRY_READ |
- ((flags & BUS_DMA_NOWRITE) ? 0 : DMAR_MAP_ENTRY_WRITE),
- waitok ? DMAR_GM_CANWAIT : 0, ma);
+ error = dmar_gas_map_region(domain, entry, IOMMU_MAP_ENTRY_READ |
+ ((flags & BUS_DMA_NOWRITE) ? 0 : IOMMU_MAP_ENTRY_WRITE),
+ waitok ? IOMMU_MF_CANWAIT : 0, ma);
if (error == 0) {
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
- entry->flags |= DMAR_MAP_ENTRY_MAP;
- DMAR_DOMAIN_UNLOCK(domain);
+ entry->flags |= IOMMU_MAP_ENTRY_MAP;
+ IOMMU_DOMAIN_UNLOCK(domain);
} else {
dmar_domain_unload_entry(entry, true);
}
@@ -1036,3 +279,62 @@
free(ma, M_TEMP);
return (error);
}
+
+int
+iommu_unmap(struct iommu_domain *domain,
+ struct iommu_map_entries_tailq *entries, bool cansleep)
+{
+
+ dmar_domain_unload(domain, entries, cansleep);
+
+ return (0);
+}
+
+int
+iommu_map(struct iommu_domain *domain,
+ const struct bus_dma_tag_common *common,
+ bus_size_t size, int offset,
+ int eflags, int iommu_flags,
+ vm_page_t *ma, struct iommu_map_entry **entry)
+{
+ int ret;
+
+ ret = dmar_gas_map(domain, common, (dmar_gaddr_t)size, offset,
+ eflags, iommu_flags, ma, entry);
+
+ return (ret);
+}
+
+struct iommu_device *
+iommu_get_device(struct iommu_unit *iommu, device_t dev,
+ uint16_t rid, bool id_mapped, bool rmrr_init)
+{
+
+ return (dmar_get_ctx_for_dev(iommu, dev, rid, id_mapped, rmrr_init));
+}
+
+int
+iommu_free_device(struct iommu_device *device)
+{
+
+ dmar_free_ctx(device);
+
+ return (0);
+}
+
+int
+iommu_free_device_locked(struct iommu_unit *dmar, struct iommu_device *ctx)
+{
+
+ dmar_free_ctx_locked(dmar, ctx);
+
+ return (0);
+}
+
+struct iommu_unit *
+iommu_find(device_t dev, bool verbose)
+{
+
+ return (dmar_find(dev, verbose));
+}
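
The block above completes the thin machine-independent shim: each new iommu_*() function simply forwards to its dmar_*() counterpart, so the Intel backend stays unchanged while consumers migrate to the neutral names. Below is a minimal sketch of how a consumer might drive the shim end to end; everything not declared in this diff (my_rid, my_pages, the error policy) is an illustrative assumption, not part of the change:

/*
 * Hypothetical walk through the new shim: find the IOMMU unit for a
 * device, attach the device, map a run of pages, unmap, detach.
 */
static int
shim_usage_sketch(device_t dev, uint16_t my_rid, vm_page_t *my_pages,
    bus_size_t size)
{
	struct iommu_unit *unit;
	struct iommu_device *ctx;
	struct iommu_map_entry *entry;
	struct iommu_map_entries_tailq entries;
	int error;

	unit = iommu_find(dev, bootverbose);
	if (unit == NULL)
		return (ENXIO);
	ctx = iommu_get_device(unit, dev, my_rid, false, false);
	if (ctx == NULL)
		return (ENOMEM);
	error = iommu_map(ctx->domain, &ctx->device_tag.common, size, 0,
	    IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE,
	    IOMMU_MF_CANWAIT, my_pages, &entry);
	if (error == 0) {
		TAILQ_INIT(&entries);
		TAILQ_INSERT_TAIL(&entries, entry, dmamap_link);
		iommu_unmap(ctx->domain, &entries, true);
	}
	iommu_free_device(ctx);
	return (error);
}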
Index: sys/x86/iommu/intel_ctx.c
===================================================================
--- sys/x86/iommu/intel_ctx.c
+++ sys/x86/iommu/intel_ctx.c
@@ -71,16 +71,16 @@
#include <x86/iommu/intel_dmar.h>
#include <dev/pci/pcivar.h>
-static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context");
+static MALLOC_DEFINE(M_DMAR_CTX, "iommu_device", "Intel DMAR Context");
static MALLOC_DEFINE(M_DMAR_DOMAIN, "dmar_dom", "Intel DMAR Domain");
static void dmar_domain_unload_task(void *arg, int pending);
-static void dmar_unref_domain_locked(struct dmar_unit *dmar,
- struct dmar_domain *domain);
-static void dmar_domain_destroy(struct dmar_domain *domain);
+static void dmar_unref_domain_locked(struct iommu_unit *dmar,
+ struct iommu_domain *domain);
+static void dmar_domain_destroy(struct iommu_domain *domain);
static void
-dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
+dmar_ensure_ctx_page(struct iommu_unit *dmar, int bus)
{
struct sf_buf *sf;
dmar_root_entry_t *re;
@@ -111,37 +111,37 @@
TD_PINNED_ASSERT;
}
-static dmar_ctx_entry_t *
-dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
+static iommu_device_entry_t *
+dmar_map_ctx_entry(struct iommu_device *ctx, struct sf_buf **sfp)
{
- dmar_ctx_entry_t *ctxp;
+ iommu_device_entry_t *ctxp;
- ctxp = dmar_map_pgtbl(ctx->domain->dmar->ctx_obj, 1 +
+ ctxp = dmar_map_pgtbl(ctx->domain->iommu->ctx_obj, 1 +
PCI_RID2BUS(ctx->rid), DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
ctxp += ctx->rid & 0xff;
return (ctxp);
}
static void
-ctx_tag_init(struct dmar_ctx *ctx, device_t dev)
+device_tag_init(struct iommu_device *ctx, device_t dev)
{
bus_addr_t maxaddr;
maxaddr = MIN(ctx->domain->end, BUS_SPACE_MAXADDR);
- ctx->ctx_tag.common.ref_count = 1; /* Prevent free */
- ctx->ctx_tag.common.impl = &bus_dma_dmar_impl;
- ctx->ctx_tag.common.boundary = 0;
- ctx->ctx_tag.common.lowaddr = maxaddr;
- ctx->ctx_tag.common.highaddr = maxaddr;
- ctx->ctx_tag.common.maxsize = maxaddr;
- ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED;
- ctx->ctx_tag.common.maxsegsz = maxaddr;
- ctx->ctx_tag.ctx = ctx;
- ctx->ctx_tag.owner = dev;
+ ctx->device_tag.common.ref_count = 1; /* Prevent free */
+ ctx->device_tag.common.impl = &bus_dma_iommu_impl;
+ ctx->device_tag.common.boundary = 0;
+ ctx->device_tag.common.lowaddr = maxaddr;
+ ctx->device_tag.common.highaddr = maxaddr;
+ ctx->device_tag.common.maxsize = maxaddr;
+ ctx->device_tag.common.nsegments = BUS_SPACE_UNRESTRICTED;
+ ctx->device_tag.common.maxsegsz = maxaddr;
+ ctx->device_tag.device = ctx;
+ ctx->device_tag.owner = dev;
}
static void
-ctx_id_entry_init_one(dmar_ctx_entry_t *ctxp, struct dmar_domain *domain,
+ctx_id_entry_init_one(iommu_device_entry_t *ctxp, struct iommu_domain *domain,
vm_page_t ctx_root)
{
/*
@@ -165,20 +165,20 @@
}
static void
-ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move,
- int busno)
+ctx_id_entry_init(struct iommu_device *ctx, iommu_device_entry_t *ctxp,
+ bool move, int busno)
{
- struct dmar_unit *unit;
- struct dmar_domain *domain;
+ struct iommu_unit *unit;
+ struct iommu_domain *domain;
vm_page_t ctx_root;
int i;
domain = ctx->domain;
- unit = domain->dmar;
+ unit = domain->iommu;
KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0),
("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
- unit->unit, busno, pci_get_slot(ctx->ctx_tag.owner),
- pci_get_function(ctx->ctx_tag.owner),
+ unit->unit, busno, pci_get_slot(ctx->device_tag.owner),
+ pci_get_function(ctx->device_tag.owner),
ctxp->ctx1, ctxp->ctx2));
if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0 &&
@@ -202,7 +202,7 @@
}
static int
-dmar_flush_for_ctx_entry(struct dmar_unit *dmar, bool force)
+dmar_flush_for_ctx_entry(struct iommu_unit *dmar, bool force)
{
int error;
@@ -226,12 +226,12 @@
}
static int
-domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
+domain_init_rmrr(struct iommu_domain *domain, device_t dev, int bus,
int slot, int func, int dev_domain, int dev_busno,
const void *dev_path, int dev_path_len)
{
- struct dmar_map_entries_tailq rmrr_entries;
- struct dmar_map_entry *entry, *entry1;
+ struct iommu_map_entries_tailq rmrr_entries;
+ struct iommu_map_entry *entry, *entry1;
vm_page_t *ma;
dmar_gaddr_t start, end;
vm_pindex_t size, i;
@@ -255,7 +255,7 @@
end = entry->end;
if (bootverbose)
printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n",
- domain->dmar->unit, bus, slot, func,
+ domain->iommu->unit, bus, slot, func,
(uintmax_t)start, (uintmax_t)end);
entry->start = trunc_page(start);
entry->end = round_page(end);
@@ -267,7 +267,7 @@
printf("pci%d:%d:%d ", bus, slot, func);
printf("BIOS bug: dmar%d RMRR "
"region (%jx, %jx) corrected\n",
- domain->dmar->unit, start, end);
+ domain->iommu->unit, start, end);
}
entry->end += DMAR_PAGE_SIZE * 0x20;
}
@@ -278,8 +278,8 @@
VM_MEMATTR_DEFAULT);
}
error1 = dmar_gas_map_region(domain, entry,
- DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE,
- DMAR_GM_CANWAIT | DMAR_GM_RMRR, ma);
+ IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE,
+ IOMMU_MF_CANWAIT | IOMMU_MF_RMRR, ma);
/*
* Non-failed RMRR entries are owned by context rb
* tree. Get rid of the failed entry, but do not stop
@@ -287,10 +287,10 @@
* loaded and removed on the context destruction.
*/
if (error1 == 0 && entry->end != entry->start) {
- DMAR_LOCK(domain->dmar);
+ IOMMU_LOCK(domain->iommu);
domain->refs++; /* XXXKIB prevent free */
domain->flags |= DMAR_DOMAIN_RMRR;
- DMAR_UNLOCK(domain->dmar);
+ IOMMU_UNLOCK(domain->iommu);
} else {
if (error1 != 0) {
if (dev != NULL)
@@ -298,7 +298,7 @@
printf("pci%d:%d:%d ", bus, slot, func);
printf(
"dmar%d failed to map RMRR region (%jx, %jx) %d\n",
- domain->dmar->unit, start, end,
+ domain->iommu->unit, start, end,
error1);
error = error1;
}
@@ -312,10 +312,10 @@
return (error);
}
-static struct dmar_domain *
-dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
+static struct iommu_domain *
+dmar_domain_alloc(struct iommu_unit *dmar, bool id_mapped)
{
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
int error, id, mgaw;
id = alloc_unr(dmar->domids);
@@ -328,7 +328,7 @@
TAILQ_INIT(&domain->unload_entries);
TASK_INIT(&domain->unload_task, 0, dmar_domain_unload_task, domain);
mtx_init(&domain->lock, "dmardom", NULL, MTX_DEF);
- domain->dmar = dmar;
+ domain->iommu = dmar;
/*
* For now, use the maximal usable physical address of the
@@ -370,10 +370,10 @@
return (NULL);
}
-static struct dmar_ctx *
-dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid)
+static struct iommu_device *
+dmar_device_alloc(struct iommu_domain *domain, uint16_t rid)
{
- struct dmar_ctx *ctx;
+ struct iommu_device *ctx;
ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
ctx->domain = domain;
@@ -383,12 +383,12 @@
}
static void
-dmar_ctx_link(struct dmar_ctx *ctx)
+dmar_device_link(struct iommu_device *ctx)
{
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
domain = ctx->domain;
- DMAR_ASSERT_LOCKED(domain->dmar);
+ IOMMU_ASSERT_LOCKED(domain->iommu);
KASSERT(domain->refs >= domain->ctx_cnt,
("dom %p ref underflow %d %d", domain, domain->refs,
domain->ctx_cnt));
@@ -398,12 +398,12 @@
}
static void
-dmar_ctx_unlink(struct dmar_ctx *ctx)
+dmar_device_unlink(struct iommu_device *ctx)
{
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
domain = ctx->domain;
- DMAR_ASSERT_LOCKED(domain->dmar);
+ IOMMU_ASSERT_LOCKED(domain->iommu);
KASSERT(domain->refs > 0,
("domain %p ctx dtr refs %d", domain, domain->refs));
KASSERT(domain->ctx_cnt >= domain->refs,
@@ -415,7 +415,7 @@
}
static void
-dmar_domain_destroy(struct dmar_domain *domain)
+dmar_domain_destroy(struct iommu_domain *domain)
{
KASSERT(TAILQ_EMPTY(&domain->unload_entries),
@@ -427,9 +427,9 @@
KASSERT(domain->refs == 0,
("destroying dom %p with refs %d", domain, domain->refs));
if ((domain->flags & DMAR_DOMAIN_GAS_INITED) != 0) {
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
dmar_gas_fini_domain(domain);
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
}
if ((domain->flags & DMAR_DOMAIN_PGTBL_INITED) != 0) {
if (domain->pgtbl_obj != NULL)
@@ -437,18 +437,18 @@
domain_free_pgtbl(domain);
}
mtx_destroy(&domain->lock);
- free_unr(domain->dmar->domids, domain->domain);
+ free_unr(domain->iommu->domids, domain->domain);
free(domain, M_DMAR_DOMAIN);
}
-static struct dmar_ctx *
-dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
+static struct iommu_device *
+dmar_get_ctx_for_dev1(struct iommu_unit *dmar, device_t dev, uint16_t rid,
int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
bool id_mapped, bool rmrr_init)
{
- struct dmar_domain *domain, *domain1;
- struct dmar_ctx *ctx, *ctx1;
- dmar_ctx_entry_t *ctxp;
+ struct iommu_domain *domain, *domain1;
+ struct iommu_device *ctx, *ctx1;
+ iommu_device_entry_t *ctxp;
struct sf_buf *sf;
int bus, slot, func, error;
bool enable;
@@ -464,7 +464,7 @@
}
enable = false;
TD_PREP_PINNED_ASSERT;
- DMAR_LOCK(dmar);
+ IOMMU_LOCK(dmar);
KASSERT(!dmar_is_buswide_ctx(dmar, bus) || (slot == 0 && func == 0),
("dmar%d pci%d:%d:%d get_ctx for buswide", dmar->unit, bus,
slot, func));
@@ -475,7 +475,7 @@
* Perform the allocations which require sleep or have
* higher chance to succeed if the sleep is allowed.
*/
- DMAR_UNLOCK(dmar);
+ IOMMU_UNLOCK(dmar);
dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid));
domain1 = dmar_domain_alloc(dmar, id_mapped);
if (domain1 == NULL) {
@@ -492,9 +492,9 @@
return (NULL);
}
}
- ctx1 = dmar_ctx_alloc(domain1, rid);
+ ctx1 = dmar_device_alloc(domain1, rid);
ctxp = dmar_map_ctx_entry(ctx1, &sf);
- DMAR_LOCK(dmar);
+ IOMMU_LOCK(dmar);
/*
	 * Recheck the contexts; another thread might have
@@ -504,9 +504,9 @@
if (ctx == NULL) {
domain = domain1;
ctx = ctx1;
- dmar_ctx_link(ctx);
- ctx->ctx_tag.owner = dev;
- ctx_tag_init(ctx, dev);
+ dmar_device_link(ctx);
+ ctx->device_tag.owner = dev;
+ device_tag_init(ctx, dev);
/*
* This is the first activated context for the
@@ -536,8 +536,8 @@
}
} else {
domain = ctx->domain;
- if (ctx->ctx_tag.owner == NULL)
- ctx->ctx_tag.owner = dev;
+ if (ctx->device_tag.owner == NULL)
+ ctx->device_tag.owner = dev;
ctx->refs++; /* tag referenced us */
}
@@ -568,13 +568,13 @@
return (NULL);
}
}
- DMAR_UNLOCK(dmar);
+ IOMMU_UNLOCK(dmar);
TD_PINNED_ASSERT;
return (ctx);
}
-struct dmar_ctx *
-dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid,
+struct iommu_device *
+dmar_get_ctx_for_dev(struct iommu_unit *dmar, device_t dev, uint16_t rid,
bool id_mapped, bool rmrr_init)
{
int dev_domain, dev_path_len, dev_busno;
@@ -587,8 +587,8 @@
dev_path, dev_path_len, id_mapped, rmrr_init));
}
-struct dmar_ctx *
-dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
+struct iommu_device *
+dmar_get_ctx_for_devpath(struct iommu_unit *dmar, uint16_t rid,
int dev_domain, int dev_busno,
const void *dev_path, int dev_path_len,
bool id_mapped, bool rmrr_init)
@@ -599,28 +599,28 @@
}
int
-dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx)
+dmar_move_ctx_to_domain(struct iommu_domain *domain, struct iommu_device *ctx)
{
- struct dmar_unit *dmar;
- struct dmar_domain *old_domain;
- dmar_ctx_entry_t *ctxp;
+ struct iommu_unit *dmar;
+ struct iommu_domain *old_domain;
+ iommu_device_entry_t *ctxp;
struct sf_buf *sf;
int error;
- dmar = domain->dmar;
+ dmar = domain->iommu;
old_domain = ctx->domain;
if (domain == old_domain)
return (0);
- KASSERT(old_domain->dmar == dmar,
+ KASSERT(old_domain->iommu == dmar,
("domain %p %u moving between dmars %u %u", domain,
- domain->domain, old_domain->dmar->unit, domain->dmar->unit));
+ domain->domain, old_domain->iommu->unit, domain->iommu->unit));
TD_PREP_PINNED_ASSERT;
ctxp = dmar_map_ctx_entry(ctx, &sf);
- DMAR_LOCK(dmar);
- dmar_ctx_unlink(ctx);
+ IOMMU_LOCK(dmar);
+ dmar_device_unlink(ctx);
ctx->domain = domain;
- dmar_ctx_link(ctx);
+ dmar_device_link(ctx);
ctx_id_entry_init(ctx, ctxp, true, PCI_BUSMAX + 100);
dmar_unmap_pgtbl(sf);
error = dmar_flush_for_ctx_entry(dmar, true);
@@ -634,10 +634,10 @@
}
static void
-dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
+dmar_unref_domain_locked(struct iommu_unit *dmar, struct iommu_domain *domain)
{
- DMAR_ASSERT_LOCKED(dmar);
+ IOMMU_ASSERT_LOCKED(dmar);
KASSERT(domain->refs >= 1,
("dmar %d domain %p refs %u", dmar->unit, domain, domain->refs));
KASSERT(domain->refs > domain->ctx_cnt,
@@ -646,7 +646,7 @@
if (domain->refs > 1) {
domain->refs--;
- DMAR_UNLOCK(dmar);
+ IOMMU_UNLOCK(dmar);
return;
}
@@ -654,20 +654,20 @@
("lost ref on RMRR domain %p", domain));
LIST_REMOVE(domain, link);
- DMAR_UNLOCK(dmar);
+ IOMMU_UNLOCK(dmar);
taskqueue_drain(dmar->delayed_taskqueue, &domain->unload_task);
dmar_domain_destroy(domain);
}
void
-dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
+dmar_free_ctx_locked(struct iommu_unit *dmar, struct iommu_device *ctx)
{
struct sf_buf *sf;
- dmar_ctx_entry_t *ctxp;
- struct dmar_domain *domain;
+ iommu_device_entry_t *ctxp;
+ struct iommu_domain *domain;
- DMAR_ASSERT_LOCKED(dmar);
+ IOMMU_ASSERT_LOCKED(dmar);
KASSERT(ctx->refs >= 1,
("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));
@@ -677,11 +677,11 @@
*/
if (ctx->refs > 1) {
ctx->refs--;
- DMAR_UNLOCK(dmar);
+ IOMMU_UNLOCK(dmar);
return;
}
- KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
+ KASSERT((ctx->flags & IOMMU_DEVICE_DISABLED) == 0,
("lost ref on disabled ctx %p", ctx));
/*
@@ -689,10 +689,10 @@
* page table is destroyed. The mapping of the context
	 * entries page could require sleep; unlock the dmar.
*/
- DMAR_UNLOCK(dmar);
+ IOMMU_UNLOCK(dmar);
TD_PREP_PINNED_ASSERT;
ctxp = dmar_map_ctx_entry(ctx, &sf);
- DMAR_LOCK(dmar);
+ IOMMU_LOCK(dmar);
KASSERT(ctx->refs >= 1,
("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));
@@ -702,13 +702,13 @@
*/
if (ctx->refs > 1) {
ctx->refs--;
- DMAR_UNLOCK(dmar);
+ IOMMU_UNLOCK(dmar);
dmar_unmap_pgtbl(sf);
TD_PINNED_ASSERT;
return;
}
- KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
+ KASSERT((ctx->flags & IOMMU_DEVICE_DISABLED) == 0,
("lost ref on disabled ctx %p", ctx));
/*
@@ -727,32 +727,32 @@
}
dmar_unmap_pgtbl(sf);
domain = ctx->domain;
- dmar_ctx_unlink(ctx);
+ dmar_device_unlink(ctx);
free(ctx, M_DMAR_CTX);
dmar_unref_domain_locked(dmar, domain);
TD_PINNED_ASSERT;
}
void
-dmar_free_ctx(struct dmar_ctx *ctx)
+dmar_free_ctx(struct iommu_device *ctx)
{
- struct dmar_unit *dmar;
+ struct iommu_unit *dmar;
- dmar = ctx->domain->dmar;
- DMAR_LOCK(dmar);
+ dmar = ctx->domain->iommu;
+ IOMMU_LOCK(dmar);
dmar_free_ctx_locked(dmar, ctx);
}
/*
* Returns with the domain locked.
*/
-struct dmar_ctx *
-dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid)
+struct iommu_device *
+dmar_find_ctx_locked(struct iommu_unit *dmar, uint16_t rid)
{
- struct dmar_domain *domain;
- struct dmar_ctx *ctx;
+ struct iommu_domain *domain;
+ struct iommu_device *ctx;
- DMAR_ASSERT_LOCKED(dmar);
+ IOMMU_ASSERT_LOCKED(dmar);
LIST_FOREACH(domain, &dmar->domains, link) {
LIST_FOREACH(ctx, &domain->contexts, link) {
@@ -764,17 +764,17 @@
}
void
-dmar_domain_free_entry(struct dmar_map_entry *entry, bool free)
+dmar_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
domain = entry->domain;
- DMAR_DOMAIN_LOCK(domain);
- if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
+ IOMMU_DOMAIN_LOCK(domain);
+ if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
dmar_gas_free_region(domain, entry);
else
dmar_gas_free_space(domain, entry);
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
if (free)
dmar_gas_free_entry(domain, entry);
else
@@ -782,19 +782,19 @@
}
void
-dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free)
+dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
- unit = entry->domain->dmar;
+ unit = entry->domain->iommu;
if (unit->qi_enabled) {
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
dmar_qi_invalidate_locked(entry->domain, entry->start,
entry->end - entry->start, &entry->gseq, true);
if (!free)
- entry->flags |= DMAR_MAP_ENTRY_QI_NF;
+ entry->flags |= IOMMU_MAP_ENTRY_QI_NF;
TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
} else {
domain_flush_iotlb_sync(entry->domain, entry->start,
entry->end - entry->start);
@@ -803,8 +803,8 @@
}
static bool
-dmar_domain_unload_emit_wait(struct dmar_domain *domain,
- struct dmar_map_entry *entry)
+dmar_domain_unload_emit_wait(struct iommu_domain *domain,
+ struct iommu_map_entry *entry)
{
if (TAILQ_NEXT(entry, dmamap_link) == NULL)
@@ -813,17 +813,17 @@
}
void
-dmar_domain_unload(struct dmar_domain *domain,
- struct dmar_map_entries_tailq *entries, bool cansleep)
+dmar_domain_unload(struct iommu_domain *domain,
+ struct iommu_map_entries_tailq *entries, bool cansleep)
{
- struct dmar_unit *unit;
- struct dmar_map_entry *entry, *entry1;
+ struct iommu_unit *unit;
+ struct iommu_map_entry *entry, *entry1;
int error;
- unit = domain->dmar;
+ unit = domain->iommu;
TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
- KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
+ KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
("not mapped entry %p %p", domain, entry));
error = domain_unmap_buf(domain, entry->start, entry->end -
entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
@@ -839,30 +839,30 @@
return;
KASSERT(unit->qi_enabled, ("loaded entry left"));
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
TAILQ_FOREACH(entry, entries, dmamap_link) {
dmar_qi_invalidate_locked(domain, entry->start, entry->end -
entry->start, &entry->gseq,
dmar_domain_unload_emit_wait(domain, entry));
}
TAILQ_CONCAT(&unit->tlb_flush_entries, entries, dmamap_link);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
}
static void
dmar_domain_unload_task(void *arg, int pending)
{
- struct dmar_domain *domain;
- struct dmar_map_entries_tailq entries;
+ struct iommu_domain *domain;
+ struct iommu_map_entries_tailq entries;
domain = arg;
TAILQ_INIT(&entries);
for (;;) {
- DMAR_DOMAIN_LOCK(domain);
- TAILQ_SWAP(&domain->unload_entries, &entries, dmar_map_entry,
+ IOMMU_DOMAIN_LOCK(domain);
+ TAILQ_SWAP(&domain->unload_entries, &entries, iommu_map_entry,
dmamap_link);
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
if (TAILQ_EMPTY(&entries))
break;
dmar_domain_unload(domain, &entries, true);
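
dmar_domain_unload_task() above is the consumer half of a small producer/consumer scheme: entries destined for unloading accumulate on domain->unload_entries under the domain lock, and the task swaps the whole queue out in O(1) with TAILQ_SWAP so the page-table teardown itself runs unlocked. The producer half is not visible in this hunk; a sketch of what it presumably looks like, using only names introduced by this diff:

/*
 * Hypothetical producer for the deferred-unload queue: append the
 * entry under the domain lock, then kick the unit's taskqueue.
 */
static void
queue_entry_for_unload(struct iommu_domain *domain,
    struct iommu_map_entry *entry)
{

	IOMMU_DOMAIN_LOCK(domain);
	TAILQ_INSERT_TAIL(&domain->unload_entries, entry, dmamap_link);
	IOMMU_DOMAIN_UNLOCK(domain);
	taskqueue_enqueue(domain->iommu->delayed_taskqueue,
	    &domain->unload_task);
}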
Index: sys/x86/iommu/intel_dmar.h
===================================================================
--- sys/x86/iommu/intel_dmar.h
+++ sys/x86/iommu/intel_dmar.h
@@ -44,7 +44,7 @@
uint32_t seq;
};
-struct dmar_map_entry {
+struct iommu_map_entry {
dmar_gaddr_t start;
dmar_gaddr_t end;
dmar_gaddr_t first; /* Least start in subtree */
@@ -52,29 +52,29 @@
dmar_gaddr_t free_down; /* Max free space below the
current R/B tree node */
u_int flags;
- TAILQ_ENTRY(dmar_map_entry) dmamap_link; /* Link for dmamap entries */
- RB_ENTRY(dmar_map_entry) rb_entry; /* Links for domain entries */
- TAILQ_ENTRY(dmar_map_entry) unroll_link; /* Link for unroll after
+ TAILQ_ENTRY(iommu_map_entry) dmamap_link; /* Link for dmamap entries */
+ RB_ENTRY(iommu_map_entry) rb_entry; /* Links for domain entries */
+ TAILQ_ENTRY(iommu_map_entry) unroll_link; /* Link for unroll after
dmamap_load failure */
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
struct dmar_qi_genseq gseq;
};
-RB_HEAD(dmar_gas_entries_tree, dmar_map_entry);
-RB_PROTOTYPE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
+RB_HEAD(dmar_gas_entries_tree, iommu_map_entry);
+RB_PROTOTYPE(dmar_gas_entries_tree, iommu_map_entry, rb_entry,
dmar_gas_cmp_entries);
-#define DMAR_MAP_ENTRY_PLACE 0x0001 /* Fake entry */
-#define DMAR_MAP_ENTRY_RMRR 0x0002 /* Permanent, not linked by
+#define IOMMU_MAP_ENTRY_PLACE 0x0001 /* Fake entry */
+#define IOMMU_MAP_ENTRY_RMRR 0x0002 /* Permanent, not linked by
dmamap_link */
-#define DMAR_MAP_ENTRY_MAP 0x0004 /* Busdma created, linked by
+#define IOMMU_MAP_ENTRY_MAP 0x0004 /* Busdma created, linked by
dmamap_link */
-#define DMAR_MAP_ENTRY_UNMAPPED 0x0010 /* No backing pages */
-#define DMAR_MAP_ENTRY_QI_NF 0x0020 /* qi task, do not free entry */
-#define DMAR_MAP_ENTRY_READ 0x1000 /* Read permitted */
-#define DMAR_MAP_ENTRY_WRITE 0x2000 /* Write permitted */
-#define DMAR_MAP_ENTRY_SNOOP 0x4000 /* Snoop */
-#define DMAR_MAP_ENTRY_TM 0x8000 /* Transient */
+#define IOMMU_MAP_ENTRY_UNMAPPED 0x0010 /* No backing pages */
+#define IOMMU_MAP_ENTRY_QI_NF 0x0020 /* qi task, do not free entry */
+#define IOMMU_MAP_ENTRY_READ 0x1000 /* Read permitted */
+#define IOMMU_MAP_ENTRY_WRITE 0x2000 /* Write permitted */
+#define IOMMU_MAP_ENTRY_SNOOP 0x4000 /* Snoop */
+#define IOMMU_MAP_ENTRY_TM 0x8000 /* Transient */
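
The permission and state bits keep their old values; only the prefix changes. Callers compose the eflags argument exactly as before; for instance, bus_dma_dmar_load_ident() earlier in this diff derives write permission from the absence of BUS_DMA_NOWRITE. Restated for reference:

	/* Permission flags as built in bus_dma_dmar_load_ident() above. */
	eflags = IOMMU_MAP_ENTRY_READ |
	    ((flags & BUS_DMA_NOWRITE) != 0 ? 0 : IOMMU_MAP_ENTRY_WRITE);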
/*
* Locking annotations:
@@ -94,7 +94,7 @@
 * Page table pages and their content are protected by the vm object
 * lock pgtbl_obj, which contains the page table pages.
*/
-struct dmar_domain {
+struct iommu_domain {
int domain; /* (c) DID, written in context entry */
int mgaw; /* (c) Real max address width */
int agaw; /* (c) Adjusted guest address width */
@@ -105,27 +105,27 @@
the guest AS */
u_int ctx_cnt; /* (u) Number of contexts owned */
u_int refs; /* (u) Refs, including ctx */
- struct dmar_unit *dmar; /* (c) */
+ struct iommu_unit *iommu; /* (c) */
struct mtx lock; /* (c) */
- LIST_ENTRY(dmar_domain) link; /* (u) Member in the dmar list */
- LIST_HEAD(, dmar_ctx) contexts; /* (u) */
+ LIST_ENTRY(iommu_domain) link; /* (u) Member in the dmar list */
+ LIST_HEAD(, iommu_device) contexts; /* (u) */
vm_object_t pgtbl_obj; /* (c) Page table pages */
u_int flags; /* (u) */
u_int entries_cnt; /* (d) */
struct dmar_gas_entries_tree rb_root; /* (d) */
- struct dmar_map_entries_tailq unload_entries; /* (d) Entries to
- unload */
- struct dmar_map_entry *first_place, *last_place; /* (d) */
+ struct iommu_map_entries_tailq unload_entries; /* (d) Entries to
+ unload */
+ struct iommu_map_entry *first_place, *last_place; /* (d) */
struct task unload_task; /* (c) */
u_int batch_no;
};
-struct dmar_ctx {
- struct bus_dma_tag_dmar ctx_tag; /* (c) Root tag */
+struct iommu_device {
+ struct bus_dma_tag_iommu device_tag; /* (c) Root tag */
uint16_t rid; /* (c) pci RID */
uint64_t last_fault_rec[2]; /* Last fault reported */
- struct dmar_domain *domain; /* (c) */
- LIST_ENTRY(dmar_ctx) link; /* (u) Member in the domain list */
+ struct iommu_domain *domain; /* (c) */
+ LIST_ENTRY(iommu_device) link; /* (u) Member in the domain list */
u_int refs; /* (u) References from tags */
u_int flags; /* (u) */
u_long loads; /* atomic updates, for stat only */
@@ -139,10 +139,10 @@
#define DMAR_DOMAIN_RMRR 0x0020 /* Domain contains RMRR entry,
cannot be turned off */
-/* struct dmar_ctx flags */
-#define DMAR_CTX_FAULTED 0x0001 /* Fault was reported,
+/* struct iommu_device flags */
+#define IOMMU_DEVICE_FAULTED 0x0001 /* Fault was reported,
last_fault_rec is valid */
-#define DMAR_CTX_DISABLED 0x0002 /* Device is disabled, the
+#define IOMMU_DEVICE_DISABLED 0x0002 /* Device is disabled, the
ephemeral reference is kept
to prevent context destruction */
@@ -152,9 +152,9 @@
#define DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \
VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)
-#define DMAR_DOMAIN_LOCK(dom) mtx_lock(&(dom)->lock)
-#define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock)
-#define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED)
+#define IOMMU_DOMAIN_LOCK(dom) mtx_lock(&(dom)->lock)
+#define IOMMU_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock)
+#define IOMMU_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED)
struct dmar_msi_data {
int irq;
@@ -165,8 +165,8 @@
int msi_data_reg;
int msi_addr_reg;
int msi_uaddr_reg;
- void (*enable_intr)(struct dmar_unit *);
- void (*disable_intr)(struct dmar_unit *);
+ void (*enable_intr)(struct iommu_unit *);
+ void (*disable_intr)(struct iommu_unit *);
const char *name;
};
@@ -174,7 +174,7 @@
#define DMAR_INTR_QI 1
#define DMAR_INTR_TOTAL 2
-struct dmar_unit {
+struct iommu_unit {
device_t dev;
int unit;
uint16_t segment;
@@ -194,7 +194,7 @@
/* Data for being a dmar */
struct mtx lock;
- LIST_HEAD(, dmar_domain) domains;
+ LIST_HEAD(, iommu_domain) domains;
struct unrhdr *domids;
vm_object_t ctx_obj;
u_int barrier_flags;
@@ -230,13 +230,13 @@
vmem_t *irtids;
/* Delayed freeing of map entries queue processing */
- struct dmar_map_entries_tailq tlb_flush_entries;
+ struct iommu_map_entries_tailq tlb_flush_entries;
struct task qi_task;
struct taskqueue *qi_taskqueue;
/* Busdma delayed map load */
struct task dmamap_load_task;
- TAILQ_HEAD(, bus_dmamap_dmar) delayed_maps;
+ TAILQ_HEAD(, bus_dmamap_iommu) delayed_maps;
struct taskqueue *delayed_taskqueue;
int dma_enabled;
@@ -251,9 +251,9 @@
};
-#define DMAR_LOCK(dmar) mtx_lock(&(dmar)->lock)
-#define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->lock)
-#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->lock, MA_OWNED)
+#define IOMMU_LOCK(dmar) mtx_lock(&(dmar)->lock)
+#define IOMMU_UNLOCK(dmar) mtx_unlock(&(dmar)->lock)
+#define IOMMU_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->lock, MA_OWNED)
#define DMAR_FAULT_LOCK(dmar) mtx_lock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock)
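
The renamed macros keep the established convention that a _locked function asserts ownership of the unit lock while its plain wrapper acquires it, as dmar_free_ctx()/dmar_free_ctx_locked() elsewhere in this diff illustrate. A schematic of the pattern (do_work is a placeholder, not a function in this change):

static void
do_work_locked(struct iommu_unit *unit)
{

	IOMMU_ASSERT_LOCKED(unit);
	/* Manipulate state protected by unit->lock. */
}

static void
do_work(struct iommu_unit *unit)
{

	IOMMU_LOCK(unit);
	do_work_locked(unit);
	IOMMU_UNLOCK(unit);
}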
@@ -268,131 +268,129 @@
#define DMAR_BARRIER_RMRR 0
#define DMAR_BARRIER_USEQ 1
-struct dmar_unit *dmar_find(device_t dev, bool verbose);
-struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
-struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);
+struct iommu_unit *dmar_find(device_t dev, bool verbose);
+struct iommu_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
+struct iommu_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);
u_int dmar_nd2mask(u_int nd);
-bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
-int domain_set_agaw(struct dmar_domain *domain, int mgaw);
-int dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr,
+bool dmar_pglvl_supported(struct iommu_unit *unit, int pglvl);
+int domain_set_agaw(struct iommu_domain *domain, int mgaw);
+int dmar_maxaddr2mgaw(struct iommu_unit *unit, dmar_gaddr_t maxaddr,
bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
-int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
+int domain_is_sp_lvl(struct iommu_domain *domain, int lvl);
dmar_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
-dmar_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
-int calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
+dmar_gaddr_t domain_page_size(struct iommu_domain *domain, int lvl);
+int calc_am(struct iommu_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
dmar_gaddr_t *isizep);
struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
struct sf_buf **sf);
void dmar_unmap_pgtbl(struct sf_buf *sf);
-int dmar_load_root_entry_ptr(struct dmar_unit *unit);
-int dmar_inv_ctx_glob(struct dmar_unit *unit);
-int dmar_inv_iotlb_glob(struct dmar_unit *unit);
-int dmar_flush_write_bufs(struct dmar_unit *unit);
-void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst);
-void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
-void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
-int dmar_enable_translation(struct dmar_unit *unit);
-int dmar_disable_translation(struct dmar_unit *unit);
-int dmar_load_irt_ptr(struct dmar_unit *unit);
-int dmar_enable_ir(struct dmar_unit *unit);
-int dmar_disable_ir(struct dmar_unit *unit);
-bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
-void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id);
+int dmar_load_root_entry_ptr(struct iommu_unit *unit);
+int dmar_inv_ctx_glob(struct iommu_unit *unit);
+int dmar_inv_iotlb_glob(struct iommu_unit *unit);
+int dmar_flush_write_bufs(struct iommu_unit *unit);
+void dmar_flush_pte_to_ram(struct iommu_unit *unit, dmar_pte_t *dst);
+void dmar_flush_ctx_to_ram(struct iommu_unit *unit, iommu_device_entry_t *dst);
+void dmar_flush_root_to_ram(struct iommu_unit *unit, dmar_root_entry_t *dst);
+int dmar_enable_translation(struct iommu_unit *unit);
+int dmar_disable_translation(struct iommu_unit *unit);
+int dmar_load_irt_ptr(struct iommu_unit *unit);
+int dmar_enable_ir(struct iommu_unit *unit);
+int dmar_disable_ir(struct iommu_unit *unit);
+bool dmar_barrier_enter(struct iommu_unit *dmar, u_int barrier_id);
+void dmar_barrier_exit(struct iommu_unit *dmar, u_int barrier_id);
uint64_t dmar_get_timeout(void);
void dmar_update_timeout(uint64_t newval);
int dmar_fault_intr(void *arg);
-void dmar_enable_fault_intr(struct dmar_unit *unit);
-void dmar_disable_fault_intr(struct dmar_unit *unit);
-int dmar_init_fault_log(struct dmar_unit *unit);
-void dmar_fini_fault_log(struct dmar_unit *unit);
+void dmar_enable_fault_intr(struct iommu_unit *unit);
+void dmar_disable_fault_intr(struct iommu_unit *unit);
+int dmar_init_fault_log(struct iommu_unit *unit);
+void dmar_fini_fault_log(struct iommu_unit *unit);
int dmar_qi_intr(void *arg);
-void dmar_enable_qi_intr(struct dmar_unit *unit);
-void dmar_disable_qi_intr(struct dmar_unit *unit);
-int dmar_init_qi(struct dmar_unit *unit);
-void dmar_fini_qi(struct dmar_unit *unit);
-void dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t start,
+void dmar_enable_qi_intr(struct iommu_unit *unit);
+void dmar_disable_qi_intr(struct iommu_unit *unit);
+int dmar_init_qi(struct iommu_unit *unit);
+void dmar_fini_qi(struct iommu_unit *unit);
+void dmar_qi_invalidate_locked(struct iommu_domain *domain, dmar_gaddr_t start,
dmar_gaddr_t size, struct dmar_qi_genseq *psec, bool emit_wait);
-void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
-void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
-void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
-void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);
+void dmar_qi_invalidate_ctx_glob_locked(struct iommu_unit *unit);
+void dmar_qi_invalidate_iotlb_glob_locked(struct iommu_unit *unit);
+void dmar_qi_invalidate_iec_glob(struct iommu_unit *unit);
+void dmar_qi_invalidate_iec(struct iommu_unit *unit, u_int start, u_int cnt);
-vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
+vm_object_t domain_get_idmap_pgtbl(struct iommu_domain *domain,
dmar_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
-int domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base,
+int domain_map_buf(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
-int domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base,
+int domain_unmap_buf(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, int flags);
-void domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
+void domain_flush_iotlb_sync(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size);
-int domain_alloc_pgtbl(struct dmar_domain *domain);
-void domain_free_pgtbl(struct dmar_domain *domain);
+int domain_alloc_pgtbl(struct iommu_domain *domain);
+void domain_free_pgtbl(struct iommu_domain *domain);
int dmar_dev_depth(device_t child);
void dmar_dev_path(device_t child, int *busno, void *path1, int depth);
-struct dmar_ctx *dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev,
- bool rmrr);
-struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev,
+struct iommu_device *dmar_get_ctx_for_dev(struct iommu_unit *dmar, device_t dev,
uint16_t rid, bool id_mapped, bool rmrr_init);
-struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
- int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
- bool id_mapped, bool rmrr_init);
-int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
-void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
-void dmar_free_ctx(struct dmar_ctx *ctx);
-struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
-void dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free);
-void dmar_domain_unload(struct dmar_domain *domain,
- struct dmar_map_entries_tailq *entries, bool cansleep);
-void dmar_domain_free_entry(struct dmar_map_entry *entry, bool free);
-
-int dmar_init_busdma(struct dmar_unit *unit);
-void dmar_fini_busdma(struct dmar_unit *unit);
+struct iommu_device *dmar_get_ctx_for_devpath(struct iommu_unit *dmar,
+ uint16_t rid, int dev_domain, int dev_busno, const void *dev_path,
+ int dev_path_len, bool id_mapped, bool rmrr_init);
+int dmar_move_ctx_to_domain(struct iommu_domain *domain,
+ struct iommu_device *ctx);
+void dmar_free_ctx_locked(struct iommu_unit *dmar, struct iommu_device *ctx);
+void dmar_free_ctx(struct iommu_device *ctx);
+struct iommu_device *dmar_find_ctx_locked(struct iommu_unit *dmar,
+ uint16_t rid);
+void dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free);
+void dmar_domain_unload(struct iommu_domain *domain,
+ struct iommu_map_entries_tailq *entries, bool cansleep);
+void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free);
+
device_t dmar_get_requester(device_t dev, uint16_t *rid);
-void dmar_gas_init_domain(struct dmar_domain *domain);
-void dmar_gas_fini_domain(struct dmar_domain *domain);
-struct dmar_map_entry *dmar_gas_alloc_entry(struct dmar_domain *domain,
+void dmar_gas_init_domain(struct iommu_domain *domain);
+void dmar_gas_fini_domain(struct iommu_domain *domain);
+struct iommu_map_entry *dmar_gas_alloc_entry(struct iommu_domain *domain,
u_int flags);
-void dmar_gas_free_entry(struct dmar_domain *domain,
- struct dmar_map_entry *entry);
-void dmar_gas_free_space(struct dmar_domain *domain,
- struct dmar_map_entry *entry);
-int dmar_gas_map(struct dmar_domain *domain,
+void dmar_gas_free_entry(struct iommu_domain *domain,
+ struct iommu_map_entry *entry);
+void dmar_gas_free_space(struct iommu_domain *domain,
+ struct iommu_map_entry *entry);
+int dmar_gas_map(struct iommu_domain *domain,
const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset,
- u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res);
-void dmar_gas_free_region(struct dmar_domain *domain,
- struct dmar_map_entry *entry);
-int dmar_gas_map_region(struct dmar_domain *domain,
- struct dmar_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
-int dmar_gas_reserve_region(struct dmar_domain *domain, dmar_gaddr_t start,
+ u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
+void dmar_gas_free_region(struct iommu_domain *domain,
+ struct iommu_map_entry *entry);
+int dmar_gas_map_region(struct iommu_domain *domain,
+ struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
+int dmar_gas_reserve_region(struct iommu_domain *domain, dmar_gaddr_t start,
dmar_gaddr_t end);
-void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
+void dmar_dev_parse_rmrr(struct iommu_domain *domain, int dev_domain,
int dev_busno, const void *dev_path, int dev_path_len,
- struct dmar_map_entries_tailq *rmrr_entries);
-int dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar);
+ struct iommu_map_entries_tailq *rmrr_entries);
+int dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar);
-void dmar_quirks_post_ident(struct dmar_unit *dmar);
-void dmar_quirks_pre_use(struct dmar_unit *dmar);
+void dmar_quirks_post_ident(struct iommu_unit *dmar);
+void dmar_quirks_pre_use(struct iommu_unit *dmar);
-int dmar_init_irt(struct dmar_unit *unit);
-void dmar_fini_irt(struct dmar_unit *unit);
+int dmar_init_irt(struct iommu_unit *unit);
+void dmar_fini_irt(struct iommu_unit *unit);
-void dmar_set_buswide_ctx(struct dmar_unit *unit, u_int busno);
-bool dmar_is_buswide_ctx(struct dmar_unit *unit, u_int busno);
+void dmar_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
+bool dmar_is_buswide_ctx(struct iommu_unit *unit, u_int busno);
-#define DMAR_GM_CANWAIT 0x0001
-#define DMAR_GM_CANSPLIT 0x0002
-#define DMAR_GM_RMRR 0x0004
+#define IOMMU_MF_CANWAIT 0x0001
+#define IOMMU_MF_CANSPLIT 0x0002
+#define IOMMU_MF_RMRR 0x0004
#define DMAR_PGF_WAITOK 0x0001
#define DMAR_PGF_ZERO 0x0002
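
The renamed DMAR_GM_* allocation flags above keep their values: IOMMU_MF_CANWAIT permits the address-space allocator to sleep, IOMMU_MF_CANSPLIT lets dmar_gas_map() shrink a request to fit the free interval it finds (see the a->size adjustment in intel_gas.c below), and IOMMU_MF_RMRR marks permanent BIOS-reserved regions. A hedged sketch of a CANSPLIT consumer, with the per-chunk page and offset bookkeeping elided:

	/* Map piecewise; each iommu_map() call may cover only a prefix. */
	while (remaining > 0) {
		error = iommu_map(domain, common, remaining, offset, eflags,
		    IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT, ma, &entry);
		if (error != 0)
			break;
		remaining -= entry->end - entry->start;
	}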
@@ -407,14 +405,14 @@
extern int dmar_check_free;
static inline uint32_t
-dmar_read4(const struct dmar_unit *unit, int reg)
+dmar_read4(const struct iommu_unit *unit, int reg)
{
return (bus_read_4(unit->regs, reg));
}
static inline uint64_t
-dmar_read8(const struct dmar_unit *unit, int reg)
+dmar_read8(const struct iommu_unit *unit, int reg)
{
#ifdef __i386__
uint32_t high, low;
@@ -428,7 +426,7 @@
}
static inline void
-dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val)
+dmar_write4(const struct iommu_unit *unit, int reg, uint32_t val)
{
KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) ==
@@ -439,7 +437,7 @@
}
static inline void
-dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val)
+dmar_write8(const struct iommu_unit *unit, int reg, uint64_t val)
{
KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write"));
@@ -522,7 +520,7 @@
}
static inline bool
-dmar_test_boundary(dmar_gaddr_t start, dmar_gaddr_t size,
+iommu_test_boundary(dmar_gaddr_t start, dmar_gaddr_t size,
dmar_gaddr_t boundary)
{
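
The body of iommu_test_boundary() falls outside this hunk. For orientation, it reports whether the run [start, start + size) stays inside a single boundary-aligned window; an approximate restatement, not the literal body from the source:

	if (boundary == 0)
		return (true);
	return (start + size <= ((start + boundary) & ~(boundary - 1)));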
Index: sys/x86/iommu/intel_drv.c
===================================================================
--- sys/x86/iommu/intel_drv.c
+++ sys/x86/iommu/intel_drv.c
@@ -232,7 +232,7 @@
}
static void
-dmar_release_intr(device_t dev, struct dmar_unit *unit, int idx)
+dmar_release_intr(device_t dev, struct iommu_unit *unit, int idx)
{
struct dmar_msi_data *dmd;
@@ -248,11 +248,11 @@
}
static void
-dmar_release_resources(device_t dev, struct dmar_unit *unit)
+dmar_release_resources(device_t dev, struct iommu_unit *unit)
{
int i;
- dmar_fini_busdma(unit);
+ iommu_fini_busdma(unit);
dmar_fini_irt(unit);
dmar_fini_qi(unit);
dmar_fini_fault_log(unit);
@@ -276,7 +276,7 @@
}
static int
-dmar_alloc_irq(device_t dev, struct dmar_unit *unit, int idx)
+dmar_alloc_irq(device_t dev, struct iommu_unit *unit, int idx)
{
device_t pcib;
struct dmar_msi_data *dmd;
@@ -344,7 +344,7 @@
static int
dmar_remap_intr(device_t dev, device_t child, u_int irq)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
struct dmar_msi_data *dmd;
uint64_t msi_addr;
uint32_t msi_data;
@@ -359,13 +359,13 @@
dev, irq, &msi_addr, &msi_data);
if (error != 0)
return (error);
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
(dmd->disable_intr)(unit);
dmar_write4(unit, dmd->msi_data_reg, msi_data);
dmar_write4(unit, dmd->msi_addr_reg, msi_addr);
dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32);
(dmd->enable_intr)(unit);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
return (0);
}
}
@@ -374,7 +374,7 @@
#endif
static void
-dmar_print_caps(device_t dev, struct dmar_unit *unit,
+dmar_print_caps(device_t dev, struct iommu_unit *unit,
ACPI_DMAR_HARDWARE_UNIT *dmaru)
{
uint32_t caphi, ecaphi;
@@ -406,7 +406,7 @@
static int
dmar_attach(device_t dev)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
ACPI_DMAR_HARDWARE_UNIT *dmaru;
uint64_t timeout;
int i, error;
@@ -493,29 +493,29 @@
* done.
*/
dmar_pgalloc(unit->ctx_obj, 0, DMAR_PGF_WAITOK | DMAR_PGF_ZERO);
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
error = dmar_load_root_entry_ptr(unit);
if (error != 0) {
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
dmar_release_resources(dev, unit);
return (error);
}
error = dmar_inv_ctx_glob(unit);
if (error != 0) {
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
dmar_release_resources(dev, unit);
return (error);
}
if ((unit->hw_ecap & DMAR_ECAP_DI) != 0) {
error = dmar_inv_iotlb_glob(unit);
if (error != 0) {
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
dmar_release_resources(dev, unit);
return (error);
}
}
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
error = dmar_init_fault_log(unit);
if (error != 0) {
dmar_release_resources(dev, unit);
@@ -531,21 +531,21 @@
dmar_release_resources(dev, unit);
return (error);
}
- error = dmar_init_busdma(unit);
+ error = iommu_init_busdma(unit);
if (error != 0) {
dmar_release_resources(dev, unit);
return (error);
}
#ifdef NOTYET
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
error = dmar_enable_translation(unit);
if (error != 0) {
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
dmar_release_resources(dev, unit);
return (error);
}
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
#endif
return (0);
@@ -589,25 +589,25 @@
static driver_t dmar_driver = {
"dmar",
dmar_methods,
- sizeof(struct dmar_unit),
+ sizeof(struct iommu_unit),
};
DRIVER_MODULE(dmar, acpi, dmar_driver, dmar_devclass, 0, 0);
MODULE_DEPEND(dmar, acpi, 1, 1, 1);
void
-dmar_set_buswide_ctx(struct dmar_unit *unit, u_int busno)
+dmar_set_buswide_ctx(struct iommu_unit *unit, u_int busno)
{
MPASS(busno <= PCI_BUSMAX);
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |=
1 << (busno % (NBBY * sizeof(uint32_t)));
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
}
bool
-dmar_is_buswide_ctx(struct dmar_unit *unit, u_int busno)
+dmar_is_buswide_ctx(struct iommu_unit *unit, u_int busno)
{
MPASS(busno <= PCI_BUSMAX);
@@ -728,7 +728,7 @@
}
static bool
-dmar_match_by_path(struct dmar_unit *unit, int dev_domain, int dev_busno,
+dmar_match_by_path(struct iommu_unit *unit, int dev_domain, int dev_busno,
const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len, const char **banner)
{
ACPI_DMAR_HARDWARE_UNIT *dmarh;
@@ -764,11 +764,11 @@
return (false);
}
-static struct dmar_unit *
+static struct iommu_unit *
dmar_find_by_scope(int dev_domain, int dev_busno,
const ACPI_DMAR_PCI_PATH *dev_path, int dev_path_len)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
int i;
for (i = 0; i < dmar_devcnt; i++) {
@@ -782,11 +782,11 @@
return (NULL);
}
-struct dmar_unit *
+struct iommu_unit *
dmar_find(device_t dev, bool verbose)
{
device_t dmar_dev;
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
const char *banner;
int i, dev_domain, dev_busno, dev_path_len;
@@ -826,11 +826,11 @@
return (unit);
}
-static struct dmar_unit *
+static struct iommu_unit *
dmar_find_nonpci(u_int id, u_int entry_type, uint16_t *rid)
{
device_t dmar_dev;
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
ACPI_DMAR_HARDWARE_UNIT *dmarh;
ACPI_DMAR_DEVICE_SCOPE *devscope;
ACPI_DMAR_PCI_PATH *path;
@@ -844,7 +844,7 @@
dmar_dev = dmar_devs[i];
if (dmar_dev == NULL)
continue;
- unit = (struct dmar_unit *)device_get_softc(dmar_dev);
+ unit = (struct iommu_unit *)device_get_softc(dmar_dev);
dmarh = dmar_find_by_index(i);
if (dmarh == NULL)
continue;
@@ -890,7 +890,7 @@
}
-struct dmar_unit *
+struct iommu_unit *
dmar_find_hpet(device_t dev, uint16_t *rid)
{
@@ -898,7 +898,7 @@
rid));
}
-struct dmar_unit *
+struct iommu_unit *
dmar_find_ioapic(u_int apic_id, uint16_t *rid)
{
@@ -906,12 +906,12 @@
}
struct rmrr_iter_args {
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
int dev_domain;
int dev_busno;
const ACPI_DMAR_PCI_PATH *dev_path;
int dev_path_len;
- struct dmar_map_entries_tailq *rmrr_entries;
+ struct iommu_map_entries_tailq *rmrr_entries;
};
static int
@@ -920,7 +920,7 @@
struct rmrr_iter_args *ria;
ACPI_DMAR_RESERVED_MEMORY *resmem;
ACPI_DMAR_DEVICE_SCOPE *devscope;
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
char *ptr, *ptrend;
int match;
@@ -956,9 +956,9 @@
}
void
-dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain, int dev_busno,
+dmar_dev_parse_rmrr(struct iommu_domain *domain, int dev_domain, int dev_busno,
const void *dev_path, int dev_path_len,
- struct dmar_map_entries_tailq *rmrr_entries)
+ struct iommu_map_entries_tailq *rmrr_entries)
{
struct rmrr_iter_args ria;
@@ -972,7 +972,7 @@
}
struct inst_rmrr_iter_args {
- struct dmar_unit *dmar;
+ struct iommu_unit *dmar;
};
static device_t
@@ -1004,7 +1004,7 @@
struct inst_rmrr_iter_args *iria;
const char *ptr, *ptrend;
device_t dev;
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
int dev_path_len;
uint16_t rid;
@@ -1059,7 +1059,7 @@
unit = dmar_find(dev, false);
if (iria->dmar != unit)
continue;
- dmar_instantiate_ctx(iria->dmar, dev, true);
+ iommu_instantiate_device(iria->dmar, dev, true);
}
}
@@ -1071,7 +1071,7 @@
* Pre-create all contexts for the DMAR which have RMRR entries.
*/
int
-dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar)
+dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar)
{
struct inst_rmrr_iter_args iria;
int error;
@@ -1082,7 +1082,7 @@
error = 0;
iria.dmar = dmar;
dmar_iterate_tbl(dmar_inst_rmrr_iter, &iria);
- DMAR_LOCK(dmar);
+ IOMMU_LOCK(dmar);
if (!LIST_EMPTY(&dmar->domains)) {
KASSERT((dmar->hw_gcmd & DMAR_GCMD_TE) == 0,
("dmar%d: RMRR not handled but translation is already enabled",
@@ -1107,9 +1107,9 @@
#include <ddb/db_lex.h>
static void
-dmar_print_domain_entry(const struct dmar_map_entry *entry)
+dmar_print_domain_entry(const struct iommu_map_entry *entry)
{
- struct dmar_map_entry *l, *r;
+ struct iommu_map_entry *l, *r;
db_printf(
" start %jx end %jx first %jx last %jx free_down %jx flags %x ",
@@ -1131,22 +1131,22 @@
}
static void
-dmar_print_ctx(struct dmar_ctx *ctx)
+dmar_print_ctx(struct iommu_device *ctx)
{
db_printf(
" @%p pci%d:%d:%d refs %d flags %x loads %lu unloads %lu\n",
- ctx, pci_get_bus(ctx->ctx_tag.owner),
- pci_get_slot(ctx->ctx_tag.owner),
- pci_get_function(ctx->ctx_tag.owner), ctx->refs, ctx->flags,
+ ctx, pci_get_bus(ctx->device_tag.owner),
+ pci_get_slot(ctx->device_tag.owner),
+ pci_get_function(ctx->device_tag.owner), ctx->refs, ctx->flags,
ctx->loads, ctx->unloads);
}
static void
-dmar_print_domain(struct dmar_domain *domain, bool show_mappings)
+dmar_print_domain(struct iommu_domain *domain, bool show_mappings)
{
- struct dmar_map_entry *entry;
- struct dmar_ctx *ctx;
+ struct iommu_map_entry *entry;
+ struct iommu_device *ctx;
db_printf(
" @%p dom %d mgaw %d agaw %d pglvl %d end %jx refs %d\n"
@@ -1177,11 +1177,11 @@
}
}
-DB_FUNC(dmar_domain, db_dmar_print_domain, db_show_table, CS_OWN, NULL)
+DB_FUNC(iommu_domain, db_dmar_print_domain, db_show_table, CS_OWN, NULL)
{
- struct dmar_unit *unit;
- struct dmar_domain *domain;
- struct dmar_ctx *ctx;
+ struct iommu_unit *unit;
+ struct iommu_domain *domain;
+ struct iommu_device *ctx;
bool show_mappings, valid;
int pci_domain, bus, device, function, i, t;
db_expr_t radix;
@@ -1222,7 +1222,7 @@
db_radix = radix;
db_skip_to_eol();
if (!valid) {
- db_printf("usage: show dmar_domain [/m] "
+ db_printf("usage: show iommu_domain [/m] "
"<domain> <bus> <device> <func>\n");
return;
}
@@ -1231,11 +1231,11 @@
LIST_FOREACH(domain, &unit->domains, link) {
LIST_FOREACH(ctx, &domain->contexts, link) {
if (pci_domain == unit->segment &&
- bus == pci_get_bus(ctx->ctx_tag.owner) &&
+ bus == pci_get_bus(ctx->device_tag.owner) &&
device ==
- pci_get_slot(ctx->ctx_tag.owner) &&
+ pci_get_slot(ctx->device_tag.owner) &&
function ==
- pci_get_function(ctx->ctx_tag.owner)) {
+ pci_get_function(ctx->device_tag.owner)) {
dmar_print_domain(domain,
show_mappings);
goto out;
@@ -1249,8 +1249,8 @@
static void
dmar_print_one(int idx, bool show_domains, bool show_mappings)
{
- struct dmar_unit *unit;
- struct dmar_domain *domain;
+ struct iommu_unit *unit;
+ struct iommu_domain *domain;
int i, frir;
unit = device_get_softc(dmar_devs[idx]);
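
Note that the DDB command rename changes the spelling of existing debugging recipes: per the usage string above, one now types, e.g., "show iommu_domain 0 0 3 0" at the db> prompt, or "show iommu_domain/m 0 0 3 0" to also dump the mappings.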
Index: sys/x86/iommu/intel_fault.c
===================================================================
--- sys/x86/iommu/intel_fault.c
+++ sys/x86/iommu/intel_fault.c
@@ -80,7 +80,7 @@
*/
static int
-dmar_fault_next(struct dmar_unit *unit, int faultp)
+dmar_fault_next(struct iommu_unit *unit, int faultp)
{
faultp += 2;
@@ -90,7 +90,7 @@
}
static void
-dmar_fault_intr_clear(struct dmar_unit *unit, uint32_t fsts)
+dmar_fault_intr_clear(struct iommu_unit *unit, uint32_t fsts)
{
uint32_t clear;
@@ -124,7 +124,7 @@
int
dmar_fault_intr(void *arg)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
uint64_t fault_rec[2];
uint32_t fsts;
int fri, frir, faultp;
@@ -190,8 +190,8 @@
static void
dmar_fault_task(void *arg, int pending __unused)
{
- struct dmar_unit *unit;
- struct dmar_ctx *ctx;
+ struct iommu_unit *unit;
+ struct iommu_device *ctx;
uint64_t fault_rec[2];
int sid, bus, slot, func, faultp;
@@ -209,7 +209,7 @@
sid = DMAR_FRCD2_SID(fault_rec[1]);
printf("DMAR%d: ", unit->unit);
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
ctx = dmar_find_ctx_locked(unit, sid);
if (ctx == NULL) {
printf("<unknown dev>:");
@@ -223,15 +223,15 @@
slot = PCI_RID2SLOT(sid);
func = PCI_RID2FUNC(sid);
} else {
- ctx->flags |= DMAR_CTX_FAULTED;
+ ctx->flags |= IOMMU_DEVICE_FAULTED;
ctx->last_fault_rec[0] = fault_rec[0];
ctx->last_fault_rec[1] = fault_rec[1];
- device_print_prettyname(ctx->ctx_tag.owner);
- bus = pci_get_bus(ctx->ctx_tag.owner);
- slot = pci_get_slot(ctx->ctx_tag.owner);
- func = pci_get_function(ctx->ctx_tag.owner);
+ device_print_prettyname(ctx->device_tag.owner);
+ bus = pci_get_bus(ctx->device_tag.owner);
+ slot = pci_get_slot(ctx->device_tag.owner);
+ func = pci_get_function(ctx->device_tag.owner);
}
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
printf(
"pci%d:%d:%d sid %x fault acc %x adt 0x%x reason 0x%x "
"addr %jx\n",
@@ -244,7 +244,7 @@
}
static void
-dmar_clear_faults(struct dmar_unit *unit)
+dmar_clear_faults(struct iommu_unit *unit)
{
uint32_t frec, frir, fsts;
int i;
@@ -261,7 +261,7 @@
}
int
-dmar_init_fault_log(struct dmar_unit *unit)
+dmar_init_fault_log(struct iommu_unit *unit)
{
mtx_init(&unit->fault_lock, "dmarflt", NULL, MTX_SPIN);
@@ -278,25 +278,25 @@
taskqueue_start_threads(&unit->fault_taskqueue, 1, PI_AV,
"dmar%d fault taskq", unit->unit);
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
dmar_disable_fault_intr(unit);
dmar_clear_faults(unit);
dmar_enable_fault_intr(unit);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
return (0);
}
void
-dmar_fini_fault_log(struct dmar_unit *unit)
+dmar_fini_fault_log(struct iommu_unit *unit)
{
if (unit->fault_taskqueue == NULL)
return;
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
dmar_disable_fault_intr(unit);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
taskqueue_drain(unit->fault_taskqueue, &unit->fault_task);
taskqueue_free(unit->fault_taskqueue);
@@ -309,22 +309,22 @@
}
void
-dmar_enable_fault_intr(struct dmar_unit *unit)
+dmar_enable_fault_intr(struct iommu_unit *unit)
{
uint32_t fectl;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
fectl = dmar_read4(unit, DMAR_FECTL_REG);
fectl &= ~DMAR_FECTL_IM;
dmar_write4(unit, DMAR_FECTL_REG, fectl);
}
void
-dmar_disable_fault_intr(struct dmar_unit *unit)
+dmar_disable_fault_intr(struct iommu_unit *unit)
{
uint32_t fectl;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
fectl = dmar_read4(unit, DMAR_FECTL_REG);
dmar_write4(unit, DMAR_FECTL_REG, fectl | DMAR_FECTL_IM);
}
Index: sys/x86/iommu/intel_gas.c
===================================================================
--- sys/x86/iommu/intel_gas.c
+++ sys/x86/iommu/intel_gas.c
@@ -74,27 +74,27 @@
* Guest Address Space management.
*/
-static uma_zone_t dmar_map_entry_zone;
+static uma_zone_t iommu_map_entry_zone;
static void
intel_gas_init(void)
{
- dmar_map_entry_zone = uma_zcreate("DMAR_MAP_ENTRY",
- sizeof(struct dmar_map_entry), NULL, NULL,
+ iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
+ sizeof(struct iommu_map_entry), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);
}
SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);
-struct dmar_map_entry *
-dmar_gas_alloc_entry(struct dmar_domain *domain, u_int flags)
+struct iommu_map_entry *
+dmar_gas_alloc_entry(struct iommu_domain *domain, u_int flags)
{
- struct dmar_map_entry *res;
+ struct iommu_map_entry *res;
KASSERT((flags & ~(DMAR_PGF_WAITOK)) == 0,
("unsupported flags %x", flags));
- res = uma_zalloc(dmar_map_entry_zone, ((flags & DMAR_PGF_WAITOK) !=
+ res = uma_zalloc(iommu_map_entry_zone, ((flags & DMAR_PGF_WAITOK) !=
0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
if (res != NULL) {
res->domain = domain;
@@ -104,18 +104,18 @@
}
void
-dmar_gas_free_entry(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_free_entry(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
KASSERT(domain == entry->domain,
("mismatched free domain %p entry %p entry->domain %p", domain,
entry, entry->domain));
atomic_subtract_int(&domain->entries_cnt, 1);
- uma_zfree(dmar_map_entry_zone, entry);
+ uma_zfree(iommu_map_entry_zone, entry);
}
static int
-dmar_gas_cmp_entries(struct dmar_map_entry *a, struct dmar_map_entry *b)
+dmar_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b)
{
	/* The last entry has zero size, so <= */
@@ -137,9 +137,9 @@
}
static void
-dmar_gas_augment_entry(struct dmar_map_entry *entry)
+dmar_gas_augment_entry(struct iommu_map_entry *entry)
{
- struct dmar_map_entry *child;
+ struct iommu_map_entry *child;
dmar_gaddr_t free_down;
free_down = 0;
@@ -159,14 +159,14 @@
entry->free_down = free_down;
}
-RB_GENERATE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
+RB_GENERATE(dmar_gas_entries_tree, iommu_map_entry, rb_entry,
dmar_gas_cmp_entries);
#ifdef INVARIANTS
static void
-dmar_gas_check_free(struct dmar_domain *domain)
+dmar_gas_check_free(struct iommu_domain *domain)
{
- struct dmar_map_entry *entry, *l, *r;
+ struct iommu_map_entry *entry, *l, *r;
dmar_gaddr_t v;
RB_FOREACH(entry, dmar_gas_entries_tree, &domain->rb_root) {
@@ -190,61 +190,61 @@
#endif
static bool
-dmar_gas_rb_insert(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_rb_insert(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
- struct dmar_map_entry *found;
+ struct iommu_map_entry *found;
found = RB_INSERT(dmar_gas_entries_tree, &domain->rb_root, entry);
return (found == NULL);
}
static void
-dmar_gas_rb_remove(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
}
void
-dmar_gas_init_domain(struct dmar_domain *domain)
+dmar_gas_init_domain(struct iommu_domain *domain)
{
- struct dmar_map_entry *begin, *end;
+ struct iommu_map_entry *begin, *end;
begin = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
end = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain));
KASSERT(RB_EMPTY(&domain->rb_root), ("non-empty entries %p", domain));
begin->start = 0;
begin->end = DMAR_PAGE_SIZE;
- begin->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED;
+ begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
dmar_gas_rb_insert(domain, begin);
end->start = domain->end;
end->end = domain->end;
- end->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED;
+ end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
dmar_gas_rb_insert(domain, end);
domain->first_place = begin;
domain->last_place = end;
domain->flags |= DMAR_DOMAIN_GAS_INITED;
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
}
void
-dmar_gas_fini_domain(struct dmar_domain *domain)
+dmar_gas_fini_domain(struct iommu_domain *domain)
{
- struct dmar_map_entry *entry, *entry1;
+ struct iommu_map_entry *entry, *entry1;
- DMAR_DOMAIN_ASSERT_LOCKED(domain);
+ IOMMU_DOMAIN_ASSERT_LOCKED(domain);
KASSERT(domain->entries_cnt == 2, ("domain still in use %p", domain));
entry = RB_MIN(dmar_gas_entries_tree, &domain->rb_root);
KASSERT(entry->start == 0, ("start entry start %p", domain));
KASSERT(entry->end == DMAR_PAGE_SIZE, ("start entry end %p", domain));
- KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE,
+ KASSERT(entry->flags == IOMMU_MAP_ENTRY_PLACE,
("start entry flags %p", domain));
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
dmar_gas_free_entry(domain, entry);
@@ -252,14 +252,14 @@
entry = RB_MAX(dmar_gas_entries_tree, &domain->rb_root);
KASSERT(entry->start == domain->end, ("end entry start %p", domain));
KASSERT(entry->end == domain->end, ("end entry end %p", domain));
- KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE,
+ KASSERT(entry->flags == IOMMU_MAP_ENTRY_PLACE,
("end entry flags %p", domain));
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
dmar_gas_free_entry(domain, entry);
RB_FOREACH_SAFE(entry, dmar_gas_entries_tree, &domain->rb_root,
entry1) {
- KASSERT((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0,
+ KASSERT((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0,
("non-RMRR entry left %p", domain));
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
dmar_gas_free_entry(domain, entry);
@@ -267,16 +267,16 @@
}
struct dmar_gas_match_args {
- struct dmar_domain *domain;
+ struct iommu_domain *domain;
dmar_gaddr_t size;
int offset;
const struct bus_dma_tag_common *common;
u_int gas_flags;
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
};
/*
- * The interval [beg, end) is a free interval between two dmar_map_entries.
+ * The interval [beg, end) is a free interval between two iommu_map_entries.
* maxaddr is an upper bound on addresses that can be allocated. Try to
* allocate space in the free interval, subject to the conditions expressed
* by a, and return 'true' if and only if the allocation attempt succeeds.
@@ -298,7 +298,7 @@
return (false);
/* No boundary crossing. */
- if (dmar_test_boundary(a->entry->start + a->offset, a->size,
+ if (iommu_test_boundary(a->entry->start + a->offset, a->size,
a->common->boundary))
return (true);
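For readers tracing the boundary logic: iommu_test_boundary() is the rename of dmar_test_boundary() and presumably keeps its semantics. A minimal sketch, assuming the pre-rename behavior (not part of this diff) — the range [start, start + size) must not cross a 'boundary'-aligned address, where boundary is zero (no restriction) or a power of two:

static inline bool
iommu_test_boundary(dmar_gaddr_t start, dmar_gaddr_t size,
    dmar_gaddr_t boundary)
{
	/* A boundary of zero means no restriction. */
	if (boundary == 0)
		return (true);
	/*
	 * Round start up to the next boundary multiple; the range
	 * must end at or before it.
	 */
	return (start + size <= ((start + boundary) & ~(boundary - 1)));
}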
@@ -313,7 +313,7 @@
/* DMAR_PAGE_SIZE to create gap after new entry. */
if (start + a->offset + a->size + DMAR_PAGE_SIZE <= end &&
start + a->offset + a->size <= maxaddr &&
- dmar_test_boundary(start + a->offset, a->size,
+ iommu_test_boundary(start + a->offset, a->size,
a->common->boundary)) {
a->entry->start = start;
return (true);
@@ -327,7 +327,7 @@
* XXXKIB. It is possible that bs is exactly at the start of
 * the next entry, then we do not have a gap. Ignore for now.
*/
- if ((a->gas_flags & DMAR_GM_CANSPLIT) != 0) {
+ if ((a->gas_flags & IOMMU_MF_CANSPLIT) != 0) {
a->size = bs - a->entry->start;
return (true);
}
@@ -353,13 +353,14 @@
found = dmar_gas_rb_insert(a->domain, a->entry);
KASSERT(found, ("found dup %p start %jx size %jx",
a->domain, (uintmax_t)a->entry->start, (uintmax_t)a->size));
- a->entry->flags = DMAR_MAP_ENTRY_MAP;
+ a->entry->flags = IOMMU_MAP_ENTRY_MAP;
}
static int
-dmar_gas_lowermatch(struct dmar_gas_match_args *a, struct dmar_map_entry *entry)
+dmar_gas_lowermatch(struct dmar_gas_match_args *a,
+ struct iommu_map_entry *entry)
{
- struct dmar_map_entry *child;
+ struct iommu_map_entry *child;
child = RB_RIGHT(entry, rb_entry);
if (child != NULL && entry->end < a->common->lowaddr &&
@@ -388,9 +389,10 @@
}
static int
-dmar_gas_uppermatch(struct dmar_gas_match_args *a, struct dmar_map_entry *entry)
+dmar_gas_uppermatch(struct dmar_gas_match_args *a,
+ struct iommu_map_entry *entry)
{
- struct dmar_map_entry *child;
+ struct iommu_map_entry *child;
if (entry->free_down < a->size + a->offset + DMAR_PAGE_SIZE)
return (ENOMEM);
@@ -418,14 +420,14 @@
}
static int
-dmar_gas_find_space(struct dmar_domain *domain,
+dmar_gas_find_space(struct iommu_domain *domain,
const struct bus_dma_tag_common *common, dmar_gaddr_t size,
- int offset, u_int flags, struct dmar_map_entry *entry)
+ int offset, u_int flags, struct iommu_map_entry *entry)
{
struct dmar_gas_match_args a;
int error;
- DMAR_DOMAIN_ASSERT_LOCKED(domain);
+ IOMMU_DOMAIN_ASSERT_LOCKED(domain);
KASSERT(entry->flags == 0, ("dirty entry %p %p", domain, entry));
KASSERT((size & DMAR_PAGE_MASK) == 0, ("size %jx", (uintmax_t)size));
@@ -454,13 +456,13 @@
}
static int
-dmar_gas_alloc_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
- u_int flags)
+dmar_gas_alloc_region(struct iommu_domain *domain,
+ struct iommu_map_entry *entry, u_int flags)
{
- struct dmar_map_entry *next, *prev;
+ struct iommu_map_entry *next, *prev;
bool found;
- DMAR_DOMAIN_ASSERT_LOCKED(domain);
+ IOMMU_DOMAIN_ASSERT_LOCKED(domain);
if ((entry->start & DMAR_PAGE_MASK) != 0 ||
(entry->end & DMAR_PAGE_MASK) != 0)
@@ -485,16 +487,16 @@
* extends both ways.
*/
if (prev != NULL && prev->end > entry->start &&
- (prev->flags & DMAR_MAP_ENTRY_PLACE) == 0) {
- if ((flags & DMAR_GM_RMRR) == 0 ||
- (prev->flags & DMAR_MAP_ENTRY_RMRR) == 0)
+ (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
+ if ((flags & IOMMU_MF_RMRR) == 0 ||
+ (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
return (EBUSY);
entry->start = prev->end;
}
if (next->start < entry->end &&
- (next->flags & DMAR_MAP_ENTRY_PLACE) == 0) {
- if ((flags & DMAR_GM_RMRR) == 0 ||
- (next->flags & DMAR_MAP_ENTRY_RMRR) == 0)
+ (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
+ if ((flags & IOMMU_MF_RMRR) == 0 ||
+ (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
return (EBUSY);
entry->end = next->start;
}
@@ -514,11 +516,11 @@
found = dmar_gas_rb_insert(domain, entry);
KASSERT(found, ("found RMRR dup %p start %jx end %jx",
domain, (uintmax_t)entry->start, (uintmax_t)entry->end));
- if ((flags & DMAR_GM_RMRR) != 0)
- entry->flags = DMAR_MAP_ENTRY_RMRR;
+ if ((flags & IOMMU_MF_RMRR) != 0)
+ entry->flags = IOMMU_MAP_ENTRY_RMRR;
#ifdef INVARIANTS
- struct dmar_map_entry *ip, *in;
+ struct iommu_map_entry *ip, *in;
ip = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry);
in = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry);
KASSERT(prev == NULL || ip == prev,
@@ -537,16 +539,16 @@
}
void
-dmar_gas_free_space(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_free_space(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
- DMAR_DOMAIN_ASSERT_LOCKED(domain);
- KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR |
- DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_MAP,
+ IOMMU_DOMAIN_ASSERT_LOCKED(domain);
+ KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
+ IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP,
("permanent entry %p %p", domain, entry));
dmar_gas_rb_remove(domain, entry);
- entry->flags &= ~DMAR_MAP_ENTRY_MAP;
+ entry->flags &= ~IOMMU_MAP_ENTRY_MAP;
#ifdef INVARIANTS
if (dmar_check_free)
dmar_gas_check_free(domain);
@@ -554,19 +556,19 @@
}
void
-dmar_gas_free_region(struct dmar_domain *domain, struct dmar_map_entry *entry)
+dmar_gas_free_region(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
- struct dmar_map_entry *next, *prev;
+ struct iommu_map_entry *next, *prev;
- DMAR_DOMAIN_ASSERT_LOCKED(domain);
- KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR |
- DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_RMRR,
+ IOMMU_DOMAIN_ASSERT_LOCKED(domain);
+ KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
+ IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR,
("non-RMRR entry %p %p", domain, entry));
prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry);
next = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry);
dmar_gas_rb_remove(domain, entry);
- entry->flags &= ~DMAR_MAP_ENTRY_RMRR;
+ entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;
if (prev == NULL)
dmar_gas_rb_insert(domain, domain->first_place);
@@ -575,25 +577,25 @@
}
int
-dmar_gas_map(struct dmar_domain *domain,
+dmar_gas_map(struct iommu_domain *domain,
const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset,
- u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res)
+ u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
int error;
- KASSERT((flags & ~(DMAR_GM_CANWAIT | DMAR_GM_CANSPLIT)) == 0,
+ KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0,
("invalid flags 0x%x", flags));
- entry = dmar_gas_alloc_entry(domain, (flags & DMAR_GM_CANWAIT) != 0 ?
+ entry = dmar_gas_alloc_entry(domain, (flags & IOMMU_MF_CANWAIT) != 0 ?
DMAR_PGF_WAITOK : 0);
if (entry == NULL)
return (ENOMEM);
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
error = dmar_gas_find_space(domain, common, size, offset, flags,
entry);
if (error == ENOMEM) {
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
dmar_gas_free_entry(domain, entry);
return (error);
}
@@ -606,15 +608,15 @@
KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx",
(uintmax_t)entry->end, (uintmax_t)domain->end));
entry->flags |= eflags;
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
error = domain_map_buf(domain, entry->start, entry->end - entry->start,
ma,
- ((eflags & DMAR_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
- ((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
- ((eflags & DMAR_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
- ((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
- (flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
+ ((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
+ (flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
if (error == ENOMEM) {
dmar_domain_unload_entry(entry, true);
return (error);
@@ -627,7 +629,7 @@
}
int
-dmar_gas_map_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
+dmar_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
u_int eflags, u_int flags, vm_page_t *ma)
{
dmar_gaddr_t start;
@@ -635,28 +637,28 @@
KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
entry, entry->flags));
- KASSERT((flags & ~(DMAR_GM_CANWAIT | DMAR_GM_RMRR)) == 0,
+ KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0,
("invalid flags 0x%x", flags));
start = entry->start;
- DMAR_DOMAIN_LOCK(domain);
+ IOMMU_DOMAIN_LOCK(domain);
error = dmar_gas_alloc_region(domain, entry, flags);
if (error != 0) {
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
return (error);
}
entry->flags |= eflags;
- DMAR_DOMAIN_UNLOCK(domain);
+ IOMMU_DOMAIN_UNLOCK(domain);
if (entry->end == entry->start)
return (0);
error = domain_map_buf(domain, entry->start, entry->end - entry->start,
ma + OFF_TO_IDX(start - entry->start),
- ((eflags & DMAR_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
- ((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
- ((eflags & DMAR_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
- ((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
- (flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
+ ((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
+ ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
+ (flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
if (error == ENOMEM) {
dmar_domain_unload_entry(entry, false);
return (error);
@@ -668,20 +670,20 @@
}
int
-dmar_gas_reserve_region(struct dmar_domain *domain, dmar_gaddr_t start,
+dmar_gas_reserve_region(struct iommu_domain *domain, dmar_gaddr_t start,
dmar_gaddr_t end)
{
- struct dmar_map_entry *entry;
+ struct iommu_map_entry *entry;
int error;
entry = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
entry->start = start;
entry->end = end;
- DMAR_DOMAIN_LOCK(domain);
- error = dmar_gas_alloc_region(domain, entry, DMAR_GM_CANWAIT);
+ IOMMU_DOMAIN_LOCK(domain);
+ error = dmar_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
if (error == 0)
- entry->flags |= DMAR_MAP_ENTRY_UNMAPPED;
- DMAR_DOMAIN_UNLOCK(domain);
+ entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
+ IOMMU_DOMAIN_UNLOCK(domain);
if (error != 0)
dmar_gas_free_entry(domain, entry);
return (error);
Index: sys/x86/iommu/intel_idpgtbl.c
===================================================================
--- sys/x86/iommu/intel_idpgtbl.c
+++ sys/x86/iommu/intel_idpgtbl.c
@@ -69,7 +69,7 @@
#include <dev/pci/pcireg.h>
#include <x86/iommu/intel_dmar.h>
-static int domain_unmap_buf_locked(struct dmar_domain *domain,
+static int domain_unmap_buf_locked(struct iommu_domain *domain,
dmar_gaddr_t base, dmar_gaddr_t size, int flags);
/*
@@ -163,9 +163,9 @@
* maxaddr is typically mapped.
*/
vm_object_t
-domain_get_idmap_pgtbl(struct dmar_domain *domain, dmar_gaddr_t maxaddr)
+domain_get_idmap_pgtbl(struct iommu_domain *domain, dmar_gaddr_t maxaddr)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
struct idpgtbl *tbl;
vm_object_t res;
vm_page_t m;
@@ -194,7 +194,7 @@
sx_slock(&idpgtbl_lock);
LIST_FOREACH(tbl, &idpgtbls, link) {
if (tbl->maxaddr >= maxaddr &&
- dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
+ dmar_pglvl_supported(domain->iommu, tbl->pglvl) &&
tbl->leaf == leaf) {
res = tbl->pgtbl_obj;
vm_object_reference(res);
@@ -213,7 +213,7 @@
sx_xlock(&idpgtbl_lock);
LIST_FOREACH(tbl, &idpgtbls, link) {
if (tbl->maxaddr >= maxaddr &&
- dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
+ dmar_pglvl_supported(domain->iommu, tbl->pglvl) &&
tbl->leaf == leaf) {
res = tbl->pgtbl_obj;
vm_object_reference(res);
@@ -254,7 +254,7 @@
* If DMAR cannot look into the chipset write buffer, flush it
* as well.
*/
- unit = domain->dmar;
+ unit = domain->iommu;
if (!DMAR_IS_COHERENT(unit)) {
VM_OBJECT_WLOCK(res);
for (m = vm_page_lookup(res, 0); m != NULL;
@@ -263,9 +263,9 @@
VM_OBJECT_WUNLOCK(res);
}
if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
dmar_flush_write_bufs(unit);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
}
return (res);
@@ -323,7 +323,7 @@
* the level lvl.
*/
static int
-domain_pgtbl_pte_off(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
+domain_pgtbl_pte_off(struct iommu_domain *domain, dmar_gaddr_t base, int lvl)
{
base >>= DMAR_PAGE_SHIFT + (domain->pglvl - lvl - 1) *
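The hunk context cuts the expression short; reconstructed under the assumption that the function is unchanged apart from the rename, domain_pgtbl_pte_off() computes the PTE index of base at page-table level lvl (level 0 is the root):

static int
domain_pgtbl_pte_off(struct iommu_domain *domain, dmar_gaddr_t base, int lvl)
{
	/*
	 * Shift away the page offset plus nine index bits per level
	 * below lvl, then mask down to a single level's index.
	 */
	base >>= DMAR_PAGE_SHIFT + (domain->pglvl - lvl - 1) *
	    DMAR_NPTEPGSHIFT;
	return (base & DMAR_PTEMASK);
}

Worked instance, with DMAR_PAGE_SHIFT = 12 and DMAR_NPTEPGSHIFT = 9: for a 4-level table (pglvl = 4) at the leaf (lvl = 3) the shift is 12 + 0 * 9 = 12, selecting address bits 12..20; at lvl = 2 the shift is 21, selecting bits 21..29.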
@@ -337,7 +337,7 @@
* lvl.
*/
static vm_pindex_t
-domain_pgtbl_get_pindex(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
+domain_pgtbl_get_pindex(struct iommu_domain *domain, dmar_gaddr_t base, int lvl)
{
vm_pindex_t idx, pidx;
int i;
@@ -353,7 +353,7 @@
}
static dmar_pte_t *
-domain_pgtbl_map_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
+domain_pgtbl_map_pte(struct iommu_domain *domain, dmar_gaddr_t base, int lvl,
int flags, vm_pindex_t *idxp, struct sf_buf **sf)
{
vm_page_t m;
@@ -408,7 +408,7 @@
}
dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
VM_PAGE_TO_PHYS(m));
- dmar_flush_pte_to_ram(domain->dmar, ptep);
+ dmar_flush_pte_to_ram(domain->iommu, ptep);
sf_buf_page(sfp)->ref_count += 1;
m->ref_count--;
dmar_unmap_pgtbl(sfp);
@@ -421,7 +421,7 @@
}
static int
-domain_map_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
+domain_map_buf_locked(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
{
dmar_pte_t *pte;
@@ -489,7 +489,7 @@
}
dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
(superpage ? DMAR_PTE_SP : 0));
- dmar_flush_pte_to_ram(domain->dmar, pte);
+ dmar_flush_pte_to_ram(domain->iommu, pte);
sf_buf_page(sf)->ref_count += 1;
}
if (sf != NULL)
@@ -499,13 +499,13 @@
}
int
-domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size,
- vm_page_t *ma, uint64_t pflags, int flags)
+domain_map_buf(struct iommu_domain *domain, dmar_gaddr_t base,
+ dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
int error;
- unit = domain->dmar;
+ unit = domain->iommu;
KASSERT((domain->flags & DMAR_DOMAIN_IDMAP) == 0,
("modifying idmap pagetable domain %p", domain));
@@ -551,19 +551,19 @@
domain_flush_iotlb_sync(domain, base, size);
else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
/* See 11.1 Write Buffer Flushing. */
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
dmar_flush_write_bufs(unit);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
}
return (0);
}
-static void domain_unmap_clear_pte(struct dmar_domain *domain,
+static void domain_unmap_clear_pte(struct iommu_domain *domain,
dmar_gaddr_t base, int lvl, int flags, dmar_pte_t *pte,
 struct sf_buf **sf, bool free_sf);
static void
-domain_free_pgtbl_pde(struct dmar_domain *domain, dmar_gaddr_t base,
+domain_free_pgtbl_pde(struct iommu_domain *domain, dmar_gaddr_t base,
int lvl, int flags)
{
struct sf_buf *sf;
@@ -576,13 +576,13 @@
}
static void
-domain_unmap_clear_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
+domain_unmap_clear_pte(struct iommu_domain *domain, dmar_gaddr_t base, int lvl,
int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf)
{
vm_page_t m;
dmar_pte_clear(&pte->pte);
- dmar_flush_pte_to_ram(domain->dmar, pte);
+ dmar_flush_pte_to_ram(domain->iommu, pte);
m = sf_buf_page(*sf);
if (free_sf) {
dmar_unmap_pgtbl(*sf);
@@ -605,7 +605,7 @@
* Assumes that the unmap is never partial.
*/
static int
-domain_unmap_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
+domain_unmap_buf_locked(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, int flags)
{
dmar_pte_t *pte;
@@ -677,7 +677,7 @@
}
int
-domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base,
+domain_unmap_buf(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, int flags)
{
int error;
@@ -689,7 +689,7 @@
}
int
-domain_alloc_pgtbl(struct dmar_domain *domain)
+domain_alloc_pgtbl(struct iommu_domain *domain)
{
vm_page_t m;
@@ -704,21 +704,21 @@
/* No implicit free of the top level page table page. */
m->ref_count = 1;
DMAR_DOMAIN_PGUNLOCK(domain);
- DMAR_LOCK(domain->dmar);
+ IOMMU_LOCK(domain->iommu);
domain->flags |= DMAR_DOMAIN_PGTBL_INITED;
- DMAR_UNLOCK(domain->dmar);
+ IOMMU_UNLOCK(domain->iommu);
return (0);
}
void
-domain_free_pgtbl(struct dmar_domain *domain)
+domain_free_pgtbl(struct iommu_domain *domain)
{
vm_object_t obj;
vm_page_t m;
obj = domain->pgtbl_obj;
if (obj == NULL) {
- KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
+ KASSERT((domain->iommu->hw_ecap & DMAR_ECAP_PT) != 0 &&
(domain->flags & DMAR_DOMAIN_IDMAP) != 0,
("lost pagetable object domain %p", domain));
return;
@@ -741,7 +741,7 @@
}
static inline uint64_t
-domain_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
+domain_wait_iotlb_flush(struct iommu_unit *unit, uint64_t wt, int iro)
{
uint64_t iotlbr;
@@ -757,19 +757,19 @@
}
void
-domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
+domain_flush_iotlb_sync(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
dmar_gaddr_t isize;
uint64_t iotlbr;
int am, iro;
- unit = domain->dmar;
+ unit = domain->iommu;
KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
unit->unit));
iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
iotlbr = domain_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM |
DMAR_IOTLB_DID(domain->domain), iro);
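The branch above issues a domain-global IOTLB invalidation when page-selective invalidation is unsupported (no DMAR_CAP_PSI) or the range exceeds 2 MB; otherwise the (elided) else branch chunks the range with calc_am(), as dmar_qi_invalidate_locked() in intel_qi.c below also does. calc_am() picks the largest address-mask value am such that 2^am pages are naturally aligned at base and fit within size. A sketch of that helper, an assumption based on its pre-rename shape (its renamed prototype appears in intel_utils.c later in this diff):

int
calc_am(struct iommu_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
    dmar_gaddr_t *isizep)
{
	dmar_gaddr_t isize;
	int am;

	/*
	 * Start from the largest mask the hardware advertises and
	 * shrink until the chunk is both aligned and covered.
	 */
	for (am = DMAR_CAP_MAMV(unit->hw_cap);; am--) {
		isize = 1ULL << (am + DMAR_PAGE_SHIFT);
		if ((base & (isize - 1)) == 0 && size >= isize)
			break;
		if (am == 0)
			break;
	}
	*isizep = isize;
	return (am);
}

For example, base = 0x201000 with size = 0x5000 is only 4 KB aligned, so am = 0 and isize = 4 KB; the caller advances base by isize and repeats until size is exhausted.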
@@ -799,5 +799,5 @@
break;
}
}
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
}
Index: sys/x86/iommu/intel_intrmap.c
===================================================================
--- sys/x86/iommu/intel_intrmap.c
+++ sys/x86/iommu/intel_intrmap.c
@@ -60,16 +60,16 @@
#include <dev/pci/pcivar.h>
#include <x86/iommu/iommu_intrmap.h>
-static struct dmar_unit *dmar_ir_find(device_t src, uint16_t *rid,
+static struct iommu_unit *dmar_ir_find(device_t src, uint16_t *rid,
int *is_dmar);
-static void dmar_ir_program_irte(struct dmar_unit *unit, u_int idx,
+static void dmar_ir_program_irte(struct iommu_unit *unit, u_int idx,
uint64_t low, uint16_t rid);
-static int dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie);
+static int dmar_ir_free_irte(struct iommu_unit *unit, u_int cookie);
int
iommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
vmem_addr_t vmem_res;
u_int idx, i;
int error;
@@ -98,7 +98,7 @@
iommu_map_msi_intr(device_t src, u_int cpu, u_int vector, u_int cookie,
uint64_t *addr, uint32_t *data)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
uint64_t low;
uint16_t rid;
int is_dmar;
@@ -143,7 +143,7 @@
int
iommu_unmap_msi_intr(device_t src, u_int cookie)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
if (cookie == -1)
return (0);
@@ -155,7 +155,7 @@
iommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
bool activehi, int irq, u_int *cookie, uint32_t *hi, uint32_t *lo)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
vmem_addr_t vmem_res;
uint64_t low, iorte;
u_int idx;
@@ -217,7 +217,7 @@
int
iommu_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
u_int idx;
idx = *cookie;
@@ -230,11 +230,11 @@
return (dmar_ir_free_irte(unit, idx));
}
-static struct dmar_unit *
+static struct iommu_unit *
dmar_ir_find(device_t src, uint16_t *rid, int *is_dmar)
{
devclass_t src_class;
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
/*
* We need to determine if the interrupt source generates FSB
@@ -261,7 +261,7 @@
}
static void
-dmar_ir_program_irte(struct dmar_unit *unit, u_int idx, uint64_t low,
+dmar_ir_program_irte(struct iommu_unit *unit, u_int idx, uint64_t low,
uint16_t rid)
{
dmar_irte_t *irte;
@@ -277,7 +277,7 @@
"programming irte[%d] rid %#x high %#jx low %#jx\n",
idx, rid, (uintmax_t)high, (uintmax_t)low);
}
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
if ((irte->irte1 & DMAR_IRTE1_P) != 0) {
/*
* The rte is already valid. Assume that the request
@@ -294,12 +294,12 @@
dmar_pte_store(&irte->irte1, low);
}
dmar_qi_invalidate_iec(unit, idx, 1);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
}
static int
-dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie)
+dmar_ir_free_irte(struct iommu_unit *unit, u_int cookie)
{
dmar_irte_t *irte;
@@ -310,9 +310,9 @@
irte = &(unit->irt[cookie]);
dmar_pte_clear(&irte->irte1);
dmar_pte_clear(&irte->irte2);
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
dmar_qi_invalidate_iec(unit, cookie, 1);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
vmem_free(unit->irtids, cookie, 1);
return (0);
}
@@ -325,7 +325,7 @@
}
int
-dmar_init_irt(struct dmar_unit *unit)
+dmar_init_irt(struct iommu_unit *unit)
{
if ((unit->hw_ecap & DMAR_ECAP_IR) == 0)
@@ -351,10 +351,10 @@
unit->irt_phys = pmap_kextract((vm_offset_t)unit->irt);
unit->irtids = vmem_create("dmarirt", 0, unit->irte_cnt, 1, 0,
M_FIRSTFIT | M_NOWAIT);
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
dmar_load_irt_ptr(unit);
dmar_qi_invalidate_iec_glob(unit);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
/*
* Initialize mappings for already configured interrupt pins.
@@ -363,14 +363,14 @@
*/
intr_reprogram();
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
dmar_enable_ir(unit);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
return (0);
}
void
-dmar_fini_irt(struct dmar_unit *unit)
+dmar_fini_irt(struct iommu_unit *unit)
{
unit->ir_enabled = 0;
Index: sys/x86/iommu/intel_qi.c
===================================================================
--- sys/x86/iommu/intel_qi.c
+++ sys/x86/iommu/intel_qi.c
@@ -62,7 +62,7 @@
#include <x86/iommu/intel_dmar.h>
static bool
-dmar_qi_seq_processed(const struct dmar_unit *unit,
+dmar_qi_seq_processed(const struct iommu_unit *unit,
const struct dmar_qi_genseq *pseq)
{
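The body of dmar_qi_seq_processed() falls outside the hunk context; plausibly (an assumption, not diff content) it is the usual generation/sequence comparison against the hardware-updated wait counters:

	/*
	 * Assumption: a wait descriptor tagged (gen, seq) has completed
	 * once the hardware counters have passed it; gen disambiguates
	 * 32-bit sequence wrap-around.
	 */
	return (pseq->gen < unit->inv_waitd_gen ||
	    (pseq->gen == unit->inv_waitd_gen &&
	    pseq->seq <= unit->inv_waitd_seq_hw));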
@@ -72,11 +72,11 @@
}
static int
-dmar_enable_qi(struct dmar_unit *unit)
+dmar_enable_qi(struct iommu_unit *unit)
{
int error;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
unit->hw_gcmd |= DMAR_GCMD_QIE;
dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES)
@@ -85,11 +85,11 @@
}
static int
-dmar_disable_qi(struct dmar_unit *unit)
+dmar_disable_qi(struct iommu_unit *unit)
{
int error;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
unit->hw_gcmd &= ~DMAR_GCMD_QIE;
dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES)
@@ -98,20 +98,20 @@
}
static void
-dmar_qi_advance_tail(struct dmar_unit *unit)
+dmar_qi_advance_tail(struct iommu_unit *unit)
{
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
dmar_write4(unit, DMAR_IQT_REG, unit->inv_queue_tail);
}
static void
-dmar_qi_ensure(struct dmar_unit *unit, int descr_count)
+dmar_qi_ensure(struct iommu_unit *unit, int descr_count)
{
uint32_t head;
int bytes;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
bytes = descr_count << DMAR_IQ_DESCR_SZ_SHIFT;
for (;;) {
if (bytes <= unit->inv_queue_avail)
@@ -141,10 +141,10 @@
}
static void
-dmar_qi_emit(struct dmar_unit *unit, uint64_t data1, uint64_t data2)
+dmar_qi_emit(struct iommu_unit *unit, uint64_t data1, uint64_t data2)
{
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
*(volatile uint64_t *)(unit->inv_queue + unit->inv_queue_tail) = data1;
unit->inv_queue_tail += DMAR_IQ_DESCR_SZ / 2;
KASSERT(unit->inv_queue_tail <= unit->inv_queue_size,
@@ -160,11 +160,11 @@
}
static void
-dmar_qi_emit_wait_descr(struct dmar_unit *unit, uint32_t seq, bool intr,
+dmar_qi_emit_wait_descr(struct iommu_unit *unit, uint32_t seq, bool intr,
bool memw, bool fence)
{
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
dmar_qi_emit(unit, DMAR_IQ_DESCR_WAIT_ID |
(intr ? DMAR_IQ_DESCR_WAIT_IF : 0) |
(memw ? DMAR_IQ_DESCR_WAIT_SW : 0) |
@@ -174,14 +174,14 @@
}
static void
-dmar_qi_emit_wait_seq(struct dmar_unit *unit, struct dmar_qi_genseq *pseq,
+dmar_qi_emit_wait_seq(struct iommu_unit *unit, struct dmar_qi_genseq *pseq,
bool emit_wait)
{
struct dmar_qi_genseq gsec;
uint32_t seq;
KASSERT(pseq != NULL, ("wait descriptor with no place for seq"));
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
if (unit->inv_waitd_seq == 0xffffffff) {
gsec.gen = unit->inv_waitd_gen;
gsec.seq = unit->inv_waitd_seq;
@@ -203,11 +203,11 @@
}
static void
-dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct dmar_qi_genseq *gseq,
+dmar_qi_wait_for_seq(struct iommu_unit *unit, const struct dmar_qi_genseq *gseq,
bool nowait)
{
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
unit->inv_seq_waiters++;
while (!dmar_qi_seq_processed(unit, gseq)) {
if (cold || nowait) {
@@ -221,15 +221,15 @@
}
void
-dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t base,
+dmar_qi_invalidate_locked(struct iommu_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, struct dmar_qi_genseq *pseq, bool emit_wait)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
dmar_gaddr_t isize;
int am;
- unit = domain->dmar;
- DMAR_ASSERT_LOCKED(unit);
+ unit = domain->iommu;
+ IOMMU_ASSERT_LOCKED(unit);
for (; size > 0; base += isize, size -= isize) {
am = calc_am(unit, base, size, &isize);
dmar_qi_ensure(unit, 1);
@@ -244,11 +244,11 @@
}
void
-dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit)
+dmar_qi_invalidate_ctx_glob_locked(struct iommu_unit *unit)
{
struct dmar_qi_genseq gseq;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
dmar_qi_ensure(unit, 2);
dmar_qi_emit(unit, DMAR_IQ_DESCR_CTX_INV | DMAR_IQ_DESCR_CTX_GLOB, 0);
dmar_qi_emit_wait_seq(unit, &gseq, true);
@@ -257,11 +257,11 @@
}
void
-dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit)
+dmar_qi_invalidate_iotlb_glob_locked(struct iommu_unit *unit)
{
struct dmar_qi_genseq gseq;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
dmar_qi_ensure(unit, 2);
dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV | DMAR_IQ_DESCR_IOTLB_GLOB |
DMAR_IQ_DESCR_IOTLB_DW | DMAR_IQ_DESCR_IOTLB_DR, 0);
@@ -271,11 +271,11 @@
}
void
-dmar_qi_invalidate_iec_glob(struct dmar_unit *unit)
+dmar_qi_invalidate_iec_glob(struct iommu_unit *unit)
{
struct dmar_qi_genseq gseq;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
dmar_qi_ensure(unit, 2);
dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV, 0);
dmar_qi_emit_wait_seq(unit, &gseq, true);
@@ -284,12 +284,12 @@
}
void
-dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt)
+dmar_qi_invalidate_iec(struct iommu_unit *unit, u_int start, u_int cnt)
{
struct dmar_qi_genseq gseq;
u_int c, l;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
KASSERT(start < unit->irte_cnt && start < start + cnt &&
start + cnt <= unit->irte_cnt,
("inv iec overflow %d %d %d", unit->irte_cnt, start, cnt));
@@ -326,7 +326,7 @@
int
dmar_qi_intr(void *arg)
{
- struct dmar_unit *unit;
+ struct iommu_unit *unit;
unit = arg;
KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled", unit->unit));
@@ -337,13 +337,13 @@
static void
dmar_qi_task(void *arg, int pending __unused)
{
- struct dmar_unit *unit;
- struct dmar_map_entry *entry;
+ struct iommu_unit *unit;
+ struct iommu_map_entry *entry;
uint32_t ics;
unit = arg;
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
for (;;) {
entry = TAILQ_FIRST(&unit->tlb_flush_entries);
if (entry == NULL)
@@ -351,10 +351,10 @@
if (!dmar_qi_seq_processed(unit, &entry->gseq))
break;
TAILQ_REMOVE(&unit->tlb_flush_entries, entry, dmamap_link);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
dmar_domain_free_entry(entry, (entry->flags &
- DMAR_MAP_ENTRY_QI_NF) == 0);
- DMAR_LOCK(unit);
+ IOMMU_MAP_ENTRY_QI_NF) == 0);
+ IOMMU_LOCK(unit);
}
ics = dmar_read4(unit, DMAR_ICS_REG);
if ((ics & DMAR_ICS_IWC) != 0) {
@@ -363,11 +363,11 @@
}
if (unit->inv_seq_waiters > 0)
wakeup(&unit->inv_seq_waiters);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
}
int
-dmar_init_qi(struct dmar_unit *unit)
+dmar_init_qi(struct iommu_unit *unit)
{
uint64_t iqa;
uint32_t ics;
@@ -404,7 +404,7 @@
unit->inv_waitd_seq_hw_phys = pmap_kextract(
(vm_offset_t)&unit->inv_waitd_seq_hw);
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
dmar_write8(unit, DMAR_IQT_REG, 0);
iqa = pmap_kextract(unit->inv_queue);
iqa |= qi_sz;
@@ -416,13 +416,13 @@
dmar_write4(unit, DMAR_ICS_REG, ics);
}
dmar_enable_qi_intr(unit);
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
return (0);
}
void
-dmar_fini_qi(struct dmar_unit *unit)
+dmar_fini_qi(struct iommu_unit *unit)
{
struct dmar_qi_genseq gseq;
@@ -432,7 +432,7 @@
taskqueue_free(unit->qi_taskqueue);
unit->qi_taskqueue = NULL;
- DMAR_LOCK(unit);
+ IOMMU_LOCK(unit);
 /* quiesce */
dmar_qi_ensure(unit, 1);
dmar_qi_emit_wait_seq(unit, &gseq, true);
@@ -443,7 +443,7 @@
dmar_disable_qi(unit);
KASSERT(unit->inv_seq_waiters == 0,
("dmar%d: waiters on disabled queue", unit->unit));
- DMAR_UNLOCK(unit);
+ IOMMU_UNLOCK(unit);
kmem_free(unit->inv_queue, unit->inv_queue_size);
unit->inv_queue = 0;
@@ -452,11 +452,11 @@
}
void
-dmar_enable_qi_intr(struct dmar_unit *unit)
+dmar_enable_qi_intr(struct iommu_unit *unit)
{
uint32_t iectl;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit));
iectl = dmar_read4(unit, DMAR_IECTL_REG);
iectl &= ~DMAR_IECTL_IM;
@@ -464,11 +464,11 @@
}
void
-dmar_disable_qi_intr(struct dmar_unit *unit)
+dmar_disable_qi_intr(struct iommu_unit *unit)
{
uint32_t iectl;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit));
iectl = dmar_read4(unit, DMAR_IECTL_REG);
dmar_write4(unit, DMAR_IECTL_REG, iectl | DMAR_IECTL_IM);
Index: sys/x86/iommu/intel_quirks.c
===================================================================
--- sys/x86/iommu/intel_quirks.c
+++ sys/x86/iommu/intel_quirks.c
@@ -64,7 +64,7 @@
#include <x86/iommu/intel_dmar.h>
#include <dev/pci/pcivar.h>
-typedef void (*dmar_quirk_cpu_fun)(struct dmar_unit *);
+typedef void (*dmar_quirk_cpu_fun)(struct iommu_unit *);
struct intel_dmar_quirk_cpu {
u_int ext_family;
@@ -76,7 +76,7 @@
const char *descr;
};
-typedef void (*dmar_quirk_nb_fun)(struct dmar_unit *, device_t nb);
+typedef void (*dmar_quirk_nb_fun)(struct iommu_unit *, device_t nb);
struct intel_dmar_quirk_nb {
u_int dev_id;
@@ -88,7 +88,7 @@
#define QUIRK_NB_ALL_REV 0xffffffff
static void
-dmar_match_quirks(struct dmar_unit *dmar,
+dmar_match_quirks(struct iommu_unit *dmar,
const struct intel_dmar_quirk_nb *nb_quirks, int nb_quirks_len,
const struct intel_dmar_quirk_cpu *cpu_quirks, int cpu_quirks_len)
{
@@ -149,21 +149,21 @@
}
static void
-nb_5400_no_low_high_prot_mem(struct dmar_unit *unit, device_t nb __unused)
+nb_5400_no_low_high_prot_mem(struct iommu_unit *unit, device_t nb __unused)
{
unit->hw_cap &= ~(DMAR_CAP_PHMR | DMAR_CAP_PLMR);
}
static void
-nb_no_ir(struct dmar_unit *unit, device_t nb __unused)
+nb_no_ir(struct iommu_unit *unit, device_t nb __unused)
{
unit->hw_ecap &= ~(DMAR_ECAP_IR | DMAR_ECAP_EIM);
}
static void
-nb_5500_no_ir_rev13(struct dmar_unit *unit, device_t nb)
+nb_5500_no_ir_rev13(struct iommu_unit *unit, device_t nb)
{
u_int rev_no;
@@ -206,7 +206,7 @@
};
static void
-cpu_e5_am9(struct dmar_unit *unit)
+cpu_e5_am9(struct iommu_unit *unit)
{
unit->hw_cap &= ~(0x3fULL << 48);
@@ -222,19 +222,19 @@
};
void
-dmar_quirks_pre_use(struct dmar_unit *dmar)
+dmar_quirks_pre_use(struct iommu_unit *dmar)
{
if (!dmar_barrier_enter(dmar, DMAR_BARRIER_USEQ))
return;
- DMAR_LOCK(dmar);
+ IOMMU_LOCK(dmar);
dmar_match_quirks(dmar, pre_use_nb, nitems(pre_use_nb),
NULL, 0);
dmar_barrier_exit(dmar, DMAR_BARRIER_USEQ);
}
void
-dmar_quirks_post_ident(struct dmar_unit *dmar)
+dmar_quirks_post_ident(struct iommu_unit *dmar)
{
dmar_match_quirks(dmar, NULL, 0, post_ident_cpu,
Index: sys/x86/iommu/intel_reg.h
===================================================================
--- sys/x86/iommu/intel_reg.h
+++ sys/x86/iommu/intel_reg.h
@@ -34,12 +34,16 @@
#ifndef __X86_IOMMU_INTEL_REG_H
#define __X86_IOMMU_INTEL_REG_H
-#define DMAR_PAGE_SIZE PAGE_SIZE
-#define DMAR_PAGE_MASK (DMAR_PAGE_SIZE - 1)
-#define DMAR_PAGE_SHIFT PAGE_SHIFT
-#define DMAR_NPTEPG (DMAR_PAGE_SIZE / sizeof(dmar_pte_t))
-#define DMAR_NPTEPGSHIFT 9
-#define DMAR_PTEMASK (DMAR_NPTEPG - 1)
+#define DMAR_PAGE_SIZE PAGE_SIZE
+#define DMAR_PAGE_MASK (DMAR_PAGE_SIZE - 1)
+#define DMAR_PAGE_SHIFT PAGE_SHIFT
+#define DMAR_NPTEPG (DMAR_PAGE_SIZE / sizeof(dmar_pte_t))
+#define DMAR_NPTEPGSHIFT 9
+#define DMAR_PTEMASK (DMAR_NPTEPG - 1)
+
+/* For the iommu generic busdma backend. */
+#define IOMMU_PAGE_SIZE DMAR_PAGE_SIZE
+#define IOMMU_PAGE_MASK DMAR_PAGE_MASK
typedef struct dmar_root_entry {
uint64_t r1;
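The two new aliases above let the machine-independent busdma backend round and mask DMA ranges without spelling DMAR names. A minimal usage sketch with a hypothetical helper (not part of this diff); roundup2() is the stock sys/param.h macro:

/* Hypothetical example: compute the page-aligned extent of a buffer. */
static void
iommu_page_extent(vm_offset_t addr, bus_size_t len, vm_offset_t *basep,
    bus_size_t *sizep)
{
	/* Truncate the start down and round the end up to page size. */
	*basep = addr & ~(vm_offset_t)IOMMU_PAGE_MASK;
	*sizep = roundup2(addr + len, IOMMU_PAGE_SIZE) - *basep;
}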
@@ -51,10 +55,10 @@
#define DMAR_CTX_CNT (DMAR_PAGE_SIZE / sizeof(dmar_root_entry_t))
-typedef struct dmar_ctx_entry {
+typedef struct iommu_device_entry {
uint64_t ctx1;
uint64_t ctx2;
-} dmar_ctx_entry_t;
+} iommu_device_entry_t;
#define DMAR_CTX1_P 1 /* Present */
#define DMAR_CTX1_FPD 2 /* Fault Processing Disable */
/* Translation Type: */
Index: sys/x86/iommu/intel_utils.c
===================================================================
--- sys/x86/iommu/intel_utils.c
+++ sys/x86/iommu/intel_utils.c
@@ -106,7 +106,7 @@
};
bool
-dmar_pglvl_supported(struct dmar_unit *unit, int pglvl)
+dmar_pglvl_supported(struct iommu_unit *unit, int pglvl)
{
int i;
@@ -120,12 +120,12 @@
}
int
-domain_set_agaw(struct dmar_domain *domain, int mgaw)
+domain_set_agaw(struct iommu_domain *domain, int mgaw)
{
int sagaw, i;
domain->mgaw = mgaw;
- sagaw = DMAR_CAP_SAGAW(domain->dmar->hw_cap);
+ sagaw = DMAR_CAP_SAGAW(domain->iommu->hw_cap);
for (i = 0; i < nitems(sagaw_bits); i++) {
if (sagaw_bits[i].agaw >= mgaw) {
domain->agaw = sagaw_bits[i].agaw;
@@ -134,7 +134,7 @@
return (0);
}
}
- device_printf(domain->dmar->dev,
+ device_printf(domain->iommu->dev,
"context request mgaw %d: no agaw found, sagaw %x\n",
mgaw, sagaw);
return (EINVAL);
@@ -148,7 +148,8 @@
 * address space, accept the biggest sagaw, whatever it is.
*/
int
-dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr, bool allow_less)
+dmar_maxaddr2mgaw(struct iommu_unit *unit, dmar_gaddr_t maxaddr,
+ bool allow_less)
{
int i;
@@ -192,7 +193,7 @@
* the context ctx.
*/
int
-domain_is_sp_lvl(struct dmar_domain *domain, int lvl)
+domain_is_sp_lvl(struct iommu_domain *domain, int lvl)
{
int alvl, cap_sps;
static const int sagaw_sp[] = {
@@ -203,7 +204,7 @@
};
alvl = domain->pglvl - lvl - 1;
- cap_sps = DMAR_CAP_SPS(domain->dmar->hw_cap);
+ cap_sps = DMAR_CAP_SPS(domain->iommu->hw_cap);
return (alvl < nitems(sagaw_sp) && (sagaw_sp[alvl] & cap_sps) != 0);
}
@@ -228,14 +229,14 @@
}
dmar_gaddr_t
-domain_page_size(struct dmar_domain *domain, int lvl)
+domain_page_size(struct iommu_domain *domain, int lvl)
{
return (pglvl_page_size(domain->pglvl, lvl));
}
int
-calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
+calc_am(struct iommu_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
dmar_gaddr_t *isizep)
{
dmar_gaddr_t isize;
@@ -360,7 +361,7 @@
}
static void
-dmar_flush_transl_to_ram(struct dmar_unit *unit, void *dst, size_t sz)
+dmar_flush_transl_to_ram(struct iommu_unit *unit, void *dst, size_t sz)
{
if (DMAR_IS_COHERENT(unit))
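The rest of this function is cut off by the hunk context; presumably (an assumption, consistent with the coherency checks elsewhere in this diff) it returns early on coherent hardware and otherwise forces the translation-structure bytes out of the CPU caches, roughly:

	if (DMAR_IS_COHERENT(unit))
		return;
	/*
	 * Assumption: flush the bytes from CPU caches so a
	 * non-coherent DMAR observes the update.
	 */
	pmap_force_invalidate_cache_range((vm_offset_t)dst,
	    (vm_offset_t)dst + sz);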
@@ -373,21 +374,21 @@
}
void
-dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst)
+dmar_flush_pte_to_ram(struct iommu_unit *unit, dmar_pte_t *dst)
{
dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
}
void
-dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst)
+dmar_flush_ctx_to_ram(struct iommu_unit *unit, iommu_device_entry_t *dst)
{
dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
}
void
-dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst)
+dmar_flush_root_to_ram(struct iommu_unit *unit, dmar_root_entry_t *dst)
{
dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
@@ -398,7 +399,7 @@
* the completion.
*/
int
-dmar_load_root_entry_ptr(struct dmar_unit *unit)
+dmar_load_root_entry_ptr(struct iommu_unit *unit)
{
vm_page_t root_entry;
int error;
@@ -407,7 +408,7 @@
* Access to the GCMD register must be serialized while the
* command is submitted.
*/
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
VM_OBJECT_RLOCK(unit->ctx_obj);
root_entry = vm_page_lookup(unit->ctx_obj, 0);
@@ -424,7 +425,7 @@
* the completion.
*/
int
-dmar_inv_ctx_glob(struct dmar_unit *unit)
+dmar_inv_ctx_glob(struct iommu_unit *unit)
{
int error;
@@ -432,7 +433,7 @@
* Access to the CCMD register must be serialized while the
* command is submitted.
*/
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
KASSERT(!unit->qi_enabled, ("QI enabled"));
/*
@@ -451,11 +452,11 @@
* Globally invalidate the IOTLB, busily waiting for the completion.
*/
int
-dmar_inv_iotlb_glob(struct dmar_unit *unit)
+dmar_inv_iotlb_glob(struct iommu_unit *unit)
{
int error, reg;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
KASSERT(!unit->qi_enabled, ("QI enabled"));
reg = 16 * DMAR_ECAP_IRO(unit->hw_ecap);
@@ -472,11 +473,11 @@
* in the architecture specification.
*/
int
-dmar_flush_write_bufs(struct dmar_unit *unit)
+dmar_flush_write_bufs(struct iommu_unit *unit)
{
int error;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
/*
* DMAR_GCMD_WBF is only valid when CAP_RWBF is reported.
@@ -491,11 +492,11 @@
}
int
-dmar_enable_translation(struct dmar_unit *unit)
+dmar_enable_translation(struct iommu_unit *unit)
{
int error;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
unit->hw_gcmd |= DMAR_GCMD_TE;
dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_TES)
@@ -504,11 +505,11 @@
}
int
-dmar_disable_translation(struct dmar_unit *unit)
+dmar_disable_translation(struct iommu_unit *unit)
{
int error;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
unit->hw_gcmd &= ~DMAR_GCMD_TE;
dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_TES)
@@ -517,12 +518,12 @@
}
int
-dmar_load_irt_ptr(struct dmar_unit *unit)
+dmar_load_irt_ptr(struct iommu_unit *unit)
{
uint64_t irta, s;
int error;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
irta = unit->irt_phys;
if (DMAR_X2APIC(unit))
irta |= DMAR_IRTA_EIME;
@@ -539,11 +540,11 @@
}
int
-dmar_enable_ir(struct dmar_unit *unit)
+dmar_enable_ir(struct iommu_unit *unit)
{
int error;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
unit->hw_gcmd |= DMAR_GCMD_IRE;
unit->hw_gcmd &= ~DMAR_GCMD_CFI;
dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
@@ -553,11 +554,11 @@
}
int
-dmar_disable_ir(struct dmar_unit *unit)
+dmar_disable_ir(struct iommu_unit *unit)
{
int error;
- DMAR_ASSERT_LOCKED(unit);
+ IOMMU_ASSERT_LOCKED(unit);
unit->hw_gcmd &= ~DMAR_GCMD_IRE;
dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_IRES)
@@ -573,13 +574,13 @@
f_wakeup = 1 << (barrier_id * 3 + 2)
bool
-dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id)
+dmar_barrier_enter(struct iommu_unit *dmar, u_int barrier_id)
{
BARRIER_F;
- DMAR_LOCK(dmar);
+ IOMMU_LOCK(dmar);
if ((dmar->barrier_flags & f_done) != 0) {
- DMAR_UNLOCK(dmar);
+ IOMMU_UNLOCK(dmar);
return (false);
}
@@ -591,28 +592,28 @@
}
KASSERT((dmar->barrier_flags & f_done) != 0,
("dmar%d barrier %d missing done", dmar->unit, barrier_id));
- DMAR_UNLOCK(dmar);
+ IOMMU_UNLOCK(dmar);
return (false);
}
dmar->barrier_flags |= f_inproc;
- DMAR_UNLOCK(dmar);
+ IOMMU_UNLOCK(dmar);
return (true);
}
void
-dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id)
+dmar_barrier_exit(struct iommu_unit *dmar, u_int barrier_id)
{
BARRIER_F;
- DMAR_ASSERT_LOCKED(dmar);
+ IOMMU_ASSERT_LOCKED(dmar);
KASSERT((dmar->barrier_flags & (f_done | f_inproc)) == f_inproc,
("dmar%d barrier %d missed entry", dmar->unit, barrier_id));
dmar->barrier_flags |= f_done;
if ((dmar->barrier_flags & f_wakeup) != 0)
wakeup(&dmar->barrier_flags);
dmar->barrier_flags &= ~(f_inproc | f_wakeup);
- DMAR_UNLOCK(dmar);
+ IOMMU_UNLOCK(dmar);
}
int dmar_batch_coalesce = 100;
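To follow the flag arithmetic in the barrier hunk above: each barrier_id owns three adjacent bits of dmar->barrier_flags. A sketch of the full BARRIER_F macro, reconstructed under that assumption from the one line visible in the hunk context:

/* Assumption: declares and derives the three per-id barrier flags. */
#define	BARRIER_F				\
	u_int f_done, f_inproc, f_wakeup;	\
						\
	f_done = 1 << (barrier_id * 3);		\
	f_inproc = 1 << (barrier_id * 3 + 1);	\
	f_wakeup = 1 << (barrier_id * 3 + 2)

For barrier_id == 1 this yields bits 3, 4, and 5 (0x08, 0x10, 0x20).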