Page MenuHomeFreeBSD

D24618.id71155.diff
No OneTemporary

D24618.id71155.diff

Index: sys/arm64/acpica/acpi_iort.c
===================================================================
--- sys/arm64/acpica/acpi_iort.c
+++ sys/arm64/acpica/acpi_iort.c
@@ -160,7 +160,7 @@
if (i == node->nentries)
return (NULL);
if ((entry->flags & ACPI_IORT_ID_SINGLE_MAPPING) == 0)
- *outid = entry->outbase + (id - entry->base);
+ *outid = entry->outbase + (id - entry->base);
else
*outid = entry->outbase;
return (entry->out_node);
@@ -564,3 +564,22 @@
*xref = node->entries.its[0].xref;
return (0);
}
+
+int
+acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, u_int *xref, u_int *sid)
+{
+ ACPI_IORT_SMMU_V3 *smmu;
+ struct iort_node *node;
+
+ node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_SMMU_V3, sid);
+ if (node == NULL)
+ return (ENOENT);
+
+ /* This should be an SMMU node. */
+ KASSERT(node->type == ACPI_IORT_NODE_SMMU_V3, ("bad node"));
+
+ smmu = (ACPI_IORT_SMMU_V3 *)&node->data.smmu_v3;
+ *xref = smmu->BaseAddress;
+
+ return (0);
+}
Index: sys/arm64/arm64/busdma_bounce.c
===================================================================
--- sys/arm64/arm64/busdma_bounce.c
+++ sys/arm64/arm64/busdma_bounce.c
@@ -59,6 +59,8 @@
#include <machine/md_var.h>
#include <arm64/include/bus_dma_impl.h>
+#include <dev/iommu/iommu.h>
+
#define MAX_BPAGES 4096
enum {
@@ -68,16 +70,6 @@
BF_COHERENT = 0x10,
};
-struct bounce_zone;
-
-struct bus_dma_tag {
- struct bus_dma_tag_common common;
- int map_count;
- int bounce_flags;
- bus_dma_segment_t *segments;
- struct bounce_zone *bounce_zone;
-};
-
struct bounce_page {
vm_offset_t vaddr; /* kva of bounce buffer */
bus_addr_t busaddr; /* Physical address */
@@ -138,6 +130,8 @@
#define DMAMAP_COULD_BOUNCE (1 << 0)
#define DMAMAP_FROM_DMAMEM (1 << 1)
int sync_count;
+ int nsegs;
+ bus_dma_segment_t *segments;
struct sync_list slist[];
};
@@ -185,7 +179,6 @@
newtag->common.impl = &bus_dma_bounce_impl;
newtag->map_count = 0;
- newtag->segments = NULL;
if ((flags & BUS_DMA_COHERENT) != 0)
newtag->bounce_flags |= BF_COHERENT;
@@ -197,6 +190,8 @@
/* Copy some flags from the parent */
newtag->bounce_flags |= parent->bounce_flags & BF_COHERENT;
+ newtag->iommu_domain = parent->iommu_domain;
+ newtag->owner = parent->owner;
}
if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
@@ -241,6 +236,7 @@
static int
bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
+ struct iommu_domain *domain;
bus_dma_tag_t dmat_copy, parent;
int error;
@@ -256,8 +252,9 @@
parent = (bus_dma_tag_t)dmat->common.parent;
atomic_subtract_int(&dmat->common.ref_count, 1);
if (dmat->common.ref_count == 0) {
- if (dmat->segments != NULL)
- free(dmat->segments, M_DEVBUF);
+ domain = dmat->iommu_domain;
+ if (domain && dmat == domain->tag)
+ bounce_smmu_domain_free(dmat);
free(dmat, M_DEVBUF);
/*
* Last reference count, so
@@ -295,6 +292,17 @@
if (map == NULL)
return (NULL);
+ map->segments = (bus_dma_segment_t *)malloc(
+ sizeof(bus_dma_segment_t) * dmat->common.nsegments,
+ M_DEVBUF, M_NOWAIT);
+ if (map->segments == NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+ __func__, dmat, ENOMEM);
+ free(map, M_DEVBUF);
+ return (NULL);
+ }
+ map->nsegs = 0;
+
/* Initialize the new map */
STAILQ_INIT(&map->bpages);
@@ -313,17 +321,6 @@
error = 0;
- if (dmat->segments == NULL) {
- dmat->segments = (bus_dma_segment_t *)malloc(
- sizeof(bus_dma_segment_t) * dmat->common.nsegments,
- M_DEVBUF, M_NOWAIT);
- if (dmat->segments == NULL) {
- CTR3(KTR_BUSDMA, "%s: tag %p error %d",
- __func__, dmat, ENOMEM);
- return (ENOMEM);
- }
- }
-
*mapp = alloc_dmamap(dmat, M_NOWAIT);
if (*mapp == NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
@@ -340,6 +337,7 @@
/* Must bounce */
if (dmat->bounce_zone == NULL) {
if ((error = alloc_bounce_zone(dmat)) != 0) {
+ free((*mapp)->segments, M_DEVBUF);
free(*mapp, M_DEVBUF);
return (error);
}
@@ -377,8 +375,10 @@
}
if (error == 0)
dmat->map_count++;
- else
+ else {
+ free((*mapp)->segments, M_DEVBUF);
free(*mapp, M_DEVBUF);
+ }
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->common.flags, error);
return (error);
@@ -405,6 +405,7 @@
("%s: Bounce zone when cannot bounce", __func__));
dmat->bounce_zone->map_count--;
}
+ free(map->segments, M_DEVBUF);
free(map, M_DEVBUF);
dmat->map_count--;
CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
@@ -436,16 +437,6 @@
else
mflags = M_WAITOK;
- if (dmat->segments == NULL) {
- dmat->segments = (bus_dma_segment_t *)malloc(
- sizeof(bus_dma_segment_t) * dmat->common.nsegments,
- M_DEVBUF, mflags);
- if (dmat->segments == NULL) {
- CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
- __func__, dmat, dmat->common.flags, ENOMEM);
- return (ENOMEM);
- }
- }
if (flags & BUS_DMA_ZERO)
mflags |= M_ZERO;
if (flags & BUS_DMA_NOCACHE)
@@ -515,6 +506,7 @@
if (*vaddr == NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->common.flags, ENOMEM);
+ free((*mapp)->segments, M_DEVBUF);
free(*mapp, M_DEVBUF);
return (ENOMEM);
} else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
@@ -719,7 +711,7 @@
int error;
if (segs == NULL)
- segs = dmat->segments;
+ segs = map->segments;
if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) {
_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
@@ -790,7 +782,7 @@
int error;
if (segs == NULL)
- segs = dmat->segments;
+ segs = map->segments;
if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) {
_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
@@ -893,9 +885,26 @@
bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, int error)
{
+ int ret;
+
+ map->nsegs = nsegs;
+
+ if (segs != NULL)
+ memcpy(map->segments, segs, map->nsegs * sizeof(segs[0]));
+
+ if (dmat->iommu_domain) {
+ ret = iommu_map(dmat->iommu_domain, map->segments, nsegs);
+ if (ret) {
+ printf("iommu failed to map page: error %d\n", ret);
+ /* TODO: handle this case. */
+ }
+ }
+
+ if (segs != NULL)
+ memcpy(segs, map->segments, map->nsegs * sizeof(segs[0]));
+ else
+ segs = map->segments;
- if (segs == NULL)
- segs = dmat->segments;
return (segs);
}
@@ -907,6 +916,11 @@
{
struct bounce_page *bpage;
+ if (dmat->iommu_domain) {
+ iommu_unmap(dmat->iommu_domain, map->segments, map->nsegs);
+ map->nsegs = 0;
+ }
+
while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
STAILQ_REMOVE_HEAD(&map->bpages, links);
free_bounce_page(dmat, bpage);
Index: sys/arm64/arm64/busdma_bounce_smmu.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/busdma_bounce_smmu.c
@@ -0,0 +1,204 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+
+#include <machine/bus.h>
+#include <arm64/include/bus_dma_impl.h>
+
+#include <dev/iommu/iommu.h>
+#include <dev/pci/pcivar.h>
+
+#ifdef DEV_ACPI
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpi_pcibvar.h>
+#endif
+
+#define GICV3_ITS_PAGE 0x300b0000
+
+static MALLOC_DEFINE(M_BUSDMA, "bounce SMMU", "ARM64 busdma bounce SMMU");
+
+static void
+bounce_smmu_tag_init(struct bus_dma_tag *t)
+{
+ bus_addr_t maxaddr;
+
+ maxaddr = BUS_SPACE_MAXADDR;
+
+ t->common.ref_count = 0;
+ t->common.impl = &bus_dma_bounce_impl;
+ t->common.boundary = 0;
+ t->common.lowaddr = maxaddr;
+ t->common.highaddr = maxaddr;
+ t->common.maxsize = maxaddr;
+ t->common.nsegments = BUS_SPACE_UNRESTRICTED;
+ t->common.maxsegsz = maxaddr;
+}
+
+int
+bounce_smmu_domain_free(bus_dma_tag_t dmat)
+{
+ struct iommu_domain *domain;
+ int error;
+
+ domain = iommu_get_domain_for_dev(dmat->owner);
+ if (domain == NULL)
+ return (0);
+
+ error = iommu_device_detach(domain, dmat->owner);
+ if (error) {
+ device_printf(dmat->owner,
+ "Could not detach a device from IOMMU domain.\n");
+ return (error);
+ }
+
+ /* Unmap the GICv3 ITS page. */
+ error = iommu_unmap_page(dmat->iommu_domain, GICV3_ITS_PAGE);
+ if (error) {
+ device_printf(dmat->owner,
+ "Could not unmap GICv3 ITS page.\n");
+ return (error);
+ }
+
+ error = iommu_domain_free(domain);
+ if (error) {
+ device_printf(dmat->owner,
+ "Could not deallocate IOMMU domain.\n");
+ return (error);
+ }
+
+ return (0);
+}
+
+static struct iommu_domain *
+bounce_smmu_domain_alloc(device_t dev)
+{
+ struct iommu_domain *domain;
+ struct iommu *iommu;
+ u_int xref, sid;
+ uint16_t rid;
+ int error;
+ int seg;
+
+ rid = pci_get_rid(dev);
+ seg = pci_get_domain(dev);
+
+ /*
+ * Find an xref of an IOMMU controller that serves traffic for dev.
+ */
+#ifdef DEV_ACPI
+ error = acpi_iort_map_pci_smmuv3(seg, rid, &xref, &sid);
+ if (error) {
+ /* Could not find reference to an SMMU device. */
+ return (NULL);
+ }
+#else
+ /* TODO: add FDT support. */
+ return (NULL);
+#endif
+
+ /*
+ * Find the registered IOMMU controller by xref.
+ */
+ iommu = iommu_lookup(xref, 0);
+ if (iommu == NULL) {
+ /* SMMU device is not registered in the IOMMU framework. */
+ return (NULL);
+ }
+
+ domain = iommu_domain_alloc(iommu);
+ if (domain == NULL)
+ return (NULL);
+
+ /* Add some virtual address range for this domain. */
+ iommu_domain_add_va_range(domain, 0x40000000, 0x40000000);
+
+ /* Map the GICv3 ITS page so the device could send MSI interrupts. */
+ iommu_map_page(domain, GICV3_ITS_PAGE, GICV3_ITS_PAGE, VM_PROT_WRITE);
+
+ return (domain);
+}
+
+bus_dma_tag_t
+bounce_smmu_get_dma_tag(device_t dev, device_t child)
+{
+ struct iommu_domain *domain;
+ devclass_t pci_class;
+ bus_dma_tag_t tag;
+ int error;
+
+ pci_class = devclass_find("pci");
+ if (device_get_devclass(device_get_parent(child)) != pci_class)
+ return (NULL);
+
+ domain = iommu_get_domain_for_dev(child);
+ if (domain)
+ return (domain->tag);
+
+ /* A single device per domain. */
+
+ domain = bounce_smmu_domain_alloc(child);
+ if (!domain)
+ return (NULL);
+
+ tag = malloc(sizeof(*tag), M_BUSDMA, M_WAITOK | M_ZERO);
+ if (!tag) {
+ iommu_domain_free(domain);
+ return (NULL);
+ }
+
+ bounce_smmu_tag_init(tag);
+ tag->owner = child;
+ tag->iommu_domain = domain;
+ domain->tag = tag;
+
+ error = iommu_device_attach(domain, child);
+ if (error) {
+ free(tag, M_BUSDMA);
+ iommu_domain_free(domain);
+ return (NULL);
+ }
+
+ return (tag);
+}
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -3323,6 +3323,230 @@
}
#endif /* VM_NRESERVLEVEL > 0 */
+/*
+ * Preallocate l1, l2 page directories for a specific VA range.
+ * This is optional and not in use currently.
+ */
+int
+pmap_bootstrap_smmu(pmap_t pmap, vm_offset_t sva, int count)
+{
+ struct rwlock *lock;
+ pd_entry_t *pde;
+ vm_page_t mpte;
+ vm_offset_t va;
+ int lvl;
+ int i;
+
+ lock = NULL;
+ PMAP_LOCK(pmap);
+
+ va = sva;
+	for (i = 0; i < count; i++) {
+		pde = pmap_pde(pmap, va, &lvl);
+		if (pde != NULL && lvl == 2) {
+			/* Drop the locks before the early return. */
+			if (lock != NULL)
+				rw_wunlock(lock);
+			PMAP_UNLOCK(pmap);
+			return (EEXIST);
+		}
+		mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va), &lock);
+		if (mpte == NULL) {
+			if (lock != NULL)
+				rw_wunlock(lock);
+			PMAP_UNLOCK(pmap);
+			return (ENOMEM);
+		}
+		va += L2_SIZE;
+	}
+
+ if (lock != NULL)
+ rw_wunlock(lock);
+ PMAP_UNLOCK(pmap);
+
+ return (0);
+}
+
+/*
+ * Add a single SMMU entry. This function does not sleep.
+ */
+int
+pmap_senter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
+ vm_prot_t prot, u_int flags)
+{
+ pd_entry_t *pde;
+ pt_entry_t new_l3, orig_l3;
+ pt_entry_t *l3;
+ vm_page_t mpte;
+ int lvl;
+ int rv;
+
+ PMAP_ASSERT_STAGE1(pmap);
+ KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
+
+ va = trunc_page(va);
+ new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
+ ATTR_S1_IDX(VM_MEMATTR_DEVICE) | L3_PAGE);
+ if ((prot & VM_PROT_WRITE) == 0)
+ new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
+ new_l3 |= ATTR_S1_XN; /* Execute never. */
+ new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER);
+ new_l3 |= ATTR_S1_nG; /* Non global. */
+
+ CTR2(KTR_PMAP, "pmap_senter: %.16lx -> %.16lx", va, pa);
+
+ PMAP_LOCK(pmap);
+
+ /*
+ * In the case that a page table page is not
+ * resident, we are creating it here.
+ */
+retry:
+ pde = pmap_pde(pmap, va, &lvl);
+ if (pde != NULL && lvl == 2) {
+ l3 = pmap_l2_to_l3(pde, va);
+ } else {
+ mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va), NULL);
+ if (mpte == NULL) {
+			CTR0(KTR_PMAP, "pmap_senter: mpte == NULL");
+ rv = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ goto retry;
+ }
+
+ orig_l3 = pmap_load(l3);
+ KASSERT(!pmap_l3_valid(orig_l3), ("l3 is valid"));
+
+ /* New mapping */
+ pmap_store(l3, new_l3);
+ dsb(ishst);
+
+ rv = KERN_SUCCESS;
+out:
+ PMAP_UNLOCK(pmap);
+
+ return (rv);
+}
+
+/*
+ * Remove a single SMMU entry.
+ */
+int
+pmap_sremove(pmap_t pmap, vm_offset_t va)
+{
+ pt_entry_t *pte;
+ int lvl;
+ int rc;
+
+ PMAP_LOCK(pmap);
+
+ pte = pmap_pte(pmap, va, &lvl);
+ KASSERT(lvl == 3,
+ ("Invalid SMMU pagetable level: %d != 3", lvl));
+
+	if (pte != NULL) {
+		pmap_clear(pte);
+		rc = KERN_SUCCESS;
+	} else
+		rc = KERN_FAILURE;
+
+ PMAP_UNLOCK(pmap);
+
+ return (rc);
+}
+
+/*
+ * Remove all the allocated L1, L2 pages from SMMU pmap.
+ * All the L3 entries must be cleared in advance, otherwise
+ * this function returns error.
+ */
+int
+pmap_sremove_all(pmap_t pmap)
+{
+ pd_entry_t l0e, *l1, l1e, *l2, l2e;
+ pt_entry_t *l3, l3e;
+ vm_offset_t sva;
+ vm_paddr_t pa;
+ vm_paddr_t pa0;
+ vm_paddr_t pa1;
+ int i, j, k, l;
+ vm_page_t m;
+ vm_page_t m0;
+ vm_page_t m1;
+ int rc;
+
+ PMAP_LOCK(pmap);
+
+ for (sva = VM_MINUSER_ADDRESS, i = pmap_l0_index(sva); i < Ln_ENTRIES;
+ i++) {
+ l0e = pmap->pm_l0[i];
+ if ((l0e & ATTR_DESCR_VALID) == 0) {
+ sva += L0_SIZE;
+ continue;
+ }
+ pa0 = l0e & ~ATTR_MASK;
+ m0 = PHYS_TO_VM_PAGE(pa0);
+ l1 = (pd_entry_t *)PHYS_TO_DMAP(pa0);
+
+ for (j = pmap_l1_index(sva); j < Ln_ENTRIES; j++) {
+ l1e = l1[j];
+ if ((l1e & ATTR_DESCR_VALID) == 0) {
+ sva += L1_SIZE;
+ continue;
+ }
+ if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) {
+ sva += L1_SIZE;
+ continue;
+ }
+ pa1 = l1e & ~ATTR_MASK;
+ m1 = PHYS_TO_VM_PAGE(pa1);
+ l2 = (pd_entry_t *)PHYS_TO_DMAP(pa1);
+
+ for (k = pmap_l2_index(sva); k < Ln_ENTRIES; k++) {
+ l2e = l2[k];
+ if ((l2e & ATTR_DESCR_VALID) == 0) {
+ sva += L2_SIZE;
+ continue;
+ }
+ if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) {
+ sva += L2_SIZE;
+ continue;
+ }
+ pa = l2e & ~ATTR_MASK;
+ m = PHYS_TO_VM_PAGE(pa);
+ l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);
+
+ for (l = pmap_l3_index(sva); l < Ln_ENTRIES;
+ l++, sva += L3_SIZE) {
+ l3e = l3[l];
+ if ((l3e & ATTR_DESCR_VALID) == 0)
+ continue;
+ printf("%s: l3e found for va %jx\n",
+ __func__, sva);
+ rc = KERN_FAILURE;
+ goto out;
+ }
+
+ vm_page_unwire_noq(m1);
+ vm_page_unwire_noq(m);
+ pmap_resident_count_dec(pmap, 1);
+ vm_page_free(m);
+ pmap_clear(&l2[k]);
+ }
+
+ vm_page_unwire_noq(m0);
+ pmap_resident_count_dec(pmap, 1);
+ vm_page_free(m1);
+ pmap_clear(&l1[j]);
+ }
+
+ pmap_resident_count_dec(pmap, 1);
+ vm_page_free(m0);
+ pmap_clear(&pmap->pm_l0[i]);
+ }
+
+ KASSERT(pmap->pm_stats.resident_count == 0,
+ ("Invalid resident count %jd", pmap->pm_stats.resident_count));
+
+ rc = KERN_SUCCESS;
+out:
+ PMAP_UNLOCK(pmap);
+
+ return (rc);
+}
+
/*
* Insert the given physical page (p) at
* the specified virtual address (v) in the
Index: sys/arm64/include/bus_dma_impl.h
===================================================================
--- sys/arm64/include/bus_dma_impl.h
+++ sys/arm64/include/bus_dma_impl.h
@@ -50,6 +50,17 @@
int ref_count;
};
+struct bounce_zone;
+
+struct bus_dma_tag {
+ struct bus_dma_tag_common common;
+ int map_count;
+ int bounce_flags;
+ struct bounce_zone *bounce_zone;
+ struct iommu_domain *iommu_domain;
+ device_t owner;
+};
+
struct bus_dma_impl {
int (*tag_create)(bus_dma_tag_t parent,
bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr,
@@ -92,6 +103,9 @@
int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
void *lockfuncarg, size_t sz, void **dmat);
+bus_dma_tag_t bounce_smmu_get_dma_tag(device_t dev, device_t child);
+int bounce_smmu_domain_free(bus_dma_tag_t dmat);
+
extern struct bus_dma_impl bus_dma_bounce_impl;
#endif
Index: sys/arm64/include/pmap.h
===================================================================
--- sys/arm64/include/pmap.h
+++ sys/arm64/include/pmap.h
@@ -185,6 +185,13 @@
int pmap_fault(pmap_t, uint64_t, uint64_t);
+/* System MMU (SMMU). */
+int pmap_senter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
+ vm_prot_t prot, u_int flags);
+int pmap_sremove(pmap_t pmap, vm_offset_t va);
+int pmap_sremove_all(pmap_t pmap);
+int pmap_bootstrap_smmu(pmap_t pmap, vm_offset_t sva, int count);
+
struct pcb *pmap_switch(struct thread *, struct thread *);
static inline int
Index: sys/conf/files.arm64
===================================================================
--- sys/conf/files.arm64
+++ sys/conf/files.arm64
@@ -129,6 +129,7 @@
arm64/arm64/bus_machdep.c standard
arm64/arm64/bus_space_asm.S standard
arm64/arm64/busdma_bounce.c standard
+arm64/arm64/busdma_bounce_smmu.c standard
arm64/arm64/busdma_machdep.c standard
arm64/arm64/bzero.S standard
arm64/arm64/clock.c standard
@@ -240,6 +241,10 @@
dev/iicbus/twsi/mv_twsi.c optional twsi fdt
dev/iicbus/twsi/a10_twsi.c optional twsi fdt
dev/iicbus/twsi/twsi.c optional twsi fdt
+dev/iommu/iommu.c standard
+dev/iommu/iommu_if.m standard
+dev/iommu/smmu.c optional smmu
+dev/iommu/smmu_acpi.c optional smmu acpi
dev/hwpmc/hwpmc_arm64.c optional hwpmc
dev/hwpmc/hwpmc_arm64_md.c optional hwpmc
dev/mbox/mbox_if.m optional soc_brcm_bcm2837
Index: sys/conf/options
===================================================================
--- sys/conf/options
+++ sys/conf/options
@@ -705,6 +705,7 @@
ACPI_MAX_TASKS opt_acpi.h
ACPI_MAX_THREADS opt_acpi.h
ACPI_DMAR opt_acpi.h
+ACPI_SMMU opt_acpi.h
DEV_ACPI opt_acpi.h
# ISA support
Index: sys/dev/acpica/acpivar.h
===================================================================
--- sys/dev/acpica/acpivar.h
+++ sys/dev/acpica/acpivar.h
@@ -557,6 +557,7 @@
* ARM specific ACPI interfaces, relating to IORT table.
*/
int acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid);
+int acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, u_int *xref, u_int *devid);
int acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm);
#endif
#endif /* _KERNEL */
Index: sys/dev/iommu/iommu.h
===================================================================
--- /dev/null
+++ sys/dev/iommu/iommu.h
@@ -0,0 +1,97 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_IOMMU_IOMMU_H_
+#define _DEV_IOMMU_IOMMU_H_
+
+#include <machine/bus.h>
+
+#include <sys/mutex.h>
+#include <sys/bus_dma.h>
+#include <sys/vmem.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+/* MMU unit */
+struct iommu {
+ LIST_HEAD(, iommu_domain) domain_list;
+ LIST_ENTRY(iommu) next;
+ struct mtx mtx_lock;
+ device_t dev;
+ intptr_t xref;
+};
+
+/* Minimal translation domain. */
+struct iommu_domain {
+ LIST_HEAD(, iommu_device) device_list;
+ LIST_ENTRY(iommu_domain) next;
+ struct mtx mtx_lock;
+ bus_dma_tag_t tag;
+ vmem_t *vmem;
+ struct iommu *iommu;
+};
+
+/* Consumer device. */
+struct iommu_device {
+ LIST_ENTRY(iommu_device) next;
+ struct iommu_domain *domain;
+ device_t dev;
+ uint16_t rid;
+};
+
+#define DOMAIN_LOCK(domain) mtx_lock(&(domain)->mtx_lock)
+#define DOMAIN_UNLOCK(domain) mtx_unlock(&(domain)->mtx_lock)
+#define DOMAIN_ASSERT_LOCKED(domain) \
+ mtx_assert(&(domain)->mtx_lock, MA_OWNED)
+
+int iommu_domain_free(struct iommu_domain *domain);
+struct iommu_domain * iommu_domain_alloc(struct iommu *iommu);
+struct iommu_domain * iommu_get_domain_for_dev(device_t dev);
+int iommu_device_attach(struct iommu_domain *domain, device_t dev);
+int iommu_device_detach(struct iommu_domain *domain, device_t dev);
+int iommu_capable(device_t dev);
+struct iommu * iommu_lookup(intptr_t xref, int flags);
+int iommu_register(device_t dev, intptr_t xref);
+int iommu_unregister(device_t dev);
+int iommu_domain_add_va_range(struct iommu_domain *domain,
+ vm_offset_t va, vm_size_t size);
+
+int iommu_map(struct iommu_domain *, bus_dma_segment_t *segs, int nsegs);
+void iommu_unmap(struct iommu_domain *, bus_dma_segment_t *segs, int nsegs);
+int iommu_map_page(struct iommu_domain *domain,
+ vm_offset_t va, vm_paddr_t pa, vm_prot_t prot);
+int iommu_unmap_page(struct iommu_domain *domain, vm_offset_t va);
+
+#endif /* _DEV_IOMMU_IOMMU_H_ */
Index: sys/dev/iommu/iommu.c
===================================================================
--- /dev/null
+++ sys/dev/iommu/iommu.c
@@ -0,0 +1,432 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+#include "opt_platform.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bitstring.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/queue.h>
+#include <sys/rman.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/cpuset.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/cpu.h>
+#include <machine/intr.h>
+
+#include <dev/iommu/smmu_var.h>
+#include <dev/pci/pcivar.h>
+
+#include "iommu.h"
+#include "iommu_if.h"
+
+static MALLOC_DEFINE(M_IOMMU, "IOMMU", "IOMMU framework");
+
+static struct mtx iommu_mtx;
+
+#define IOMMU_LOCK(iommu) mtx_lock(&(iommu)->mtx_lock)
+#define IOMMU_UNLOCK(iommu) mtx_unlock(&(iommu)->mtx_lock)
+#define IOMMU_ASSERT_LOCKED(iommu) mtx_assert(&(iommu)->mtx_lock, MA_OWNED)
+
+#define IOMMU_LIST_LOCK() mtx_lock(&iommu_mtx)
+#define IOMMU_LIST_UNLOCK() mtx_unlock(&iommu_mtx)
+#define IOMMU_LIST_ASSERT_LOCKED() mtx_assert(&iommu_mtx, MA_OWNED)
+
+#define IOMMU_DEBUG
+#undef IOMMU_DEBUG
+
+#ifdef IOMMU_DEBUG
+#define DPRINTF(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define DPRINTF(fmt, ...)
+#endif
+
+static LIST_HEAD(, iommu) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);
+
+int
+iommu_domain_add_va_range(struct iommu_domain *domain,
+ vm_offset_t va, vm_size_t size)
+{
+ struct iommu *iommu;
+ int error;
+
+ KASSERT(size > 0, ("wrong size"));
+
+ iommu = domain->iommu;
+
+ error = vmem_add(domain->vmem, va, size, M_WAITOK);
+
+ return (error);
+}
+
+struct iommu_domain *
+iommu_domain_alloc(struct iommu *iommu)
+{
+ struct iommu_domain *domain;
+
+ domain = IOMMU_DOMAIN_ALLOC(iommu->dev);
+ if (domain == NULL)
+ return (NULL);
+
+ LIST_INIT(&domain->device_list);
+ mtx_init(&domain->mtx_lock, "IOMMU domain", NULL, MTX_DEF);
+ domain->iommu = iommu;
+
+ domain->vmem = vmem_create("IOMMU vmem", 0, 0, PAGE_SIZE,
+ PAGE_SIZE, M_FIRSTFIT | M_WAITOK);
+ if (domain->vmem == NULL)
+ return (NULL);
+
+ IOMMU_LOCK(iommu);
+ LIST_INSERT_HEAD(&iommu->domain_list, domain, next);
+ IOMMU_UNLOCK(iommu);
+
+ return (domain);
+}
+
+int
+iommu_domain_free(struct iommu_domain *domain)
+{
+ struct iommu *iommu;
+ vmem_t *vmem;
+ int error;
+
+ iommu = domain->iommu;
+ vmem = domain->vmem;
+
+ IOMMU_LOCK(iommu);
+ LIST_REMOVE(domain, next);
+ error = IOMMU_DOMAIN_FREE(iommu->dev, domain);
+ if (error) {
+ LIST_INSERT_HEAD(&iommu->domain_list, domain, next);
+ IOMMU_UNLOCK(iommu);
+ return (error);
+ }
+
+ IOMMU_UNLOCK(iommu);
+
+ vmem_destroy(vmem);
+
+ return (0);
+}
+
+struct iommu_domain *
+iommu_get_domain_for_dev(device_t dev)
+{
+ struct iommu_domain *domain;
+ struct iommu_device *device;
+ struct iommu *iommu;
+
+ LIST_FOREACH(iommu, &iommu_list, next) {
+ LIST_FOREACH(domain, &iommu->domain_list, next) {
+ LIST_FOREACH(device, &domain->device_list, next) {
+ if (device->dev == dev)
+ return (domain);
+ }
+ }
+ }
+
+ return (NULL);
+}
+
+/*
+ * Attach a consumer device to a domain.
+ */
+int
+iommu_device_attach(struct iommu_domain *domain, device_t dev)
+{
+ struct iommu_device *device;
+ struct iommu *iommu;
+ int err;
+
+ iommu = domain->iommu;
+
+ device = malloc(sizeof(*device), M_IOMMU, M_WAITOK | M_ZERO);
+ device->rid = pci_get_rid(dev);
+ device->dev = dev;
+ device->domain = domain;
+
+ err = IOMMU_DEVICE_ATTACH(iommu->dev, domain, device);
+ if (err) {
+ device_printf(iommu->dev, "Failed to add device\n");
+ free(device, M_IOMMU);
+ return (err);
+ }
+
+ DOMAIN_LOCK(domain);
+ LIST_INSERT_HEAD(&domain->device_list, device, next);
+ DOMAIN_UNLOCK(domain);
+
+ return (err);
+}
+
+/*
+ * Detach a consumer device from IOMMU domain.
+ */
+int
+iommu_device_detach(struct iommu_domain *domain, device_t dev)
+{
+ struct iommu_device *device;
+ struct iommu *iommu;
+ bool found;
+ int err;
+
+ iommu = domain->iommu;
+
+ found = false;
+
+ DOMAIN_LOCK(domain);
+ LIST_FOREACH(device, &domain->device_list, next) {
+ if (device->dev == dev) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ DOMAIN_UNLOCK(domain);
+ return (ENODEV);
+ }
+
+ err = IOMMU_DEVICE_DETACH(iommu->dev, device);
+ if (err) {
+ device_printf(iommu->dev, "Failed to remove device\n");
+ DOMAIN_UNLOCK(domain);
+ return (err);
+ }
+
+ LIST_REMOVE(device, next);
+ DOMAIN_UNLOCK(domain);
+
+ return (0);
+}
+
+int
+iommu_map_page(struct iommu_domain *domain,
+ vm_offset_t va, vm_paddr_t pa, vm_prot_t prot)
+{
+ struct iommu *iommu;
+ int error;
+
+ iommu = domain->iommu;
+
+ error = IOMMU_MAP(iommu->dev, domain, va, pa, PAGE_SIZE, prot);
+ if (error)
+ return (error);
+
+ return (0);
+}
+
+int
+iommu_unmap_page(struct iommu_domain *domain, vm_offset_t va)
+{
+ struct iommu *iommu;
+ int error;
+
+ iommu = domain->iommu;
+
+ error = IOMMU_UNMAP(iommu->dev, domain, va, PAGE_SIZE);
+ if (error)
+ return (error);
+
+ return (0);
+}
+
+/*
+ * Busdma map/unmap interface.
+ */
+int
+iommu_map(struct iommu_domain *domain, bus_dma_segment_t *segs, int nsegs)
+{
+ struct iommu *iommu;
+ vm_offset_t offset;
+ vm_offset_t va;
+ vm_paddr_t pa;
+ vm_size_t size;
+ vm_prot_t prot;
+ int error;
+ int i;
+
+ iommu = domain->iommu;
+
+ for (i = 0; i < nsegs; i++) {
+ pa = segs[i].ds_addr & ~(PAGE_SIZE - 1);
+ offset = segs[i].ds_addr & (PAGE_SIZE - 1);
+ size = roundup2(offset + segs[i].ds_len, PAGE_SIZE);
+
+ error = vmem_alloc(domain->vmem, size,
+ M_FIRSTFIT | M_NOWAIT, &va);
+ if (error) {
+ device_printf(iommu->dev, "Could not allocate VA.\n");
+ return (error);
+ }
+
+ DPRINTF("%s: %jx -> %jx (%jd pages)\n",
+ __func__, va, pa, size / PAGE_SIZE);
+
+ prot = VM_PROT_READ | VM_PROT_WRITE;
+
+ error = IOMMU_MAP(iommu->dev, domain, va, pa, size, prot);
+ if (error)
+ return (error);
+ segs[i].ds_addr = va | offset;
+ }
+
+ return (0);
+}
+
+void
+iommu_unmap(struct iommu_domain *domain, bus_dma_segment_t *segs, int nsegs)
+{
+ struct iommu *iommu;
+ vm_offset_t offset;
+ vm_offset_t va;
+ vm_size_t size;
+ int err;
+ int i;
+
+ iommu = domain->iommu;
+
+ for (i = 0; i < nsegs; i++) {
+ va = segs[i].ds_addr & ~(PAGE_SIZE - 1);
+ offset = segs[i].ds_addr & (PAGE_SIZE - 1);
+ size = roundup2(offset + segs[i].ds_len, PAGE_SIZE);
+
+ DPRINTF("%s: %jx (%jd pages)\n",
+ __func__, va, size / PAGE_SIZE);
+
+ err = IOMMU_UNMAP(iommu->dev, domain, va, size);
+ if (err) {
+ /*
+ * It could be that busdma backend tries to unload
+ * the same address twice due to a bug in a device
+ * driver. We can't add this VA back to vmem twice.
+ */
+ device_printf(iommu->dev,
+ "Could not unmap VA %jx\n", va);
+ continue;
+ }
+ vmem_free(domain->vmem, va, size);
+ }
+}
+
+int
+iommu_register(device_t dev, intptr_t xref)
+{
+ struct iommu *iommu;
+
+ iommu = malloc(sizeof(*iommu), M_IOMMU, M_WAITOK | M_ZERO);
+ iommu->dev = dev;
+ iommu->xref = xref;
+
+ LIST_INIT(&iommu->domain_list);
+ mtx_init(&iommu->mtx_lock, "IOMMU", NULL, MTX_DEF);
+
+ IOMMU_LIST_LOCK();
+ LIST_INSERT_HEAD(&iommu_list, iommu, next);
+ IOMMU_LIST_UNLOCK();
+
+ return (0);
+}
+
+int
+iommu_unregister(device_t dev)
+{
+ struct iommu *iommu;
+ bool found;
+
+ found = false;
+
+ IOMMU_LIST_LOCK();
+ LIST_FOREACH(iommu, &iommu_list, next) {
+ if (iommu->dev == dev) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ IOMMU_LIST_UNLOCK();
+ return (ENOENT);
+ }
+
+ if (!LIST_EMPTY(&iommu->domain_list)) {
+ IOMMU_LIST_UNLOCK();
+ return (EBUSY);
+ }
+
+ LIST_REMOVE(iommu, next);
+ IOMMU_LIST_UNLOCK();
+
+ free(iommu, M_IOMMU);
+
+ return (0);
+}
+
+struct iommu *
+iommu_lookup(intptr_t xref, int flags)
+{
+ struct iommu *iommu;
+
+ LIST_FOREACH(iommu, &iommu_list, next) {
+ if (iommu->xref == xref)
+ return (iommu);
+ }
+
+ return (NULL);
+}
+
+static void
+iommu_init(void)
+{
+
+ mtx_init(&iommu_mtx, "IOMMU", NULL, MTX_DEF);
+}
+
+SYSINIT(iommu, SI_SUB_DRIVERS, SI_ORDER_FIRST, iommu_init, NULL);
Index: sys/dev/iommu/iommu_if.m
===================================================================
--- /dev/null
+++ sys/dev/iommu/iommu_if.m
@@ -0,0 +1,93 @@
+#-
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
+#
+# This software was developed by SRI International and the University of
+# Cambridge Computer Laboratory (Department of Computer Science and
+# Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+# DARPA SSITH research programme.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $FreeBSD$
+#
+
+#include <vm/vm.h>
+
+#include <dev/iommu/iommu.h>
+
+INTERFACE iommu;
+
+#
+# Map a virtual address VA to a physical address PA.
+#
+METHOD int map {
+ device_t dev;
+ struct iommu_domain *domain;
+ vm_offset_t va;
+ vm_paddr_t pa;
+ vm_size_t size;
+ vm_prot_t prot;
+};
+
+#
+# Unmap a virtual address VA.
+#
+METHOD int unmap {
+ device_t dev;
+ struct iommu_domain *domain;
+ vm_offset_t va;
+ vm_size_t size;
+};
+
+#
+# Allocate an IOMMU domain.
+#
+METHOD struct iommu_domain * domain_alloc {
+ device_t dev;
+};
+
+#
+# Release all the resources held by IOMMU domain.
+#
+METHOD int domain_free {
+ device_t dev;
+ struct iommu_domain *domain;
+};
+
+#
+# Attach a consumer device to an IOMMU domain.
+#
+METHOD int device_attach {
+ device_t dev;
+ struct iommu_domain *domain;
+ struct iommu_device *device;
+};
+
+#
+# Detach a consumer device from IOMMU domain.
+#
+METHOD int device_detach {
+ device_t dev;
+ struct iommu_device *device;
+};
Index: sys/dev/iommu/smmu.c
===================================================================
--- /dev/null
+++ sys/dev/iommu/smmu.c
@@ -0,0 +1,1690 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019-2020 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_platform.h"
+#include "opt_acpi.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/rman.h>
+#include <sys/mutex.h>
+#include <sys/lock.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/cpu.h>
+#include <machine/intr.h>
+
+#if DEV_ACPI
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpi_pcibvar.h>
+#endif
+
+#include <dev/pci/pcivar.h>
+
+#include "iommu.h"
+#include "iommu_if.h"
+
+#include "smmu_reg.h"
+#include "smmu_var.h"
+
+#define STRTAB_L1_SZ_SHIFT 20
+#define STRTAB_SPLIT 8
+
+#define STRTAB_L1_DESC_L2PTR_M (0x3fffffffffff << 6)
+#define STRTAB_L1_DESC_DWORDS 1
+
+#define STRTAB_STE_DWORDS 8
+
+#define CMDQ_ENTRY_DWORDS 2
+#define EVTQ_ENTRY_DWORDS 4
+#define PRIQ_ENTRY_DWORDS 2
+
+#define CD_DWORDS 8
+
+#define Q_WRP(q, p) ((p) & (1 << (q)->size_log2))
+#define Q_IDX(q, p) ((p) & ((1 << (q)->size_log2) - 1))
+#define Q_OVF(p) ((p) & (1 << 31)) /* Event queue overflowed */
+
+#define SMMU_Q_ALIGN (64 * 1024)
+
+static struct resource_spec smmu_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 1, RF_ACTIVE },
+ { SYS_RES_IRQ, 2, RF_ACTIVE },
+ RESOURCE_SPEC_END
+};
+
+MALLOC_DEFINE(M_SMMU, "SMMU", SMMU_DEVSTR);
+
+struct smmu_event {
+ int ident;
+ char *str;
+ char *msg;
+};
+
+static struct smmu_event events[] = {
+ { 0x01, "F_UUT",
+ "Unsupported Upstream Transaction."},
+ { 0x02, "C_BAD_STREAMID",
+ "Transaction StreamID out of range."},
+ { 0x03, "F_STE_FETCH",
+ "Fetch of STE caused external abort."},
+ { 0x04, "C_BAD_STE",
+ "Used STE invalid."},
+ { 0x05, "F_BAD_ATS_TREQ",
+ "Address Translation Request disallowed for a StreamID "
+ "and a PCIe ATS Translation Request received."},
+ { 0x06, "F_STREAM_DISABLED",
+ "The STE of a transaction marks non-substream transactions "
+ "disabled."},
+ { 0x07, "F_TRANSL_FORBIDDEN",
+ "An incoming PCIe transaction is marked Translated but "
+ "SMMU bypass is disallowed for this StreamID."},
+ { 0x08, "C_BAD_SUBSTREAMID",
+ "Incoming SubstreamID present, but configuration is invalid."},
+ { 0x09, "F_CD_FETCH",
+ "Fetch of CD caused external abort."},
+ { 0x0a, "C_BAD_CD",
+ "Fetched CD invalid."},
+ { 0x0b, "F_WALK_EABT",
+ "An external abort occurred fetching (or updating) "
+ "a translation table descriptor."},
+ { 0x10, "F_TRANSLATION",
+ "Translation fault."},
+ { 0x11, "F_ADDR_SIZE",
+ "Address Size fault."},
+ { 0x12, "F_ACCESS",
+ "Access flag fault due to AF == 0 in a page or block TTD."},
+ { 0x13, "F_PERMISSION",
+ "Permission fault occurred on page access."},
+ { 0x20, "F_TLB_CONFLICT",
+ "A TLB conflict occurred because of the transaction."},
+ { 0x21, "F_CFG_CONFLICT",
+ "A configuration cache conflict occurred due to "
+ "the transaction."},
+ { 0x24, "E_PAGE_REQUEST",
+ "Speculative page request hint."},
+ { 0x25, "F_VMS_FETCH",
+ "Fetch of VMS caused external abort."},
+ { 0, NULL, NULL },
+};
+
+/*
+ * Return non-zero when the queue can accept another entry.
+ *
+ * See 6.3.27 SMMU_CMDQ_PROD: the queue is full only when the read and
+ * write indices are equal while their wrap bits differ; any other
+ * combination leaves room.
+ */
+static int
+smmu_q_has_space(struct smmu_queue *q)
+{
+	uint32_t cons, prod;
+
+	cons = q->lc.cons;
+	prod = q->lc.prod;
+
+	return (Q_IDX(q, cons) != Q_IDX(q, prod) ||
+	    Q_WRP(q, cons) == Q_WRP(q, prod));
+}
+
+/*
+ * Return non-zero when the queue holds no entries: indices and wrap
+ * bits of producer and consumer both match.
+ */
+static int
+smmu_q_empty(struct smmu_queue *q)
+{
+	uint32_t cons, prod;
+
+	cons = q->lc.cons;
+	prod = q->lc.prod;
+
+	return (Q_IDX(q, cons) == Q_IDX(q, prod) &&
+	    Q_WRP(q, cons) == Q_WRP(q, prod));
+}
+
+/*
+ * Return non-zero when the hardware consumer has advanced at least to
+ * producer position 'prod' (i.e. the entry written at 'prod' has been
+ * consumed), accounting for wrap.
+ */
+static int __unused
+smmu_q_consumed(struct smmu_queue *q, uint32_t prod)
+{
+
+	if (Q_WRP(q, q->lc.cons) == Q_WRP(q, prod)) {
+		/* Same lap: consumed once cons caught up with prod. */
+		return (Q_IDX(q, q->lc.cons) >= Q_IDX(q, prod));
+	}
+
+	/* Consumer wrapped past prod's lap. */
+	return (Q_IDX(q, q->lc.cons) <= Q_IDX(q, prod));
+}
+
+/*
+ * Compute the next consumer pointer value: advance the index by one,
+ * folding the carry into the wrap bit, while preserving the overflow
+ * flag (bit 31).
+ */
+static uint32_t
+smmu_q_inc_cons(struct smmu_queue *q)
+{
+	uint32_t cons;
+	uint32_t val;
+
+	cons = (Q_WRP(q, q->lc.cons) | Q_IDX(q, q->lc.cons)) + 1;
+	val = (Q_OVF(q->lc.cons) | Q_WRP(q, cons) | Q_IDX(q, cons));
+
+	return (val);
+}
+
+/*
+ * Compute the next producer pointer value; same scheme as
+ * smmu_q_inc_cons() but for the write side.
+ */
+static uint32_t
+smmu_q_inc_prod(struct smmu_queue *q)
+{
+	uint32_t prod;
+	uint32_t val;
+
+	prod = (Q_WRP(q, q->lc.prod) | Q_IDX(q, q->lc.prod)) + 1;
+	val = (Q_OVF(q->lc.prod) | Q_WRP(q, prod) | Q_IDX(q, prod));
+
+	return (val);
+}
+
+/*
+ * Write 'val' to register 'reg' and poll 'reg_ack' until the hardware
+ * echoes the value back. Returns 0 on success, -1 on timeout.
+ *
+ * The previous version tested 'timeout <= 0' after the loop, so an ack
+ * observed on the final polling iteration was misreported as a failure;
+ * return success immediately at the point the ack is seen instead.
+ */
+static int
+smmu_write_ack(struct smmu_softc *sc, uint32_t reg,
+    uint32_t reg_ack, uint32_t val)
+{
+	uint32_t v;
+	int timeout;
+
+	timeout = 100000;
+
+	bus_write_4(sc->res[0], reg, val);
+
+	do {
+		v = bus_read_4(sc->res[0], reg_ack);
+		if (v == val)
+			return (0);
+	} while (timeout--);
+
+	device_printf(sc->dev, "Failed to write reg.\n");
+
+	return (-1);
+}
+
+/*
+ * Integer base-2 logarithm of a power of two.
+ * Asserts that 'x' is positive and a power of two.
+ */
+static inline int
+ilog2(long x)
+{
+
+	KASSERT(x > 0 && powerof2(x),
+	    ("%s: invalid arg %ld", __func__, x));
+
+	/* flsl() returns the 1-based index of the highest set bit. */
+	return (flsl(x) - 1);
+}
+
+/*
+ * Allocate the ring buffer for one hardware queue and precompute the
+ * value for its BASE register.
+ *
+ * The buffer is physically contiguous, 64KB-aligned and below 2^48
+ * (the width of the Q_BASE address field). Queue length is taken from
+ * q->size_log2, which the caller must have set (from IDR1).
+ */
+static int
+smmu_init_queue(struct smmu_softc *sc, struct smmu_queue *q,
+    uint32_t prod_off, uint32_t cons_off, uint32_t dwords)
+{
+	int sz;
+
+	/* Ring size in bytes: entries * entry size (dwords of 8 bytes). */
+	sz = (1 << q->size_log2) * dwords * 8;
+
+	/* Set up the command circular buffer */
+	q->vaddr = contigmalloc(sz, M_SMMU,
+	    M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, SMMU_Q_ALIGN, 0);
+	if (q->vaddr == NULL) {
+		device_printf(sc->dev, "failed to allocate %d bytes\n", sz);
+		return (-1);
+	}
+
+	q->prod_off = prod_off;
+	q->cons_off = cons_off;
+	q->paddr = vtophys(q->vaddr);
+
+	q->base = CMDQ_BASE_RA | EVENTQ_BASE_WA | PRIQ_BASE_WA;
+	q->base |= q->paddr & Q_BASE_ADDR_M;
+	q->base |= q->size_log2 << Q_LOG2SIZE_S;
+
+	return (0);
+}
+
+/*
+ * Allocate the command and event queues, plus the PRI queue when the
+ * hardware advertises the PRI feature. Returns ENXIO on allocation
+ * failure.
+ */
+static int
+smmu_init_queues(struct smmu_softc *sc)
+{
+	int err;
+
+	/* Command queue. */
+	err = smmu_init_queue(sc, &sc->cmdq,
+	    SMMU_CMDQ_PROD, SMMU_CMDQ_CONS, CMDQ_ENTRY_DWORDS);
+	if (err)
+		return (ENXIO);
+
+	/* Event queue. */
+	err = smmu_init_queue(sc, &sc->evtq,
+	    SMMU_EVENTQ_PROD, SMMU_EVENTQ_CONS, EVTQ_ENTRY_DWORDS);
+	if (err)
+		return (ENXIO);
+
+	/* The PRI queue only exists on PRI-capable implementations. */
+	if (!(sc->features & SMMU_FEATURE_PRI))
+		return (0);
+
+	/* PRI queue. */
+	err = smmu_init_queue(sc, &sc->priq,
+	    SMMU_PRIQ_PROD, SMMU_PRIQ_CONS, PRIQ_ENTRY_DWORDS);
+	if (err)
+		return (ENXIO);
+
+	return (0);
+}
+
+/*
+ * Dump 2LVL or linear STE.
+ *
+ * Diagnostic helper: print the stream table entry for 'sid' to the
+ * console. With a two-level table the L1 descriptor is printed first,
+ * then the L2 STE (if that L2 table has been allocated).
+ */
+static void
+smmu_dump_ste(struct smmu_softc *sc, int sid)
+{
+	struct smmu_strtab *strtab;
+	struct l1_desc *l1_desc;
+	uint64_t *ste, *l1;
+	int i;
+
+	strtab = &sc->strtab;
+
+	if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE) {
+		/* High bits of the SID select the L1 descriptor. */
+		i = sid >> STRTAB_SPLIT;
+		l1 = (void *)((uint64_t)strtab->vaddr +
+		    STRTAB_L1_DESC_DWORDS * 8 * i);
+		device_printf(sc->dev, "L1 ste == %lx\n", l1[0]);
+
+		l1_desc = &strtab->l1[i];
+		ste = l1_desc->va;
+		if (ste == NULL) /* L2 is not initialized */
+			return;
+	} else {
+		/* Linear table: index directly by SID. */
+		ste = (void *)((uint64_t)strtab->vaddr +
+		    sid * (STRTAB_STE_DWORDS << 3));
+	}
+
+	/* Dump L2 or linear STE. */
+	for (i = 0; i < STRTAB_STE_DWORDS; i++)
+		device_printf(sc->dev, "ste[%d] == %lx\n", i, ste[i]);
+}
+
+/*
+ * Diagnostic helper: print every dword of a context descriptor.
+ */
+static void __unused
+smmu_dump_cd(struct smmu_softc *sc, struct smmu_cd *cd)
+{
+	uint64_t *p;
+	int n;
+
+	device_printf(sc->dev, "%s\n", __func__);
+
+	p = cd->vaddr;
+	for (n = 0; n < CD_DWORDS; n++)
+		device_printf(sc->dev, "cd[%d] == %lx\n", n, p[n]);
+}
+
+/*
+ * Pop one record off the event queue into 'evt' (EVTQ_ENTRY_DWORDS
+ * dwords) and publish the new consumer index to the hardware.
+ */
+static void
+smmu_evtq_dequeue(struct smmu_softc *sc, uint32_t *evt)
+{
+	struct smmu_queue *evtq;
+	void *entry_addr;
+
+	evtq = &sc->evtq;
+
+	evtq->lc.val = bus_read_8(sc->res[0], evtq->prod_off);
+	/*
+	 * lc.cons carries the wrap and overflow flags in its upper bits;
+	 * mask them off with Q_IDX() before indexing the ring (as the
+	 * command queue path does), otherwise a wrapped consumer pointer
+	 * reads past the end of the buffer.
+	 */
+	entry_addr = (void *)((uint64_t)evtq->vaddr +
+	    Q_IDX(evtq, evtq->lc.cons) * EVTQ_ENTRY_DWORDS * 8);
+	memcpy(evt, entry_addr, EVTQ_ENTRY_DWORDS * 8);
+	evtq->lc.cons = smmu_q_inc_cons(evtq);
+	bus_write_4(sc->res[0], evtq->cons_off, evtq->lc.cons);
+}
+
+/*
+ * Decode and print one event queue record: look up the event ID in the
+ * events[] table, print the faulting SID and input address, dump the
+ * raw record, and finally dump the STE for the offending stream.
+ */
+static void
+smmu_print_event(struct smmu_softc *sc, uint32_t *evt)
+{
+	struct smmu_event *ev;
+	uint64_t input_addr;
+	uint8_t event_id;
+	int sid;
+	int i;
+
+	ev = NULL;
+	/* Event ID lives in the low byte of the first dword. */
+	event_id = evt[0] & 0xff;
+	for (i = 0; events[i].ident != 0; i++) {
+		if (events[i].ident == event_id) {
+			ev = &events[i];
+			break;
+		}
+	}
+
+	if (ev) {
+		device_printf(sc->dev,
+		    "Event %s (%s) received.\n", ev->str, ev->msg);
+	} else
+		device_printf(sc->dev, "Event 0x%x received\n", event_id);
+
+	/* StreamID in dword 1; 64-bit input address in dwords 4-5. */
+	sid = evt[1];
+	input_addr = evt[5];
+	input_addr <<= 32;
+	input_addr |= evt[4];
+
+	device_printf(sc->dev, "SID %x, Input Address: %jx\n",
+	    sid, input_addr);
+
+	for (i = 0; i < 8; i++)
+		device_printf(sc->dev, "evt[%d] %x\n", i, evt[i]);
+
+	smmu_dump_ste(sc, sid);
+}
+
+/*
+ * Encode 'entry' into a raw command queue record 'cmd'
+ * (CMDQ_ENTRY_DWORDS dwords), per the SMMUv3 command formats.
+ */
+static void
+make_cmd(struct smmu_softc *sc, uint64_t *cmd,
+    struct smmu_cmdq_entry *entry)
+{
+
+	memset(cmd, 0, CMDQ_ENTRY_DWORDS * 8);
+	cmd[0] = entry->opcode << CMD_QUEUE_OPCODE_S;
+
+	switch (entry->opcode) {
+	case CMD_TLBI_NH_VA:
+		cmd[1] = entry->tlbi.addr & TLBI_1_ADDR_M;
+		if (entry->tlbi.leaf) {
+			/*
+			 * Leaf flag means that only cached entries
+			 * for the last level of translation table walk
+			 * are required to be invalidated.
+			 */
+			cmd[1] |= TLBI_1_LEAF;
+		}
+		break;
+	case CMD_TLBI_NSNH_ALL:
+	case CMD_TLBI_NH_ALL:
+	case CMD_TLBI_EL2_ALL:
+		/* Broadcast invalidations carry no operands. */
+		break;
+	case CMD_CFGI_CD:
+		cmd[0] |= ((uint64_t)entry->cfgi.ssid << CFGI_0_SSID_S);
+		/* FALLTHROUGH */
+	case CMD_CFGI_STE:
+		cmd[0] |= ((uint64_t)entry->cfgi.sid << CFGI_0_STE_SID_S);
+		cmd[1] |= ((uint64_t)entry->cfgi.leaf << CFGI_1_LEAF_S);
+		break;
+	case CMD_CFGI_STE_RANGE:
+		/* Range of 31 means "invalidate all STEs". */
+		cmd[1] = (31 << CFGI_1_STE_RANGE_S);
+		break;
+	case CMD_SYNC:
+		cmd[0] |= SYNC_0_MSH_IS | SYNC_0_MSIATTR_OIWB;
+		if (entry->sync.msiaddr) {
+			/* Completion is signalled by an MSI write. */
+			cmd[0] |= SYNC_0_CS_SIG_IRQ;
+			cmd[1] |= (entry->sync.msiaddr & SYNC_1_MSIADDRESS_M);
+		} else
+			cmd[0] |= SYNC_0_CS_SIG_SEV;
+		break;
+	case CMD_PREFETCH_CONFIG:
+		cmd[0] |= ((uint64_t)entry->prefetch.sid << PREFETCH_0_SID_S);
+		break;
+	};
+}
+
+/*
+ * Encode and enqueue a single command, spinning until the hardware has
+ * made room in the command queue. Takes the softc lock to serialize
+ * producers.
+ */
+static void
+smmu_cmdq_enqueue_cmd(struct smmu_softc *sc, struct smmu_cmdq_entry *entry)
+{
+	uint64_t cmd[CMDQ_ENTRY_DWORDS];
+	struct smmu_queue *cmdq;
+	void *entry_addr;
+
+	cmdq = &sc->cmdq;
+
+	make_cmd(sc, cmd, entry);
+
+	SMMU_LOCK(sc);
+
+	/* Ensure that a space is available. */
+	do {
+		cmdq->lc.cons = bus_read_4(sc->res[0], cmdq->cons_off);
+	} while (smmu_q_has_space(cmdq) == 0);
+
+	/* Write the command to the current prod entry. */
+	entry_addr = (void *)((uint64_t)cmdq->vaddr +
+	    Q_IDX(cmdq, cmdq->lc.prod) * CMDQ_ENTRY_DWORDS * 8);
+	memcpy(entry_addr, cmd, CMDQ_ENTRY_DWORDS * 8);
+
+	/* Increment prod index. */
+	cmdq->lc.prod = smmu_q_inc_prod(cmdq);
+	bus_write_4(sc->res[0], cmdq->prod_off, cmdq->lc.prod);
+
+	SMMU_UNLOCK(sc);
+}
+
+/*
+ * Busy-wait until the hardware has drained every entry from 'q'.
+ */
+static void __unused
+smmu_poll_until_consumed(struct smmu_softc *sc, struct smmu_queue *q)
+{
+
+	for (;;) {
+		q->lc.val = bus_read_8(sc->res[0], q->prod_off);
+		if (smmu_q_empty(q))
+			break;
+		cpu_spinwait();
+	}
+}
+
+/*
+ * Issue a CMD_SYNC and wait for its completion.
+ *
+ * The sync's MSI target is pointed back at the sync command's own slot
+ * in the command queue ring; the hardware's zero write to that slot
+ * therefore signals completion without a real interrupt.
+ */
+static int
+smmu_sync(struct smmu_softc *sc)
+{
+	struct smmu_cmdq_entry cmd;
+	struct smmu_queue *q;
+	uint32_t *base;
+	int timeout;
+	int prod;
+
+	q = &sc->cmdq;
+	prod = q->lc.prod;
+
+	/* Enqueue sync command. */
+	cmd.opcode = CMD_SYNC;
+	cmd.sync.msiaddr = q->paddr + Q_IDX(q, prod) * CMDQ_ENTRY_DWORDS * 8;
+	smmu_cmdq_enqueue_cmd(sc, &cmd);
+
+	/* Wait for the sync completion. */
+	base = (void *)((uint64_t)q->vaddr +
+	    Q_IDX(q, prod) * CMDQ_ENTRY_DWORDS * 8);
+
+	/*
+	 * It takes around 200 loops (6 instructions each)
+	 * on Neoverse N1 to complete the sync.
+	 */
+	timeout = 10000;
+
+	do {
+		if (*base == 0) {
+			/* MSI write completed. */
+			break;
+		}
+		cpu_spinwait();
+	} while (timeout--);
+
+	if (timeout < 0)
+		device_printf(sc->dev, "Failed to sync\n");
+
+	return (0);
+}
+
+/*
+ * Invalidate the cached context descriptor for (sid, ssid).
+ * 'leaf' restricts the invalidation to the final-level CD only.
+ */
+static int
+smmu_sync_cd(struct smmu_softc *sc, int sid, int ssid, bool leaf)
+{
+	struct smmu_cmdq_entry cmd;
+
+	cmd.opcode = CMD_CFGI_CD;
+	cmd.cfgi.sid = sid;
+	cmd.cfgi.ssid = ssid;
+	cmd.cfgi.leaf = leaf;
+	smmu_cmdq_enqueue_cmd(sc, &cmd);
+
+	return (0);
+}
+
+/*
+ * Invalidate the cached configuration of every STE and wait for
+ * completion.
+ */
+static void
+smmu_invalidate_all_sid(struct smmu_softc *sc)
+{
+	struct smmu_cmdq_entry cmd;
+
+	/* Invalidate cached config */
+	cmd.opcode = CMD_CFGI_STE_RANGE;
+	smmu_cmdq_enqueue_cmd(sc, &cmd);
+	smmu_sync(sc);
+}
+
+/* Invalidate the entire TLB and wait for completion. */
+static void
+smmu_tlbi_all(struct smmu_softc *sc)
+{
+	struct smmu_cmdq_entry cmd;
+
+	/* Invalidate entire TLB */
+	cmd.opcode = CMD_TLBI_NSNH_ALL;
+	smmu_cmdq_enqueue_cmd(sc, &cmd);
+	smmu_sync(sc);
+}
+
+/*
+ * Invalidate cached translations for one VA (ASID 0, VMID 0).
+ * Unlike the other helpers this does NOT issue a sync; the caller is
+ * responsible for synchronizing after a batch of invalidations.
+ */
+static void
+smmu_tlbi_va(struct smmu_softc *sc, vm_offset_t va)
+{
+	struct smmu_cmdq_entry cmd;
+
+	/* Invalidate specific range */
+	cmd.opcode = CMD_TLBI_NH_VA;
+	cmd.tlbi.asid = 0;
+	cmd.tlbi.vmid = 0;
+	cmd.tlbi.leaf = false;
+	cmd.tlbi.addr = va;
+	smmu_cmdq_enqueue_cmd(sc, &cmd);
+}
+
+/* Invalidate the cached configuration of one STE; waits for completion. */
+static void
+smmu_invalidate_sid(struct smmu_softc *sc, uint32_t sid)
+{
+	struct smmu_cmdq_entry cmd;
+
+	/* Invalidate cached config */
+	cmd.opcode = CMD_CFGI_STE;
+	cmd.cfgi.sid = sid;
+	smmu_cmdq_enqueue_cmd(sc, &cmd);
+	smmu_sync(sc);
+}
+
+/* Hint the hardware to prefetch the configuration for 'sid'. */
+static void
+smmu_prefetch_sid(struct smmu_softc *sc, uint32_t sid)
+{
+	struct smmu_cmdq_entry cmd;
+
+	cmd.opcode = CMD_PREFETCH_CONFIG;
+	cmd.prefetch.sid = sid;
+	smmu_cmdq_enqueue_cmd(sc, &cmd);
+	smmu_sync(sc);
+}
+
+/*
+ * Init STE in bypass mode. Traffic is not translated for the sid.
+ *
+ * The upper dwords are written first and STE[0] (which carries the
+ * VALID bit) last, with config-cache invalidations around the final
+ * write so the hardware never observes a half-built entry.
+ */
+static void
+smmu_init_ste_bypass(struct smmu_softc *sc, uint32_t sid, uint64_t *ste)
+{
+	uint64_t val;
+
+	val = STE0_VALID | STE0_CONFIG_BYPASS;
+
+	ste[1] = STE1_SHCFG_INCOMING | STE1_EATS_FULLATS;
+	ste[2] = 0;
+	ste[3] = 0;
+	ste[4] = 0;
+	ste[5] = 0;
+	ste[6] = 0;
+	ste[7] = 0;
+
+	smmu_invalidate_sid(sc, sid);
+	/* Publish the entry: STE[0] written last, then a barrier. */
+	ste[0] = val;
+	dsb(sy);
+	smmu_invalidate_sid(sc, sid);
+
+	smmu_prefetch_sid(sc, sid);
+}
+
+/*
+ * Enable Stage1 (S1) translation for the sid.
+ *
+ * Points the STE at the context descriptor 'cd' and configures
+ * inner-shareable, write-back cacheable table walks under NS-EL1.
+ */
+static int
+smmu_init_ste_s1(struct smmu_softc *sc, struct smmu_cd *cd,
+    uint32_t sid, uint64_t *ste)
+{
+	uint64_t val;
+
+	val = STE0_VALID;
+
+	ste[1] = 0;
+	ste[2] = 0;
+	ste[3] = 0;
+	ste[4] = 0;
+	ste[5] = 0;
+	ste[6] = 0;
+	ste[7] = 0;
+
+	/* S1 */
+	ste[1] |= STE1_EATS_FULLATS
+		| STE1_S1CSH_IS
+		| STE1_S1CIR_WBRA
+		| STE1_S1COR_WBRA
+		| STE1_STRW_NS_EL1;
+
+	/* Disable stalling unless the hardware forces stall mode. */
+	if (sc->features & SMMU_FEATURE_STALL &&
+	    ((sc->features & SMMU_FEATURE_STALL_FORCE) == 0))
+		ste[1] |= STE1_S1STALLD;
+
+	/* Configure STE */
+	val |= (cd->paddr & STE0_S1CONTEXTPTR_M);
+	val |= STE0_CONFIG_S1_TRANS;
+
+	/* One Context descriptor (S1Fmt is IGNORED). */
+
+	/*
+	 * Set for a linear table of CDs.
+	 *
+	 * val |= STE0_S1FMT_LINEAR;
+	 * val |= 1 << STE0_S1CDMAX_S;
+	 */
+
+	smmu_invalidate_sid(sc, sid);
+
+	/* The STE[0] has to be written in a single blast, last of all. */
+	ste[0] = val;
+	dsb(sy);
+
+	smmu_invalidate_sid(sc, sid);
+	smmu_sync_cd(sc, sid, 0, true);
+	smmu_invalidate_sid(sc, sid);
+
+	/* The sid will be used soon most likely. */
+	smmu_prefetch_sid(sc, sid);
+
+	return (0);
+}
+
+/*
+ * Install the Stream Table Entry for 'sid': Stage 1 translation via the
+ * context descriptor 'cd' when s1 is true, bypass mode otherwise.
+ *
+ * With a two-level stream table the previous version used the L2 table
+ * base directly, so every SID within the same 2^STRTAB_SPLIT group
+ * aliased STE slot 0; the low SID bits must select the slot inside the
+ * L2 table.
+ */
+static int
+smmu_init_ste(struct smmu_softc *sc, struct smmu_cd *cd, int sid, bool s1)
+{
+	struct smmu_strtab *strtab;
+	struct l1_desc *l1_desc;
+	uint64_t *addr;
+
+	strtab = &sc->strtab;
+
+	if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE) {
+		/* High SID bits pick the L2 table, low bits the slot. */
+		l1_desc = &strtab->l1[sid >> STRTAB_SPLIT];
+		addr = l1_desc->va;
+		addr += (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
+	} else {
+		addr = (void *)((uint64_t)strtab->vaddr +
+		    STRTAB_STE_DWORDS * 8 * sid);
+	}
+
+	if (s1)
+		smmu_init_ste_s1(sc, cd, sid, addr);
+	else
+		smmu_init_ste_bypass(sc, sid, addr);
+
+	smmu_sync(sc);
+
+	return (0);
+}
+
+/*
+ * Allocate and populate the context descriptor for a domain: a single
+ * CD describing an AArch64 4KB-granule TT0 walk rooted at the domain
+ * pmap's L0 table, with TT1 disabled. The VALID bit (in dword 0) is
+ * installed last.
+ */
+static int
+smmu_init_cd(struct smmu_softc *sc, struct smmu_domain *domain)
+{
+	vm_paddr_t paddr;
+	uint64_t *ptr;
+	uint64_t val;
+	vm_size_t size;
+	struct smmu_cd *cd;
+	pmap_t p;
+
+	/* One CD only. */
+	size = 1 * (CD_DWORDS << 3);
+
+	p = &domain->p;
+	cd = &domain->cd;
+
+	/* CD must sit below 2^40 to fit the S1ContextPtr field. */
+	cd->vaddr = contigmalloc(size, M_SMMU,
+	    M_WAITOK | M_ZERO,	/* flags */
+	    0,			/* low */
+	    (1ul << 40) - 1,	/* high */
+	    size,		/* alignment */
+	    0);			/* boundary */
+	if (cd->vaddr == NULL) {
+		device_printf(sc->dev, "Failed to allocate CD\n");
+		return (ENXIO);
+	}
+
+	cd->size = size;
+	cd->paddr = vtophys(cd->vaddr);
+
+	ptr = cd->vaddr;
+
+	val = CD0_VALID;
+	val |= CD0_AA64;
+	val |= CD0_ASET;
+	val |= CD0_R;
+	val |= CD0_A;
+	val |= CD0_TG0_4KB;
+	val |= CD0_EPD1;	/* Disable TT1 */
+	val |= ((64 - sc->ias) << CD0_T0SZ_S);
+	val |= CD0_IPS_48BITS;
+
+	paddr = p->pm_l0_paddr & CD1_TTB0_M;
+	KASSERT(paddr == p->pm_l0_paddr, ("bad allocation 1"));
+
+	ptr[1] = paddr;
+	ptr[2] = 0;
+	/* Memory attribute indirection, mirroring the CPU's MAIR layout. */
+	ptr[3] = MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE)	|\
+	    MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE)		|\
+	    MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK)		|\
+	    MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH);
+
+	/* Install the CD. */
+	ptr[0] = val;
+
+	return (0);
+}
+
+/*
+ * Allocate a linear (single-level) stream table covering the full SID
+ * space (2^sid_bits entries) and precompute the STRTAB_BASE and
+ * STRTAB_BASE_CFG register values.
+ */
+static int
+smmu_init_strtab_linear(struct smmu_softc *sc)
+{
+	struct smmu_strtab *strtab;
+	vm_paddr_t base;
+	uint32_t size;
+	uint64_t reg;
+
+	strtab = &sc->strtab;
+	strtab->num_l1_entries = (1 << sc->sid_bits);
+
+	size = strtab->num_l1_entries * (STRTAB_STE_DWORDS << 3);
+
+	if (bootverbose)
+		device_printf(sc->dev,
+		    "%s: linear strtab size %d, num_l1_entries %d\n",
+		    __func__, size, strtab->num_l1_entries);
+
+	strtab->vaddr = contigmalloc(size, M_SMMU,
+	    M_WAITOK | M_ZERO,	/* flags */
+	    0,			/* low */
+	    (1ul << 48) - 1,	/* high */
+	    size,		/* alignment */
+	    0);			/* boundary */
+	if (strtab->vaddr == NULL) {
+		device_printf(sc->dev, "failed to allocate strtab\n");
+		return (ENXIO);
+	}
+
+	reg = STRTAB_BASE_CFG_FMT_LINEAR;
+	reg |= sc->sid_bits << STRTAB_BASE_CFG_LOG2SIZE_S;
+	strtab->base_cfg = (uint32_t)reg;
+
+	base = vtophys(strtab->vaddr);
+
+	reg = base & STRTAB_BASE_ADDR_M;
+	KASSERT(reg == base, ("bad allocation 2"));
+	reg |= STRTAB_BASE_RA;
+	strtab->base = reg;
+
+	return (0);
+}
+
+/*
+ * Allocate the L1 level of a two-level stream table, plus the software
+ * l1_desc bookkeeping array, and precompute the STRTAB_BASE and
+ * STRTAB_BASE_CFG register values. L2 tables are allocated lazily per
+ * SID group in smmu_init_l1_entry().
+ */
+static int
+smmu_init_strtab_2lvl(struct smmu_softc *sc)
+{
+	struct smmu_strtab *strtab;
+	vm_paddr_t base;
+	uint64_t reg_base;
+	uint32_t l1size;
+	uint32_t size;
+	uint32_t reg;
+	int sz;
+
+	strtab = &sc->strtab;
+
+	/* L1 span, capped by both a size budget and the SID width. */
+	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
+	size = min(size, sc->sid_bits - STRTAB_SPLIT);
+	strtab->num_l1_entries = (1 << size);
+	size += STRTAB_SPLIT;
+
+	l1size = strtab->num_l1_entries * (STRTAB_L1_DESC_DWORDS << 3);
+
+	if (bootverbose)
+		device_printf(sc->dev,
+		    "%s: size %d, l1 entries %d, l1size %d\n",
+		    __func__, size, strtab->num_l1_entries, l1size);
+
+	strtab->vaddr = contigmalloc(l1size, M_SMMU,
+	    M_WAITOK | M_ZERO,	/* flags */
+	    0,			/* low */
+	    (1ul << 48) - 1,	/* high */
+	    l1size,		/* alignment */
+	    0);			/* boundary */
+	if (strtab->vaddr == NULL) {
+		device_printf(sc->dev, "Failed to allocate 2lvl strtab.\n");
+		return (ENOMEM);
+	}
+
+	sz = strtab->num_l1_entries * sizeof(struct l1_desc);
+
+	/* NOTE: with M_WAITOK, malloc(9) does not return NULL. */
+	strtab->l1 = malloc(sz, M_SMMU, M_WAITOK | M_ZERO);
+	if (strtab->l1 == NULL) {
+		contigfree(strtab->vaddr, l1size, M_SMMU);
+		return (ENOMEM);
+	}
+
+	reg = STRTAB_BASE_CFG_FMT_2LVL;
+	reg |= size << STRTAB_BASE_CFG_LOG2SIZE_S;
+	reg |= STRTAB_SPLIT << STRTAB_BASE_CFG_SPLIT_S;
+	strtab->base_cfg = (uint32_t)reg;
+
+	base = vtophys(strtab->vaddr);
+
+	reg_base = base & STRTAB_BASE_ADDR_M;
+	KASSERT(reg_base == base, ("bad allocation 3"));
+	reg_base |= STRTAB_BASE_RA;
+	strtab->base = reg_base;
+
+	return (0);
+}
+
+/*
+ * Allocate the stream table, preferring the two-level format when the
+ * hardware supports it.
+ */
+static int
+smmu_init_strtab(struct smmu_softc *sc)
+{
+
+	if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE)
+		return (smmu_init_strtab_2lvl(sc));
+
+	return (smmu_init_strtab_linear(sc));
+}
+
+/*
+ * Allocate the L2 stream table for the group containing 'sid'
+ * (2^STRTAB_SPLIT entries) and install the corresponding L1 descriptor.
+ * Only meaningful for two-level stream tables.
+ */
+static int
+smmu_init_l1_entry(struct smmu_softc *sc, int sid)
+{
+	struct smmu_strtab *strtab;
+	struct l1_desc *l1_desc;
+	uint64_t *addr;
+	uint64_t val;
+	size_t size;
+	int i;
+
+	strtab = &sc->strtab;
+	l1_desc = &strtab->l1[sid >> STRTAB_SPLIT];
+
+	/* L2 table size: 2^STRTAB_SPLIT STEs of STRTAB_STE_DWORDS each. */
+	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
+
+	l1_desc->span = STRTAB_SPLIT + 1;
+	l1_desc->size = size;
+	l1_desc->va = contigmalloc(size, M_SMMU,
+	    M_WAITOK | M_ZERO,	/* flags */
+	    0,			/* low */
+	    (1ul << 48) - 1,	/* high */
+	    size,		/* alignment */
+	    0);			/* boundary */
+	if (l1_desc->va == NULL) {
+		device_printf(sc->dev, "failed to allocate l2 entry\n");
+		return (ENXIO);
+	}
+
+	l1_desc->pa = vtophys(l1_desc->va);
+
+	i = sid >> STRTAB_SPLIT;
+	addr = (void *)((uint64_t)strtab->vaddr +
+	    STRTAB_L1_DESC_DWORDS * 8 * i);
+
+	/* Install the L1 entry. */
+	val = l1_desc->pa & STRTAB_L1_DESC_L2PTR_M;
+	KASSERT(val == l1_desc->pa, ("bad allocation 4"));
+	val |= l1_desc->span;
+	*addr = val;
+
+	return (0);
+}
+
+/*
+ * Tear down the L1 descriptor and free the L2 table for the group
+ * containing 'sid'.
+ *
+ * NOTE(review): the '*addr = 0' write happens before the feature test;
+ * with a linear stream table the L1-stride address computed here lands
+ * inside an unrelated STE and would be zeroed -- confirm this is only
+ * ever called in 2-level mode.
+ */
+static void
+smmu_deinit_l1_entry(struct smmu_softc *sc, int sid)
+{
+	struct smmu_strtab *strtab;
+	struct l1_desc *l1_desc;
+	uint64_t *addr;
+	int i;
+
+	strtab = &sc->strtab;
+
+	i = sid >> STRTAB_SPLIT;
+	addr = (void *)((uint64_t)strtab->vaddr +
+	    STRTAB_L1_DESC_DWORDS * 8 * i);
+	*addr = 0;
+
+	if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE) {
+		l1_desc = &strtab->l1[sid >> STRTAB_SPLIT];
+		contigfree(l1_desc->va, l1_desc->size, M_SMMU);
+	}
+}
+
+/*
+ * Clear SMMUEN in CR0 and wait for the hardware to acknowledge.
+ *
+ * The previous version printed a diagnostic on failure but still
+ * returned 0; propagate the failure so callers can react.
+ */
+static int
+smmu_disable(struct smmu_softc *sc)
+{
+	uint32_t reg;
+	int error;
+
+	/* Disable SMMU */
+	reg = bus_read_4(sc->res[0], SMMU_CR0);
+	reg &= ~CR0_SMMUEN;
+	error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
+	if (error) {
+		device_printf(sc->dev, "Could not disable SMMU.\n");
+		return (ENXIO);
+	}
+
+	return (0);
+}
+
+/*
+ * Event queue interrupt filter: drain every pending fault record and
+ * print it. Runs in interrupt filter context.
+ */
+static int
+smmu_event_intr(void *arg)
+{
+	/* One full event record (EVTQ_ENTRY_DWORDS 64-bit dwords). */
+	uint32_t evt[EVTQ_ENTRY_DWORDS * 2];
+	struct smmu_softc *sc;
+
+	sc = arg;
+
+	do {
+		smmu_evtq_dequeue(sc, evt);
+		smmu_print_event(sc, evt);
+	} while (!smmu_q_empty(&sc->evtq));
+
+	return (FILTER_HANDLED);
+}
+
+/*
+ * Sync-complete interrupt filter. Currently unused: CMD_SYNC completion
+ * is detected by polling the MSI target word in smmu_sync() instead.
+ */
+static int __unused
+smmu_sync_intr(void *arg)
+{
+	struct smmu_softc *sc;
+
+	sc = arg;
+
+	device_printf(sc->dev, "%s\n", __func__);
+
+	return (FILTER_HANDLED);
+}
+
+/* Global error interrupt filter: report and dismiss. */
+static int
+smmu_gerr_intr(void *arg)
+{
+	struct smmu_softc *sc;
+
+	sc = arg;
+
+	device_printf(sc->dev, "SMMU Global Error\n");
+
+	return (FILTER_HANDLED);
+}
+
+/*
+ * Route SMMU interrupts: disable all MSI-based delivery (wired IRQs are
+ * used instead), then enable the event queue and global error
+ * interrupts, plus the PRI queue interrupt when supported.
+ *
+ * The previous version tested 'sc->features & CR0_PRIQEN', mixing a CR0
+ * register bit into the SMMU_FEATURE_* flag word; the PRI capability
+ * flag is SMMU_FEATURE_PRI (as used for IRQ_CTRL_PRIQ_IRQEN below).
+ */
+static int
+smmu_enable_interrupts(struct smmu_softc *sc)
+{
+	uint32_t reg;
+	int error;
+
+	/* Disable MSI. */
+	bus_write_8(sc->res[0], SMMU_GERROR_IRQ_CFG0, 0);
+	bus_write_4(sc->res[0], SMMU_GERROR_IRQ_CFG1, 0);
+	bus_write_4(sc->res[0], SMMU_GERROR_IRQ_CFG2, 0);
+
+	bus_write_8(sc->res[0], SMMU_EVENTQ_IRQ_CFG0, 0);
+	bus_write_4(sc->res[0], SMMU_EVENTQ_IRQ_CFG1, 0);
+	bus_write_4(sc->res[0], SMMU_EVENTQ_IRQ_CFG2, 0);
+
+	if (sc->features & SMMU_FEATURE_PRI) {
+		bus_write_8(sc->res[0], SMMU_PRIQ_IRQ_CFG0, 0);
+		bus_write_4(sc->res[0], SMMU_PRIQ_IRQ_CFG1, 0);
+		bus_write_4(sc->res[0], SMMU_PRIQ_IRQ_CFG2, 0);
+	}
+
+	/* Disable any interrupts. */
+	error = smmu_write_ack(sc, SMMU_IRQ_CTRL, SMMU_IRQ_CTRLACK, 0);
+	if (error) {
+		device_printf(sc->dev, "Could not disable interrupts.\n");
+		return (ENXIO);
+	}
+
+	/* Enable interrupts. */
+	reg = IRQ_CTRL_EVENTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
+	if (sc->features & SMMU_FEATURE_PRI)
+		reg |= IRQ_CTRL_PRIQ_IRQEN;
+
+	error = smmu_write_ack(sc, SMMU_IRQ_CTRL, SMMU_IRQ_CTRLACK, reg);
+	if (error) {
+		device_printf(sc->dev, "Could not enable interrupts.\n");
+		return (ENXIO);
+	}
+
+	return (0);
+}
+
+#if DEV_ACPI
+/*
+ * Force an ACPI-described interrupt resource to edge-triggered,
+ * active-high. The IORT/MADT tables carry no trigger information for
+ * the SMMU IRQs, so this must be set by hand before bus_setup_intr().
+ */
+static void
+smmu_configure_intr(struct smmu_softc *sc, struct resource *res)
+{
+	struct intr_map_data_acpi *ad;
+	struct intr_map_data *data;
+
+	data = rman_get_virtual(res);
+	KASSERT(data != NULL, ("data is NULL"));
+
+	if (data->type == INTR_MAP_DATA_ACPI) {
+		ad = (struct intr_map_data_acpi *)data;
+		ad->trig = INTR_TRIGGER_EDGE;
+		ad->pol = INTR_POLARITY_HIGH;
+	}
+}
+#endif
+
+/*
+ * Install interrupt handlers: res[1] = event queue, res[3] = global
+ * error. res[2] (the sync interrupt) is intentionally not wired up --
+ * CMD_SYNC completion is polled in smmu_sync() instead.
+ */
+static int
+smmu_setup_interrupts(struct smmu_softc *sc)
+{
+	device_t dev;
+	int error;
+
+	dev = sc->dev;
+
+#if DEV_ACPI
+	/*
+	 * Configure SMMU interrupts as EDGE triggered manually
+	 * as ACPI tables carries no information for that.
+	 */
+	smmu_configure_intr(sc, sc->res[1]);
+	smmu_configure_intr(sc, sc->res[2]);
+	smmu_configure_intr(sc, sc->res[3]);
+#endif
+
+	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC,
+	    smmu_event_intr, NULL, sc, &sc->intr_cookie[0]);
+	if (error) {
+		device_printf(dev, "Couldn't setup Event interrupt handler\n");
+		return (ENXIO);
+	}
+
+	error = bus_setup_intr(dev, sc->res[3], INTR_TYPE_MISC,
+	    smmu_gerr_intr, NULL, sc, &sc->intr_cookie[2]);
+	if (error) {
+		device_printf(dev, "Couldn't setup Gerr interrupt handler\n");
+		return (ENXIO);
+	}
+
+	return (0);
+}
+
+/*
+ * Bring the SMMU from an unknown state into full operation: disable it,
+ * program table/queue cacheability, install the stream table and all
+ * queues, invalidate cached configuration and TLBs, then enable the
+ * queues and finally the SMMU itself, acknowledging each CR0 change.
+ * 'reg' accumulates CR0 enable bits across the sequence.
+ */
+static int
+smmu_reset(struct smmu_softc *sc)
+{
+	struct smmu_cmdq_entry cmd;
+	struct smmu_strtab *strtab;
+	int error;
+	int reg;
+
+	reg = bus_read_4(sc->res[0], SMMU_CR0);
+
+	if (reg & CR0_SMMUEN)
+		device_printf(sc->dev,
+		    "%s: Warning: SMMU is enabled\n", __func__);
+
+	error = smmu_disable(sc);
+	if (error)
+		device_printf(sc->dev,
+		    "%s: Could not disable SMMU.\n", __func__);
+
+	if (smmu_enable_interrupts(sc) != 0) {
+		device_printf(sc->dev, "Could not enable interrupts.\n");
+		return (ENXIO);
+	}
+
+	/* Inner-shareable, write-back cacheable table and queue walks. */
+	reg = CR1_TABLE_SH_IS
+	    | CR1_TABLE_OC_WBC
+	    | CR1_TABLE_IC_WBC
+	    | CR1_QUEUE_SH_IS
+	    | CR1_QUEUE_OC_WBC
+	    | CR1_QUEUE_IC_WBC;
+	bus_write_4(sc->res[0], SMMU_CR1, reg);
+
+	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
+	bus_write_4(sc->res[0], SMMU_CR2, reg);
+
+	/* Stream table. */
+	strtab = &sc->strtab;
+	bus_write_8(sc->res[0], SMMU_STRTAB_BASE, strtab->base);
+	bus_write_4(sc->res[0], SMMU_STRTAB_BASE_CFG, strtab->base_cfg);
+
+	/* Command queue. */
+	bus_write_8(sc->res[0], SMMU_CMDQ_BASE, sc->cmdq.base);
+	bus_write_4(sc->res[0], SMMU_CMDQ_PROD, sc->cmdq.lc.prod);
+	bus_write_4(sc->res[0], SMMU_CMDQ_CONS, sc->cmdq.lc.cons);
+
+	reg = CR0_CMDQEN;
+	error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
+	if (error) {
+		device_printf(sc->dev, "Could not enable command queue\n");
+		return (ENXIO);
+	}
+
+	/* Invalidate cached configuration. */
+	smmu_invalidate_all_sid(sc);
+
+	if (sc->features & SMMU_FEATURE_HYP) {
+		cmd.opcode = CMD_TLBI_EL2_ALL;
+		smmu_cmdq_enqueue_cmd(sc, &cmd);
+	};
+
+	/* Invalidate TLB. */
+	smmu_tlbi_all(sc);
+
+	/* Event queue */
+	bus_write_8(sc->res[0], SMMU_EVENTQ_BASE, sc->evtq.base);
+	bus_write_4(sc->res[0], SMMU_EVENTQ_PROD, sc->evtq.lc.prod);
+	bus_write_4(sc->res[0], SMMU_EVENTQ_CONS, sc->evtq.lc.cons);
+
+	reg |= CR0_EVENTQEN;
+	error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
+	if (error) {
+		device_printf(sc->dev, "Could not enable event queue\n");
+		return (ENXIO);
+	}
+
+	if (sc->features & SMMU_FEATURE_PRI) {
+		/* PRI queue */
+		bus_write_8(sc->res[0], SMMU_PRIQ_BASE, sc->priq.base);
+		bus_write_4(sc->res[0], SMMU_PRIQ_PROD, sc->priq.lc.prod);
+		bus_write_4(sc->res[0], SMMU_PRIQ_CONS, sc->priq.lc.cons);
+
+		reg |= CR0_PRIQEN;
+		error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
+		if (error) {
+			device_printf(sc->dev, "Could not enable PRI queue\n");
+			return (ENXIO);
+		}
+	}
+
+	if (sc->features & SMMU_FEATURE_ATS) {
+		reg |= CR0_ATSCHK;
+		error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
+		if (error) {
+			device_printf(sc->dev, "Could not enable ATS check.\n");
+			return (ENXIO);
+		}
+	}
+
+	/* Enable translation last, once everything else is in place. */
+	reg |= CR0_SMMUEN;
+	error = smmu_write_ack(sc, SMMU_CR0, SMMU_CR0ACK, reg);
+	if (error) {
+		device_printf(sc->dev, "Could not enable SMMU.\n");
+		return (ENXIO);
+	}
+
+	return (0);
+}
+
+/*
+ * Probe the SMMUv3 ID registers and record supported features in
+ * sc->features, along with address/ID sizes and queue geometry.
+ * Returns 0 on success or ENXIO when a mandatory capability (AArch64
+ * table format, a known endianness/OAS encoding, non-embedded
+ * implementation) is missing.
+ */
+static int
+smmu_check_features(struct smmu_softc *sc)
+{
+	uint32_t reg;
+	uint32_t val;
+
+	sc->features = 0;
+
+	reg = bus_read_4(sc->res[0], SMMU_IDR0);
+
+	if (reg & IDR0_ST_LVL_2) {
+		device_printf(sc->dev, "2-level stream table supported.\n");
+		sc->features |= SMMU_FEATURE_2_LVL_STREAM_TABLE;
+	}
+
+	if (reg & IDR0_CD2L) {
+		device_printf(sc->dev, "2-level CD table supported.\n");
+		sc->features |= SMMU_FEATURE_2_LVL_CD;
+	}
+
+	switch (reg & IDR0_TTENDIAN_M) {
+	case IDR0_TTENDIAN_MIXED:
+		device_printf(sc->dev, "Mixed endianness supported.\n");
+		sc->features |= SMMU_FEATURE_TT_LE;
+		sc->features |= SMMU_FEATURE_TT_BE;
+		break;
+	case IDR0_TTENDIAN_LITTLE:
+		device_printf(sc->dev, "Little endian supported only.\n");
+		sc->features |= SMMU_FEATURE_TT_LE;
+		break;
+	case IDR0_TTENDIAN_BIG:
+		device_printf(sc->dev, "Big endian supported only.\n");
+		sc->features |= SMMU_FEATURE_TT_BE;
+		break;
+	default:
+		device_printf(sc->dev, "Unsupported endianness.\n");
+		return (ENXIO);
+	}
+
+	if (reg & IDR0_SEV)
+		sc->features |= SMMU_FEATURE_SEV;
+
+	if (reg & IDR0_MSI) {
+		device_printf(sc->dev, "MSI feature present.\n");
+		sc->features |= SMMU_FEATURE_MSI;
+	} else
+		device_printf(sc->dev, "MSI feature not present.\n");
+
+	if (reg & IDR0_HYP) {
+		device_printf(sc->dev, "HYP feature present.\n");
+		sc->features |= SMMU_FEATURE_HYP;
+	}
+
+	if (reg & IDR0_ATS)
+		sc->features |= SMMU_FEATURE_ATS;
+
+	if (reg & IDR0_PRI)
+		sc->features |= SMMU_FEATURE_PRI;
+
+	switch (reg & IDR0_STALL_MODEL_M) {
+	case IDR0_STALL_MODEL_FORCE:
+		/* Stall is forced. */
+		sc->features |= SMMU_FEATURE_STALL_FORCE;
+		/* FALLTHROUGH */
+	case IDR0_STALL_MODEL_STALL:
+		sc->features |= SMMU_FEATURE_STALL;
+		break;
+	}
+
+	/* Grab translation stages supported. */
+	if (reg & IDR0_S1P) {
+		device_printf(sc->dev, "Stage 1 translation supported.\n");
+		sc->features |= SMMU_FEATURE_S1P;
+	}
+	if (reg & IDR0_S2P) {
+		device_printf(sc->dev, "Stage 2 translation supported.\n");
+		sc->features |= SMMU_FEATURE_S2P;
+	}
+
+	switch (reg & IDR0_TTF_M) {
+	case IDR0_TTF_ALL:
+	case IDR0_TTF_AA64:
+		sc->ias = 40;
+		break;
+	default:
+		device_printf(sc->dev, "No AArch64 table format support.\n");
+		return (ENXIO);
+	}
+
+	if (reg & IDR0_ASID16)
+		sc->asid_bits = 16;
+	else
+		sc->asid_bits = 8;
+
+	if (reg & IDR0_VMID16)
+		sc->vmid_bits = 16;
+	else
+		sc->vmid_bits = 8;
+
+	reg = bus_read_4(sc->res[0], SMMU_IDR1);
+
+	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
+		device_printf(sc->dev,
+		    "Embedded implementations not supported by this driver.\n");
+		return (ENXIO);
+	}
+
+	val = (reg & IDR1_CMDQS_M) >> IDR1_CMDQS_S;
+	sc->cmdq.size_log2 = val;
+	device_printf(sc->dev, "CMD queue bits %d\n", val);
+
+	val = (reg & IDR1_EVENTQS_M) >> IDR1_EVENTQS_S;
+	sc->evtq.size_log2 = val;
+	device_printf(sc->dev, "EVENT queue bits %d\n", val);
+
+	if (sc->features & SMMU_FEATURE_PRI) {
+		val = (reg & IDR1_PRIQS_M) >> IDR1_PRIQS_S;
+		sc->priq.size_log2 = val;
+		device_printf(sc->dev, "PRI queue bits %d\n", val);
+	}
+
+	sc->ssid_bits = (reg & IDR1_SSIDSIZE_M) >> IDR1_SSIDSIZE_S;
+	sc->sid_bits = (reg & IDR1_SIDSIZE_M) >> IDR1_SIDSIZE_S;
+
+	/* A 2-level table is pointless if all SIDs fit in the leaf level. */
+	if (sc->sid_bits <= STRTAB_SPLIT)
+		sc->features &= ~SMMU_FEATURE_2_LVL_STREAM_TABLE;
+
+	device_printf(sc->dev, "SSID bits %d\n", sc->ssid_bits);
+	device_printf(sc->dev, "SID bits %d\n", sc->sid_bits);
+
+	/* IDR3 */
+	reg = bus_read_4(sc->res[0], SMMU_IDR3);
+	if (reg & IDR3_RIL)
+		sc->features |= SMMU_FEATURE_RANGE_INV;
+
+	/* IDR5 */
+	reg = bus_read_4(sc->res[0], SMMU_IDR5);
+
+	switch (reg & IDR5_OAS_M) {
+	case IDR5_OAS_32:
+		sc->oas = 32;
+		break;
+	case IDR5_OAS_36:
+		sc->oas = 36;
+		break;
+	case IDR5_OAS_40:
+		sc->oas = 40;
+		break;
+	case IDR5_OAS_42:
+		sc->oas = 42;
+		break;
+	case IDR5_OAS_44:
+		sc->oas = 44;
+		break;
+	case IDR5_OAS_48:
+		sc->oas = 48;
+		break;
+	case IDR5_OAS_52:
+		sc->oas = 52;
+		break;
+	default:
+		/* Reserved encoding: don't continue with sc->oas == 0. */
+		device_printf(sc->dev, "Unsupported output address size.\n");
+		return (ENXIO);
+	}
+
+	sc->pgsizes = 0;
+	if (reg & IDR5_GRAN64K)
+		sc->pgsizes |= 64 * 1024;
+	if (reg & IDR5_GRAN16K)
+		sc->pgsizes |= 16 * 1024;
+	if (reg & IDR5_GRAN4K)
+		sc->pgsizes |= 4 * 1024;
+
+	if ((reg & IDR5_VAX_M) == IDR5_VAX_52)
+		sc->features |= SMMU_FEATURE_VAX;
+
+	return (0);
+}
+
+/*
+ * Device interface.
+ */
+/*
+ * Common attach body shared by the bus-specific front ends (e.g. ACPI).
+ * Allocates register/IRQ resources, probes features, sets up the queues
+ * and stream table and resets/enables the hardware.  On failure all
+ * locally acquired resources are released before returning.
+ */
+int
+smmu_attach(device_t dev)
+{
+	struct smmu_softc *sc;
+	int error;
+
+	sc = device_get_softc(dev);
+	sc->dev = dev;
+
+	mtx_init(&sc->sc_mtx, device_get_nameunit(sc->dev), "smmu", MTX_DEF);
+
+	error = bus_alloc_resources(dev, smmu_spec, sc->res);
+	if (error) {
+		device_printf(dev, "Couldn't allocate resources.\n");
+		mtx_destroy(&sc->sc_mtx);
+		return (ENXIO);
+	}
+
+	error = smmu_setup_interrupts(sc);
+	if (error)
+		goto fail;
+
+	error = smmu_check_features(sc);
+	if (error) {
+		device_printf(dev, "Some features are required "
+		    "but not supported by hardware.\n");
+		goto fail;
+	}
+
+	error = smmu_init_queues(sc);
+	if (error) {
+		device_printf(dev, "Couldn't allocate queues.\n");
+		goto fail;
+	}
+
+	error = smmu_init_strtab(sc);
+	if (error) {
+		device_printf(dev, "Couldn't allocate strtab.\n");
+		goto fail;
+	}
+
+	error = smmu_reset(sc);
+	if (error) {
+		device_printf(dev, "Couldn't reset SMMU.\n");
+		goto fail;
+	}
+
+	return (0);
+
+fail:
+	/* Release everything acquired above; safe to return to the bus. */
+	bus_release_resources(dev, smmu_spec, sc->res);
+	mtx_destroy(&sc->sc_mtx);
+	return (ENXIO);
+}
+
+/*
+ * Undo smmu_attach(): release bus resources and destroy the softc
+ * mutex initialized in smmu_attach().  Only called after a successful
+ * attach.
+ */
+int
+smmu_detach(device_t dev)
+{
+	struct smmu_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	bus_release_resources(dev, smmu_spec, sc->res);
+	/* Pair with the mtx_init() performed in smmu_attach(). */
+	mtx_destroy(&sc->sc_mtx);
+
+	return (0);
+}
+
+/*
+ * Bus ivar accessor stub: no instance variables are exported yet, so
+ * every request fails with ENOENT.  The printf is a development trace.
+ */
+static int
+smmu_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
+{
+	struct smmu_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	device_printf(sc->dev, "%s\n", __func__);
+
+	return (ENOENT);
+}
+
+/*
+ * Remove a [va, va + size) range from the domain's stage-1 pmap one
+ * page at a time, invalidating the TLB entry for each page that was
+ * actually present.  Stops at the first missing mapping and returns
+ * ENOENT; otherwise 0.  A single sync is issued at the end to wait for
+ * the queued invalidations.
+ */
+static int
+smmu_unmap(device_t dev, struct iommu_domain *domain,
+    vm_offset_t va, vm_size_t size)
+{
+	struct smmu_domain *smmu_domain;
+	struct smmu_softc *sc;
+	int err;
+	int i;
+
+	sc = device_get_softc(dev);
+	smmu_domain = (struct smmu_domain *)domain;
+
+	err = 0;
+
+	/* i counts bytes processed; va advances in lockstep below. */
+	for (i = 0; i < size; i += PAGE_SIZE) {
+		if (pmap_sremove(&smmu_domain->p, va)) {
+			/* pmap entry removed, invalidate TLB. */
+			smmu_tlbi_va(sc, va);
+		} else {
+			err = ENOENT;
+			break;
+		}
+		va += PAGE_SIZE;
+	}
+
+	/* Wait for all queued TLB invalidations to complete. */
+	smmu_sync(sc);
+
+	return (err);
+}
+
+/*
+ * Map the physically contiguous range [pa, pa + size) at IOVA va in the
+ * domain's stage-1 pmap, page by page, with protection 'prot'.  Each
+ * page is followed by a TLB invalidation for its VA; a single sync at
+ * the end waits for completion.  Returns the pmap_senter() error on
+ * failure (pages mapped so far are left in place).
+ */
+static int
+smmu_map(device_t dev, struct iommu_domain *domain,
+    vm_offset_t va, vm_paddr_t pa, vm_size_t size,
+    vm_prot_t prot)
+{
+	struct smmu_domain *smmu_domain;
+	struct smmu_softc *sc;
+	int error;
+
+	sc = device_get_softc(dev);
+	smmu_domain = (struct smmu_domain *)domain;
+
+	for (; size > 0; size -= PAGE_SIZE) {
+		error = pmap_senter(&smmu_domain->p, va, pa, prot, 0);
+		if (error)
+			return (error);
+		smmu_tlbi_va(sc, va);
+		pa += PAGE_SIZE;
+		va += PAGE_SIZE;
+	}
+
+	smmu_sync(sc);
+
+	return (0);
+}
+
+/*
+ * Allocate and initialize an SMMU translation domain: its lock, master
+ * list, stage-1 pmap and context descriptor.  Returns the embedded
+ * iommu_domain, or NULL on failure (with all partial state torn down).
+ */
+static struct iommu_domain *
+smmu_domain_alloc(device_t dev)
+{
+	struct smmu_domain *domain;
+	struct smmu_softc *sc;
+	int err;
+
+	sc = device_get_softc(dev);
+
+	domain = malloc(sizeof(*domain), M_SMMU, M_WAITOK | M_ZERO);
+
+	mtx_init(&domain->mtx_lock, "SMMU domain", NULL, MTX_DEF);
+
+	LIST_INIT(&domain->master_list);
+
+	pmap_pinit(&domain->p);
+	PMAP_LOCK_INIT(&domain->p);
+
+	err = smmu_init_cd(sc, domain);
+	if (err) {
+		device_printf(sc->dev, "Could not initialize CD\n");
+		/* Tear down everything set up above; don't leak the domain. */
+		pmap_release(&domain->p);
+		mtx_destroy(&domain->mtx_lock);
+		free(domain, M_SMMU);
+		return (NULL);
+	}
+
+	return (&domain->domain);
+}
+
+/*
+ * Destroy a domain previously created by smmu_domain_alloc(): drop all
+ * remaining pmap entries, release the pmap, free the context descriptor
+ * memory and finally the domain itself.  Returns the pmap_sremove_all()
+ * error (leaving the domain intact) if the pmap cannot be emptied.
+ */
+static int
+smmu_domain_free(device_t dev, struct iommu_domain *domain)
+{
+	struct smmu_domain *smmu_domain;
+	struct smmu_cd *cd;
+	int error;
+
+	smmu_domain = (struct smmu_domain *)domain;
+	cd = &smmu_domain->cd;
+
+	error = pmap_sremove_all(&smmu_domain->p);
+	if (error != 0)
+		return (error);
+
+	pmap_release(&smmu_domain->p);
+
+	/* CD memory was contigmalloc'ed by smmu_init_cd(). */
+	contigfree(cd->vaddr, cd->size, M_SMMU);
+
+	free(smmu_domain, M_SMMU);
+
+	return (0);
+}
+
+/*
+ * Attach a PCI device to a domain: look up its StreamID via the ACPI
+ * IORT, allocate a master record, set up the level-1 stream table
+ * entry when a 2-level table is in use, and install the STE.  Returns
+ * ENOENT when the device has no IORT mapping, ENXIO on stream-table
+ * setup failure.
+ */
+static int
+smmu_device_attach(device_t dev, struct iommu_domain *domain,
+    struct iommu_device *device)
+{
+	struct smmu_domain *smmu_domain;
+	struct smmu_master *master;
+	struct smmu_softc *sc;
+	uint16_t rid;
+	u_int xref, sid;
+	int seg;
+	int err;
+
+	sc = device_get_softc(dev);
+	smmu_domain = (struct smmu_domain *)domain;
+
+	master = malloc(sizeof(*master), M_SMMU, M_WAITOK | M_ZERO);
+	master->device = device;
+
+	seg = pci_get_domain(device->dev);
+	rid = pci_get_rid(device->dev);
+	err = acpi_iort_map_pci_smmuv3(seg, rid, &xref, &sid);
+	if (err) {
+		free(master, M_SMMU);
+		return (ENOENT);
+	}
+
+	master->sid = sid;
+
+	if (sc->features & SMMU_FEATURE_2_LVL_STREAM_TABLE) {
+		err = smmu_init_l1_entry(sc, sid);
+		if (err) {
+			/* Don't leak the master record on failure. */
+			free(master, M_SMMU);
+			return (ENXIO);
+		}
+	}
+
+	/*
+	 * Neoverse N1 SDP:
+	 * 0x800 xhci
+	 * 0x700 re
+	 * 0x600 sata
+	 */
+
+	DOMAIN_LOCK(smmu_domain);
+	LIST_INSERT_HEAD(&smmu_domain->master_list, master, next);
+	DOMAIN_UNLOCK(smmu_domain);
+
+	smmu_init_ste(sc, &smmu_domain->cd, master->sid, true);
+
+	return (0);
+}
+
+/*
+ * Detach a device from its domain: unlink its master record from the
+ * domain's list, tear down its level-1 stream table entry and free the
+ * record.  Returns ENODEV if the device was never attached.
+ */
+static int
+smmu_device_detach(device_t dev, struct iommu_device *device)
+{
+	struct smmu_domain *smmu_domain;
+	struct smmu_master *master, *master1;
+	struct iommu_domain *domain;
+	struct smmu_softc *sc;
+	bool found;
+
+	sc = device_get_softc(dev);
+	domain = device->domain;
+	smmu_domain = (struct smmu_domain *)domain;
+
+	found = false;
+
+	DOMAIN_LOCK(smmu_domain);
+	LIST_FOREACH_SAFE(master, &smmu_domain->master_list, next, master1) {
+		if (master->device == device) {
+			found = true;
+			LIST_REMOVE(master, next);
+			break;
+		}
+	}
+	DOMAIN_UNLOCK(smmu_domain);
+
+	if (!found)
+		return (ENODEV);
+
+	/*
+	 * NOTE(review): smmu_init_l1_entry() is only called in
+	 * smmu_device_attach() when SMMU_FEATURE_2_LVL_STREAM_TABLE is
+	 * set, but the deinit here is unconditional — confirm
+	 * smmu_deinit_l1_entry() is a no-op for linear stream tables.
+	 */
+	smmu_deinit_l1_entry(sc, master->sid);
+
+	free(master, M_SMMU);
+
+	return (0);
+}
+
+/*
+ * Base method table for the abstract "smmu" class.  device_attach is
+ * intentionally absent: a bus-specific subclass (e.g. the ACPI front
+ * end) supplies probe/attach and calls smmu_attach() itself.
+ */
+static device_method_t smmu_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_detach,	smmu_detach),
+
+	/* IOMMU interface */
+	DEVMETHOD(iommu_map,		smmu_map),
+	DEVMETHOD(iommu_unmap,		smmu_unmap),
+	DEVMETHOD(iommu_domain_alloc,	smmu_domain_alloc),
+	DEVMETHOD(iommu_domain_free,	smmu_domain_free),
+	DEVMETHOD(iommu_device_attach,	smmu_device_attach),
+	DEVMETHOD(iommu_device_detach,	smmu_device_detach),
+
+	/* Bus interface */
+	DEVMETHOD(bus_read_ivar,	smmu_read_ivar),
+
+	/* End */
+	DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(smmu, smmu_driver, smmu_methods,
+    sizeof(struct smmu_softc));
Index: sys/dev/iommu/smmu_acpi.c
===================================================================
--- /dev/null
+++ sys/dev/iommu/smmu_acpi.c
@@ -0,0 +1,249 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019-2020 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+
+#include <machine/intr.h>
+#include <machine/resource.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include "smmu_reg.h"
+#include "smmu_var.h"
+#include "pic_if.h"
+
+/* TODO: check this */
+/* Size of the register window claimed for each SMMU instance. */
+#define	MEMORY_RESOURCE_SIZE	0x40000
+/* Maximum number of SMMUv3 instances parsed from the IORT. */
+#define	MAX_SMMU		8
+
+/* NOTE(review): di_rl appears unused in this patch — confirm or drop. */
+struct smmu_acpi_devinfo {
+	struct resource_list	di_rl;
+};
+
+/* Scratch state passed to the IORT subtable walker. */
+struct iort_table_data {
+	device_t parent;
+	device_t dev;
+	ACPI_IORT_SMMU_V3 *smmu[MAX_SMMU];
+	int count;
+};
+
+/*
+ * acpi_walk_subtables() callback: collect pointers to every SMMUv3
+ * node found in the IORT into iort_data->smmu[], up to MAX_SMMU.
+ */
+static void
+iort_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
+{
+	struct iort_table_data *iort_data;
+	ACPI_IORT_NODE *node;
+	int i;
+
+	iort_data = (struct iort_table_data *)arg;
+	i = iort_data->count;
+
+	switch(entry->Type) {
+	case ACPI_IORT_NODE_SMMU_V3:
+		if (i == MAX_SMMU) {
+			printf("SMMUv3 found, but no space available.\n");
+			break;
+		}
+
+		/*
+		 * NOTE(review): slots are NULL-initialized and i == count,
+		 * so this branch should be unreachable; kept defensively.
+		 */
+		if (iort_data->smmu[i] != NULL) {
+			if (bootverbose)
+				device_printf(iort_data->parent,
+				    "smmu: Already have an SMMU table");
+			break;
+		}
+		node = (ACPI_IORT_NODE *)entry;
+		iort_data->smmu[i] = (ACPI_IORT_SMMU_V3 *)node->NodeData;
+		iort_data->count++;
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Identify method: walk the ACPI IORT, add one "smmu" child per
+ * SMMUv3 node and attach its register window and interrupt lines as
+ * bus resources.  The node's Flags/Model are stashed in the ACPI
+ * private ivar (Flags in the upper 32 bits, Model in the lower) for
+ * probe/attach to consume.
+ */
+static void
+smmu_acpi_identify(driver_t *driver, device_t parent)
+{
+	struct iort_table_data iort_data;
+	ACPI_TABLE_IORT *iort;
+	vm_paddr_t iort_pa;
+	uintptr_t priv;
+	device_t dev;
+	int i;
+
+	iort_pa = acpi_find_table(ACPI_SIG_IORT);
+	if (iort_pa == 0)
+		return;
+
+	iort = acpi_map_table(iort_pa, ACPI_SIG_IORT);
+	if (iort == NULL) {
+		device_printf(parent, "smmu: Unable to map the IORT\n");
+		return;
+	}
+
+	iort_data.parent = parent;
+	for (i = 0; i < MAX_SMMU; i++)
+		iort_data.smmu[i] = NULL;
+	iort_data.count = 0;
+
+	acpi_walk_subtables(iort + 1, (char *)iort + iort->Header.Length,
+	    iort_handler, &iort_data);
+	if (iort_data.count == 0) {
+		device_printf(parent, "No SMMU found.\n");
+		goto out;
+	}
+
+	for (i = 0; i < iort_data.count; i++) {
+		dev = BUS_ADD_CHILD(parent,
+		    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE, "smmu", -1);
+		if (dev == NULL) {
+			device_printf(parent, "add smmu child failed\n");
+			goto out;
+		}
+
+		/* Debug trace only; keep the console quiet by default. */
+		if (bootverbose)
+			printf("intr ids %d %d %d, prio %d\n",
+			    iort_data.smmu[i]->EventGsiv,
+			    iort_data.smmu[i]->SyncGsiv,
+			    iort_data.smmu[i]->GerrGsiv,
+			    iort_data.smmu[i]->PriGsiv);
+
+		/* Add the IORT data */
+		BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, 0,
+		    iort_data.smmu[i]->EventGsiv, 1);
+		BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, 1,
+		    iort_data.smmu[i]->SyncGsiv, 1);
+		BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, 2,
+		    iort_data.smmu[i]->GerrGsiv, 1);
+		BUS_SET_RESOURCE(parent, dev, SYS_RES_MEMORY, 0,
+		    iort_data.smmu[i]->BaseAddress, MEMORY_RESOURCE_SIZE);
+
+		priv = iort_data.smmu[i]->Flags;
+		priv <<= 32;
+		priv |= iort_data.smmu[i]->Model;
+
+		acpi_set_private(dev, (void *)priv);
+	}
+
+	iort_data.dev = dev;
+
+out:
+	acpi_unmap_table(iort);
+}
+
+/*
+ * Probe: accept only the generic SMMUv3 model recorded in the low 32
+ * bits of the ACPI private ivar by smmu_acpi_identify().
+ */
+static int
+smmu_acpi_probe(device_t dev)
+{
+	uintptr_t model;
+
+	model = (uintptr_t)acpi_get_private(dev) & 0xffffffff;
+	if (model != ACPI_IORT_SMMU_V3_GENERIC)
+		return (ENXIO);
+
+	device_set_desc(dev, SMMU_DEVSTR);
+
+	return (BUS_PROBE_NOWILDCARD);
+}
+
+/*
+ * ACPI attach: pull coherency info out of the IORT flags, run the
+ * common smmu_attach() body, then register with the IOMMU framework
+ * using the register window start address as the xref.
+ */
+static int
+smmu_acpi_attach(device_t dev)
+{
+	struct smmu_softc *sc;
+	uintptr_t start;
+	uintptr_t priv;
+	int err;
+
+	sc = device_get_softc(dev);
+	sc->dev = dev;
+
+	/* Upper 32 bits of the private ivar hold the IORT SMMUv3 flags. */
+	priv = (uintptr_t)acpi_get_private(dev);
+	if ((priv >> 32) & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
+		sc->features |= SMMU_FEATURE_COHERENCY;
+
+	if (bootverbose)
+		device_printf(sc->dev, "%s: features %x\n",
+		    __func__, sc->features);
+
+	err = smmu_attach(dev);
+	if (err != 0) {
+		if (bootverbose) {
+			device_printf(dev,
+			    "Failed to attach. Error %d\n", err);
+		}
+		/* smmu_attach() releases its own resources on failure. */
+		return (err);
+	}
+
+	/* Use memory start address as an xref. */
+	start = bus_get_resource_start(dev, SYS_RES_MEMORY, 0);
+	err = iommu_register(dev, start);
+	if (err) {
+		device_printf(dev, "Failed to register SMMU.\n");
+		/* Unwind the successful smmu_attach() above. */
+		smmu_detach(dev);
+		return (ENXIO);
+	}
+
+	return (0);
+}
+
+/*
+ * ACPI front-end method table.  The class inherits the IOMMU and
+ * detach methods from the base smmu_driver via DEFINE_CLASS_1.
+ */
+static device_method_t smmu_acpi_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_identify,	smmu_acpi_identify),
+	DEVMETHOD(device_probe,		smmu_acpi_probe),
+	DEVMETHOD(device_attach,	smmu_acpi_attach),
+
+	/* End */
+	DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(smmu, smmu_acpi_driver, smmu_acpi_methods,
+    sizeof(struct smmu_softc), smmu_driver);
+
+static devclass_t smmu_acpi_devclass;
+
+/* Register early so the IOMMU is available before PCI device attach. */
+EARLY_DRIVER_MODULE(smmu, acpi, smmu_acpi_driver, smmu_acpi_devclass,
+    0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
Index: sys/dev/iommu/smmu_reg.h
===================================================================
--- /dev/null
+++ sys/dev/iommu/smmu_reg.h
@@ -0,0 +1,477 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_IOMMU_SMMU_REG_H_
+#define _DEV_IOMMU_SMMU_REG_H_
+
+/* SMMU_IDR0: top-level capability register. */
+#define	SMMU_IDR0		0x000
+#define	 IDR0_ST_LVL_S		27
+#define	 IDR0_ST_LVL_M		(0x3 << IDR0_ST_LVL_S)
+#define	 IDR0_ST_LVL_LINEAR	(0x0 << IDR0_ST_LVL_S) /* Linear Stream table*/
+#define	 IDR0_ST_LVL_2		(0x1 << IDR0_ST_LVL_S) /* 2-level Stream Table*/
+#define	 IDR0_ST_TERM_MODEL	(1 << 26) /* Terminate model behavior */
+#define	 IDR0_STALL_MODEL_S	24 /* Stall model support */
+#define	 IDR0_STALL_MODEL_M	(0x3 << IDR0_STALL_MODEL_S)
+#define	 IDR0_STALL_MODEL_STALL	(0x0 << IDR0_STALL_MODEL_S) /* Stall and Term*/
+#define	 IDR0_STALL_MODEL_FORCE	(0x2 << IDR0_STALL_MODEL_S) /* Stall is forced*/
+#define	 IDR0_TTENDIAN_S	21 /* Endianness for translation table walks.*/
+#define	 IDR0_TTENDIAN_M	(0x3 << IDR0_TTENDIAN_S)
+#define	 IDR0_TTENDIAN_MIXED	(0x0 << IDR0_TTENDIAN_S)
+#define	 IDR0_TTENDIAN_LITTLE	(0x2 << IDR0_TTENDIAN_S)
+#define	 IDR0_TTENDIAN_BIG	(0x3 << IDR0_TTENDIAN_S)
+#define	 IDR0_VATOS		(1 << 20) /* Virtual ATOS page interface */
+#define	 IDR0_CD2L		(1 << 19) /* 2-level Context descriptor table*/
+#define	 IDR0_VMID16		(1 << 18) /* 16-bit VMID supported */
+#define	 IDR0_VMW		(1 << 17) /* VMID wildcard-matching */
+#define	 IDR0_PRI		(1 << 16) /* Page Request Interface supported*/
+#define	 IDR0_ATOS		(1 << 15) /* Address Translation Operations */
+#define	 IDR0_SEV		(1 << 14) /* WFE wake-up events */
+#define	 IDR0_MSI		(1 << 13) /* Message Signalled Interrupts */
+#define	 IDR0_ASID16		(1 << 12) /* 16-bit ASID supported */
+#define	 IDR0_NS1ATS		(1 << 11) /* Split-stage ATS not supported */
+#define	 IDR0_ATS		(1 << 10) /* PCIe ATS supported by SMMU */
+#define	 IDR0_HYP		(1 << 9) /* Hypervisor stage 1 contexts */
+#define	 IDR0_DORMHINT		(1 << 8) /* Dormant hint supported */
+#define	 IDR0_HTTU_S		6 /* H/W transl. table A-flag and Dirty state */
+#define	 IDR0_HTTU_M		(0x3 << IDR0_HTTU_S)
+#define	 IDR0_HTTU_A		(0x1 << IDR0_HTTU_S) /* Access flag (A-flag) */
+#define	 IDR0_HTTU_AD		(0x2 << IDR0_HTTU_S) /* A-flag and Dirty State*/
+#define	 IDR0_BTM		(1 << 5) /* Broadcast TLB Maintenance */
+#define	 IDR0_COHACC		(1 << 4) /* Coherent access to translations*/
+#define	 IDR0_TTF_S		2 /* Translation Table Formats supported */
+#define	 IDR0_TTF_M		(0x3 << IDR0_TTF_S)
+#define	 IDR0_TTF_AA32		(0x1 << IDR0_TTF_S) /* AArch32 (LPAE) */
+#define	 IDR0_TTF_AA64		(0x2 << IDR0_TTF_S) /* AArch64 */
+#define	 IDR0_TTF_ALL		(0x3 << IDR0_TTF_S) /* AArch32 and AArch64 */
+#define	 IDR0_S1P		(1 << 1) /* Stage1 translation supported. */
+#define	 IDR0_S2P		(1 << 0) /* Stage2 translation supported. */
+/* SMMU_IDR1..SMMU_CMDQ_CONS: ID, control and command queue registers. */
+#define	SMMU_IDR1		0x004
+#define	 IDR1_TABLES_PRESET	(1 << 30) /* Table base addresses fixed. */
+#define	 IDR1_QUEUES_PRESET	(1 << 29) /* Queue base addresses fixed. */
+#define	 IDR1_REL		(1 << 28) /* Relative base pointers */
+#define	 IDR1_ATTR_TYPES_OVR	(1 << 27) /* Incoming attrs can be overridden*/
+#define	 IDR1_ATTR_PERMS_OVR	(1 << 26) /* Incoming attrs can be overridden*/
+#define	 IDR1_CMDQS_S		21 /* Maximum number of Command queue entries*/
+#define	 IDR1_CMDQS_M		(0x1f << IDR1_CMDQS_S)
+#define	 IDR1_EVENTQS_S		16 /* Maximum number of Event queue entries */
+#define	 IDR1_EVENTQS_M		(0x1f << IDR1_EVENTQS_S)
+#define	 IDR1_PRIQS_S		11 /* Maximum number of PRI queue entries */
+#define	 IDR1_PRIQS_M		(0x1f << IDR1_PRIQS_S)
+#define	 IDR1_SSIDSIZE_S	6 /* Max bits of SubstreamID */
+#define	 IDR1_SSIDSIZE_M	(0x1f << IDR1_SSIDSIZE_S)
+#define	 IDR1_SIDSIZE_S		0 /* Max bits of StreamID */
+#define	 IDR1_SIDSIZE_M		(0x3f << IDR1_SIDSIZE_S)
+#define	SMMU_IDR2		0x008
+#define	SMMU_IDR3		0x00C
+#define	 IDR3_RIL		(1 << 10) /* Range-based Invalidations. */
+#define	SMMU_IDR4		0x010
+#define	SMMU_IDR5		0x014
+#define	 IDR5_STALL_MAX_S	16 /* Max outstanding stalled transactions */
+#define	 IDR5_STALL_MAX_M	(0xffff << IDR5_STALL_MAX_S)
+#define	 IDR5_VAX_S		10 /* Virtual Address eXtend */
+#define	 IDR5_VAX_M		(0x3 << IDR5_VAX_S)
+#define	 IDR5_VAX_48		(0 << IDR5_VAX_S)
+#define	 IDR5_VAX_52		(1 << IDR5_VAX_S)
+#define	 IDR5_GRAN64K		(1 << 6) /* 64KB translation granule */
+#define	 IDR5_GRAN16K		(1 << 5) /* 16KB translation granule */
+#define	 IDR5_GRAN4K		(1 << 4) /* 4KB translation granule */
+#define	 IDR5_OAS_S		0 /* Output Address Size */
+#define	 IDR5_OAS_M		(0x7 << IDR5_OAS_S)
+#define	 IDR5_OAS_32		(0x0 << IDR5_OAS_S)
+#define	 IDR5_OAS_36		(0x1 << IDR5_OAS_S)
+#define	 IDR5_OAS_40		(0x2 << IDR5_OAS_S)
+#define	 IDR5_OAS_42		(0x3 << IDR5_OAS_S)
+#define	 IDR5_OAS_44		(0x4 << IDR5_OAS_S)
+#define	 IDR5_OAS_48		(0x5 << IDR5_OAS_S)
+#define	 IDR5_OAS_52		(0x6 << IDR5_OAS_S) /* Reserved in SMMU v3.0 */
+#define	SMMU_IIDR		0x018
+#define	SMMU_AIDR		0x01C
+#define	SMMU_CR0		0x020
+#define	 CR0_VMW_S		6 /* VMID Wildcard */
+#define	 CR0_VMW_M		(0x7 << CR0_VMW_S)
+#define	 CR0_ATSCHK		(1 << 4) /* ATS behavior: Safe mode */
+#define	 CR0_CMDQEN		(1 << 3) /* Enable Command queue processing */
+#define	 CR0_EVENTQEN		(1 << 2) /* Enable Event queue writes */
+#define	 CR0_PRIQEN		(1 << 1) /* Enable PRI queue writes */
+#define	 CR0_SMMUEN		(1 << 0) /* Non-secure SMMU enable */
+#define	SMMU_CR0ACK		0x024
+#define	SMMU_CR1		0x028
+#define	 CR1_TABLE_SH_S		10 /* Table access Shareability. */
+#define	 CR1_TABLE_SH_M		(0x3 << CR1_TABLE_SH_S)
+#define	 CR1_TABLE_SH_NS	(0x0 << CR1_TABLE_SH_S)
+#define	 CR1_TABLE_SH_OS	(0x2 << CR1_TABLE_SH_S)
+#define	 CR1_TABLE_SH_IS	(0x3 << CR1_TABLE_SH_S)
+#define	 CR1_TABLE_OC_S		8 /* Table access Outer Cacheability. */
+#define	 CR1_TABLE_OC_M		(0x3 << CR1_TABLE_OC_S)
+#define	 CR1_TABLE_OC_NC	(0x0 << CR1_TABLE_OC_S)
+#define	 CR1_TABLE_OC_WBC	(0x1 << CR1_TABLE_OC_S)
+#define	 CR1_TABLE_OC_WTC	(0x2 << CR1_TABLE_OC_S)
+#define	 CR1_TABLE_IC_S		6 /* Table access Inner Cacheability. */
+#define	 CR1_TABLE_IC_M		(0x3 << CR1_TABLE_IC_S)
+#define	 CR1_TABLE_IC_NC	(0x0 << CR1_TABLE_IC_S)
+#define	 CR1_TABLE_IC_WBC	(0x1 << CR1_TABLE_IC_S)
+#define	 CR1_TABLE_IC_WTC	(0x2 << CR1_TABLE_IC_S)
+#define	 CR1_QUEUE_SH_S		4 /* Queue access Shareability. */
+#define	 CR1_QUEUE_SH_M		(0x3 << CR1_QUEUE_SH_S)
+#define	 CR1_QUEUE_SH_NS	(0x0 << CR1_QUEUE_SH_S)
+#define	 CR1_QUEUE_SH_OS	(0x2 << CR1_QUEUE_SH_S)
+#define	 CR1_QUEUE_SH_IS	(0x3 << CR1_QUEUE_SH_S)
+#define	 CR1_QUEUE_OC_S		2 /* Queue access Outer Cacheability. */
+#define	 CR1_QUEUE_OC_M		(0x3 << CR1_QUEUE_OC_S)
+#define	 CR1_QUEUE_OC_NC	(0x0 << CR1_QUEUE_OC_S)
+#define	 CR1_QUEUE_OC_WBC	(0x1 << CR1_QUEUE_OC_S)
+#define	 CR1_QUEUE_OC_WTC	(0x2 << CR1_QUEUE_OC_S)
+#define	 CR1_QUEUE_IC_S		0 /* Queue access Inner Cacheability. */
+#define	 CR1_QUEUE_IC_M		(0x3 << CR1_QUEUE_IC_S)
+#define	 CR1_QUEUE_IC_NC	(0x0 << CR1_QUEUE_IC_S)
+#define	 CR1_QUEUE_IC_WBC	(0x1 << CR1_QUEUE_IC_S)
+#define	 CR1_QUEUE_IC_WTC	(0x2 << CR1_QUEUE_IC_S)
+#define	SMMU_CR2		0x02C
+#define	 CR2_PTM		(1 << 2) /* Private TLB Maintenance. */
+#define	 CR2_RECINVSID		(1 << 1) /* Record invalid SID. */
+#define	 CR2_E2H		(1 << 0) /* Enable EL2-E2H translation regime */
+#define	SMMU_STATUSR		0x040
+#define	SMMU_GBPA		0x044
+#define	SMMU_AGBPA		0x048
+#define	SMMU_IRQ_CTRL		0x050
+#define	 IRQ_CTRL_EVENTQ_IRQEN	(1 << 2) /* NS Event queue interrupts enabled.*/
+#define	 IRQ_CTRL_PRIQ_IRQEN	(1 << 1) /* PRI queue interrupts are enabled.*/
+#define	 IRQ_CTRL_GERROR_IRQEN	(1 << 0) /* Global errors int are enabled. */
+#define	SMMU_IRQ_CTRLACK	0x054
+#define	SMMU_GERROR		0x060
+#define	SMMU_GERRORN		0x064
+#define	SMMU_GERROR_IRQ_CFG0	0x068
+#define	SMMU_GERROR_IRQ_CFG1	0x070
+#define	SMMU_GERROR_IRQ_CFG2	0x074
+#define	SMMU_STRTAB_BASE	0x080
+#define	 STRTAB_BASE_RA		(1UL << 62) /* Read-Allocate. */
+#define	 STRTAB_BASE_ADDR_S	6 /* Physical address of Stream table base */
+#define	 STRTAB_BASE_ADDR_M	(0x3fffffffffffUL << STRTAB_BASE_ADDR_S)
+#define	SMMU_STRTAB_BASE_CFG	0x088
+#define	 STRTAB_BASE_CFG_FMT_S		16 /* Format of Stream table. */
+#define	 STRTAB_BASE_CFG_FMT_M		(0x3 << STRTAB_BASE_CFG_FMT_S)
+#define	 STRTAB_BASE_CFG_FMT_LINEAR	(0x0 << STRTAB_BASE_CFG_FMT_S)
+#define	 STRTAB_BASE_CFG_FMT_2LVL	(0x1 << STRTAB_BASE_CFG_FMT_S)
+#define	 STRTAB_BASE_CFG_SPLIT_S	6 /* SID split point for 2lvl table. */
+#define	 STRTAB_BASE_CFG_SPLIT_M	(0x1f << STRTAB_BASE_CFG_SPLIT_S)
+#define	 STRTAB_BASE_CFG_SPLIT_4KB	(6 << STRTAB_BASE_CFG_SPLIT_S)
+#define	 STRTAB_BASE_CFG_SPLIT_16KB	(8 << STRTAB_BASE_CFG_SPLIT_S)
+#define	 STRTAB_BASE_CFG_SPLIT_64KB	(10 << STRTAB_BASE_CFG_SPLIT_S)
+#define	 STRTAB_BASE_CFG_LOG2SIZE_S	0 /* Table size as log2(entries) */
+#define	 STRTAB_BASE_CFG_LOG2SIZE_M	(0x3f << STRTAB_BASE_CFG_LOG2SIZE_S)
+#define	SMMU_CMDQ_BASE		0x090
+#define	 CMDQ_BASE_RA		(1UL << 62) /* Read-Allocate. */
+#define	 Q_BASE_ADDR_S		5 /* PA of queue base */
+#define	 Q_BASE_ADDR_M		(0x7fffffffffff << Q_BASE_ADDR_S)
+#define	 Q_LOG2SIZE_S		0 /* Queue size as log2(entries) */
+#define	 Q_LOG2SIZE_M		(0x1f << Q_LOG2SIZE_S)
+#define	SMMU_CMDQ_PROD		0x098
+#define	SMMU_CMDQ_CONS		0x09C
+#define	 CMDQ_CONS_ERR_S	24
+#define	 CMDQ_CONS_ERR_M	(0x7f << CMDQ_CONS_ERR_S)
+/*
+ * Event and PRI queue registers.  Note the PROD/CONS registers live in
+ * the second 64KB page of the register frame (0x10000 + offset).
+ */
+#define	SMMU_EVENTQ_BASE	0x0A0
+#define	 EVENTQ_BASE_WA		(1UL << 62) /* Write-Allocate. */
+#define	SMMU_EVENTQ_PROD	0x100A8
+#define	SMMU_EVENTQ_CONS	0x100AC
+#define	SMMU_EVENTQ_IRQ_CFG0	0x0B0
+#define	SMMU_EVENTQ_IRQ_CFG1	0x0B8
+#define	SMMU_EVENTQ_IRQ_CFG2	0x0BC
+#define	SMMU_PRIQ_BASE		0x0C0
+#define	 PRIQ_BASE_WA		(1UL << 62) /* Write-Allocate. */
+#define	SMMU_PRIQ_PROD		0x100C8
+#define	SMMU_PRIQ_CONS		0x100CC
+#define	SMMU_PRIQ_IRQ_CFG0	0x0D0
+#define	SMMU_PRIQ_IRQ_CFG1	0x0D8
+#define	SMMU_PRIQ_IRQ_CFG2	0x0DC
+/* ATOS registers and the Secure (SMMU_S_*) register bank at 0x8000. */
+#define	SMMU_GATOS_CTRL		0x100
+#define	SMMU_GATOS_SID		0x108
+#define	SMMU_GATOS_ADDR		0x110
+#define	SMMU_GATOS_PAR		0x118
+#define	SMMU_VATOS_SEL		0x180
+#define	SMMU_S_IDR0		0x8000
+#define	SMMU_S_IDR1		0x8004
+#define	SMMU_S_IDR2		0x8008
+#define	SMMU_S_IDR3		0x800C
+#define	SMMU_S_IDR4		0x8010
+#define	SMMU_S_CR0		0x8020
+#define	SMMU_S_CR0ACK		0x8024
+#define	SMMU_S_CR1		0x8028
+#define	SMMU_S_CR2		0x802C
+#define	SMMU_S_INIT		0x803C
+#define	SMMU_S_GBPA		0x8044
+#define	SMMU_S_AGBPA		0x8048
+#define	SMMU_S_IRQ_CTRL		0x8050
+#define	SMMU_S_IRQ_CTRLACK	0x8054
+#define	SMMU_S_GERROR		0x8060
+#define	SMMU_S_GERRORN		0x8064
+#define	SMMU_S_GERROR_IRQ_CFG0	0x8068
+#define	SMMU_S_GERROR_IRQ_CFG1	0x8070
+#define	SMMU_S_GERROR_IRQ_CFG2	0x8074
+#define	SMMU_S_STRTAB_BASE	0x8080
+#define	SMMU_S_STRTAB_BASE_CFG	0x8088
+#define	SMMU_S_CMDQ_BASE	0x8090
+#define	SMMU_S_CMDQ_PROD	0x8098
+#define	SMMU_S_CMDQ_CONS	0x809C
+#define	SMMU_S_EVENTQ_BASE	0x80A0
+#define	SMMU_S_EVENTQ_PROD	0x80A8
+#define	SMMU_S_EVENTQ_CONS	0x80AC
+#define	SMMU_S_EVENTQ_IRQ_CFG0	0x80B0
+#define	SMMU_S_EVENTQ_IRQ_CFG1	0x80B8
+#define	SMMU_S_EVENTQ_IRQ_CFG2	0x80BC
+#define	SMMU_S_GATOS_CTRL	0x8100
+#define	SMMU_S_GATOS_SID	0x8108
+#define	SMMU_S_GATOS_ADDR	0x8110
+#define	SMMU_S_GATOS_PAR	0x8118
+
+/* Command queue entry opcodes and field layouts. */
+#define	CMD_QUEUE_OPCODE_S	0
+#define	CMD_QUEUE_OPCODE_M	(0xff << CMD_QUEUE_OPCODE_S)
+
+#define	CMD_PREFETCH_CONFIG	0x01
+#define	 PREFETCH_0_SID_S	32
+#define	CMD_PREFETCH_ADDR	0x02
+#define	CMD_CFGI_STE		0x03
+#define	 CFGI_0_STE_SID_S	32
+#define	CMD_CFGI_STE_RANGE	0x04
+#define	 CFGI_1_STE_RANGE_S	0
+#define	CMD_CFGI_CD		0x05
+#define	 CFGI_0_SSID_S		12
+#define	 CFGI_1_LEAF_S		0
+#define	CMD_CFGI_CD_ALL		0x06
+#define	CMD_TLBI_NH_ALL		0x10
+#define	CMD_TLBI_NH_ASID	0x11
+#define	CMD_TLBI_NH_VA		0x12
+#define	 TLBI_1_LEAF		(1 << 0)
+#define	 TLBI_1_ADDR_S		12
+#define	 TLBI_1_ADDR_M		(0xfffffffffffff << TLBI_1_ADDR_S)
+#define	CMD_TLBI_NH_VAA		0x13
+#define	CMD_TLBI_EL3_ALL	0x18
+#define	CMD_TLBI_EL3_VA		0x1A
+#define	CMD_TLBI_EL2_ALL	0x20
+#define	CMD_TLBI_EL2_ASID	0x21
+#define	CMD_TLBI_EL2_VA		0x22
+#define	CMD_TLBI_EL2_VAA	0x23
+#define	CMD_TLBI_S12_VMALL	0x28
+#define	CMD_TLBI_S2_IPA		0x2A
+#define	CMD_TLBI_NSNH_ALL	0x30
+#define	CMD_ATC_INV		0x40
+#define	CMD_PRI_RESP		0x41
+#define	CMD_RESUME		0x44
+#define	CMD_STALL_TERM		0x45
+#define	CMD_SYNC		0x46
+#define	 SYNC_0_CS_S		12 /* The ComplSignal */
+#define	 SYNC_0_CS_M		(0x3 << SYNC_0_CS_S)
+#define	 SYNC_0_CS_SIG_NONE	(0x0 << SYNC_0_CS_S)
+#define	 SYNC_0_CS_SIG_IRQ	(0x1 << SYNC_0_CS_S)
+#define	 SYNC_0_CS_SIG_SEV	(0x2 << SYNC_0_CS_S)
+#define	 SYNC_0_MSH_S		22 /* Shareability attribute for MSI write */
+#define	 SYNC_0_MSH_M		(0x3 << SYNC_0_MSH_S)
+#define	 SYNC_0_MSH_NS		(0x0 << SYNC_0_MSH_S) /* Non-shareable */
+#define	 SYNC_0_MSH_OS		(0x2 << SYNC_0_MSH_S) /* Outer Shareable */
+#define	 SYNC_0_MSH_IS		(0x3 << SYNC_0_MSH_S) /* Inner Shareable */
+#define	 SYNC_0_MSIATTR_S	24 /* Write attribute for MSI */
+#define	 SYNC_0_MSIATTR_M	(0xf << SYNC_0_MSIATTR_S)
+#define	 SYNC_0_MSIATTR_OIWB	(0xf << SYNC_0_MSIATTR_S)
+#define	 SYNC_0_MSIDATA_S	32
+#define	 SYNC_1_MSIADDRESS_S	2
+#define	 SYNC_1_MSIADDRESS_M	(0x3ffffffffffff << SYNC_1_MSIADDRESS_S)
+/* Stream Table Entry, 64-bit word 0. */
+#define	STE0_VALID		(1 << 0) /* Structure contents are valid. */
+#define	STE0_CONFIG_S		1
+#define	STE0_CONFIG_M		(0x7 << STE0_CONFIG_S)
+#define	STE0_CONFIG_ABORT	(0x0 << STE0_CONFIG_S)
+#define	STE0_CONFIG_BYPASS	(0x4 << STE0_CONFIG_S)
+#define	STE0_CONFIG_S1_TRANS	(0x5 << STE0_CONFIG_S)
+#define	STE0_CONFIG_S2_TRANS	(0x6 << STE0_CONFIG_S)
+#define	STE0_CONFIG_ALL_TRANS	(0x7 << STE0_CONFIG_S)
+#define	STE0_S1FMT_S		4
+#define	STE0_S1FMT_M		(0x3 << STE0_S1FMT_S)
+#define	STE0_S1FMT_LINEAR	(0x0 << STE0_S1FMT_S)
+#define	STE0_S1FMT_4KB_L2	(0x1 << STE0_S1FMT_S)
+#define	STE0_S1FMT_64KB_L2	(0x2 << STE0_S1FMT_S)
+#define	STE0_S1CONTEXTPTR_S	6
+#define	STE0_S1CONTEXTPTR_M	(0x3fffffffffffUL << STE0_S1CONTEXTPTR_S)
+#define	STE0_S1CDMAX_S		59
+/* Must be UL: shifting an int by 59 is undefined behavior. */
+#define	STE0_S1CDMAX_M		(0x1fUL << STE0_S1CDMAX_S)
+
+#define STE1_S1DSS_S 0
+#define STE1_S1DSS_M (0x3 << STE1_S1DSS_S)
+#define STE1_S1DSS_TERMINATE (0x0 << STE1_S1DSS_S)
+#define STE1_S1DSS_BYPASS (0x1 << STE1_S1DSS_S)
+#define STE1_S1DSS_SUBSTREAM0 (0x2 << STE1_S1DSS_S)
+#define STE1_S1CIR_S 2
+#define STE1_S1CIR_M (0x3 << STE1_S1CIR_S)
+#define STE1_S1CIR_NC (0x0 << STE1_S1CIR_S)
+#define STE1_S1CIR_WBRA (0x1 << STE1_S1CIR_S)
+#define STE1_S1CIR_WT (0x2 << STE1_S1CIR_S)
+#define STE1_S1CIR_WB (0x3 << STE1_S1CIR_S)
+#define STE1_S1COR_S 4
+#define STE1_S1COR_M (0x3 << STE1_S1COR_S)
+#define STE1_S1COR_NC (0x0 << STE1_S1COR_S)
+#define STE1_S1COR_WBRA (0x1 << STE1_S1COR_S)
+#define STE1_S1COR_WT (0x2 << STE1_S1COR_S)
+#define STE1_S1COR_WB (0x3 << STE1_S1COR_S)
+#define STE1_S1CSH_S 6
+#define STE1_S1CSH_NS (0x0 << STE1_S1CSH_S)
+#define STE1_S1CSH_OS (0x2 << STE1_S1CSH_S)
+#define STE1_S1CSH_IS (0x3 << STE1_S1CSH_S)
+#define STE1_S2HWU59 (1 << 8)
+#define STE1_S2HWU60 (1 << 9)
+#define STE1_S2HWU61 (1 << 10)
+#define STE1_S2HWU62 (1 << 11)
+#define STE1_DRE (1 << 12) /* Destructive Read Enable. */
+#define STE1_CONT_S 13 /* Contiguous Hint */
+#define STE1_CONT_M (0xf << STE1_CONT_S)
+#define STE1_DCP (1 << 17) /* Directed Cache Prefetch. */
+#define STE1_PPAR (1 << 18) /* PRI Page request Auto Responses */
+#define STE1_MEV (1 << 19) /* Merge Events */
+#define STE1_S1STALLD (1 << 27) /* Stage 1 Stall Disable */
+#define STE1_EATS_S 28 /* Enable PCIe ATS translation and traffic */
+#define STE1_EATS_M (0x3 << STE1_EATS_S)
+#define STE1_EATS_ABORT (0x0 << STE1_EATS_S)
+#define STE1_EATS_FULLATS (0x1 << STE1_EATS_S) /* Full ATS */
+#define STE1_EATS_S1 (0x2 << STE1_EATS_S) /* Split-stage ATS */
+#define STE1_STRW_S		30	/* StreamWorld control */
+#define STE1_STRW_M		(0x3UL << STE1_STRW_S)
+#define STE1_STRW_NS_EL1	(0x0UL << STE1_STRW_S)
+#define STE1_STRW_NS_EL2	(0x2UL << STE1_STRW_S)
+#define STE1_MEMATTR_S 32
+#define STE1_MTCFG		(1UL << 36)
+#define STE1_ALLOCCFG_S 37
+#define STE1_SHCFG_S 44
+#define STE1_SHCFG_M (0x3UL << STE1_SHCFG_S)
+#define STE1_SHCFG_NS (0x0UL << STE1_SHCFG_S)
+#define STE1_SHCFG_INCOMING (0x1UL << STE1_SHCFG_S)
+#define STE1_SHCFG_OS (0x2UL << STE1_SHCFG_S)
+#define STE1_SHCFG_IS (0x3UL << STE1_SHCFG_S)
+#define STE1_NSCFG_S 46
+#define STE1_NSCFG_M (0x3UL << STE1_NSCFG_S)
+#define STE1_NSCFG_SECURE (0x2UL << STE1_NSCFG_S)
+#define STE1_NSCFG_NONSECURE (0x3UL << STE1_NSCFG_S)
+#define STE1_PRIVCFG_S 48
+#define STE1_INSTCFG_S 50
+
+#define STE2_S2VMID_S		0
+#define STE2_S2VMID_M		(0xffff << STE2_S2VMID_S)
+#define STE2_S2T0SZ_S		32	/* Size of IPA input region */
+#define STE2_S2T0SZ_M		(0x3fUL << STE2_S2T0SZ_S)
+#define STE2_S2SL0_S		38	/* Starting level of stage 2 tt walk */
+#define STE2_S2SL0_M		(0x3UL << STE2_S2SL0_S)
+#define STE2_S2IR0_S		40
+#define STE2_S2IR0_M		(0x3UL << STE2_S2IR0_S)
+#define STE2_S2OR0_S		42
+#define STE2_S2OR0_M		(0x3UL << STE2_S2OR0_S)
+#define STE2_S2SH0_S		44
+#define STE2_S2SH0_M		(0x3UL << STE2_S2SH0_S)
+#define STE2_S2TG_S		46
+#define STE2_S2TG_M		(0x3UL << STE2_S2TG_S)
+#define STE2_S2PS_S		48	/* Physical address Size */
+#define STE2_S2PS_M		(0x7UL << STE2_S2PS_S)
+#define STE2_S2AA64		(1UL << 51)	/* Stage 2 tt is AArch64 */
+#define STE2_S2ENDI		(1UL << 52)	/* Stage 2 tt endianness */
+#define STE2_S2AFFD		(1UL << 53)	/* Stage 2 Access Flag Fault Disable*/
+#define STE2_S2PTW		(1UL << 54)	/* Protected Table Walk */
+#define STE2_S2S		(1UL << 57)
+#define STE2_S2R		(1UL << 58)
+
+#define STE3_S2TTB_S		4	/* Address of Translation Table base */
+#define STE3_S2TTB_M		(0xffffffffffffUL << STE3_S2TTB_S)
+
+#define CD0_T0SZ_S 0 /* VA region size covered by TT0. */
+#define CD0_T0SZ_M (0x3f << CD0_T0SZ_S)
+#define CD0_TG0_S 6 /* TT0 Translation Granule size */
+#define CD0_TG0_M (0x3 << CD0_TG0_S)
+#define CD0_TG0_4KB (0x0 << CD0_TG0_S)
+#define CD0_TG0_64KB (0x1 << CD0_TG0_S)
+#define CD0_TG0_16KB (0x2 << CD0_TG0_S)
+#define CD0_IR0_S 8 /* Inner region Cacheability for TT0 access*/
+#define CD0_IR0_M (0x3 << CD0_IR0_S)
+#define CD0_IR0_NC (0x0 << CD0_IR0_S)
+#define CD0_IR0_WBC_RWA (0x1 << CD0_IR0_S)
+#define CD0_IR0_WTC_RA (0x2 << CD0_IR0_S)
+#define CD0_IR0_WBC_RA (0x3 << CD0_IR0_S)
+#define CD0_OR0_S 10 /* Outer region Cacheability for TT0 access*/
+#define CD0_OR0_M (0x3 << CD0_OR0_S)
+#define CD0_OR0_NC (0x0 << CD0_OR0_S)
+#define CD0_OR0_WBC_RWA (0x1 << CD0_OR0_S)
+#define CD0_OR0_WTC_RA (0x2 << CD0_OR0_S)
+#define CD0_OR0_WBC_RA (0x3 << CD0_OR0_S)
+#define CD0_SH0_S 12 /* Shareability for TT0 access */
+#define CD0_SH0_M (0x3 << CD0_SH0_S)
+#define CD0_SH0_NS (0x0 << CD0_SH0_S)
+#define CD0_SH0_OS (0x2 << CD0_SH0_S) /* Outer Shareable */
+#define CD0_SH0_IS (0x3 << CD0_SH0_S) /* Inner Shareable */
+#define CD0_EPD0 (1 << 14) /* TT0 walk disable */
+#define CD0_ENDI (1 << 15) /* Big Endian */
+#define CD0_T1SZ_S 16 /* VA region size covered by TT1 */
+#define CD0_T1SZ_M (0x3f << CD0_T1SZ_S)
+#define CD0_TG1_S 22 /* TT1 Translation Granule size */
+#define CD0_TG1_M (0x3 << CD0_TG1_S)
+#define CD0_TG1_4KB (0x2 << CD0_TG1_S)
+#define CD0_TG1_64KB (0x3 << CD0_TG1_S)
+#define CD0_TG1_16KB (0x1 << CD0_TG1_S)
+#define CD0_IR1_S 24 /* Inner region Cacheability for TT1 access*/
+#define CD0_IR1_M (0x3 << CD0_IR1_S)
+#define CD0_OR1_S 26
+#define CD0_OR1_M (0x3 << CD0_OR1_S)
+#define CD0_SH1_S 28
+#define CD0_SH1_M (0x3 << CD0_SH1_S)
+#define CD0_EPD1 (1UL << 30) /* TT1 tt walk disable*/
+#define CD0_VALID (1UL << 31) /* CD Valid. */
+#define CD0_IPS_S 32 /* Intermediate Physical Size */
+#define CD0_IPS_M (0x7UL << CD0_IPS_S)
+#define CD0_IPS_32BITS (0x0UL << CD0_IPS_S)
+#define CD0_IPS_36BITS (0x1UL << CD0_IPS_S)
+#define CD0_IPS_40BITS (0x2UL << CD0_IPS_S)
+#define CD0_IPS_42BITS (0x3UL << CD0_IPS_S)
+#define CD0_IPS_44BITS (0x4UL << CD0_IPS_S)
+#define CD0_IPS_48BITS (0x5UL << CD0_IPS_S)
+#define CD0_IPS_52BITS (0x6UL << CD0_IPS_S) /* SMMUv3.1 only */
+#define CD0_AFFD (1UL << 35) /* Access Flag Fault Disable */
+#define CD0_WXN (1UL << 36) /* Write eXecute Never */
+#define CD0_UWXN (1UL << 37) /* Unprivileged Write eXecut Never*/
+#define CD0_TBI0 (1UL << 38) /* Top Byte Ignore for TTB0 */
+#define CD0_TBI1 (1UL << 39) /* Top Byte Ignore for TTB1 */
+#define CD0_PAN (1UL << 40) /* Privileged Access Never */
+#define CD0_AA64 (1UL << 41) /* TTB{0,1} is AArch64-format TT */
+#define CD0_HD (1UL << 42)
+#define CD0_HA (1UL << 43)
+#define CD0_S (1UL << 44)
+#define CD0_R (1UL << 45)
+#define CD0_A (1UL << 46)
+#define CD0_ASET (1UL << 47) /* ASID Set. */
+#define CD0_ASID_S 48 /* Address Space Identifier */
+#define CD0_ASID_M		(0xffffUL << CD0_ASID_S)
+#define CD1_TTB0_S		4	/* Address of TT0 base. */
+#define CD1_TTB0_M		(0xffffffffffffUL << CD1_TTB0_S)
+
+#endif /* _DEV_IOMMU_SMMU_REG_H_ */
Index: sys/dev/iommu/smmu_var.h
===================================================================
--- /dev/null
+++ sys/dev/iommu/smmu_var.h
@@ -0,0 +1,172 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019-2020 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_IOMMU_SMMU_VAR_H_
+#define _DEV_IOMMU_SMMU_VAR_H_
+
+#include <dev/iommu/iommu.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_page.h>
+
+#define SMMU_DEVSTR "ARM System Memory Management Unit"
+#define SMMU_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
+#define SMMU_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
+
+DECLARE_CLASS(smmu_driver);
+
+struct smmu_queue_local_copy {
+ union {
+ uint64_t val;
+ struct {
+ uint32_t prod;
+ uint32_t cons;
+ };
+ };
+};
+
+struct smmu_cd {
+ vm_paddr_t paddr;
+ vm_size_t size;
+ void *vaddr;
+};
+
+struct smmu_queue {
+ struct smmu_queue_local_copy lc;
+ vm_paddr_t paddr;
+ void *vaddr;
+ uint32_t prod_off;
+ uint32_t cons_off;
+ int size_log2;
+ uint64_t base;
+};
+
+struct smmu_cmdq_entry {
+ uint8_t opcode;
+ union {
+ struct {
+ uint16_t asid;
+ uint16_t vmid;
+ vm_offset_t addr;
+ bool leaf;
+ } tlbi;
+ struct {
+ uint32_t sid;
+ uint32_t ssid;
+ bool leaf;
+ } cfgi;
+ struct {
+ uint32_t sid;
+ } prefetch;
+ struct {
+ uint64_t msiaddr;
+ } sync;
+ };
+};
+
+struct l1_desc {
+ uint8_t span;
+ size_t size;
+ void *va;
+ vm_paddr_t pa;
+};
+
+struct smmu_strtab {
+ void *vaddr;
+ uint64_t base;
+ uint32_t base_cfg;
+ uint32_t num_l1_entries;
+ struct l1_desc *l1;
+};
+
+struct smmu_softc {
+ device_t dev;
+ struct resource *res[4];
+ void *intr_cookie[3];
+ uint32_t ias; /* Intermediate Physical Address */
+ uint32_t oas; /* Physical Address */
+ uint32_t asid_bits;
+ uint32_t vmid_bits;
+ uint32_t sid_bits;
+ uint32_t ssid_bits;
+ uint32_t pgsizes;
+ uint32_t features;
+#define SMMU_FEATURE_2_LVL_STREAM_TABLE (1 << 0)
+#define SMMU_FEATURE_2_LVL_CD (1 << 1)
+#define SMMU_FEATURE_TT_LE (1 << 2)
+#define SMMU_FEATURE_TT_BE (1 << 3)
+#define SMMU_FEATURE_SEV (1 << 4)
+#define SMMU_FEATURE_MSI (1 << 5)
+#define SMMU_FEATURE_HYP (1 << 6)
+#define SMMU_FEATURE_ATS (1 << 7)
+#define SMMU_FEATURE_PRI (1 << 8)
+#define SMMU_FEATURE_STALL_FORCE (1 << 9)
+#define SMMU_FEATURE_STALL (1 << 10)
+#define SMMU_FEATURE_S1P (1 << 11)
+#define SMMU_FEATURE_S2P (1 << 12)
+#define SMMU_FEATURE_VAX (1 << 13)
+#define SMMU_FEATURE_COHERENCY (1 << 14)
+#define SMMU_FEATURE_RANGE_INV (1 << 15)
+ struct smmu_queue cmdq;
+ struct smmu_queue evtq;
+ struct smmu_queue priq;
+ struct smmu_strtab strtab;
+ int sync;
+ struct mtx sc_mtx;
+};
+
+struct smmu_master {
+ LIST_ENTRY(smmu_master) next;
+ struct iommu_device *device;
+ int sid;
+};
+
+struct smmu_domain {
+ struct iommu_domain domain;
+ struct mtx mtx_lock;
+ LIST_ENTRY(smmu_domain) next;
+ LIST_HEAD(, smmu_master) master_list;
+ struct smmu_cd cd;
+ struct pmap p;
+};
+
+MALLOC_DECLARE(M_SMMU);
+
+/* Device methods */
+int smmu_attach(device_t dev);
+int smmu_detach(device_t dev);
+
+#endif /* _DEV_IOMMU_SMMU_VAR_H_ */
Index: sys/dev/pci/pci.c
===================================================================
--- sys/dev/pci/pci.c
+++ sys/dev/pci/pci.c
@@ -5700,6 +5700,25 @@
}
return (tag);
}
+#elif defined(ACPI_SMMU)
+bus_dma_tag_t bounce_smmu_get_dma_tag(device_t dev, device_t child);
+bus_dma_tag_t
+pci_get_dma_tag(device_t bus, device_t dev)
+{
+ bus_dma_tag_t tag;
+ struct pci_softc *sc;
+
+ if (device_get_parent(dev) == bus) {
+ /* try smmu and return if it works */
+ tag = bounce_smmu_get_dma_tag(bus, dev);
+ } else
+ tag = NULL;
+ if (tag == NULL) {
+ sc = device_get_softc(bus);
+ tag = sc->sc_dma_tag;
+ }
+ return (tag);
+}
#else
bus_dma_tag_t
pci_get_dma_tag(device_t bus, device_t dev)

File Metadata

Mime Type
text/plain
Expires
Sat, Jan 11, 7:23 PM (19 h, 4 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
15757057
Default Alt Text
D24618.id71155.diff (108 KB)

Event Timeline