Page MenuHomeFreeBSD

D54251.diff
No OneTemporary

D54251.diff

diff --git a/sys/arm64/arm64/gicv5_its.c b/sys/arm64/arm64/gicv5_its.c
new file mode 100644
--- /dev/null
+++ b/sys/arm64/arm64/gicv5_its.c
@@ -0,0 +1,950 @@
+/*-
+ * Copyright (c) 2015-2016 The FreeBSD Foundation
+ * Copyright (c) 2023,2025 Arm Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+#include "opt_platform.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/cpuset.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/queue.h>
+#include <sys/rman.h>
+#include <sys/smp.h>
+#include <sys/vmem.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/atomic.h>
+
+#ifdef FDT
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_subr.h>
+#endif
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "gicv5var.h"
+#include "gic_v3_var.h" /* For GICV3_IVAR_NIRQS */
+
+#include "pic_if.h"
+#include "msi_if.h"
+
+/* TODO: Only write back cache when non-coherent */
+
+/* ITS Config Frame */
+#define ITS_IDR0 0x0000
+#define ITS_IDR1 0x0004
+#define ITS_IDR1_L2SZ_SHIFT 8
+#define ITS_IDR1_L2SZ_MASK (0x7u << ITS_IDR1_L2SZ_SHIFT)
+#define ITS_IDR1_L2SZ_64K_MASK (0x4u << ITS_IDR1_L2SZ_SHIFT)
+#define ITS_IDR1_L2SZ_16K_MASK (0x2u << ITS_IDR1_L2SZ_SHIFT)
+#define ITS_IDR1_L2SZ_4K_MASK (0x1u << ITS_IDR1_L2SZ_SHIFT)
+#define ITS_IDR1_ITT_LEVELS (0x1u << 7)
+#define ITS_IDR1_DT_LEVELS (0x1u << 6)
+#define ITS_IDR1_DEVICEID_BITS_SHIFT 0
+#define ITS_IDR1_DEVICEID_BITS (0x3fu << ITS_IDR1_DEVICEID_BITS_SHIFT)
+#define ITS_IDR1_DEVICEID_BITS_VAL(x) \
+ (((x) & ITS_IDR1_DEVICEID_BITS) >> ITS_IDR1_DEVICEID_BITS_SHIFT)
+#define ITS_IDR2 0x0008
+#define ITS_IDR2_XDMN_EVENTS_SHIFT 5
+#define ITS_IDR2_XDMN_EVENTS_MASK (0x3u << ITS_IDR2_XDMN_EVENTS_SHIFT)
+#define ITS_IDR2_EVENTID_SHIFT 0
+#define ITS_IDR2_EVENTID_MASK (0x1fu << ITS_IDR2_EVENTID_SHIFT)
+#define ITS_IIDR 0x0040
+#define ITS_AIDR 0x0044
+#define ITS_CR0 0x0080
+#define ITS_CR0_IDLE (0x1u << 1)
+#define ITS_CR0_ITSEN (0x1u << 0)
+#define ITS_CR1 0x0084
+#define ITS_DT_BASER 0x00c0
+#define ITS_DT_BASER_ADDR_MASK 0x00fffffffffffff8ul
+#define ITS_DT_CFGR 0x00d0
+#define ITS_DIDR 0x0100
+#define ITS_EIDR 0x0108
+#define ITS_INV_EVENTR 0x010c
+#define ITS_INV_DEVICER 0x0110
+#define ITS_INV_DEVICER_I (0x1u << 31)
+#define ITS_INV_DEVICER_EVENTID_BITS_SHIFT 1
+#define ITS_INV_DEVICER_EVENTID_BITS_MASK \
+ (0x1ful << ITS_INV_DEVICER_EVENTID_BITS_SHIFT)
+#define ITS_INV_DEVICER_L1 (0x1u << 0)
+#define ITS_READ_EVENTR 0x0114
+#define ITS_READ_EVENT_DATAR 0x0118
+#define ITS_STATUSR 0x0120
+#define ITS_STATUSR_IDLE (0x1u << 0)
+#define ITS_SYNCR 0x0140
+#define ITS_SYNC_STATUSR 0x0148
+#define ITS_GEN_EVENT_DIDR 0x0180
+#define ITS_GEN_EVENT_EIDR 0x0188
+#define ITS_GEN_EVENTR 0x018c
+#define ITS_GEN_EVENT_STATUSR 0x0190
+#define ITS_MEC_IDR 0x01c0
+#define ITS_MEC_MECID_R 0x01c4
+#define ITS_MPAM_IDR 0x0200
+#define ITS_MPAM_PARTID_R 0x0204
+#define ITS_SWERR_STATUSR 0x0240
+#define ITS_SWERR_SYNDROMER0 0x0248
+#define ITS_SWERR_SYNDROMER1 0x0250
+
+/* ITS Translate Frame */
+#define ITS_TRANSLATER 0x0000
+#define ITS_RL_TRANSLATER 0x0008
+
+/* L1_DTE - Level 1 device table entry */
+#define L1_DTE_SIZE 8
+#define L1_DTE_SPAN_SHIFT 60
+#define L1_DTE_SPAN_MASK (0xful << L1_DTE_SPAN_SHIFT)
+#define L1_DTE_L2_ADDR_MASK 0xfffffffffffff8
+#define L1_DTE_VALID (0x1ul << 0)
+
+/* L2_DTE - Level 2 device table entry */
+#define L2_DTE_SIZE 8
+#define L2_DTE_EVENTID_BITS_SHIFT 59
+#define L2_DTE_EVENTID_BITS_MASK (0x1ful << L2_DTE_EVENTID_BITS_SHIFT)
+#define L2_DTE_ITT_STRUCTURE_SHIFT 58
+#define L2_DTE_ITT_STRUCTURE_MASK (0x1ul << L2_DTE_ITT_STRUCTURE_SHIFT)
+#define L2_DTE_ITT_STRUCTURE_LINEAR (0x0ul << L2_DTE_ITT_STRUCTURE_SHIFT)
+#define L2_DTE_ITT_STRUCTURE_2_LEVEL (0x1ul << L2_DTE_ITT_STRUCTURE_SHIFT)
+#define L2_DTE_DSWE_SHIFT 57
+#define L2_DTE_DSWE (0x1 << L2_DTE_DSWE_SHIFT)
+#define L2_DTE_ITT_ADDR_MASK 0x00fffffffffffff8
+#define L2_DTE_ITT_L2SZ_SHIFT 1
+#define L2_DTE_ITT_L2SZ_MASK (0x3ul << L2_DTE_ITT_L2SZ_SHIFT)
+#define L2_DTE_ITT_L2SZ_4K (0x0ul << L2_DTE_ITT_L2SZ_SHIFT)
+#define L2_DTE_ITT_L2SZ_16K (0x1ul << L2_DTE_ITT_L2SZ_SHIFT)
+#define L2_DTE_ITT_L2SZ_64K (0x2ul << L2_DTE_ITT_L2SZ_SHIFT)
+#define L2_DTE_VALID (0x1ul << 0)
+/* The maximum physical address we can use for the ITT */
+/* TODO: Move to use ITS_IDR0.PA_RANGE */
+#define ITT_MAX_ADDR 0x00ffffffffffffff
+/*
+ * TODO: Align is either:
+ * 2 + EVENTID_BITS
+ * or
+ * Max(2, (EVENTID_BITS - (9 + (2 * ITT_L2SZ)) + 2)
+ */
+#define ITT_ALIGN 8
+
+/* L1_ITTE - Level 1 interrupt translation table entry */
+#define L1_ITTE_SIZE 8
+#define L1_ITTE_SPAN_SHIFT 60
+#define L1_ITTE_SPAN_MASK (0xful << L1_ITTE_SPAN_SHIFT)
+#define L1_ITTE_L2_ADDR_MASK 0xfffffffffffff8
+#define L1_ITTE_VALID (0x1ul << 0)
+
+/* L2_ITTE - Level 2 interrupt translation table entry */
+#define L2_ITTE_SIZE 8
+#define L2_ITTE_VM_ID_SHIFT 32
+#define L2_ITTE_VM_ID_MASK (0xfffful << L2_ITTE_VM_ID_SHIFT)
+#define L2_ITTE_VALID (0x1ul << 31)
+#define L2_ITTE_VIRTUAL (0x1ul << 30)
+#define L2_ITTE_DAC_SHIFT 28
+#define L2_ITTE_DAC_MASK (0x3ul << L2_ITTE_DAC_SHIFT)
+#define L2_ITTE_LPI_ID_SHIFT 0
+#define L2_ITTE_LPI_ID_MASK (0xfffffful << L2_ITTE_LPI_ID_SHIFT)
+
+/* LPI chunk owned by ITS device */
+struct lpi_chunk {
+ u_int lpi_base; /* First LPI ID in this chunk */
+ u_int lpi_free; /* First free LPI in set */
+ u_int lpi_num; /* Total number of LPIs in chunk */
+ u_int lpi_busy; /* Number of busy LPIs in chunk */
+};
+
+/* ITS device: per-requester state, one for each device using this ITS */
+struct its_dev {
+ TAILQ_ENTRY(its_dev) entry; /* Linkage on its_device_list */
+ /* PCI device */
+ device_t pci_dev;
+ /* Device ID (i.e. PCI device ID) */
+ uint32_t devid;
+ /* List of assigned LPIs */
+ struct lpi_chunk lpis;
+ /* Virtual address of ITT */
+ /* XXX: Only a linear ITT for now */
+ uint64_t *itt;
+};
+
+/* ITS device list */
+struct its_device_list {
+ struct mtx its_dev_lock; /* Protects its_dev_list */
+ TAILQ_HEAD(its_dev_list, its_dev) its_dev_list;
+
+ /* XXX: Only a linear DT for now */
+ uint64_t *its_dev_dte; /* Device table: array of L2 DTEs */
+ vmem_t *its_dev_irq_alloc; /* LPI number allocator */
+ struct gicv5_its_irqsrc **its_irqs;
+ size_t its_dev_dt_count; /* Number of entries in its_dev_dte */
+};
+
+/* Interrupt source for a single event/LPI allocated through this ITS */
+struct gicv5_its_irqsrc {
+ struct gicv5_base_irqsrc gi_isrc;
+ u_int gi_event_id; /* Event ID; also the MSI data value */
+ struct its_dev *gi_its_dev; /* Owning ITS device */
+ TAILQ_ENTRY(gicv5_its_irqsrc) gi_link; /* Linkage on sc_free_irqs */
+};
+
+/* The ITS translate frame that devices write MSI messages to */
+struct gicv5_its_translate_frame {
+ struct intr_pic *its_pic; /* PIC registered for this frame */
+ intptr_t its_xref; /* Firmware xref identifying the frame */
+ bus_addr_t its_frame_paddr; /* Physical address of the frame */
+};
+
+/* Per-ITS driver state */
+struct gicv5_its_softc {
+ struct its_device_list its_dl; /* Devices using this ITS */
+ struct resource *its_cfg; /* ITS config frame registers */
+ struct gicv5_its_translate_frame its_frame;
+
+ /* LPI number -> irqsrc lookup used at interrupt time */
+ struct gicv5_its_irqsrc **sc_irqs;
+
+ cpuset_t its_cpus; /* CPUs interrupts may be routed to */
+ u_int its_irq_cpu; /* Last CPU picked for round-robin */
+ TAILQ_HEAD(free_irqs, gicv5_its_irqsrc) sc_free_irqs;
+};
+
+static device_attach_t gicv5_its_attach;
+
+static pic_disable_intr_t gicv5_its_disable_intr;
+static pic_enable_intr_t gicv5_its_enable_intr;
+static pic_map_intr_t gicv5_its_map_intr;
+static pic_setup_intr_t gicv5_its_setup_intr;
+static pic_post_filter_t gicv5_its_post_filter;
+static pic_post_ithread_t gicv5_its_post_ithread;
+static pic_pre_ithread_t gicv5_its_pre_ithread;
+static pic_bind_intr_t gicv5_its_bind_intr;
+
+static msi_alloc_msi_t gicv5_its_alloc_msi;
+static msi_release_msi_t gicv5_its_release_msi;
+static msi_alloc_msix_t gicv5_its_alloc_msix;
+static msi_release_msix_t gicv5_its_release_msix;
+static msi_map_msi_t gicv5_its_map_msi;
+#ifdef IOMMU
+static msi_iommu_init_t gicv5_iommu_init;
+static msi_iommu_deinit_t gicv5_iommu_deinit;
+#endif
+
+static device_method_t gicv5_its_methods[] = {
+ /* Interrupt controller interface */
+ DEVMETHOD(pic_disable_intr, gicv5_its_disable_intr),
+ DEVMETHOD(pic_enable_intr, gicv5_its_enable_intr),
+ DEVMETHOD(pic_map_intr, gicv5_its_map_intr),
+ DEVMETHOD(pic_setup_intr, gicv5_its_setup_intr),
+ DEVMETHOD(pic_post_filter, gicv5_its_post_filter),
+ DEVMETHOD(pic_post_ithread, gicv5_its_post_ithread),
+ DEVMETHOD(pic_pre_ithread, gicv5_its_pre_ithread),
+#ifdef SMP
+ DEVMETHOD(pic_bind_intr, gicv5_its_bind_intr),
+#endif
+
+ /* MSI/MSI-X */
+ DEVMETHOD(msi_alloc_msi, gicv5_its_alloc_msi),
+ DEVMETHOD(msi_release_msi, gicv5_its_release_msi),
+ DEVMETHOD(msi_alloc_msix, gicv5_its_alloc_msix),
+ DEVMETHOD(msi_release_msix, gicv5_its_release_msix),
+ DEVMETHOD(msi_map_msi, gicv5_its_map_msi),
+#ifdef IOMMU
+ DEVMETHOD(msi_iommu_init, gicv5_iommu_init),
+ DEVMETHOD(msi_iommu_deinit, gicv5_iommu_deinit),
+#endif
+
+ /* End */
+ DEVMETHOD_END
+};
+
+static DEFINE_CLASS_0(gic, gicv5_its_driver, gicv5_its_methods,
+ sizeof(struct gicv5_its_softc));
+
+/*
+ * Set or clear ITS_CR0.ITSEN, then wait for the ITS to report idle.
+ * Panics if the ITS does not become idle within roughly 10ms.
+ */
+static void
+its_write_cr0(struct gicv5_its_softc *sc, bool en)
+{
+ uint32_t reg;
+ int tries;
+
+ bus_write_4(sc->its_cfg, ITS_CR0, en ? ITS_CR0_ITSEN : 0);
+
+ /* Poll for idle with a ~10ms timeout */
+ for (tries = 10000; tries > 0; tries--) {
+ reg = bus_read_4(sc->its_cfg, ITS_CR0);
+ if ((reg & ITS_CR0_IDLE) != 0)
+ return;
+ DELAY(1);
+ }
+
+ panic("Timeout waiting for ITS CR0 becoming idle");
+}
+
+/*
+ * Spin until ITS_STATUSR reports the ITS has finished the previous
+ * register command. Panics if it stays busy for roughly 10ms.
+ */
+static void
+its_wait_for_statusr(struct gicv5_its_softc *sc)
+{
+ uint32_t reg;
+ int tries;
+
+ /* Poll for idle with a ~10ms timeout */
+ for (tries = 10000; tries > 0; tries--) {
+ reg = bus_read_4(sc->its_cfg, ITS_STATUSR);
+ if ((reg & ITS_STATUSR_IDLE) != 0)
+ return;
+ DELAY(1);
+ }
+
+ panic("Timeout waiting for ITS STATUSR becoming idle");
+}
+
+/* TODO: See if we could merge its device code with GICv3 ITS */
+/* Initialise the per-ITS device list and the lock protecting it. */
+static void
+its_device_list_init(struct its_device_list *dev_list)
+{
+ /* Protects access to the device list */
+ mtx_init(&dev_list->its_dev_lock, "ITS device lock", NULL, MTX_SPIN);
+ TAILQ_INIT(&dev_list->its_dev_list);
+}
+
+/*
+ * Find the ITS state for a child device, or NULL if the device has not
+ * been seen before. The device list lock must be held by the caller.
+ */
+static struct its_dev *
+its_device_find_locked(struct its_device_list *dev_list, device_t child)
+{
+ struct its_dev *its_dev;
+
+ mtx_assert(&dev_list->its_dev_lock, MA_OWNED);
+
+ TAILQ_FOREACH(its_dev, &dev_list->its_dev_list, entry) {
+ if (its_dev->pci_dev == child)
+ return (its_dev);
+ }
+
+ return (NULL);
+}
+
+/* Locking wrapper around its_device_find_locked(). */
+static struct its_dev *
+its_device_find(struct its_device_list *dev_list, device_t child)
+{
+ struct its_dev *its_dev;
+
+ mtx_lock_spin(&dev_list->its_dev_lock);
+ its_dev = its_device_find_locked(dev_list, child);
+ mtx_unlock_spin(&dev_list->its_dev_lock);
+
+ return (its_dev);
+}
+
+/*
+ * Read the MSI DeviceID for a PCI device. This is the ID the device
+ * presents to the ITS; without it interrupts cannot be translated, so
+ * failure is fatal.
+ */
+static uint32_t
+its_get_devid(device_t pci_dev)
+{
+ uintptr_t id;
+
+ if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
+ panic("%s: %s: Unable to get the MSI DeviceID", __func__,
+ device_get_nameunit(pci_dev));
+
+ return (id);
+}
+
+/*
+ * Allocate a linear (single level) ITT with 2^eventid_bits entries and
+ * encode the level 2 device table entry describing it into *dtep.
+ * Returns false if the table could not be allocated.
+ */
+static bool
+its_device_itt_alloc_linear(struct its_dev *its_dev, u_int eventid_bits,
+ uint64_t *dtep)
+{
+ size_t size;
+
+ size = ((size_t)1 << eventid_bits) * L2_ITTE_SIZE;
+ MPASS(size <= PAGE_SIZE_4K);
+
+ /* The ITT must lie below ITT_MAX_ADDR and be suitably aligned */
+ its_dev->itt = contigmalloc(size, M_DEVBUF, M_NOWAIT | M_ZERO, 0,
+ ITT_MAX_ADDR, max(size, PAGE_SIZE), 0);
+ if (its_dev->itt == NULL)
+ return (false);
+ /* Make the zeroed table visible to a possibly non-coherent ITS */
+ cpu_dcache_wb_range(its_dev->itt, size);
+
+ *dtep = (uint64_t)eventid_bits << L2_DTE_EVENTID_BITS_SHIFT |
+ L2_DTE_ITT_STRUCTURE_LINEAR | vtophys(its_dev->itt) |
+ L2_DTE_ITT_L2SZ_4K | L2_DTE_VALID;
+ return (true);
+}
+
+/*
+ * Publish the device table entry for its_dev. The store is atomic so
+ * the ITS never observes a half-written entry; the line is then written
+ * back for non-coherent ITS implementations.
+ */
+static void
+its_device_dte_update(struct its_device_list *dev_list,
+ struct its_dev *its_dev, uint64_t dte)
+{
+ uint64_t *dtep;
+
+ dtep = &dev_list->its_dev_dte[its_dev->devid];
+
+ atomic_store_64(dtep, dte);
+ cpu_dcache_wb_range(dtep, sizeof(*dtep));
+}
+
+/*
+ * Look up, or create on first use, the ITS state for a child device.
+ * On creation this allocates the ITT, reserves a contiguous range of
+ * nvecs LPIs, and installs + invalidates the device table entry.
+ * Returns NULL on allocation failure.
+ */
+static struct its_dev *
+its_device_get(device_t dev, struct its_device_list *dev_list, device_t child,
+ u_int nvecs)
+{
+ struct gicv5_its_softc *sc;
+ struct its_dev *its_dev, *tmp_dev;
+ vmem_addr_t irq_base;
+ uint64_t dte;
+ u_int eventid_bits;
+
+ its_dev = its_device_find(dev_list, child);
+ if (its_dev != NULL)
+ return (its_dev);
+
+ its_dev = malloc(sizeof(*its_dev), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (its_dev == NULL)
+ return (NULL);
+
+ its_dev->pci_dev = child;
+ its_dev->devid = its_get_devid(child);
+
+ its_dev->lpis.lpi_busy = 0;
+ its_dev->lpis.lpi_num = nvecs;
+ its_dev->lpis.lpi_free = nvecs;
+
+ eventid_bits = order_base_2(nvecs);
+ if (!its_device_itt_alloc_linear(its_dev, eventid_bits, &dte)) {
+ free(its_dev, M_DEVBUF);
+ return (NULL);
+ }
+
+ if (vmem_alloc(dev_list->its_dev_irq_alloc, nvecs,
+ M_FIRSTFIT | M_NOWAIT, &irq_base) != 0) {
+ free(its_dev->itt, M_DEVBUF);
+ free(its_dev, M_DEVBUF);
+ return (NULL);
+ }
+ /* Record the base before any path that may need to free it */
+ its_dev->lpis.lpi_base = irq_base;
+
+ mtx_lock_spin(&dev_list->its_dev_lock);
+ /* Recheck the ITS device hasn't been allocated */
+ tmp_dev = its_device_find_locked(dev_list, child);
+ if (tmp_dev != NULL) {
+ mtx_unlock_spin(&dev_list->its_dev_lock);
+ /*
+ * Clean up the unused device, returning the LPI range
+ * allocated above (irq_base, not the previously-unset
+ * lpi_base field).
+ */
+ vmem_free(dev_list->its_dev_irq_alloc, irq_base, nvecs);
+ free(its_dev->itt, M_DEVBUF);
+ free(its_dev, M_DEVBUF);
+ return (tmp_dev);
+ }
+
+ TAILQ_INSERT_TAIL(&dev_list->its_dev_list, its_dev, entry);
+
+ /*
+ * Store with an atomic operation to ensure the Valid field is
+ * set with the other fields.
+ */
+ its_device_dte_update(dev_list, its_dev, dte);
+
+ sc = device_get_softc(dev);
+ bus_write_8(sc->its_cfg, ITS_DIDR, its_dev->devid);
+ bus_write_4(sc->its_cfg, ITS_INV_DEVICER, ITS_INV_DEVICER_I |
+ (eventid_bits << ITS_INV_DEVICER_EVENTID_BITS_SHIFT));
+
+ its_wait_for_statusr(sc);
+ mtx_unlock_spin(&dev_list->its_dev_lock);
+
+ return (its_dev);
+}
+
+
+/*
+ * Interrupt dispatch entry for LPIs handled by this ITS; irq is the
+ * LPI number passed in by the parent interrupt controller.
+ */
+static int
+gicv5_its_intr(void *arg, uintptr_t irq)
+{
+ struct gicv5_its_softc *sc = arg;
+ struct gicv5_its_irqsrc *gi;
+ struct trapframe *tf;
+
+ gi = sc->sc_irqs[irq];
+ if (gi == NULL)
+ panic("%s: Invalid interrupt %ju", __func__, (uintmax_t)irq);
+
+ tf = curthread->td_intr_frame;
+ intr_isrc_dispatch(&gi->gi_isrc.gbi_isrc, tf);
+ return (FILTER_HANDLED);
+}
+
+/*
+ * Pick a CPU for an interrupt that has no binding yet, rotating
+ * round-robin through the CPUs associated with this ITS.
+ */
+static int
+gicv5_its_select_cpu(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct gicv5_its_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (CPU_EMPTY(&isrc->isrc_cpu)) {
+ sc->its_irq_cpu = intr_irq_next_cpu(sc->its_irq_cpu,
+ &sc->its_cpus);
+ CPU_SETOF(sc->its_irq_cpu, &isrc->isrc_cpu);
+ }
+
+ return (0);
+}
+
+/*
+ * Common attach path: map the ITS config frame, allocate and program
+ * the device table, enable the ITS, then register with INTRNG as both
+ * an interrupt controller and an MSI controller.
+ */
+static int
+gicv5_its_attach(device_t dev)
+{
+ struct gicv5_its_softc *sc;
+ uint32_t idr;
+ u_int lpi_start, nlpis;
+ int error, rid;
+
+ sc = device_get_softc(dev);
+
+ lpi_start = gicv5_get_lpi_start(dev);
+ nlpis = gicv3_get_nirqs(dev);
+
+ rid = 0;
+ sc->its_cfg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
+ if (sc->its_cfg == NULL) {
+ device_printf(dev, "Unable to map config frame\n");
+ return (ENXIO);
+ }
+
+ /* Report the table geometries the hardware supports (diagnostic) */
+ idr = bus_read_4(sc->its_cfg, ITS_IDR1);
+ if ((idr & ITS_IDR1_ITT_LEVELS) != 0)
+ device_printf(dev, "2 level itt\n");
+ if ((idr & ITS_IDR1_DT_LEVELS) != 0)
+ device_printf(dev, "2 level device table\n");
+ if ((idr & (ITS_IDR1_ITT_LEVELS | ITS_IDR1_DT_LEVELS)) != 0) {
+ if ((idr & ITS_IDR1_L2SZ_64K_MASK) != 0)
+ device_printf(dev, "64K l2 size\n");
+ if ((idr & ITS_IDR1_L2SZ_16K_MASK) != 0)
+ device_printf(dev, "16K l2 size\n");
+ if ((idr & ITS_IDR1_L2SZ_4K_MASK) != 0)
+ device_printf(dev, "4K l2 size\n");
+ if ((idr & ITS_IDR1_L2SZ_MASK) == 0)
+ device_printf(dev, "2 level tables, but no l2 size\n");
+ }
+
+ its_device_list_init(&sc->its_dl);
+ TAILQ_INIT(&sc->sc_free_irqs);
+
+ if (bus_get_cpus(dev, LOCAL_CPUS, sizeof(sc->its_cpus), &sc->its_cpus) != 0)
+ panic("%s: bus_get_cpus failed", __func__);
+
+ /* TODO: 2-level device table */
+ /* Linear device table: one L2 DTE per possible DeviceID */
+ sc->its_dl.its_dev_dt_count = 1ul << ITS_IDR1_DEVICEID_BITS_VAL(idr);
+ sc->its_dl.its_dev_dte = contigmalloc(
+ round_page(sc->its_dl.its_dev_dt_count * L2_DTE_SIZE), M_DEVBUF,
+ M_WAITOK | M_ZERO, 0, ITS_DT_BASER_ADDR_MASK | PAGE_MASK,
+ sc->its_dl.its_dev_dt_count * L2_DTE_SIZE, 0);
+ dsb(ishst);
+
+
+ /* TODO: Add macros for this */
+ bus_write_4(sc->its_cfg, ITS_CR1, 0xd7);
+
+ bus_write_4(sc->its_cfg, ITS_DT_CFGR, ITS_IDR1_DEVICEID_BITS_VAL(idr));
+
+ MPASS((vtophys(sc->its_dl.its_dev_dte) & ~ITS_DT_BASER_ADDR_MASK) == 0);
+ bus_write_8(sc->its_cfg, ITS_DT_BASER, vtophys(sc->its_dl.its_dev_dte));
+ sc->its_dl.its_dev_irq_alloc = vmem_create(device_get_nameunit(dev),
+ lpi_start, nlpis, 1, 0, M_FIRSTFIT | M_WAITOK);
+ /*
+ * NOTE(review): sc_irqs has nlpis entries but is indexed by absolute
+ * LPI number elsewhere; this assumes lpi_start == 0 - confirm.
+ */
+ sc->sc_irqs = mallocarray(nlpis, sizeof(*sc->sc_irqs), M_DEVBUF,
+ M_WAITOK | M_ZERO);
+
+ its_write_cr0(sc, true);
+
+ /* Register this device as a interrupt controller */
+ sc->its_frame.its_pic = intr_pic_register(dev, sc->its_frame.its_xref);
+ error = intr_pic_add_handler(device_get_parent(dev),
+ sc->its_frame.its_pic, gicv5_its_intr, sc, lpi_start, nlpis);
+ if (error != 0) {
+ device_printf(dev, "Failed to add PIC handler\n");
+ /* NOTE(review): allocations above are leaked on this path - TODO */
+ return (error);
+ }
+
+ /* Register this device to handle MSI interrupts */
+ error = intr_msi_register(dev, sc->its_frame.its_xref);
+ if (error != 0) {
+ device_printf(dev, "Failed to register for MSIs\n");
+ return (error);
+ }
+
+ return (0);
+}
+
+/* Unimplemented; panics if called. */
+static void
+gicv5_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ panic("%s", __func__);
+}
+
+/* Delegate enabling the interrupt to the parent controller. */
+static void
+gicv5_its_enable_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ /*
+ * No "return (expr)" here: PIC_ENABLE_INTR() is void and returning
+ * a void expression from a void function is a C constraint
+ * violation (and inconsistent with the sibling wrappers below).
+ */
+ PIC_ENABLE_INTR(device_get_parent(dev), isrc);
+}
+
+/* Delegate pre-ithread processing to the parent controller. */
+static void
+gicv5_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
+{
+ PIC_PRE_ITHREAD(device_get_parent(dev), isrc);
+}
+
+/* Delegate post-ithread processing to the parent controller. */
+static void
+gicv5_its_post_ithread(device_t dev, struct intr_irqsrc *isrc)
+{
+ PIC_POST_ITHREAD(device_get_parent(dev), isrc);
+}
+
+/* Delegate post-filter processing to the parent controller. */
+static void
+gicv5_its_post_filter(device_t dev, struct intr_irqsrc *isrc)
+{
+ PIC_POST_FILTER(device_get_parent(dev), isrc);
+}
+
+/* Choose a CPU if none is set, then bind in the parent controller. */
+static int
+gicv5_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ gicv5_its_select_cpu(dev, isrc);
+
+ return (PIC_BIND_INTR(device_get_parent(dev), isrc));
+}
+
+/* Unimplemented; panics if called. */
+static int
+gicv5_its_map_intr(device_t dev, struct intr_map_data *data,
+ struct intr_irqsrc **isrcp)
+{
+ panic("%s", __func__);
+}
+
+/*
+ * Interrupt setup only needs to pick and bind a CPU here.
+ * NOTE(review): the return value of the bind is ignored - confirm
+ * that is intended.
+ */
+static int
+gicv5_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
+ struct resource *res, struct intr_map_data *data)
+{
+ /* Bind the interrupt to a CPU */
+ gicv5_its_bind_intr(dev, isrc);
+
+ return (0);
+}
+
+/*
+ * Allocate (or recycle from the free list) an irqsrc for the given
+ * event ID backed by the given LPI. Returns NULL if memory allocation
+ * or irqsrc registration fails.
+ */
+static struct gicv5_its_irqsrc *
+gicv5_its_alloc_irqsrc(device_t dev, struct gicv5_its_softc *sc,
+ u_int event_id, u_int lpi)
+{
+ struct gicv5_its_irqsrc *girq = NULL;
+
+ /* TODO: Should this be last as malloc & intr_isrc_register could fail? */
+ gicv5_irs_alloc_lpi(device_get_parent(dev), dev, lpi);
+
+ KASSERT(sc->sc_irqs[lpi] == NULL,
+ ("%s: LPI %u already allocated", __func__, lpi));
+ /* Prefer recycling a previously released irqsrc */
+ mtx_lock_spin(&sc->its_dl.its_dev_lock);
+ if (!TAILQ_EMPTY(&sc->sc_free_irqs)) {
+ girq = TAILQ_FIRST(&sc->sc_free_irqs);
+ TAILQ_REMOVE(&sc->sc_free_irqs, girq, gi_link);
+ }
+ mtx_unlock_spin(&sc->its_dl.its_dev_lock);
+ if (girq == NULL) {
+ girq = malloc(sizeof(*girq), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+ if (girq == NULL)
+ return (NULL);
+ girq->gi_isrc.gbi_space = GICv5_LPI;
+ if (intr_isrc_register(&girq->gi_isrc.gbi_isrc, dev, 0,
+ "%s,%u", device_get_nameunit(dev), lpi) != 0) {
+ free(girq, M_DEVBUF);
+ return (NULL);
+ }
+ }
+ girq->gi_isrc.gbi_irq = lpi;
+ girq->gi_event_id = event_id;
+ sc->sc_irqs[lpi] = girq;
+
+ return (girq);
+}
+
+
+#if 0
+static void
+gicv5_its_release_irqsrc(struct gicv5_its_softc *sc,
+ struct gicv5_its_irqsrc *girq)
+{
+ panic("%s", __func__);
+}
+#endif
+
+/*
+ * Install the ITT entry mapping event_id to the irqsrc's LPI and
+ * invalidate any copy of the event cached by the ITS.
+ */
+static void
+gicv5_its_update_itt(struct gicv5_its_softc *sc, struct its_dev *its_dev,
+ struct gicv5_its_irqsrc *girq, u_int event_id)
+{
+ uint32_t status;
+ int timeout;
+
+ /*
+ * Update the ITT entry. Use an atomic operation to ensure the
+ * hardware sees the full value in a single operation.
+ */
+ atomic_store_64(&its_dev->itt[event_id],
+ L2_ITTE_VALID | girq->gi_isrc.gbi_irq);
+ dsb(ishst);
+
+ /* Invalidate the event */
+ bus_write_8(sc->its_cfg, ITS_DIDR, its_dev->devid);
+ bus_write_4(sc->its_cfg, ITS_EIDR, event_id);
+ /* NOTE(review): bit 31 assumed to be the I (invalidate) bit - TODO macro */
+ bus_write_4(sc->its_cfg, ITS_INV_EVENTR, 0x1ul << 31);
+
+ /* Wait for the invalidate to complete, ~1s timeout */
+ timeout = 1000000;
+ do {
+ status = bus_read_4(sc->its_cfg, ITS_STATUSR);
+ if ((status & ITS_STATUSR_IDLE) != 0)
+ break;
+ DELAY(1);
+ } while (--timeout > 0);
+}
+
+/*
+ * Allocate count MSI vectors for child: create/size the ITS device,
+ * hand out contiguous event ID/LPI pairs, then install the ITT entries.
+ */
+static int
+gicv5_its_alloc_msi(device_t dev, device_t child, int count, int maxcount,
+ device_t *pic, struct intr_irqsrc **srcs)
+{
+ struct gicv5_its_softc *sc;
+ struct gicv5_its_irqsrc *girq;
+ struct its_dev *its_dev;
+ u_int event_id, lpi;
+ int i;
+
+ sc = device_get_softc(dev);
+ its_dev = its_device_get(dev, &sc->its_dl, child, count);
+ if (its_dev == NULL)
+ return (ENXIO);
+
+ KASSERT(its_dev->lpis.lpi_free > 0, ("%s: No free LPIs", __func__));
+
+ event_id = its_dev->lpis.lpi_num - its_dev->lpis.lpi_free;
+ lpi = its_dev->lpis.lpi_base + event_id;
+
+ /* Allocate the irqsrc for each MSI */
+ for (i = 0; i < count; i++, lpi++) {
+ its_dev->lpis.lpi_free--;
+ /*
+ * Each vector needs its own event ID: the event ID is the
+ * MSI data value reported by gicv5_its_map_msi(), so it
+ * must be unique per vector (matching the ITT entries
+ * installed at event_id + i below).
+ */
+ srcs[i] = (struct intr_irqsrc *)gicv5_its_alloc_irqsrc(dev, sc,
+ event_id + i, lpi);
+ if (srcs[i] == NULL)
+ break;
+ }
+
+ /* The allocation failed, release them */
+ if (i != count) {
+ panic("%s: TODO: Cleanup", __func__);
+ }
+
+ /* Finish the allocation now we have all MSI irqsrcs */
+ for (i = 0; i < count; i++) {
+ girq = (struct gicv5_its_irqsrc *)srcs[i];
+ girq->gi_its_dev = its_dev;
+
+ /* Map the message to the given IRQ */
+ gicv5_its_select_cpu(dev, (struct intr_irqsrc *)girq);
+
+ gicv5_its_update_itt(sc, its_dev, girq, event_id + i);
+ }
+ its_dev->lpis.lpi_busy += count;
+ *pic = dev;
+
+ return (0);
+}
+
+/* Unimplemented; panics if called. */
+static int
+gicv5_its_release_msi(device_t dev, device_t child, int count,
+ struct intr_irqsrc **isrc)
+{
+ panic("%s", __func__);
+}
+
+/*
+ * Allocate a single MSI-X vector for child. On first use the ITS
+ * device is sized for the device's full MSI-X count; each call hands
+ * out the next free event ID/LPI pair.
+ */
+static int
+gicv5_its_alloc_msix(device_t dev, device_t child, device_t *pic,
+ struct intr_irqsrc **isrcp)
+{
+ struct gicv5_its_softc *sc;
+ struct gicv5_its_irqsrc *girq;
+ struct its_dev *its_dev;
+ u_int nvecs, event_id, lpi;
+
+ sc = device_get_softc(dev);
+ nvecs = pci_msix_count(child);
+ its_dev = its_device_get(dev, &sc->its_dl, child, nvecs);
+ if (its_dev == NULL)
+ return (ENXIO);
+
+ KASSERT(its_dev->lpis.lpi_free > 0, ("%s: No free LPIs", __func__));
+ /* Next unused event ID; LPIs are contiguous from lpi_base */
+ event_id = its_dev->lpis.lpi_num - its_dev->lpis.lpi_free;
+ lpi = its_dev->lpis.lpi_base + event_id;
+
+ girq = gicv5_its_alloc_irqsrc(dev, sc, event_id, lpi);
+ if (girq == NULL)
+ return (ENXIO);
+ girq->gi_its_dev = its_dev;
+
+ its_dev->lpis.lpi_free--;
+ its_dev->lpis.lpi_busy++;
+
+ /* Map the message to the given IRQ */
+ gicv5_its_select_cpu(dev, (struct intr_irqsrc *)girq);
+
+ gicv5_its_update_itt(sc, its_dev, girq, event_id);
+
+ *pic = dev;
+ *isrcp = (struct intr_irqsrc *)girq;
+
+ return (0);
+}
+
+/* Unimplemented; panics if called. */
+static int
+gicv5_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
+{
+ panic("%s", __func__);
+}
+
+/*
+ * Report the MSI doorbell address (the ITS translate frame) and data
+ * (the event ID) that should be programmed into the child device.
+ */
+static int
+gicv5_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
+ uint64_t *addr, uint32_t *data)
+{
+ struct gicv5_its_softc *sc;
+ struct gicv5_its_irqsrc *gi;
+ struct gicv5_its_translate_frame *frame;
+
+ sc = device_get_softc(dev);
+ gi = (struct gicv5_its_irqsrc *)isrc;
+
+ frame = &sc->its_frame;
+ *addr = frame->its_frame_paddr + ITS_TRANSLATER;
+ *data = gi->gi_event_id;
+
+ return (0);
+}
+
+#ifdef IOMMU
+/* TODO: IOMMU integration is unimplemented; these panic if called. */
+static int
+gicv5_iommu_init(device_t dev, device_t child, struct iommu_domain **domain)
+{
+ /* TODO */
+ panic("%s", __func__);
+}
+
+static void
+gicv5_iommu_deinit(device_t dev, device_t child)
+{
+ /* TODO */
+ panic("%s", __func__);
+}
+#endif
+
+#ifdef FDT
+static device_probe_t gicv5_its_fdt_probe;
+static device_attach_t gicv5_its_fdt_attach;
+
+static device_method_t gicv5_its_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, gicv5_its_fdt_probe),
+ DEVMETHOD(device_attach, gicv5_its_fdt_attach),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+#define its_baseclasses itsv5_fdt_baseclasses
+DEFINE_CLASS_1(its, gicv5_its_fdt_driver, gicv5_its_fdt_methods,
+ sizeof(struct gicv5_its_softc), gicv5_its_driver);
+#undef its_baseclasses
+
+EARLY_DRIVER_MODULE(itsv5_fdt, gic, gicv5_its_fdt_driver, 0, 0,
+ BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
+
+/* Match the GICv5 ITS FDT node; requires LPI support in the parent GIC. */
+static int
+gicv5_its_fdt_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_is_compatible(dev, "arm,gic-v5-its"))
+ return (ENXIO);
+
+ if (!gic_get_support_lpis(dev))
+ return (ENXIO);
+
+ device_set_desc(dev, "ARM GICv5 Interrupt Translation Service");
+ return (BUS_PROBE_DEFAULT);
+}
+
+/*
+ * FDT attach: locate the single msi-controller child node that carries
+ * an "ns-translate" register, record its physical address and xref,
+ * then continue with the common attach.
+ */
+static int
+gicv5_its_fdt_attach(device_t dev)
+{
+ struct gicv5_its_softc *sc;
+ phandle_t node, child;
+ bool found;
+
+ sc = device_get_softc(dev);
+ node = ofw_bus_get_node(dev);
+
+ found = false;
+ for (child = OF_child(node); child != 0; child = OF_peer(child)) {
+ if (OF_hasprop(child, "msi-controller")) {
+ bus_size_t size;
+ int idx;
+
+ /*
+ * Find the index for the expected register. If it's
+ * missing we can skip this frame.
+ */
+ if (ofw_bus_find_string_index(child, "reg-names",
+ "ns-translate", &idx) != 0)
+ continue;
+
+ /* Exactly one usable frame is supported */
+ if (found) {
+ device_printf(dev,
+ "Too many ITS frames found\n");
+ return (EINVAL);
+ }
+
+ if (ofw_reg_to_paddr(child, idx,
+ &sc->its_frame.its_frame_paddr, &size, NULL) != 0) {
+ device_printf(dev,
+ "Unable to read frame physical address\n");
+ return (EINVAL);
+ }
+ sc->its_frame.its_pic = NULL;
+ sc->its_frame.its_xref = OF_xref_from_node(child);
+ found = true;
+ }
+ }
+ if (!found) {
+ device_printf(dev, "No valid ITS frame found\n");
+ return (EINVAL);
+ }
+
+ return (gicv5_its_attach(dev));
+}
+#endif /* FDT */
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -59,6 +59,7 @@
arm64/arm64/gic_v3_fdt.c optional fdt
arm64/arm64/gicv5.c standard
arm64/arm64/gicv5_fdt.c standard
+arm64/arm64/gicv5_its.c standard
arm64/arm64/hyp_stub.S standard
arm64/arm64/identcpu.c standard
arm64/arm64/kexec_support.c standard

File Metadata

Mime Type
text/plain
Expires
Sun, Dec 21, 12:16 PM (3 h, 6 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
27112809
Default Alt Text
D54251.diff (26 KB)

Event Timeline