diff --git a/sys/arm64/iommu/iommu.c b/sys/arm64/iommu/iommu.c
index 0fad03c7cd8e..0a7503976036 100644
--- a/sys/arm64/iommu/iommu.c
+++ b/sys/arm64/iommu/iommu.c
@@ -1,397 +1,398 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory (Department of Computer Science and
* Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
* DARPA SSITH research programme.
*
* Portions of this work were supported by Innovate UK project 105694,
* "Digital Security by Design (DSbD) Technology Platform Prototype".
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_platform.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/tree.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <dev/iommu/busdma_iommu.h>
#include <machine/vmparam.h>
#include "iommu.h"
#include "iommu_if.h"
static MALLOC_DEFINE(M_IOMMU, "IOMMU", "IOMMU framework");
#define IOMMU_LIST_LOCK() mtx_lock(&iommu_mtx)
#define IOMMU_LIST_UNLOCK() mtx_unlock(&iommu_mtx)
#define IOMMU_LIST_ASSERT_LOCKED() mtx_assert(&iommu_mtx, MA_OWNED)
#define dprintf(fmt, ...)
static struct mtx iommu_mtx;
struct iommu_entry {
struct iommu_unit *iommu;
LIST_ENTRY(iommu_entry) next;
};
static LIST_HEAD(, iommu_entry) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);
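/*
* Unmap a region of the domain's guest address space via the underlying
* IOMMU driver method.
*/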
static int
iommu_domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
iommu_gaddr_t size, int flags)
{
struct iommu_unit *iommu;
int error;
iommu = iodom->iommu;
error = IOMMU_UNMAP(iommu->dev, iodom, base, size);
return (error);
}
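/*
* Map the pages in "ma" at a region of the domain's guest address space,
* translating the map-entry eflags into a VM protection mask for the
* underlying IOMMU driver method.
*/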
static int
iommu_domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
{
struct iommu_unit *iommu;
vm_prot_t prot;
vm_offset_t va;
int error;
dprintf("%s: base %lx, size %lx\n", __func__, base, size);
prot = 0;
if (eflags & IOMMU_MAP_ENTRY_READ)
prot |= VM_PROT_READ;
if (eflags & IOMMU_MAP_ENTRY_WRITE)
prot |= VM_PROT_WRITE;
va = base;
iommu = iodom->iommu;
error = IOMMU_MAP(iommu->dev, iodom, va, ma, size, prot);
return (error);
}
static const struct iommu_domain_map_ops domain_map_ops = {
.map = iommu_domain_map_buf,
.unmap = iommu_domain_unmap_buf,
};
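/*
* Allocate a domain from the underlying IOMMU driver and initialize its
* guest address space, which spans [0, VM_MAXUSER_ADDRESS).
*/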
static struct iommu_domain *
iommu_domain_alloc(struct iommu_unit *iommu)
{
struct iommu_domain *iodom;
iodom = IOMMU_DOMAIN_ALLOC(iommu->dev, iommu);
if (iodom == NULL)
return (NULL);
iommu_domain_init(iommu, iodom, &domain_map_ops);
iodom->end = VM_MAXUSER_ADDRESS;
iodom->iommu = iommu;
iommu_gas_init_domain(iodom);
return (iodom);
}
static int
iommu_domain_free(struct iommu_domain *iodom)
{
struct iommu_unit *iommu;
iommu = iodom->iommu;
IOMMU_LOCK(iommu);
if ((iodom->flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
IOMMU_DOMAIN_LOCK(iodom);
iommu_gas_fini_domain(iodom);
IOMMU_DOMAIN_UNLOCK(iodom);
}
iommu_domain_fini(iodom);
IOMMU_DOMAIN_FREE(iommu->dev, iodom);
IOMMU_UNLOCK(iommu);
return (0);
}
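/*
* Initialize the busdma tag for a context with permissive defaults: no
* alignment or boundary constraints, and no address restrictions below
* BUS_SPACE_MAXADDR.
*/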
static void
iommu_tag_init(struct bus_dma_tag_iommu *t)
{
bus_addr_t maxaddr;
maxaddr = BUS_SPACE_MAXADDR;
t->common.ref_count = 0;
t->common.impl = &bus_dma_iommu_impl;
t->common.alignment = 1;
t->common.boundary = 0;
t->common.lowaddr = maxaddr;
t->common.highaddr = maxaddr;
t->common.maxsize = maxaddr;
t->common.nsegments = BUS_SPACE_UNRESTRICTED;
t->common.maxsegsz = maxaddr;
}
static struct iommu_ctx *
iommu_ctx_alloc(device_t dev, struct iommu_domain *iodom, bool disabled)
{
struct iommu_unit *iommu;
struct iommu_ctx *ioctx;
iommu = iodom->iommu;
ioctx = IOMMU_CTX_ALLOC(iommu->dev, iodom, dev, disabled);
if (ioctx == NULL)
return (NULL);
/*
* iommu can also be used for non-PCI based devices.
* This should be reimplemented as a new newbus method with
* pci_get_rid() as a default for PCI device class.
*/
ioctx->rid = pci_get_rid(dev);
return (ioctx);
}
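/*
* Return the context for the requester device, allocating a fresh
* domain and context on first use. In the current configuration each
* context owns its own domain.
*/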
struct iommu_ctx *
iommu_get_ctx(struct iommu_unit *iommu, device_t requester,
uint16_t rid, bool disabled, bool rmrr)
{
struct iommu_ctx *ioctx;
struct iommu_domain *iodom;
struct bus_dma_tag_iommu *tag;
IOMMU_LOCK(iommu);
ioctx = IOMMU_CTX_LOOKUP(iommu->dev, requester);
if (ioctx) {
IOMMU_UNLOCK(iommu);
return (ioctx);
}
IOMMU_UNLOCK(iommu);
/*
* In our current configuration we have a domain for each ctx,
* so allocate a domain first.
*/
iodom = iommu_domain_alloc(iommu);
if (iodom == NULL)
return (NULL);
ioctx = iommu_ctx_alloc(requester, iodom, disabled);
if (ioctx == NULL) {
iommu_domain_free(iodom);
return (NULL);
}
tag = ioctx->tag = malloc(sizeof(struct bus_dma_tag_iommu),
M_IOMMU, M_WAITOK | M_ZERO);
tag->owner = requester;
tag->ctx = ioctx;
tag->ctx->domain = iodom;
iommu_tag_init(tag);
ioctx->domain = iodom;
return (ioctx);
}
void
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ioctx)
{
struct bus_dma_tag_iommu *tag;
IOMMU_ASSERT_LOCKED(iommu);
tag = ioctx->tag;
IOMMU_CTX_FREE(iommu->dev, ioctx);
free(tag, M_IOMMU);
}
void
iommu_free_ctx(struct iommu_ctx *ioctx)
{
struct iommu_unit *iommu;
struct iommu_domain *iodom;
int error;
iodom = ioctx->domain;
iommu = iodom->iommu;
IOMMU_LOCK(iommu);
iommu_free_ctx_locked(iommu, ioctx);
IOMMU_UNLOCK(iommu);
/* Since we have a domain for each ctx, remove the domain too. */
error = iommu_domain_free(iodom);
if (error)
device_printf(iommu->dev, "Could not free a domain\n");
}
static void
iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
struct iommu_domain *iodom;
iodom = entry->domain;
IOMMU_DOMAIN_LOCK(iodom);
iommu_gas_free_space(iodom, entry);
IOMMU_DOMAIN_UNLOCK(iodom);
if (free)
iommu_gas_free_entry(iodom, entry);
else
entry->flags = 0;
}
void
iommu_domain_unload(struct iommu_domain *iodom,
struct iommu_map_entries_tailq *entries, bool cansleep)
{
struct iommu_map_entry *entry, *entry1;
int error;
TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
("not mapped entry %p %p", iodom, entry));
error = iodom->ops->unmap(iodom, entry->start, entry->end -
entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
KASSERT(error == 0, ("unmap %p error %d", iodom, error));
TAILQ_REMOVE(entries, entry, dmamap_link);
iommu_domain_free_entry(entry, true);
}
if (TAILQ_EMPTY(entries))
return;
panic("entries map is not empty");
}
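/*
* Register an IOMMU unit with the framework: set up its lock, link it
* onto the global list of units and attach it to the busdma subsystem.
*/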
int
iommu_register(struct iommu_unit *iommu)
{
struct iommu_entry *entry;
mtx_init(&iommu->lock, "IOMMU", NULL, MTX_DEF);
entry = malloc(sizeof(struct iommu_entry), M_IOMMU, M_WAITOK | M_ZERO);
entry->iommu = iommu;
IOMMU_LIST_LOCK();
LIST_INSERT_HEAD(&iommu_list, entry, next);
IOMMU_LIST_UNLOCK();
iommu_init_busdma(iommu);
return (0);
}
int
iommu_unregister(struct iommu_unit *iommu)
{
struct iommu_entry *entry, *tmp;
IOMMU_LIST_LOCK();
LIST_FOREACH_SAFE(entry, &iommu_list, next, tmp) {
if (entry->iommu == iommu) {
LIST_REMOVE(entry, next);
free(entry, M_IOMMU);
}
}
IOMMU_LIST_UNLOCK();
iommu_fini_busdma(iommu);
mtx_destroy(&iommu->lock);
return (0);
}
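/*
* Return the registered IOMMU unit, if any, that claims ownership of
* the given device.
*/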
struct iommu_unit *
iommu_find(device_t dev, bool verbose)
{
struct iommu_entry *entry;
struct iommu_unit *iommu;
int error;
IOMMU_LIST_LOCK();
LIST_FOREACH(entry, &iommu_list, next) {
iommu = entry->iommu;
error = IOMMU_FIND(iommu->dev, dev);
if (error == 0) {
IOMMU_LIST_UNLOCK();
return (entry->iommu);
}
}
IOMMU_LIST_UNLOCK();
return (NULL);
}
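/*
* Unload a single map entry. The arm64 implementation frees (or clears)
* the entry synchronously, so the "cansleep" hint is accepted but
* unused here.
*/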
void
-iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free)
+iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
+ bool cansleep __unused)
{
dprintf("%s\n", __func__);
iommu_domain_free_entry(entry, free);
}
static void
iommu_init(void)
{
mtx_init(&iommu_mtx, "IOMMU", NULL, MTX_DEF);
}
SYSINIT(iommu, SI_SUB_DRIVERS, SI_ORDER_FIRST, iommu_init, NULL);
diff --git a/sys/dev/iommu/iommu.h b/sys/dev/iommu/iommu.h
index 62b5659b6e83..65fefe3ada7b 100644
--- a/sys/dev/iommu/iommu.h
+++ b/sys/dev/iommu/iommu.h
@@ -1,200 +1,201 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2013 The FreeBSD Foundation
*
* This software was developed by Konstantin Belousov <kib@FreeBSD.org>
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _DEV_IOMMU_IOMMU_H_
#define _DEV_IOMMU_IOMMU_H_
#include
struct bus_dma_tag_common;
struct iommu_map_entry;
TAILQ_HEAD(iommu_map_entries_tailq, iommu_map_entry);
RB_HEAD(iommu_gas_entries_tree, iommu_map_entry);
RB_PROTOTYPE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
iommu_gas_cmp_entries);
struct iommu_qi_genseq {
u_int gen;
uint32_t seq;
};
struct iommu_map_entry {
iommu_gaddr_t start;
iommu_gaddr_t end;
iommu_gaddr_t first; /* Least start in subtree */
iommu_gaddr_t last; /* Greatest end in subtree */
iommu_gaddr_t free_down; /* Max free space below the
current R/B tree node */
u_int flags;
TAILQ_ENTRY(iommu_map_entry) dmamap_link; /* Link for dmamap entries */
RB_ENTRY(iommu_map_entry) rb_entry; /* Links for domain entries */
struct iommu_domain *domain;
struct iommu_qi_genseq gseq;
};
struct iommu_unit {
struct mtx lock;
device_t dev;
int unit;
int dma_enabled;
/* Busdma delayed map load */
struct task dmamap_load_task;
TAILQ_HEAD(, bus_dmamap_iommu) delayed_maps;
struct taskqueue *delayed_taskqueue;
/*
* Bitmap of buses for which context must ignore slot:func,
* duplicating the page table pointer into all context table
* entries. This is a client-controlled quirk to support some
* NTBs.
*/
uint32_t buswide_ctxs[(PCI_BUSMAX + 1) / NBBY / sizeof(uint32_t)];
};
struct iommu_domain_map_ops {
int (*map)(struct iommu_domain *domain, iommu_gaddr_t base,
iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
int (*unmap)(struct iommu_domain *domain, iommu_gaddr_t base,
iommu_gaddr_t size, int flags);
};
/*
* Locking annotations:
* (u) - Protected by iommu unit lock
* (d) - Protected by domain lock
* (c) - Immutable after initialization
*/
struct iommu_domain {
struct iommu_unit *iommu; /* (c) */
const struct iommu_domain_map_ops *ops;
struct mtx lock; /* (c) */
struct task unload_task; /* (c) */
u_int entries_cnt; /* (d) */
struct iommu_map_entries_tailq unload_entries; /* (d) Entries to
unload */
struct iommu_gas_entries_tree rb_root; /* (d) */
iommu_gaddr_t end; /* (c) Highest address + 1 in
the guest AS */
struct iommu_map_entry *first_place, *last_place; /* (d) */
struct iommu_map_entry *msi_entry; /* (d) Arch-specific */
iommu_gaddr_t msi_base; /* (d) Arch-specific */
vm_paddr_t msi_phys; /* (d) Arch-specific */
u_int flags; /* (u) */
};
struct iommu_ctx {
struct iommu_domain *domain; /* (c) */
struct bus_dma_tag_iommu *tag; /* (c) Root tag */
u_long loads; /* atomic updates, for stat only */
u_long unloads; /* same */
u_int flags; /* (u) */
uint16_t rid; /* (c) pci RID */
};
/* struct iommu_ctx flags */
#define IOMMU_CTX_FAULTED 0x0001 /* Fault was reported,
last_fault_rec is valid */
#define IOMMU_CTX_DISABLED 0x0002 /* Device is disabled, the
ephemeral reference is kept
to prevent context destruction */
#define IOMMU_DOMAIN_GAS_INITED 0x0001
#define IOMMU_DOMAIN_PGTBL_INITED 0x0002
#define IOMMU_DOMAIN_IDMAP 0x0010 /* Domain uses identity
page table */
#define IOMMU_DOMAIN_RMRR 0x0020 /* Domain contains RMRR entry,
cannot be turned off */
#define IOMMU_LOCK(unit) mtx_lock(&(unit)->lock)
#define IOMMU_UNLOCK(unit) mtx_unlock(&(unit)->lock)
#define IOMMU_ASSERT_LOCKED(unit) mtx_assert(&(unit)->lock, MA_OWNED)
#define IOMMU_DOMAIN_LOCK(dom) mtx_lock(&(dom)->lock)
#define IOMMU_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock)
#define IOMMU_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED)
void iommu_free_ctx(struct iommu_ctx *ctx);
void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ctx);
struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev,
uint16_t rid, bool id_mapped, bool rmrr_init);
struct iommu_unit *iommu_find(device_t dev, bool verbose);
-void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free);
+void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
+ bool cansleep);
void iommu_domain_unload(struct iommu_domain *domain,
struct iommu_map_entries_tailq *entries, bool cansleep);
struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *iommu,
device_t dev, bool rmrr);
device_t iommu_get_requester(device_t dev, uint16_t *rid);
int iommu_init_busdma(struct iommu_unit *unit);
void iommu_fini_busdma(struct iommu_unit *unit);
void iommu_gas_init_domain(struct iommu_domain *domain);
void iommu_gas_fini_domain(struct iommu_domain *domain);
struct iommu_map_entry *iommu_gas_alloc_entry(struct iommu_domain *domain,
u_int flags);
void iommu_gas_free_entry(struct iommu_domain *domain,
struct iommu_map_entry *entry);
void iommu_gas_free_space(struct iommu_domain *domain,
struct iommu_map_entry *entry);
int iommu_gas_map(struct iommu_domain *domain,
const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
void iommu_gas_free_region(struct iommu_domain *domain,
struct iommu_map_entry *entry);
int iommu_gas_map_region(struct iommu_domain *domain,
struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
int iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
iommu_gaddr_t end, struct iommu_map_entry **entry0);
int iommu_gas_reserve_region_extend(struct iommu_domain *domain,
iommu_gaddr_t start, iommu_gaddr_t end);
void iommu_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
bool iommu_is_buswide_ctx(struct iommu_unit *unit, u_int busno);
void iommu_domain_init(struct iommu_unit *unit, struct iommu_domain *domain,
const struct iommu_domain_map_ops *ops);
void iommu_domain_fini(struct iommu_domain *domain);
bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_paddr_t start, vm_size_t length, int flags);
bus_dma_tag_t iommu_get_dma_tag(device_t dev, device_t child);
struct iommu_ctx *iommu_get_dev_ctx(device_t dev);
struct iommu_domain *iommu_get_ctx_domain(struct iommu_ctx *ctx);
SYSCTL_DECL(_hw_iommu);
#endif /* !_DEV_IOMMU_IOMMU_H_ */
diff --git a/sys/dev/iommu/iommu_gas.c b/sys/dev/iommu/iommu_gas.c
index f94ab3756c7b..abc7a336117c 100644
--- a/sys/dev/iommu/iommu_gas.c
+++ b/sys/dev/iommu/iommu_gas.c
@@ -1,890 +1,892 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2013 The FreeBSD Foundation
*
* This software was developed by Konstantin Belousov <kib@FreeBSD.org>
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#define RB_AUGMENT(entry) iommu_gas_augment_entry(entry)
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/*
* Guest Address Space management.
*/
static uma_zone_t iommu_map_entry_zone;
#ifdef INVARIANTS
static int iommu_check_free;
#endif
static void
intel_gas_init(void)
{
iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
sizeof(struct iommu_map_entry), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);
}
SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);
struct iommu_map_entry *
iommu_gas_alloc_entry(struct iommu_domain *domain, u_int flags)
{
struct iommu_map_entry *res;
KASSERT((flags & ~(IOMMU_PGF_WAITOK)) == 0,
("unsupported flags %x", flags));
res = uma_zalloc(iommu_map_entry_zone, ((flags & IOMMU_PGF_WAITOK) !=
0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
if (res != NULL) {
res->domain = domain;
atomic_add_int(&domain->entries_cnt, 1);
}
return (res);
}
void
iommu_gas_free_entry(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
KASSERT(domain == entry->domain,
("mismatched free domain %p entry %p entry->domain %p", domain,
entry, entry->domain));
atomic_subtract_int(&domain->entries_cnt, 1);
uma_zfree(iommu_map_entry_zone, entry);
}
static int
iommu_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b)
{
/* The last entry has zero size, so <= */
KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)",
a, (uintmax_t)a->start, (uintmax_t)a->end));
KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)",
b, (uintmax_t)b->start, (uintmax_t)b->end));
KASSERT(a->end <= b->start || b->end <= a->start ||
a->end == a->start || b->end == b->start,
("overlapping entries %p (%jx, %jx) %p (%jx, %jx)",
a, (uintmax_t)a->start, (uintmax_t)a->end,
b, (uintmax_t)b->start, (uintmax_t)b->end));
if (a->end < b->end)
return (-1);
else if (b->end < a->end)
return (1);
return (0);
}
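/*
* RB-tree augmentation callback: recompute the cached summaries of an
* entry's subtree, i.e. the least start ("first"), the greatest end
* ("last"), and the largest free gap ("free_down") below the entry.
*/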
static void
iommu_gas_augment_entry(struct iommu_map_entry *entry)
{
struct iommu_map_entry *child;
iommu_gaddr_t free_down;
free_down = 0;
if ((child = RB_LEFT(entry, rb_entry)) != NULL) {
free_down = MAX(free_down, child->free_down);
free_down = MAX(free_down, entry->start - child->last);
entry->first = child->first;
} else
entry->first = entry->start;
if ((child = RB_RIGHT(entry, rb_entry)) != NULL) {
free_down = MAX(free_down, child->free_down);
free_down = MAX(free_down, child->first - entry->end);
entry->last = child->last;
} else
entry->last = entry->end;
entry->free_down = free_down;
}
RB_GENERATE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
iommu_gas_cmp_entries);
#ifdef INVARIANTS
static void
iommu_gas_check_free(struct iommu_domain *domain)
{
struct iommu_map_entry *entry, *l, *r;
iommu_gaddr_t v;
RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
KASSERT(domain == entry->domain,
("mismatched free domain %p entry %p entry->domain %p",
domain, entry, entry->domain));
l = RB_LEFT(entry, rb_entry);
r = RB_RIGHT(entry, rb_entry);
v = 0;
if (l != NULL) {
v = MAX(v, l->free_down);
v = MAX(v, entry->start - l->last);
}
if (r != NULL) {
v = MAX(v, r->free_down);
v = MAX(v, r->first - entry->end);
}
MPASS(entry->free_down == v);
}
}
#endif
static bool
iommu_gas_rb_insert(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
struct iommu_map_entry *found;
found = RB_INSERT(iommu_gas_entries_tree, &domain->rb_root, entry);
return (found == NULL);
}
static void
iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
}
struct iommu_domain *
iommu_get_ctx_domain(struct iommu_ctx *ctx)
{
return (ctx->domain);
}
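/*
* Populate a new domain with two placeholder entries: one covering the
* first page, so that no allocation starts at address zero, and a
* zero-size entry pinned at the top of the address space. All later
* allocations are made between these boundary entries.
*/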
void
iommu_gas_init_domain(struct iommu_domain *domain)
{
struct iommu_map_entry *begin, *end;
begin = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
end = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
IOMMU_DOMAIN_LOCK(domain);
KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain));
KASSERT(RB_EMPTY(&domain->rb_root),
("non-empty entries %p", domain));
begin->start = 0;
begin->end = IOMMU_PAGE_SIZE;
begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
iommu_gas_rb_insert(domain, begin);
end->start = domain->end;
end->end = domain->end;
end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
iommu_gas_rb_insert(domain, end);
domain->first_place = begin;
domain->last_place = end;
domain->flags |= IOMMU_DOMAIN_GAS_INITED;
IOMMU_DOMAIN_UNLOCK(domain);
}
void
iommu_gas_fini_domain(struct iommu_domain *domain)
{
struct iommu_map_entry *entry, *entry1;
IOMMU_DOMAIN_ASSERT_LOCKED(domain);
KASSERT(domain->entries_cnt == 2,
("domain still in use %p", domain));
entry = RB_MIN(iommu_gas_entries_tree, &domain->rb_root);
KASSERT(entry->start == 0, ("start entry start %p", domain));
KASSERT(entry->end == IOMMU_PAGE_SIZE, ("start entry end %p", domain));
KASSERT(entry->flags ==
(IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
("start entry flags %p", domain));
RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
iommu_gas_free_entry(domain, entry);
entry = RB_MAX(iommu_gas_entries_tree, &domain->rb_root);
KASSERT(entry->start == domain->end, ("end entry start %p", domain));
KASSERT(entry->end == domain->end, ("end entry end %p", domain));
KASSERT(entry->flags ==
(IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
("end entry flags %p", domain));
RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
iommu_gas_free_entry(domain, entry);
RB_FOREACH_SAFE(entry, iommu_gas_entries_tree, &domain->rb_root,
entry1) {
KASSERT((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0,
("non-RMRR entry left %p", domain));
RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root,
entry);
iommu_gas_free_entry(domain, entry);
}
}
struct iommu_gas_match_args {
struct iommu_domain *domain;
iommu_gaddr_t size;
int offset;
const struct bus_dma_tag_common *common;
u_int gas_flags;
struct iommu_map_entry *entry;
};
/*
* The interval [beg, end) is a free interval between two iommu_map_entries.
* maxaddr is an upper bound on addresses that can be allocated. Try to
* allocate space in the free interval, subject to the conditions expressed
* by a, and return 'true' if and only if the allocation attempt succeeds.
*/
static bool
iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg,
iommu_gaddr_t end, iommu_gaddr_t maxaddr)
{
iommu_gaddr_t bs, start;
/*
* The prev->end is always aligned on the page size, which
* causes page alignment for the entry->start too.
*
* A page sized gap is created between consecutive
* allocations to ensure that out-of-bounds accesses fault.
*/
a->entry->start = roundup2(beg + IOMMU_PAGE_SIZE,
a->common->alignment);
if (a->entry->start + a->size > maxaddr)
return (false);
/* IOMMU_PAGE_SIZE to create gap after new entry. */
if (a->entry->start < beg + IOMMU_PAGE_SIZE ||
a->entry->start + a->size + a->offset + IOMMU_PAGE_SIZE > end)
return (false);
/* No boundary crossing. */
if (vm_addr_bound_ok(a->entry->start + a->offset, a->size,
a->common->boundary))
return (true);
/*
* The start + offset to start + offset + size region crosses
* the boundary. Check if there is enough space after the
* next boundary after the beg.
*/
bs = rounddown2(a->entry->start + a->offset + a->common->boundary,
a->common->boundary);
start = roundup2(bs, a->common->alignment);
/* IOMMU_PAGE_SIZE to create gap after new entry. */
if (start + a->offset + a->size + IOMMU_PAGE_SIZE <= end &&
start + a->offset + a->size <= maxaddr &&
vm_addr_bound_ok(start + a->offset, a->size,
a->common->boundary)) {
a->entry->start = start;
return (true);
}
/*
* Not enough space to align at the requested boundary, or
* boundary is smaller than the size, but allowed to split.
* We already checked that start + size does not overlap maxaddr.
*
* XXXKIB. It is possible that bs is exactly at the start of
* the next entry, then we do not have gap. Ignore for now.
*/
if ((a->gas_flags & IOMMU_MF_CANSPLIT) != 0) {
a->size = bs - a->entry->start - a->offset;
return (true);
}
return (false);
}
static void
iommu_gas_match_insert(struct iommu_gas_match_args *a)
{
bool found __diagused;
a->entry->end = a->entry->start +
roundup2(a->size + a->offset, IOMMU_PAGE_SIZE);
found = iommu_gas_rb_insert(a->domain, a->entry);
KASSERT(found, ("found dup %p start %jx size %jx",
a->domain, (uintmax_t)a->entry->start, (uintmax_t)a->size));
a->entry->flags = IOMMU_MAP_ENTRY_MAP;
}
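/*
* Search the subtree rooted at "entry" for a free range below the
* tag's lowaddr limit, using the free_down summaries to skip subtrees
* that cannot fit the request plus its guard pages.
*/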
static int
iommu_gas_lowermatch(struct iommu_gas_match_args *a, struct iommu_map_entry *entry)
{
struct iommu_map_entry *first;
iommu_gaddr_t min_free;
/*
* If the subtree doesn't have free space for the requested allocation
* plus two guard pages, skip it.
*/
min_free = 2 * IOMMU_PAGE_SIZE +
roundup2(a->size + a->offset, IOMMU_PAGE_SIZE);
/* Find the first entry that could abut a big-enough range. */
first = NULL;
while (entry != NULL && entry->free_down >= min_free) {
first = entry;
entry = RB_LEFT(entry, rb_entry);
}
/*
* Walk the big-enough ranges until one satisfies alignment
* requirements, or violates lowaddr address requirement.
*/
entry = first;
while (entry != NULL) {
if ((first = RB_LEFT(entry, rb_entry)) != NULL &&
iommu_gas_match_one(a, first->last, entry->start,
a->common->lowaddr)) {
iommu_gas_match_insert(a);
return (0);
}
if (entry->end >= a->common->lowaddr) {
/* All remaining ranges >= lowaddr */
break;
}
if ((first = RB_RIGHT(entry, rb_entry)) != NULL &&
iommu_gas_match_one(a, entry->end, first->first,
a->common->lowaddr)) {
iommu_gas_match_insert(a);
return (0);
}
/* Find the next entry that might abut a big-enough range. */
if (first != NULL && first->free_down >= min_free) {
/* Find next entry in right subtree. */
do
entry = first;
while ((first = RB_LEFT(entry, rb_entry)) != NULL &&
first->free_down >= min_free);
} else {
/* Find next entry in a left-parent ancestor. */
while ((first = RB_PARENT(entry, rb_entry)) != NULL &&
entry == RB_RIGHT(first, rb_entry))
entry = first;
entry = first;
}
}
return (ENOMEM);
}
static int
iommu_gas_uppermatch(struct iommu_gas_match_args *a, struct iommu_map_entry *entry)
{
struct iommu_map_entry *child;
/*
* If the subtree doesn't have free space for the requested allocation
* plus two guard pages, give up.
*/
if (entry->free_down < 2 * IOMMU_PAGE_SIZE +
roundup2(a->size + a->offset, IOMMU_PAGE_SIZE))
return (ENOMEM);
if (entry->last < a->common->highaddr)
return (ENOMEM);
child = RB_LEFT(entry, rb_entry);
if (child != NULL && 0 == iommu_gas_uppermatch(a, child))
return (0);
if (child != NULL && child->last >= a->common->highaddr &&
iommu_gas_match_one(a, child->last, entry->start,
a->domain->end)) {
iommu_gas_match_insert(a);
return (0);
}
child = RB_RIGHT(entry, rb_entry);
if (child != NULL && entry->end >= a->common->highaddr &&
iommu_gas_match_one(a, entry->end, child->first,
a->domain->end)) {
iommu_gas_match_insert(a);
return (0);
}
if (child != NULL && 0 == iommu_gas_uppermatch(a, child))
return (0);
return (ENOMEM);
}
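/*
* Find a free range for a new allocation, preferring the region below
* the tag's lowaddr and falling back to the region above highaddr.
*/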
static int
iommu_gas_find_space(struct iommu_domain *domain,
const struct bus_dma_tag_common *common, iommu_gaddr_t size,
int offset, u_int flags, struct iommu_map_entry *entry)
{
struct iommu_gas_match_args a;
int error;
IOMMU_DOMAIN_ASSERT_LOCKED(domain);
KASSERT(entry->flags == 0, ("dirty entry %p %p", domain, entry));
a.domain = domain;
a.size = size;
a.offset = offset;
a.common = common;
a.gas_flags = flags;
a.entry = entry;
/* Handle lower region. */
if (common->lowaddr > 0) {
error = iommu_gas_lowermatch(&a, RB_ROOT(&domain->rb_root));
if (error == 0)
return (0);
KASSERT(error == ENOMEM,
("error %d from iommu_gas_lowermatch", error));
}
/* Handle upper region. */
if (common->highaddr >= domain->end)
return (ENOMEM);
error = iommu_gas_uppermatch(&a, RB_ROOT(&domain->rb_root));
KASSERT(error == 0 || error == ENOMEM,
("error %d from iommu_gas_uppermatch", error));
return (error);
}
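/*
* Insert a caller-chosen range, typically an RMRR region, into the
* address space. Overlaps are tolerated only with other RMRR entries
* and with the placeholder entries that delimit the space.
*/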
static int
iommu_gas_alloc_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
u_int flags)
{
struct iommu_map_entry *next, *prev;
bool found __diagused;
IOMMU_DOMAIN_ASSERT_LOCKED(domain);
if ((entry->start & IOMMU_PAGE_MASK) != 0 ||
(entry->end & IOMMU_PAGE_MASK) != 0)
return (EINVAL);
if (entry->start >= entry->end)
return (EINVAL);
if (entry->end >= domain->end)
return (EINVAL);
next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, entry);
KASSERT(next != NULL, ("next must be non-null %p %jx", domain,
(uintmax_t)entry->start));
prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
/* prev could be NULL */
/*
* Adapt to broken BIOSes which specify overlapping RMRR
* entries.
*
* XXXKIB: this does not handle a case when prev or next
* entries are completely covered by the current one, which
* extends both ways.
*/
if (prev != NULL && prev->end > entry->start &&
(prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
if ((flags & IOMMU_MF_RMRR) == 0 ||
(prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
return (EBUSY);
entry->start = prev->end;
}
if (next->start < entry->end &&
(next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
if ((flags & IOMMU_MF_RMRR) == 0 ||
(next->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
return (EBUSY);
entry->end = next->start;
}
if (entry->end == entry->start)
return (0);
if (prev != NULL && prev->end > entry->start) {
/* This assumes that prev is the placeholder entry. */
iommu_gas_rb_remove(domain, prev);
prev = NULL;
}
if (next->start < entry->end) {
iommu_gas_rb_remove(domain, next);
next = NULL;
}
found = iommu_gas_rb_insert(domain, entry);
KASSERT(found, ("found RMRR dup %p start %jx end %jx",
domain, (uintmax_t)entry->start, (uintmax_t)entry->end));
if ((flags & IOMMU_MF_RMRR) != 0)
entry->flags = IOMMU_MAP_ENTRY_RMRR;
#ifdef INVARIANTS
struct iommu_map_entry *ip, *in;
ip = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
in = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
KASSERT(prev == NULL || ip == prev,
("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)",
entry, entry->start, entry->end, prev,
prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end,
ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end));
KASSERT(next == NULL || in == next,
("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)",
entry, entry->start, entry->end, next,
next == NULL ? 0 : next->start, next == NULL ? 0 : next->end,
in, in == NULL ? 0 : in->start, in == NULL ? 0 : in->end));
#endif
return (0);
}
void
iommu_gas_free_space(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
IOMMU_DOMAIN_ASSERT_LOCKED(domain);
KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP,
("permanent entry %p %p", domain, entry));
iommu_gas_rb_remove(domain, entry);
entry->flags &= ~IOMMU_MAP_ENTRY_MAP;
#ifdef INVARIANTS
if (iommu_check_free)
iommu_gas_check_free(domain);
#endif
}
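/*
* Remove a permanent (RMRR) entry from the address space, restoring
* the boundary placeholder entries if it was the lowest or the highest
* entry in the tree.
*/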
void
iommu_gas_free_region(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
struct iommu_map_entry *next, *prev;
IOMMU_DOMAIN_ASSERT_LOCKED(domain);
KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR,
("non-RMRR entry %p %p", domain, entry));
prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
next = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
iommu_gas_rb_remove(domain, entry);
entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;
if (prev == NULL)
iommu_gas_rb_insert(domain, domain->first_place);
if (next == NULL)
iommu_gas_rb_insert(domain, domain->last_place);
}
int
iommu_gas_map(struct iommu_domain *domain,
const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
struct iommu_map_entry *entry;
int error;
KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0,
("invalid flags 0x%x", flags));
entry = iommu_gas_alloc_entry(domain,
(flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0);
if (entry == NULL)
return (ENOMEM);
IOMMU_DOMAIN_LOCK(domain);
error = iommu_gas_find_space(domain, common, size, offset, flags,
entry);
if (error == ENOMEM) {
IOMMU_DOMAIN_UNLOCK(domain);
iommu_gas_free_entry(domain, entry);
return (error);
}
#ifdef INVARIANTS
if (iommu_check_free)
iommu_gas_check_free(domain);
#endif
KASSERT(error == 0,
("unexpected error %d from iommu_gas_find_entry", error));
KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx",
(uintmax_t)entry->end, (uintmax_t)domain->end));
entry->flags |= eflags;
IOMMU_DOMAIN_UNLOCK(domain);
error = domain->ops->map(domain, entry->start,
entry->end - entry->start, ma, eflags,
((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
if (error == ENOMEM) {
- iommu_domain_unload_entry(entry, true);
+ iommu_domain_unload_entry(entry, true,
+ (flags & IOMMU_MF_CANWAIT) != 0);
return (error);
}
KASSERT(error == 0,
("unexpected error %d from domain_map_buf", error));
*res = entry;
return (0);
}
int
iommu_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
u_int eflags, u_int flags, vm_page_t *ma)
{
iommu_gaddr_t start;
int error;
KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
entry, entry->flags));
KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0,
("invalid flags 0x%x", flags));
start = entry->start;
IOMMU_DOMAIN_LOCK(domain);
error = iommu_gas_alloc_region(domain, entry, flags);
if (error != 0) {
IOMMU_DOMAIN_UNLOCK(domain);
return (error);
}
entry->flags |= eflags;
IOMMU_DOMAIN_UNLOCK(domain);
if (entry->end == entry->start)
return (0);
error = domain->ops->map(domain, entry->start,
entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start),
eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
if (error == ENOMEM) {
- iommu_domain_unload_entry(entry, false);
+ iommu_domain_unload_entry(entry, false,
+ (flags & IOMMU_MF_CANWAIT) != 0);
return (error);
}
KASSERT(error == 0,
("unexpected error %d from domain_map_buf", error));
return (0);
}
static int
iommu_gas_reserve_region_locked(struct iommu_domain *domain,
iommu_gaddr_t start, iommu_gaddr_t end, struct iommu_map_entry *entry)
{
int error;
IOMMU_DOMAIN_ASSERT_LOCKED(domain);
entry->start = start;
entry->end = end;
error = iommu_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
if (error == 0)
entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
return (error);
}
int
iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
iommu_gaddr_t end, struct iommu_map_entry **entry0)
{
struct iommu_map_entry *entry;
int error;
entry = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
IOMMU_DOMAIN_LOCK(domain);
error = iommu_gas_reserve_region_locked(domain, start, end, entry);
IOMMU_DOMAIN_UNLOCK(domain);
if (error != 0)
iommu_gas_free_entry(domain, entry);
else if (entry0 != NULL)
*entry0 = entry;
return (error);
}
/*
* As in iommu_gas_reserve_region, reserve [start, end), but allow for existing
* entries.
*/
int
iommu_gas_reserve_region_extend(struct iommu_domain *domain,
iommu_gaddr_t start, iommu_gaddr_t end)
{
struct iommu_map_entry *entry, *next, *prev, key = {};
iommu_gaddr_t entry_start, entry_end;
int error;
error = 0;
entry = NULL;
end = ummin(end, domain->end);
while (start < end) {
/* Preallocate an entry. */
if (entry == NULL)
entry = iommu_gas_alloc_entry(domain,
IOMMU_PGF_WAITOK);
/* Calculate the free region from here to the next entry. */
key.start = key.end = start;
IOMMU_DOMAIN_LOCK(domain);
next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &key);
KASSERT(next != NULL, ("domain %p with end %#jx has no entry "
"after %#jx", domain, (uintmax_t)domain->end,
(uintmax_t)start));
entry_end = ummin(end, next->start);
prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
if (prev != NULL)
entry_start = ummax(start, prev->end);
else
entry_start = start;
start = next->end;
/* Reserve the region if non-empty. */
if (entry_start != entry_end) {
error = iommu_gas_reserve_region_locked(domain,
entry_start, entry_end, entry);
if (error != 0) {
IOMMU_DOMAIN_UNLOCK(domain);
break;
}
entry = NULL;
}
IOMMU_DOMAIN_UNLOCK(domain);
}
/* Release a preallocated entry if it was not used. */
if (entry != NULL)
iommu_gas_free_entry(domain, entry);
return (error);
}
void
iommu_unmap_msi(struct iommu_ctx *ctx)
{
struct iommu_map_entry *entry;
struct iommu_domain *domain;
domain = ctx->domain;
entry = domain->msi_entry;
if (entry == NULL)
return;
domain->ops->unmap(domain, entry->start, entry->end -
entry->start, IOMMU_PGF_WAITOK);
IOMMU_DOMAIN_LOCK(domain);
iommu_gas_free_space(domain, entry);
IOMMU_DOMAIN_UNLOCK(domain);
iommu_gas_free_entry(domain, entry);
domain->msi_entry = NULL;
domain->msi_base = 0;
domain->msi_phys = 0;
}
int
iommu_map_msi(struct iommu_ctx *ctx, iommu_gaddr_t size, int offset,
u_int eflags, u_int flags, vm_page_t *ma)
{
struct iommu_domain *domain;
struct iommu_map_entry *entry;
int error;
error = 0;
domain = ctx->domain;
/* Check if there is already an MSI page allocated */
IOMMU_DOMAIN_LOCK(domain);
entry = domain->msi_entry;
IOMMU_DOMAIN_UNLOCK(domain);
if (entry == NULL) {
error = iommu_gas_map(domain, &ctx->tag->common, size, offset,
eflags, flags, ma, &entry);
IOMMU_DOMAIN_LOCK(domain);
if (error == 0) {
if (domain->msi_entry == NULL) {
MPASS(domain->msi_base == 0);
MPASS(domain->msi_phys == 0);
domain->msi_entry = entry;
domain->msi_base = entry->start;
domain->msi_phys = VM_PAGE_TO_PHYS(ma[0]);
} else {
/*
* We lost the race and already have an
* MSI page allocated. Free the unneeded entry.
*/
iommu_gas_free_entry(domain, entry);
}
} else if (domain->msi_entry != NULL) {
/*
* The allocation failed, but another succeeded.
* Return success as there is a valid MSI page.
*/
error = 0;
}
IOMMU_DOMAIN_UNLOCK(domain);
}
return (error);
}
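/*
* Translate a physical MSI doorbell address into the guest address at
* which the MSI page is mapped in this domain.
*/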
void
iommu_translate_msi(struct iommu_domain *domain, uint64_t *addr)
{
*addr = (*addr - domain->msi_phys) + domain->msi_base;
KASSERT(*addr >= domain->msi_entry->start,
("%s: Address is below the MSI entry start address (%jx < %jx)",
__func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->start));
KASSERT(*addr + sizeof(*addr) <= domain->msi_entry->end,
("%s: Address is above the MSI entry end address (%jx < %jx)",
__func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->end));
}
SYSCTL_NODE(_hw, OID_AUTO, iommu, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "");
#ifdef INVARIANTS
SYSCTL_INT(_hw_iommu, OID_AUTO, check_free, CTLFLAG_RWTUN,
&iommu_check_free, 0,
"Check the GPA RBtree for free_down and free_after validity");
#endif
diff --git a/sys/dev/iommu/iommu_gas.h b/sys/dev/iommu/iommu_gas.h
index c32a098538b0..a9d0df5f272f 100644
--- a/sys/dev/iommu/iommu_gas.h
+++ b/sys/dev/iommu/iommu_gas.h
@@ -1,59 +1,58 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2013 The FreeBSD Foundation
*
* This software was developed by Konstantin Belousov <kib@FreeBSD.org>
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _DEV_IOMMU_IOMMU_GAS_H_
#define _DEV_IOMMU_IOMMU_GAS_H_
/* Map flags */
#define IOMMU_MF_CANWAIT 0x0001
#define IOMMU_MF_CANSPLIT 0x0002
#define IOMMU_MF_RMRR 0x0004
#define IOMMU_PGF_WAITOK 0x0001
#define IOMMU_PGF_ZERO 0x0002
#define IOMMU_PGF_ALLOC 0x0004
#define IOMMU_PGF_NOALLOC 0x0008
#define IOMMU_PGF_OBJL 0x0010
#define IOMMU_MAP_ENTRY_PLACE 0x0001 /* Fake entry */
#define IOMMU_MAP_ENTRY_RMRR 0x0002 /* Permanent, not linked by
dmamap_link */
#define IOMMU_MAP_ENTRY_MAP 0x0004 /* Busdma created, linked by
dmamap_link */
#define IOMMU_MAP_ENTRY_UNMAPPED 0x0010 /* No backing pages */
-#define IOMMU_MAP_ENTRY_QI_NF 0x0020 /* qi task, do not free entry */
#define IOMMU_MAP_ENTRY_READ 0x1000 /* Read permitted */
#define IOMMU_MAP_ENTRY_WRITE 0x2000 /* Write permitted */
#define IOMMU_MAP_ENTRY_SNOOP 0x4000 /* Snoop */
#define IOMMU_MAP_ENTRY_TM 0x8000 /* Transient */
#endif /* !_DEV_IOMMU_IOMMU_GAS_H_ */
diff --git a/sys/x86/iommu/intel_ctx.c b/sys/x86/iommu/intel_ctx.c
index 4e94936f27d4..1bb923f33d86 100644
--- a/sys/x86/iommu/intel_ctx.c
+++ b/sys/x86/iommu/intel_ctx.c
@@ -1,973 +1,983 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2013 The FreeBSD Foundation
*
* This software was developed by Konstantin Belousov <kib@FreeBSD.org>
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context");
static MALLOC_DEFINE(M_DMAR_DOMAIN, "dmar_dom", "Intel DMAR Domain");
static void dmar_unref_domain_locked(struct dmar_unit *dmar,
struct dmar_domain *domain);
static void dmar_domain_destroy(struct dmar_domain *domain);
static void
dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
{
struct sf_buf *sf;
dmar_root_entry_t *re;
vm_page_t ctxm;
/*
* Allocated context page must be linked.
*/
ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_NOALLOC);
if (ctxm != NULL)
return;
/*
* Page not present, allocate and link. Note that another
* thread might execute this sequence in parallel. This
* should be safe, because the context entries written by both
* threads are equal.
*/
TD_PREP_PINNED_ASSERT;
ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_ZERO |
IOMMU_PGF_WAITOK);
re = dmar_map_pgtbl(dmar->ctx_obj, 0, IOMMU_PGF_NOALLOC, &sf);
re += bus;
dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
VM_PAGE_TO_PHYS(ctxm)));
dmar_flush_root_to_ram(dmar, re);
dmar_unmap_pgtbl(sf);
TD_PINNED_ASSERT;
}
static dmar_ctx_entry_t *
dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
{
struct dmar_unit *dmar;
dmar_ctx_entry_t *ctxp;
dmar = CTX2DMAR(ctx);
ctxp = dmar_map_pgtbl(dmar->ctx_obj, 1 + PCI_RID2BUS(ctx->context.rid),
IOMMU_PGF_NOALLOC | IOMMU_PGF_WAITOK, sfp);
ctxp += ctx->context.rid & 0xff;
return (ctxp);
}
static void
device_tag_init(struct dmar_ctx *ctx, device_t dev)
{
struct dmar_domain *domain;
bus_addr_t maxaddr;
domain = CTX2DOM(ctx);
maxaddr = MIN(domain->iodom.end, BUS_SPACE_MAXADDR);
ctx->context.tag->common.ref_count = 1; /* Prevent free */
ctx->context.tag->common.impl = &bus_dma_iommu_impl;
ctx->context.tag->common.boundary = 0;
ctx->context.tag->common.lowaddr = maxaddr;
ctx->context.tag->common.highaddr = maxaddr;
ctx->context.tag->common.maxsize = maxaddr;
ctx->context.tag->common.nsegments = BUS_SPACE_UNRESTRICTED;
ctx->context.tag->common.maxsegsz = maxaddr;
ctx->context.tag->ctx = CTX2IOCTX(ctx);
ctx->context.tag->owner = dev;
}
static void
ctx_id_entry_init_one(dmar_ctx_entry_t *ctxp, struct dmar_domain *domain,
vm_page_t ctx_root)
{
/*
* For an update due to a move, the store is not atomic: the
* DMAR might read the upper doubleword while the lower
* doubleword is not yet updated. The domain id is stored in
* the upper doubleword, while the table pointer is in the lower.
*
* There is no good solution; for the same reason it is wrong
* to clear the P bit in the ctx entry for an update.
*/
dmar_pte_store1(&ctxp->ctx2, DMAR_CTX2_DID(domain->domain) |
domain->awlvl);
if (ctx_root == NULL) {
dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P);
} else {
dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_UNTR |
(DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) |
DMAR_CTX1_P);
}
}
static void
ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move,
int busno)
{
struct dmar_unit *unit;
struct dmar_domain *domain;
vm_page_t ctx_root;
int i;
domain = CTX2DOM(ctx);
unit = DOM2DMAR(domain);
KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0),
("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
unit->iommu.unit, busno, pci_get_slot(ctx->context.tag->owner),
pci_get_function(ctx->context.tag->owner),
ctxp->ctx1, ctxp->ctx2));
if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 &&
(unit->hw_ecap & DMAR_ECAP_PT) != 0) {
KASSERT(domain->pgtbl_obj == NULL,
("ctx %p non-null pgtbl_obj", ctx));
ctx_root = NULL;
} else {
ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0,
IOMMU_PGF_NOALLOC);
}
if (iommu_is_buswide_ctx(DMAR2IOMMU(unit), busno)) {
MPASS(!move);
for (i = 0; i <= PCI_BUSMAX; i++) {
ctx_id_entry_init_one(&ctxp[i], domain, ctx_root);
}
} else {
ctx_id_entry_init_one(ctxp, domain, ctx_root);
}
dmar_flush_ctx_to_ram(unit, ctxp);
}
static int
dmar_flush_for_ctx_entry(struct dmar_unit *dmar, bool force)
{
int error;
/*
* If dmar declares Caching Mode as Set, follow 11.5 "Caching
* Mode Consideration" and do the (global) invalidation of the
* negative TLB entries.
*/
if ((dmar->hw_cap & DMAR_CAP_CM) == 0 && !force)
return (0);
if (dmar->qi_enabled) {
dmar_qi_invalidate_ctx_glob_locked(dmar);
if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force)
dmar_qi_invalidate_iotlb_glob_locked(dmar);
return (0);
}
error = dmar_inv_ctx_glob(dmar);
if (error == 0 && ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force))
error = dmar_inv_iotlb_glob(dmar);
return (error);
}
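/*
* Parse the RMRR entries that reference this device and install
* identity mappings for them, compensating for the unaligned and
* zero-size regions produced by buggy BIOSes.
*/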
static int
domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
int slot, int func, int dev_domain, int dev_busno,
const void *dev_path, int dev_path_len)
{
struct iommu_map_entries_tailq rmrr_entries;
struct iommu_map_entry *entry, *entry1;
vm_page_t *ma;
iommu_gaddr_t start, end;
vm_pindex_t size, i;
int error, error1;
error = 0;
TAILQ_INIT(&rmrr_entries);
dmar_dev_parse_rmrr(domain, dev_domain, dev_busno, dev_path,
dev_path_len, &rmrr_entries);
TAILQ_FOREACH_SAFE(entry, &rmrr_entries, dmamap_link, entry1) {
/*
* VT-d specification requires that the start of an
* RMRR entry is 4k-aligned. Buggy BIOSes put
* anything into the start and end fields. Truncate
* and round as necessary.
*
* We also allow the overlapping RMRR entries, see
* iommu_gas_alloc_region().
*/
start = entry->start;
end = entry->end;
if (bootverbose)
printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n",
domain->iodom.iommu->unit, bus, slot, func,
(uintmax_t)start, (uintmax_t)end);
entry->start = trunc_page(start);
entry->end = round_page(end);
if (entry->start == entry->end) {
/* Workaround for some AMI (?) BIOSes */
if (bootverbose) {
if (dev != NULL)
device_printf(dev, "");
printf("pci%d:%d:%d ", bus, slot, func);
printf("BIOS bug: dmar%d RMRR "
"region (%jx, %jx) corrected\n",
domain->iodom.iommu->unit, start, end);
}
entry->end += DMAR_PAGE_SIZE * 0x20;
}
size = OFF_TO_IDX(entry->end - entry->start);
ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK);
for (i = 0; i < size; i++) {
ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
VM_MEMATTR_DEFAULT);
}
error1 = iommu_gas_map_region(DOM2IODOM(domain), entry,
IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE,
IOMMU_MF_CANWAIT | IOMMU_MF_RMRR, ma);
/*
* Non-failed RMRR entries are owned by context rb
* tree. Get rid of the failed entry, but do not stop
* the loop. Rest of the parsed RMRR entries are
* loaded and removed on the context destruction.
*/
if (error1 == 0 && entry->end != entry->start) {
IOMMU_LOCK(domain->iodom.iommu);
domain->refs++; /* XXXKIB prevent free */
domain->iodom.flags |= IOMMU_DOMAIN_RMRR;
IOMMU_UNLOCK(domain->iodom.iommu);
} else {
if (error1 != 0) {
if (dev != NULL)
device_printf(dev, "");
printf("pci%d:%d:%d ", bus, slot, func);
printf(
"dmar%d failed to map RMRR region (%jx, %jx) %d\n",
domain->iodom.iommu->unit, start, end,
error1);
error = error1;
}
TAILQ_REMOVE(&rmrr_entries, entry, dmamap_link);
iommu_gas_free_entry(DOM2IODOM(domain), entry);
}
for (i = 0; i < size; i++)
vm_page_putfake(ma[i]);
free(ma, M_TEMP);
}
return (error);
}
/*
* PCI memory address space is shared between memory-mapped devices (MMIO) and
* host memory (which may be remapped by an IOMMU). Device accesses to an
* address within a memory aperture in a PCIe root port will be treated as
* peer-to-peer and not forwarded to an IOMMU. To avoid this, reserve the
* address space of the root port's memory apertures in the address space used
* by the IOMMU for remapping.
*/
static int
dmar_reserve_pci_regions(struct dmar_domain *domain, device_t dev)
{
struct iommu_domain *iodom;
device_t root;
uint32_t val;
uint64_t base, limit;
int error;
iodom = DOM2IODOM(domain);
root = pci_find_pcie_root_port(dev);
if (root == NULL)
return (0);
/* Disable downstream memory */
base = PCI_PPBMEMBASE(0, pci_read_config(root, PCIR_MEMBASE_1, 2));
limit = PCI_PPBMEMLIMIT(0, pci_read_config(root, PCIR_MEMLIMIT_1, 2));
error = iommu_gas_reserve_region_extend(iodom, base, limit + 1);
if (bootverbose || error != 0)
device_printf(dev, "DMAR reserve [%#jx-%#jx] (error %d)\n",
base, limit + 1, error);
if (error != 0)
return (error);
/* Disable downstream prefetchable memory */
val = pci_read_config(root, PCIR_PMBASEL_1, 2);
if (val != 0 || pci_read_config(root, PCIR_PMLIMITL_1, 2) != 0) {
if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) {
base = PCI_PPBMEMBASE(
pci_read_config(root, PCIR_PMBASEH_1, 4),
val);
limit = PCI_PPBMEMLIMIT(
pci_read_config(root, PCIR_PMLIMITH_1, 4),
pci_read_config(root, PCIR_PMLIMITL_1, 2));
} else {
base = PCI_PPBMEMBASE(0, val);
limit = PCI_PPBMEMLIMIT(0,
pci_read_config(root, PCIR_PMLIMITL_1, 2));
}
error = iommu_gas_reserve_region_extend(iodom, base,
limit + 1);
if (bootverbose || error != 0)
device_printf(dev, "DMAR reserve [%#jx-%#jx] "
"(error %d)\n", base, limit + 1, error);
if (error != 0)
return (error);
}
return (error);
}
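/*
* Allocate and initialize a DMAR domain: reserve a domain id, size the
* address space from installed memory for identity-mapped domains or
* from the supported AGAW otherwise, then create the page tables and
* reserved regions.
*/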
static struct dmar_domain *
dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
{
struct iommu_domain *iodom;
struct iommu_unit *unit;
struct dmar_domain *domain;
int error, id, mgaw;
id = alloc_unr(dmar->domids);
if (id == -1)
return (NULL);
domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO);
iodom = DOM2IODOM(domain);
unit = DMAR2IOMMU(dmar);
domain->domain = id;
LIST_INIT(&domain->contexts);
iommu_domain_init(unit, iodom, &dmar_domain_map_ops);
domain->dmar = dmar;
/*
* For now, use the maximal usable physical address of the
* installed memory to calculate the mgaw on id_mapped domain.
* It is useful for the identity mapping, and less so for the
* virtualized bus address space.
*/
domain->iodom.end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR;
mgaw = dmar_maxaddr2mgaw(dmar, domain->iodom.end, !id_mapped);
error = domain_set_agaw(domain, mgaw);
if (error != 0)
goto fail;
if (!id_mapped)
/* Use all supported address space for remapping. */
domain->iodom.end = 1ULL << (domain->agaw - 1);
iommu_gas_init_domain(DOM2IODOM(domain));
if (id_mapped) {
if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
domain->pgtbl_obj = domain_get_idmap_pgtbl(domain,
domain->iodom.end);
}
domain->iodom.flags |= IOMMU_DOMAIN_IDMAP;
} else {
error = domain_alloc_pgtbl(domain);
if (error != 0)
goto fail;
/* Disable local apic region access */
error = iommu_gas_reserve_region(iodom, 0xfee00000,
0xfeefffff + 1, &iodom->msi_entry);
if (error != 0)
goto fail;
}
return (domain);
fail:
dmar_domain_destroy(domain);
return (NULL);
}
static struct dmar_ctx *
dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid)
{
struct dmar_ctx *ctx;
ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
ctx->context.domain = DOM2IODOM(domain);
ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu),
M_DMAR_CTX, M_WAITOK | M_ZERO);
ctx->context.rid = rid;
ctx->refs = 1;
return (ctx);
}
static void
dmar_ctx_link(struct dmar_ctx *ctx)
{
struct dmar_domain *domain;
domain = CTX2DOM(ctx);
IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
KASSERT(domain->refs >= domain->ctx_cnt,
("dom %p ref underflow %d %d", domain, domain->refs,
domain->ctx_cnt));
domain->refs++;
domain->ctx_cnt++;
LIST_INSERT_HEAD(&domain->contexts, ctx, link);
}
static void
dmar_ctx_unlink(struct dmar_ctx *ctx)
{
struct dmar_domain *domain;
domain = CTX2DOM(ctx);
IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
KASSERT(domain->refs > 0,
("domain %p ctx dtr refs %d", domain, domain->refs));
KASSERT(domain->ctx_cnt >= domain->refs,
("domain %p ctx dtr refs %d ctx_cnt %d", domain,
domain->refs, domain->ctx_cnt));
domain->refs--;
domain->ctx_cnt--;
LIST_REMOVE(ctx, link);
}
static void
dmar_domain_destroy(struct dmar_domain *domain)
{
struct iommu_domain *iodom;
struct dmar_unit *dmar;
iodom = DOM2IODOM(domain);
KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries),
("unfinished unloads %p", domain));
KASSERT(LIST_EMPTY(&domain->contexts),
("destroying dom %p with contexts", domain));
KASSERT(domain->ctx_cnt == 0,
("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt));
KASSERT(domain->refs == 0,
("destroying dom %p with refs %d", domain, domain->refs));
if ((domain->iodom.flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
DMAR_DOMAIN_LOCK(domain);
iommu_gas_fini_domain(iodom);
DMAR_DOMAIN_UNLOCK(domain);
}
if ((domain->iodom.flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) {
if (domain->pgtbl_obj != NULL)
DMAR_DOMAIN_PGLOCK(domain);
domain_free_pgtbl(domain);
}
iommu_domain_fini(iodom);
dmar = DOM2DMAR(domain);
free_unr(dmar->domids, domain->domain);
free(domain, M_DMAR_DOMAIN);
}
static struct dmar_ctx *
dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
bool id_mapped, bool rmrr_init)
{
struct dmar_domain *domain, *domain1;
struct dmar_ctx *ctx, *ctx1;
struct iommu_unit *unit __diagused;
dmar_ctx_entry_t *ctxp;
struct sf_buf *sf;
int bus, slot, func, error;
bool enable;
if (dev != NULL) {
bus = pci_get_bus(dev);
slot = pci_get_slot(dev);
func = pci_get_function(dev);
} else {
bus = PCI_RID2BUS(rid);
slot = PCI_RID2SLOT(rid);
func = PCI_RID2FUNC(rid);
}
enable = false;
TD_PREP_PINNED_ASSERT;
unit = DMAR2IOMMU(dmar);
DMAR_LOCK(dmar);
KASSERT(!iommu_is_buswide_ctx(unit, bus) || (slot == 0 && func == 0),
("iommu%d pci%d:%d:%d get_ctx for buswide", dmar->iommu.unit, bus,
slot, func));
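/* Reuse an already existing context for this rid, if any. */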
ctx = dmar_find_ctx_locked(dmar, rid);
error = 0;
if (ctx == NULL) {
/*
 * Perform the allocations which require sleep or have a
 * higher chance of succeeding if sleeping is allowed.
 */
DMAR_UNLOCK(dmar);
dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid));
domain1 = dmar_domain_alloc(dmar, id_mapped);
if (domain1 == NULL) {
TD_PINNED_ASSERT;
return (NULL);
}
if (!id_mapped) {
error = domain_init_rmrr(domain1, dev, bus,
slot, func, dev_domain, dev_busno, dev_path,
dev_path_len);
if (error == 0)
error = dmar_reserve_pci_regions(domain1, dev);
if (error != 0) {
dmar_domain_destroy(domain1);
TD_PINNED_ASSERT;
return (NULL);
}
}
ctx1 = dmar_ctx_alloc(domain1, rid);
ctxp = dmar_map_ctx_entry(ctx1, &sf);
DMAR_LOCK(dmar);
/*
 * Recheck the contexts; another thread might have
 * already allocated the needed one.
 */
ctx = dmar_find_ctx_locked(dmar, rid);
if (ctx == NULL) {
domain = domain1;
ctx = ctx1;
dmar_ctx_link(ctx);
ctx->context.tag->owner = dev;
device_tag_init(ctx, dev);
/*
* This is the first activated context for the
* DMAR unit. Enable the translation after
* everything is set up.
*/
if (LIST_EMPTY(&dmar->domains))
enable = true;
LIST_INSERT_HEAD(&dmar->domains, domain, link);
ctx_id_entry_init(ctx, ctxp, false, bus);
if (dev != NULL) {
device_printf(dev,
"dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d "
"agaw %d %s-mapped\n",
dmar->iommu.unit, dmar->segment, bus, slot,
func, rid, domain->domain, domain->mgaw,
domain->agaw, id_mapped ? "id" : "re");
}
dmar_unmap_pgtbl(sf);
} else {
dmar_unmap_pgtbl(sf);
dmar_domain_destroy(domain1);
/* Nothing needs to be done to destroy ctx1. */
free(ctx1, M_DMAR_CTX);
domain = CTX2DOM(ctx);
ctx->refs++; /* tag referenced us */
}
} else {
domain = CTX2DOM(ctx);
if (ctx->context.tag->owner == NULL)
ctx->context.tag->owner = dev;
ctx->refs++; /* tag referenced us */
}
error = dmar_flush_for_ctx_entry(dmar, enable);
if (error != 0) {
dmar_free_ctx_locked(dmar, ctx);
TD_PINNED_ASSERT;
return (NULL);
}
/*
 * The dmar lock was potentially dropped between the check for
 * the empty context list and now.  Recheck the state of GCMD_TE
 * to avoid issuing an unneeded command.
 */
if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) {
error = dmar_enable_translation(dmar);
if (error == 0) {
if (bootverbose) {
printf("dmar%d: enabled translation\n",
dmar->iommu.unit);
}
} else {
printf("dmar%d: enabling translation failed, "
"error %d\n", dmar->iommu.unit, error);
dmar_free_ctx_locked(dmar, ctx);
TD_PINNED_ASSERT;
return (NULL);
}
}
DMAR_UNLOCK(dmar);
TD_PINNED_ASSERT;
return (ctx);
}
struct dmar_ctx *
dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid,
bool id_mapped, bool rmrr_init)
{
int dev_domain, dev_path_len, dev_busno;
dev_domain = pci_get_domain(dev);
dev_path_len = dmar_dev_depth(dev);
ACPI_DMAR_PCI_PATH dev_path[dev_path_len];
dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len);
return (dmar_get_ctx_for_dev1(dmar, dev, rid, dev_domain, dev_busno,
dev_path, dev_path_len, id_mapped, rmrr_init));
}
struct dmar_ctx *
dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
int dev_domain, int dev_busno,
const void *dev_path, int dev_path_len,
bool id_mapped, bool rmrr_init)
{
return (dmar_get_ctx_for_dev1(dmar, NULL, rid, dev_domain, dev_busno,
dev_path, dev_path_len, id_mapped, rmrr_init));
}
int
dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx)
{
struct dmar_unit *dmar;
struct dmar_domain *old_domain;
dmar_ctx_entry_t *ctxp;
struct sf_buf *sf;
int error;
dmar = domain->dmar;
old_domain = CTX2DOM(ctx);
if (domain == old_domain)
return (0);
KASSERT(old_domain->iodom.iommu == domain->iodom.iommu,
("domain %p %u moving between dmars %u %u", domain,
domain->domain, old_domain->iodom.iommu->unit,
domain->iodom.iommu->unit));
TD_PREP_PINNED_ASSERT;
ctxp = dmar_map_ctx_entry(ctx, &sf);
DMAR_LOCK(dmar);
dmar_ctx_unlink(ctx);
ctx->context.domain = &domain->iodom;
dmar_ctx_link(ctx);
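/*
 * Rewrite the context entry in place.  The bus argument is
 * deliberately out of range; it is presumably ignored on the
 * move path.
 */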
ctx_id_entry_init(ctx, ctxp, true, PCI_BUSMAX + 100);
dmar_unmap_pgtbl(sf);
error = dmar_flush_for_ctx_entry(dmar, true);
/* If the flush failed, rolling back would not work either. */
printf("dmar%d rid %x domain %d->%d %s-mapped\n",
dmar->iommu.unit, ctx->context.rid, old_domain->domain,
domain->domain, (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 ?
"id" : "re");
dmar_unref_domain_locked(dmar, old_domain);
TD_PINNED_ASSERT;
return (error);
}
static void
dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
{
DMAR_ASSERT_LOCKED(dmar);
KASSERT(domain->refs >= 1,
("dmar %d domain %p refs %u", dmar->iommu.unit, domain,
domain->refs));
KASSERT(domain->refs > domain->ctx_cnt,
("dmar %d domain %p refs %d ctx_cnt %d", dmar->iommu.unit, domain,
domain->refs, domain->ctx_cnt));
if (domain->refs > 1) {
domain->refs--;
DMAR_UNLOCK(dmar);
return;
}
KASSERT((domain->iodom.flags & IOMMU_DOMAIN_RMRR) == 0,
("lost ref on RMRR domain %p", domain));
LIST_REMOVE(domain, link);
DMAR_UNLOCK(dmar);
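/*
 * The last reference is gone: wait for the delayed unload task
 * to finish with the domain before destroying it.
 */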
taskqueue_drain(dmar->iommu.delayed_taskqueue,
&domain->iodom.unload_task);
dmar_domain_destroy(domain);
}
void
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
{
struct sf_buf *sf;
dmar_ctx_entry_t *ctxp;
struct dmar_domain *domain;
DMAR_ASSERT_LOCKED(dmar);
KASSERT(ctx->refs >= 1,
("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));
/*
 * If our reference is not the last one, only the dereference
 * should be performed.
 */
if (ctx->refs > 1) {
ctx->refs--;
DMAR_UNLOCK(dmar);
return;
}
KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0,
("lost ref on disabled ctx %p", ctx));
/*
 * Otherwise, the context entry must be cleared before the
 * page table is destroyed.  Mapping the page of context
 * entries could require sleeping, so unlock the dmar.
 */
DMAR_UNLOCK(dmar);
TD_PREP_PINNED_ASSERT;
ctxp = dmar_map_ctx_entry(ctx, &sf);
DMAR_LOCK(dmar);
KASSERT(ctx->refs >= 1,
("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));
/*
 * Another thread might have referenced the context, in which
 * case, again, only the dereference should be performed.
 */
if (ctx->refs > 1) {
ctx->refs--;
DMAR_UNLOCK(dmar);
dmar_unmap_pgtbl(sf);
TD_PINNED_ASSERT;
return;
}
KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0,
("lost ref on disabled ctx %p", ctx));
/*
* Clear the context pointer and flush the caches.
* XXXKIB: cannot do this if any RMRR entries are still present.
*/
dmar_pte_clear(&ctxp->ctx1);
ctxp->ctx2 = 0;
dmar_flush_ctx_to_ram(dmar, ctxp);
dmar_inv_ctx_glob(dmar);
if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
if (dmar->qi_enabled)
dmar_qi_invalidate_iotlb_glob_locked(dmar);
else
dmar_inv_iotlb_glob(dmar);
}
dmar_unmap_pgtbl(sf);
domain = CTX2DOM(ctx);
dmar_ctx_unlink(ctx);
free(ctx->context.tag, M_DMAR_CTX);
free(ctx, M_DMAR_CTX);
dmar_unref_domain_locked(dmar, domain);
TD_PINNED_ASSERT;
}
void
dmar_free_ctx(struct dmar_ctx *ctx)
{
struct dmar_unit *dmar;
dmar = CTX2DMAR(ctx);
DMAR_LOCK(dmar);
dmar_free_ctx_locked(dmar, ctx);
}
/*
 * Returns with the dmar (unit) lock held.
 */
struct dmar_ctx *
dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid)
{
struct dmar_domain *domain;
struct dmar_ctx *ctx;
DMAR_ASSERT_LOCKED(dmar);
LIST_FOREACH(domain, &dmar->domains, link) {
LIST_FOREACH(ctx, &domain->contexts, link) {
if (ctx->context.rid == rid)
return (ctx);
}
}
return (NULL);
}
void
dmar_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
struct iommu_domain *domain;
domain = entry->domain;
IOMMU_DOMAIN_LOCK(domain);
if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
iommu_gas_free_region(domain, entry);
else
iommu_gas_free_space(domain, entry);
IOMMU_DOMAIN_UNLOCK(domain);
if (free)
iommu_gas_free_entry(domain, entry);
else
entry->flags = 0;
}
void
-iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free)
+iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
+ bool cansleep)
{
struct dmar_domain *domain;
struct dmar_unit *unit;
domain = IODOM2DOM(entry->domain);
unit = DOM2DMAR(domain);
+
+ /*
+ * If "free" is false, then the IOTLB invalidation must be performed
+ * synchronously. Otherwise, the caller might free the entry before
+ * dmar_qi_task() is finished processing it.
+ */
if (unit->qi_enabled) {
DMAR_LOCK(unit);
- dmar_qi_invalidate_locked(IODOM2DOM(entry->domain),
- entry->start, entry->end - entry->start, &entry->gseq,
- true);
- if (!free)
- entry->flags |= IOMMU_MAP_ENTRY_QI_NF;
- TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
+ if (free) {
+ dmar_qi_invalidate_locked(domain, entry->start,
+ entry->end - entry->start, &entry->gseq, true);
+ TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry,
+ dmamap_link);
+ } else {
+ dmar_qi_invalidate_sync_locked(domain, entry->start,
+ entry->end - entry->start, cansleep);
+ }
DMAR_UNLOCK(unit);
} else {
- domain_flush_iotlb_sync(IODOM2DOM(entry->domain),
- entry->start, entry->end - entry->start);
+ domain_flush_iotlb_sync(domain, entry->start, entry->end -
+ entry->start);
dmar_domain_free_entry(entry, free);
}
}
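/*
 * Emit a QI wait descriptor for the last entry of a batch, and
 * periodically (every dmar_batch_coalesce entries) in between, so
 * that invalidation completions are coalesced.
 */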
static bool
dmar_domain_unload_emit_wait(struct dmar_domain *domain,
struct iommu_map_entry *entry)
{
if (TAILQ_NEXT(entry, dmamap_link) == NULL)
return (true);
return (domain->batch_no++ % dmar_batch_coalesce == 0);
}
void
iommu_domain_unload(struct iommu_domain *iodom,
struct iommu_map_entries_tailq *entries, bool cansleep)
{
struct dmar_domain *domain;
struct dmar_unit *unit;
struct iommu_map_entry *entry, *entry1;
int error __diagused;
domain = IODOM2DOM(iodom);
unit = DOM2DMAR(domain);
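/*
 * Unmap all entries.  Without QI, flush the IOTLB synchronously
 * and free each entry immediately; with QI, leave the entries
 * queued for the batched invalidation below.
 */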
TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
("not mapped entry %p %p", domain, entry));
error = iodom->ops->unmap(iodom, entry->start, entry->end -
entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
KASSERT(error == 0, ("unmap %p error %d", domain, error));
if (!unit->qi_enabled) {
domain_flush_iotlb_sync(domain, entry->start,
entry->end - entry->start);
TAILQ_REMOVE(entries, entry, dmamap_link);
dmar_domain_free_entry(entry, true);
}
}
if (TAILQ_EMPTY(entries))
return;
KASSERT(unit->qi_enabled, ("loaded entry left"));
DMAR_LOCK(unit);
TAILQ_FOREACH(entry, entries, dmamap_link) {
dmar_qi_invalidate_locked(domain, entry->start, entry->end -
entry->start, &entry->gseq,
dmar_domain_unload_emit_wait(domain, entry));
}
TAILQ_CONCAT(&unit->tlb_flush_entries, entries, dmamap_link);
DMAR_UNLOCK(unit);
}
struct iommu_ctx *
iommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid,
bool id_mapped, bool rmrr_init)
{
struct dmar_unit *dmar;
struct dmar_ctx *ret;
dmar = IOMMU2DMAR(iommu);
ret = dmar_get_ctx_for_dev(dmar, dev, rid, id_mapped, rmrr_init);
return (CTX2IOCTX(ret));
}
void
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *context)
{
struct dmar_unit *dmar;
struct dmar_ctx *ctx;
dmar = IOMMU2DMAR(iommu);
ctx = IOCTX2CTX(context);
dmar_free_ctx_locked(dmar, ctx);
}
void
iommu_free_ctx(struct iommu_ctx *context)
{
struct dmar_ctx *ctx;
ctx = IOCTX2CTX(context);
dmar_free_ctx(ctx);
}
diff --git a/sys/x86/iommu/intel_dmar.h b/sys/x86/iommu/intel_dmar.h
index e49f96bf93e1..21e626257a0d 100644
--- a/sys/x86/iommu/intel_dmar.h
+++ b/sys/x86/iommu/intel_dmar.h
@@ -1,454 +1,456 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2013-2015 The FreeBSD Foundation
*
* This software was developed by Konstantin Belousov
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __X86_IOMMU_INTEL_DMAR_H
#define __X86_IOMMU_INTEL_DMAR_H
#include <dev/iommu/iommu.h>
struct dmar_unit;
/*
* Locking annotations:
* (u) - Protected by iommu unit lock
* (d) - Protected by domain lock
* (c) - Immutable after initialization
*/
/*
 * The domain abstraction.  Most non-constant members of the domain
 * are protected by the owning dmar unit lock, not by the domain lock.
 * Most importantly, the dmar lock protects the contexts list.
 *
 * The domain lock protects the address map for the domain and the
 * list of delayed unload entries.
 *
 * Page table pages and their content are protected by the lock of
 * the vm object pgtbl_obj, which contains the page table pages.
 */
struct dmar_domain {
struct iommu_domain iodom;
int domain; /* (c) DID, written in context entry */
int mgaw; /* (c) Real max address width */
int agaw; /* (c) Adjusted guest address width */
int pglvl; /* (c) The pagelevel */
int awlvl; /* (c) The pagelevel as the bitmask,
to set in context entry */
u_int ctx_cnt; /* (u) Number of contexts owned */
u_int refs; /* (u) Refs, including ctx */
struct dmar_unit *dmar; /* (c) */
LIST_ENTRY(dmar_domain) link; /* (u) Member in the dmar list */
LIST_HEAD(, dmar_ctx) contexts; /* (u) */
vm_object_t pgtbl_obj; /* (c) Page table pages */
u_int batch_no;
};
struct dmar_ctx {
struct iommu_ctx context;
uint64_t last_fault_rec[2]; /* Last fault reported */
LIST_ENTRY(dmar_ctx) link; /* (u) Member in the domain list */
u_int refs; /* (u) References from tags */
};
#define DMAR_DOMAIN_PGLOCK(dom) VM_OBJECT_WLOCK((dom)->pgtbl_obj)
#define DMAR_DOMAIN_PGTRYLOCK(dom) VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj)
#define DMAR_DOMAIN_PGUNLOCK(dom) VM_OBJECT_WUNLOCK((dom)->pgtbl_obj)
#define DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \
VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)
#define DMAR_DOMAIN_LOCK(dom) mtx_lock(&(dom)->iodom.lock)
#define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->iodom.lock)
#define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED)
#define DMAR2IOMMU(dmar) &((dmar)->iommu)
#define IOMMU2DMAR(dmar) \
__containerof((dmar), struct dmar_unit, iommu)
#define DOM2IODOM(domain) &((domain)->iodom)
#define IODOM2DOM(domain) \
__containerof((domain), struct dmar_domain, iodom)
#define CTX2IOCTX(ctx) &((ctx)->context)
#define IOCTX2CTX(ctx) \
__containerof((ctx), struct dmar_ctx, context)
#define CTX2DOM(ctx) IODOM2DOM((ctx)->context.domain)
#define CTX2DMAR(ctx) (CTX2DOM(ctx)->dmar)
#define DOM2DMAR(domain) ((domain)->dmar)
struct dmar_msi_data {
int irq;
int irq_rid;
struct resource *irq_res;
void *intr_handle;
int (*handler)(void *);
int msi_data_reg;
int msi_addr_reg;
int msi_uaddr_reg;
void (*enable_intr)(struct dmar_unit *);
void (*disable_intr)(struct dmar_unit *);
const char *name;
};
#define DMAR_INTR_FAULT 0
#define DMAR_INTR_QI 1
#define DMAR_INTR_TOTAL 2
struct dmar_unit {
struct iommu_unit iommu;
device_t dev;
uint16_t segment;
uint64_t base;
/* Resources */
int reg_rid;
struct resource *regs;
struct dmar_msi_data intrs[DMAR_INTR_TOTAL];
/* Hardware registers cache */
uint32_t hw_ver;
uint64_t hw_cap;
uint64_t hw_ecap;
uint32_t hw_gcmd;
/* Data for being a dmar */
LIST_HEAD(, dmar_domain) domains;
struct unrhdr *domids;
vm_object_t ctx_obj;
u_int barrier_flags;
/* Fault handler data */
struct mtx fault_lock;
uint64_t *fault_log;
int fault_log_head;
int fault_log_tail;
int fault_log_size;
struct task fault_task;
struct taskqueue *fault_taskqueue;
/* QI */
int qi_enabled;
vm_offset_t inv_queue;
vm_size_t inv_queue_size;
uint32_t inv_queue_avail;
uint32_t inv_queue_tail;
volatile uint32_t inv_waitd_seq_hw; /* hw writes here on wait
descriptor completion */
uint64_t inv_waitd_seq_hw_phys;
uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */
u_int inv_waitd_gen; /* seq number generation AKA seq overflows */
u_int inv_seq_waiters; /* count of waiters for seq */
u_int inv_queue_full; /* informational counter */
/* IR */
int ir_enabled;
vm_paddr_t irt_phys;
dmar_irte_t *irt;
u_int irte_cnt;
vmem_t *irtids;
/* Queue of map entries pending delayed freeing, and its processing */
struct iommu_map_entries_tailq tlb_flush_entries;
struct task qi_task;
struct taskqueue *qi_taskqueue;
};
#define DMAR_LOCK(dmar) mtx_lock(&(dmar)->iommu.lock)
#define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->iommu.lock)
#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED)
#define DMAR_FAULT_LOCK(dmar) mtx_lock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED)
#define DMAR_IS_COHERENT(dmar) (((dmar)->hw_ecap & DMAR_ECAP_C) != 0)
#define DMAR_HAS_QI(dmar) (((dmar)->hw_ecap & DMAR_ECAP_QI) != 0)
#define DMAR_X2APIC(dmar) \
(x2apic_mode && ((dmar)->hw_ecap & DMAR_ECAP_EIM) != 0)
/* Barrier ids */
#define DMAR_BARRIER_RMRR 0
#define DMAR_BARRIER_USEQ 1
struct dmar_unit *dmar_find(device_t dev, bool verbose);
struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);
u_int dmar_nd2mask(u_int nd);
bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
int domain_set_agaw(struct dmar_domain *domain, int mgaw);
int dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr,
bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
iommu_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size,
iommu_gaddr_t *isizep);
struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
struct sf_buf **sf);
void dmar_unmap_pgtbl(struct sf_buf *sf);
int dmar_load_root_entry_ptr(struct dmar_unit *unit);
int dmar_inv_ctx_glob(struct dmar_unit *unit);
int dmar_inv_iotlb_glob(struct dmar_unit *unit);
int dmar_flush_write_bufs(struct dmar_unit *unit);
void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst);
void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
int dmar_enable_translation(struct dmar_unit *unit);
int dmar_disable_translation(struct dmar_unit *unit);
int dmar_load_irt_ptr(struct dmar_unit *unit);
int dmar_enable_ir(struct dmar_unit *unit);
int dmar_disable_ir(struct dmar_unit *unit);
bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id);
uint64_t dmar_get_timeout(void);
void dmar_update_timeout(uint64_t newval);
int dmar_fault_intr(void *arg);
void dmar_enable_fault_intr(struct dmar_unit *unit);
void dmar_disable_fault_intr(struct dmar_unit *unit);
int dmar_init_fault_log(struct dmar_unit *unit);
void dmar_fini_fault_log(struct dmar_unit *unit);
int dmar_qi_intr(void *arg);
void dmar_enable_qi_intr(struct dmar_unit *unit);
void dmar_disable_qi_intr(struct dmar_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
void dmar_qi_invalidate_locked(struct dmar_domain *domain, iommu_gaddr_t start,
iommu_gaddr_t size, struct iommu_qi_genseq *psec, bool emit_wait);
+void dmar_qi_invalidate_sync_locked(struct dmar_domain *domain,
+ iommu_gaddr_t start, iommu_gaddr_t size, bool cansleep);
void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);
vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
iommu_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
iommu_gaddr_t size);
int domain_alloc_pgtbl(struct dmar_domain *domain);
void domain_free_pgtbl(struct dmar_domain *domain);
extern const struct iommu_domain_map_ops dmar_domain_map_ops;
int dmar_dev_depth(device_t child);
void dmar_dev_path(device_t child, int *busno, void *path1, int depth);
struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev,
uint16_t rid, bool id_mapped, bool rmrr_init);
struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
bool id_mapped, bool rmrr_init);
int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
void dmar_free_ctx(struct dmar_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free);
void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
int dev_busno, const void *dev_path, int dev_path_len,
struct iommu_map_entries_tailq *rmrr_entries);
int dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar);
void dmar_quirks_post_ident(struct dmar_unit *dmar);
void dmar_quirks_pre_use(struct iommu_unit *dmar);
int dmar_init_irt(struct dmar_unit *unit);
void dmar_fini_irt(struct dmar_unit *unit);
extern iommu_haddr_t dmar_high;
extern int haw;
extern int dmar_tbl_pagecnt;
extern int dmar_batch_coalesce;
static inline uint32_t
dmar_read4(const struct dmar_unit *unit, int reg)
{
return (bus_read_4(unit->regs, reg));
}
static inline uint64_t
dmar_read8(const struct dmar_unit *unit, int reg)
{
#ifdef __i386__
uint32_t high, low;
low = bus_read_4(unit->regs, reg);
high = bus_read_4(unit->regs, reg + 4);
return (low | ((uint64_t)high << 32));
#else
return (bus_read_8(unit->regs, reg));
#endif
}
static inline void
dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val)
{
KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) ==
(unit->hw_gcmd & DMAR_GCMD_TE),
("dmar%d clearing TE 0x%08x 0x%08x", unit->iommu.unit,
unit->hw_gcmd, val));
bus_write_4(unit->regs, reg, val);
}
static inline void
dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val)
{
KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write"));
#ifdef __i386__
uint32_t high, low;
low = val;
high = val >> 32;
bus_write_4(unit->regs, reg, low);
bus_write_4(unit->regs, reg + 4, high);
#else
bus_write_8(unit->regs, reg, val);
#endif
}
/*
 * dmar_pte_store and dmar_pte_clear ensure that on i386, 32bit writes
 * are issued in the correct order.  For store, the lower word,
 * containing the P or R and W bits, is set only after the high word
 * is written.  For clear, the P bit is cleared first, then the high
 * word is cleared.
 *
 * dmar_pte_update updates the pte.  For amd64, the update is atomic.
 * For i386, it first disables the entry by clearing the word
 * containing the P bit, and then defers to dmar_pte_store1.  The
 * locked cmpxchg8b is probably available on any machine having DMAR
 * support, but the interrupt translation table may be mapped uncached.
 */
static inline void
dmar_pte_store1(volatile uint64_t *dst, uint64_t val)
{
#ifdef __i386__
volatile uint32_t *p;
uint32_t hi, lo;
hi = val >> 32;
lo = val;
p = (volatile uint32_t *)dst;
*(p + 1) = hi;
*p = lo;
#else
*dst = val;
#endif
}
static inline void
dmar_pte_store(volatile uint64_t *dst, uint64_t val)
{
KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx",
dst, (uintmax_t)*dst, (uintmax_t)val));
dmar_pte_store1(dst, val);
}
static inline void
dmar_pte_update(volatile uint64_t *dst, uint64_t val)
{
#ifdef __i386__
volatile uint32_t *p;
p = (volatile uint32_t *)dst;
*p = 0;
#endif
dmar_pte_store1(dst, val);
}
static inline void
dmar_pte_clear(volatile uint64_t *dst)
{
#ifdef __i386__
volatile uint32_t *p;
p = (volatile uint32_t *)dst;
*p = 0;
*(p + 1) = 0;
#else
*dst = 0;
#endif
}
extern struct timespec dmar_hw_timeout;
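/*
 * Busy-wait until "cond" becomes true, setting "error" in the
 * enclosing scope.  A zero dmar_hw_timeout means wait forever;
 * otherwise ETIMEDOUT is reported once the timeout expires.
 */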
#define DMAR_WAIT_UNTIL(cond) \
{ \
struct timespec last, curr; \
bool forever; \
\
if (dmar_hw_timeout.tv_sec == 0 && \
dmar_hw_timeout.tv_nsec == 0) { \
forever = true; \
} else { \
forever = false; \
nanouptime(&curr); \
timespecadd(&curr, &dmar_hw_timeout, &last); \
} \
for (;;) { \
if (cond) { \
error = 0; \
break; \
} \
nanouptime(&curr); \
if (!forever && timespeccmp(&last, &curr, <)) { \
error = ETIMEDOUT; \
break; \
} \
cpu_spinwait(); \
} \
}
#ifdef INVARIANTS
#define TD_PREP_PINNED_ASSERT \
int old_td_pinned; \
old_td_pinned = curthread->td_pinned
#define TD_PINNED_ASSERT \
KASSERT(curthread->td_pinned == old_td_pinned, \
("pin count leak: %d %d %s:%d", curthread->td_pinned, \
old_td_pinned, __FILE__, __LINE__))
#else
#define TD_PREP_PINNED_ASSERT
#define TD_PINNED_ASSERT
#endif
#endif
diff --git a/sys/x86/iommu/intel_qi.c b/sys/x86/iommu/intel_qi.c
index 894e3d537ac7..1400be3852e9 100644
--- a/sys/x86/iommu/intel_qi.c
+++ b/sys/x86/iommu/intel_qi.c
@@ -1,477 +1,487 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2013 The FreeBSD Foundation
*
* This software was developed by Konstantin Belousov
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_acpi.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
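/*
 * Returns true once the hardware has completed the invalidation
 * identified by the generation/sequence pair.
 */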
static bool
dmar_qi_seq_processed(const struct dmar_unit *unit,
const struct iommu_qi_genseq *pseq)
{
return (pseq->gen < unit->inv_waitd_gen ||
(pseq->gen == unit->inv_waitd_gen &&
pseq->seq <= unit->inv_waitd_seq_hw));
}
static int
dmar_enable_qi(struct dmar_unit *unit)
{
int error;
DMAR_ASSERT_LOCKED(unit);
unit->hw_gcmd |= DMAR_GCMD_QIE;
dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES)
!= 0));
return (error);
}
static int
dmar_disable_qi(struct dmar_unit *unit)
{
int error;
DMAR_ASSERT_LOCKED(unit);
unit->hw_gcmd &= ~DMAR_GCMD_QIE;
dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES)
== 0));
return (error);
}
static void
dmar_qi_advance_tail(struct dmar_unit *unit)
{
DMAR_ASSERT_LOCKED(unit);
dmar_write4(unit, DMAR_IQT_REG, unit->inv_queue_tail);
}
static void
dmar_qi_ensure(struct dmar_unit *unit, int descr_count)
{
uint32_t head;
int bytes;
DMAR_ASSERT_LOCKED(unit);
bytes = descr_count << DMAR_IQ_DESCR_SZ_SHIFT;
for (;;) {
if (bytes <= unit->inv_queue_avail)
break;
/* refill */
head = dmar_read4(unit, DMAR_IQH_REG);
head &= DMAR_IQH_MASK;
unit->inv_queue_avail = head - unit->inv_queue_tail -
DMAR_IQ_DESCR_SZ;
if (head <= unit->inv_queue_tail)
unit->inv_queue_avail += unit->inv_queue_size;
if (bytes <= unit->inv_queue_avail)
break;
/*
 * No space in the queue; busy-wait.  The hardware must
 * make progress.  But first advance the tail to inform
 * the descriptor streamer about the entries we might
 * have already filled, otherwise they could clog the
 * whole queue.
 */
dmar_qi_advance_tail(unit);
unit->inv_queue_full++;
cpu_spinwait();
}
unit->inv_queue_avail -= bytes;
}
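/*
 * Emit one 128-bit descriptor as two 64-bit writes, wrapping the
 * tail offset within the circular queue after each half.
 */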
static void
dmar_qi_emit(struct dmar_unit *unit, uint64_t data1, uint64_t data2)
{
DMAR_ASSERT_LOCKED(unit);
*(volatile uint64_t *)(unit->inv_queue + unit->inv_queue_tail) = data1;
unit->inv_queue_tail += DMAR_IQ_DESCR_SZ / 2;
KASSERT(unit->inv_queue_tail <= unit->inv_queue_size,
("tail overflow 0x%x 0x%jx", unit->inv_queue_tail,
(uintmax_t)unit->inv_queue_size));
unit->inv_queue_tail &= unit->inv_queue_size - 1;
*(volatile uint64_t *)(unit->inv_queue + unit->inv_queue_tail) = data2;
unit->inv_queue_tail += DMAR_IQ_DESCR_SZ / 2;
KASSERT(unit->inv_queue_tail <= unit->inv_queue_size,
("tail overflow 0x%x 0x%jx", unit->inv_queue_tail,
(uintmax_t)unit->inv_queue_size));
unit->inv_queue_tail &= unit->inv_queue_size - 1;
}
static void
dmar_qi_emit_wait_descr(struct dmar_unit *unit, uint32_t seq, bool intr,
bool memw, bool fence)
{
DMAR_ASSERT_LOCKED(unit);
dmar_qi_emit(unit, DMAR_IQ_DESCR_WAIT_ID |
(intr ? DMAR_IQ_DESCR_WAIT_IF : 0) |
(memw ? DMAR_IQ_DESCR_WAIT_SW : 0) |
(fence ? DMAR_IQ_DESCR_WAIT_FN : 0) |
(memw ? DMAR_IQ_DESCR_WAIT_SD(seq) : 0),
memw ? unit->inv_waitd_seq_hw_phys : 0);
}
static void
dmar_qi_emit_wait_seq(struct dmar_unit *unit, struct iommu_qi_genseq *pseq,
bool emit_wait)
{
struct iommu_qi_genseq gsec;
uint32_t seq;
KASSERT(pseq != NULL, ("wait descriptor with no place for seq"));
DMAR_ASSERT_LOCKED(unit);
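/*
 * On sequence number wraparound, synchronously drain the queue,
 * then start a new generation with the sequence reset to 1.
 */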
if (unit->inv_waitd_seq == 0xffffffff) {
gsec.gen = unit->inv_waitd_gen;
gsec.seq = unit->inv_waitd_seq;
dmar_qi_ensure(unit, 1);
dmar_qi_emit_wait_descr(unit, gsec.seq, false, true, false);
dmar_qi_advance_tail(unit);
while (!dmar_qi_seq_processed(unit, &gsec))
cpu_spinwait();
unit->inv_waitd_gen++;
unit->inv_waitd_seq = 1;
}
seq = unit->inv_waitd_seq++;
pseq->gen = unit->inv_waitd_gen;
pseq->seq = seq;
if (emit_wait) {
dmar_qi_ensure(unit, 1);
dmar_qi_emit_wait_descr(unit, seq, true, true, false);
}
}
static void
dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct iommu_qi_genseq *gseq,
bool nowait)
{
DMAR_ASSERT_LOCKED(unit);
unit->inv_seq_waiters++;
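/*
 * Busy-wait during early boot or when the caller cannot sleep;
 * otherwise sleep until dmar_qi_task() wakes the waiters up.
 */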
while (!dmar_qi_seq_processed(unit, gseq)) {
if (cold || nowait) {
cpu_spinwait();
} else {
msleep(&unit->inv_seq_waiters, &unit->iommu.lock, 0,
"dmarse", hz);
}
}
unit->inv_seq_waiters--;
}
void
dmar_qi_invalidate_locked(struct dmar_domain *domain, iommu_gaddr_t base,
iommu_gaddr_t size, struct iommu_qi_genseq *pseq, bool emit_wait)
{
struct dmar_unit *unit;
iommu_gaddr_t isize;
int am;
unit = domain->dmar;
DMAR_ASSERT_LOCKED(unit);
for (; size > 0; base += isize, size -= isize) {
am = calc_am(unit, base, size, &isize);
dmar_qi_ensure(unit, 1);
dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV |
DMAR_IQ_DESCR_IOTLB_PAGE | DMAR_IQ_DESCR_IOTLB_DW |
DMAR_IQ_DESCR_IOTLB_DR |
DMAR_IQ_DESCR_IOTLB_DID(domain->domain),
base | am);
}
dmar_qi_emit_wait_seq(unit, pseq, emit_wait);
dmar_qi_advance_tail(unit);
}
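+/*
+ * Emit the invalidation for the range and wait until the hardware
+ * completes it, busy-waiting when the caller cannot sleep.
+ */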
+void
+dmar_qi_invalidate_sync_locked(struct dmar_domain *domain, iommu_gaddr_t base,
+ iommu_gaddr_t size, bool cansleep)
+{
+ struct iommu_qi_genseq gseq;
+
+ DMAR_ASSERT_LOCKED(domain->dmar);
+ dmar_qi_invalidate_locked(domain, base, size, &gseq, true);
+ dmar_qi_wait_for_seq(domain->dmar, &gseq, !cansleep);
+}
+
void
dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit)
{
struct iommu_qi_genseq gseq;
DMAR_ASSERT_LOCKED(unit);
dmar_qi_ensure(unit, 2);
dmar_qi_emit(unit, DMAR_IQ_DESCR_CTX_INV | DMAR_IQ_DESCR_CTX_GLOB, 0);
dmar_qi_emit_wait_seq(unit, &gseq, true);
dmar_qi_advance_tail(unit);
dmar_qi_wait_for_seq(unit, &gseq, false);
}
void
dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit)
{
struct iommu_qi_genseq gseq;
DMAR_ASSERT_LOCKED(unit);
dmar_qi_ensure(unit, 2);
dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV | DMAR_IQ_DESCR_IOTLB_GLOB |
DMAR_IQ_DESCR_IOTLB_DW | DMAR_IQ_DESCR_IOTLB_DR, 0);
dmar_qi_emit_wait_seq(unit, &gseq, true);
dmar_qi_advance_tail(unit);
dmar_qi_wait_for_seq(unit, &gseq, false);
}
void
dmar_qi_invalidate_iec_glob(struct dmar_unit *unit)
{
struct iommu_qi_genseq gseq;
DMAR_ASSERT_LOCKED(unit);
dmar_qi_ensure(unit, 2);
dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV, 0);
dmar_qi_emit_wait_seq(unit, &gseq, true);
dmar_qi_advance_tail(unit);
dmar_qi_wait_for_seq(unit, &gseq, false);
}
void
dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt)
{
struct iommu_qi_genseq gseq;
u_int c, l;
DMAR_ASSERT_LOCKED(unit);
KASSERT(start < unit->irte_cnt && start < start + cnt &&
start + cnt <= unit->irte_cnt,
("inv iec overflow %d %d %d", unit->irte_cnt, start, cnt));
for (; cnt > 0; cnt -= c, start += c) {
l = ffs(start | cnt) - 1;
c = 1 << l;
dmar_qi_ensure(unit, 1);
dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV |
DMAR_IQ_DESCR_IEC_IDX | DMAR_IQ_DESCR_IEC_IIDX(start) |
DMAR_IQ_DESCR_IEC_IM(l), 0);
}
dmar_qi_ensure(unit, 1);
dmar_qi_emit_wait_seq(unit, &gseq, true);
dmar_qi_advance_tail(unit);
/*
 * The caller of this function, in particular
 * dmar_ir_program_irte(), may be invoked from a context
 * where sleeping is forbidden (in fact, the
 * intr_table_lock mutex may be held, locked from
 * intr_shuffle_irqs()).  Wait for the invalidation
 * completion using a busy wait.
 *
 * The impact on the interrupt input setup code is small; the
 * expected overhead is comparable with a chipset register
 * read.  It is more harmful for parallel DMA operations, since
 * we own the dmar unit lock until the whole invalidation queue
 * is processed, which includes requests possibly issued before
 * our request.
 */
dmar_qi_wait_for_seq(unit, &gseq, true);
}
int
dmar_qi_intr(void *arg)
{
struct dmar_unit *unit;
unit = arg;
KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled",
unit->iommu.unit));
taskqueue_enqueue(unit->qi_taskqueue, &unit->qi_task);
return (FILTER_HANDLED);
}
static void
dmar_qi_task(void *arg, int pending __unused)
{
struct dmar_unit *unit;
struct iommu_map_entry *entry;
uint32_t ics;
unit = arg;
DMAR_LOCK(unit);
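/*
 * Free, in queue order, the map entries whose invalidation the
 * hardware has already acknowledged.
 */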
for (;;) {
entry = TAILQ_FIRST(&unit->tlb_flush_entries);
if (entry == NULL)
break;
if (!dmar_qi_seq_processed(unit, &entry->gseq))
break;
TAILQ_REMOVE(&unit->tlb_flush_entries, entry, dmamap_link);
DMAR_UNLOCK(unit);
- dmar_domain_free_entry(entry, (entry->flags &
- IOMMU_MAP_ENTRY_QI_NF) == 0);
+ dmar_domain_free_entry(entry, true);
DMAR_LOCK(unit);
}
ics = dmar_read4(unit, DMAR_ICS_REG);
if ((ics & DMAR_ICS_IWC) != 0) {
ics = DMAR_ICS_IWC;
dmar_write4(unit, DMAR_ICS_REG, ics);
}
if (unit->inv_seq_waiters > 0)
wakeup(&unit->inv_seq_waiters);
DMAR_UNLOCK(unit);
}
int
dmar_init_qi(struct dmar_unit *unit)
{
uint64_t iqa;
uint32_t ics;
int qi_sz;
if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0)
return (0);
unit->qi_enabled = 1;
TUNABLE_INT_FETCH("hw.dmar.qi", &unit->qi_enabled);
if (!unit->qi_enabled)
return (0);
TAILQ_INIT(&unit->tlb_flush_entries);
TASK_INIT(&unit->qi_task, 0, dmar_qi_task, unit);
unit->qi_taskqueue = taskqueue_create_fast("dmarqf", M_WAITOK,
taskqueue_thread_enqueue, &unit->qi_taskqueue);
taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV,
"dmar%d qi taskq", unit->iommu.unit);
unit->inv_waitd_gen = 0;
unit->inv_waitd_seq = 1;
qi_sz = DMAR_IQA_QS_DEF;
TUNABLE_INT_FETCH("hw.dmar.qi_size", &qi_sz);
if (qi_sz > DMAR_IQA_QS_MAX)
qi_sz = DMAR_IQA_QS_MAX;
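/* The QS field encodes the queue size as a power-of-two count of pages. */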
unit->inv_queue_size = (1ULL << qi_sz) * PAGE_SIZE;
/* Reserve one descriptor to prevent wraparound. */
unit->inv_queue_avail = unit->inv_queue_size - DMAR_IQ_DESCR_SZ;
/* Reads of the invalidation queue by the DMAR are always coherent. */
unit->inv_queue = kmem_alloc_contig(unit->inv_queue_size, M_WAITOK |
M_ZERO, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
unit->inv_waitd_seq_hw_phys = pmap_kextract(
(vm_offset_t)&unit->inv_waitd_seq_hw);
DMAR_LOCK(unit);
dmar_write8(unit, DMAR_IQT_REG, 0);
iqa = pmap_kextract(unit->inv_queue);
iqa |= qi_sz;
dmar_write8(unit, DMAR_IQA_REG, iqa);
dmar_enable_qi(unit);
ics = dmar_read4(unit, DMAR_ICS_REG);
if ((ics & DMAR_ICS_IWC) != 0) {
ics = DMAR_ICS_IWC;
dmar_write4(unit, DMAR_ICS_REG, ics);
}
dmar_enable_qi_intr(unit);
DMAR_UNLOCK(unit);
return (0);
}
void
dmar_fini_qi(struct dmar_unit *unit)
{
struct iommu_qi_genseq gseq;
if (!unit->qi_enabled)
return;
taskqueue_drain(unit->qi_taskqueue, &unit->qi_task);
taskqueue_free(unit->qi_taskqueue);
unit->qi_taskqueue = NULL;
DMAR_LOCK(unit);
/* quiesce */
dmar_qi_ensure(unit, 1);
dmar_qi_emit_wait_seq(unit, &gseq, true);
dmar_qi_advance_tail(unit);
dmar_qi_wait_for_seq(unit, &gseq, false);
/* Only after the quiesce, disable the queue. */
dmar_disable_qi_intr(unit);
dmar_disable_qi(unit);
KASSERT(unit->inv_seq_waiters == 0,
("dmar%d: waiters on disabled queue", unit->iommu.unit));
DMAR_UNLOCK(unit);
kmem_free(unit->inv_queue, unit->inv_queue_size);
unit->inv_queue = 0;
unit->inv_queue_size = 0;
unit->qi_enabled = 0;
}
void
dmar_enable_qi_intr(struct dmar_unit *unit)
{
uint32_t iectl;
DMAR_ASSERT_LOCKED(unit);
KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
unit->iommu.unit));
iectl = dmar_read4(unit, DMAR_IECTL_REG);
iectl &= ~DMAR_IECTL_IM;
dmar_write4(unit, DMAR_IECTL_REG, iectl);
}
void
dmar_disable_qi_intr(struct dmar_unit *unit)
{
uint32_t iectl;
DMAR_ASSERT_LOCKED(unit);
KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
unit->iommu.unit));
iectl = dmar_read4(unit, DMAR_IECTL_REG);
dmar_write4(unit, DMAR_IECTL_REG, iectl | DMAR_IECTL_IM);
}