Changeset View
Changeset View
Standalone View
Standalone View
sys/x86/iommu/intel_ctx.c
Show First 20 Lines • Show All 108 Lines • ▼ Show 20 Lines | dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus) | ||||
dmar_flush_root_to_ram(dmar, re); | dmar_flush_root_to_ram(dmar, re); | ||||
dmar_unmap_pgtbl(sf); | dmar_unmap_pgtbl(sf); | ||||
TD_PINNED_ASSERT; | TD_PINNED_ASSERT; | ||||
} | } | ||||
static dmar_ctx_entry_t * | static dmar_ctx_entry_t * | ||||
dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp) | dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp) | ||||
{ | { | ||||
struct dmar_unit *dmar; | |||||
dmar_ctx_entry_t *ctxp; | dmar_ctx_entry_t *ctxp; | ||||
ctxp = dmar_map_pgtbl(ctx->domain->dmar->ctx_obj, 1 + | dmar = (struct dmar_unit *)ctx->device.domain->iommu; | ||||
ctxp = dmar_map_pgtbl(dmar->ctx_obj, 1 + | |||||
PCI_RID2BUS(ctx->rid), DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp); | PCI_RID2BUS(ctx->rid), DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp); | ||||
ctxp += ctx->rid & 0xff; | ctxp += ctx->rid & 0xff; | ||||
return (ctxp); | return (ctxp); | ||||
} | } | ||||
static void | static void | ||||
ctx_tag_init(struct dmar_ctx *ctx, device_t dev) | device_tag_init(struct dmar_ctx *ctx, device_t dev) | ||||
{ | { | ||||
struct dmar_domain *domain; | |||||
bus_addr_t maxaddr; | bus_addr_t maxaddr; | ||||
maxaddr = MIN(ctx->domain->end, BUS_SPACE_MAXADDR); | domain = (struct dmar_domain *)ctx->device.domain; | ||||
ctx->ctx_tag.common.ref_count = 1; /* Prevent free */ | maxaddr = MIN(domain->end, BUS_SPACE_MAXADDR); | ||||
ctx->ctx_tag.common.impl = &bus_dma_dmar_impl; | ctx->device.tag.common.ref_count = 1; /* Prevent free */ | ||||
ctx->ctx_tag.common.boundary = 0; | ctx->device.tag.common.impl = &bus_dma_iommu_impl; | ||||
ctx->ctx_tag.common.lowaddr = maxaddr; | ctx->device.tag.common.boundary = 0; | ||||
ctx->ctx_tag.common.highaddr = maxaddr; | ctx->device.tag.common.lowaddr = maxaddr; | ||||
ctx->ctx_tag.common.maxsize = maxaddr; | ctx->device.tag.common.highaddr = maxaddr; | ||||
ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED; | ctx->device.tag.common.maxsize = maxaddr; | ||||
ctx->ctx_tag.common.maxsegsz = maxaddr; | ctx->device.tag.common.nsegments = BUS_SPACE_UNRESTRICTED; | ||||
ctx->ctx_tag.ctx = ctx; | ctx->device.tag.common.maxsegsz = maxaddr; | ||||
ctx->ctx_tag.owner = dev; | ctx->device.tag.ctx = (struct iommu_device *)ctx; | ||||
ctx->device.tag.owner = dev; | |||||
} | } | ||||
static void | static void | ||||
ctx_id_entry_init_one(dmar_ctx_entry_t *ctxp, struct dmar_domain *domain, | ctx_id_entry_init_one(dmar_ctx_entry_t *ctxp, struct dmar_domain *domain, | ||||
vm_page_t ctx_root) | vm_page_t ctx_root) | ||||
{ | { | ||||
/* | /* | ||||
* For update due to move, the store is not atomic. It is | * For update due to move, the store is not atomic. It is | ||||
Show All 19 Lines | |||||
ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move, | ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move, | ||||
int busno) | int busno) | ||||
{ | { | ||||
struct dmar_unit *unit; | struct dmar_unit *unit; | ||||
struct dmar_domain *domain; | struct dmar_domain *domain; | ||||
vm_page_t ctx_root; | vm_page_t ctx_root; | ||||
int i; | int i; | ||||
domain = ctx->domain; | domain = (struct dmar_domain *)ctx->device.domain; | ||||
unit = domain->dmar; | unit = (struct dmar_unit *)domain->iodom.iommu; | ||||
KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0), | KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0), | ||||
("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx", | ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx", | ||||
unit->unit, busno, pci_get_slot(ctx->ctx_tag.owner), | unit->iommu.unit, busno, pci_get_slot(ctx->device.tag.owner), | ||||
pci_get_function(ctx->ctx_tag.owner), | pci_get_function(ctx->device.tag.owner), | ||||
ctxp->ctx1, ctxp->ctx2)); | ctxp->ctx1, ctxp->ctx2)); | ||||
if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0 && | if ((domain->flags & IOMMU_DOMAIN_IDMAP) != 0 && | ||||
(unit->hw_ecap & DMAR_ECAP_PT) != 0) { | (unit->hw_ecap & DMAR_ECAP_PT) != 0) { | ||||
KASSERT(domain->pgtbl_obj == NULL, | KASSERT(domain->pgtbl_obj == NULL, | ||||
("ctx %p non-null pgtbl_obj", ctx)); | ("ctx %p non-null pgtbl_obj", ctx)); | ||||
ctx_root = NULL; | ctx_root = NULL; | ||||
} else { | } else { | ||||
ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_NOALLOC); | ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_NOALLOC); | ||||
} | } | ||||
Show All 32 Lines | dmar_flush_for_ctx_entry(struct dmar_unit *dmar, bool force) | ||||
return (error); | return (error); | ||||
} | } | ||||
static int | static int | ||||
domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus, | domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus, | ||||
int slot, int func, int dev_domain, int dev_busno, | int slot, int func, int dev_domain, int dev_busno, | ||||
const void *dev_path, int dev_path_len) | const void *dev_path, int dev_path_len) | ||||
{ | { | ||||
struct dmar_map_entries_tailq rmrr_entries; | struct iommu_map_entries_tailq rmrr_entries; | ||||
struct dmar_map_entry *entry, *entry1; | struct iommu_map_entry *entry, *entry1; | ||||
vm_page_t *ma; | vm_page_t *ma; | ||||
dmar_gaddr_t start, end; | dmar_gaddr_t start, end; | ||||
vm_pindex_t size, i; | vm_pindex_t size, i; | ||||
int error, error1; | int error, error1; | ||||
error = 0; | error = 0; | ||||
TAILQ_INIT(&rmrr_entries); | TAILQ_INIT(&rmrr_entries); | ||||
dmar_dev_parse_rmrr(domain, dev_domain, dev_busno, dev_path, | dmar_dev_parse_rmrr(domain, dev_domain, dev_busno, dev_path, | ||||
dev_path_len, &rmrr_entries); | dev_path_len, &rmrr_entries); | ||||
TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) { | TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) { | ||||
/* | /* | ||||
* VT-d specification requires that the start of an | * VT-d specification requires that the start of an | ||||
* RMRR entry is 4k-aligned. Buggy BIOSes put | * RMRR entry is 4k-aligned. Buggy BIOSes put | ||||
* anything into the start and end fields. Truncate | * anything into the start and end fields. Truncate | ||||
* and round as neccesary. | * and round as neccesary. | ||||
* | * | ||||
* We also allow the overlapping RMRR entries, see | * We also allow the overlapping RMRR entries, see | ||||
* dmar_gas_alloc_region(). | * dmar_gas_alloc_region(). | ||||
*/ | */ | ||||
start = entry->start; | start = entry->start; | ||||
end = entry->end; | end = entry->end; | ||||
if (bootverbose) | if (bootverbose) | ||||
printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n", | printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n", | ||||
domain->dmar->unit, bus, slot, func, | domain->iodom.iommu->unit, bus, slot, func, | ||||
(uintmax_t)start, (uintmax_t)end); | (uintmax_t)start, (uintmax_t)end); | ||||
entry->start = trunc_page(start); | entry->start = trunc_page(start); | ||||
entry->end = round_page(end); | entry->end = round_page(end); | ||||
if (entry->start == entry->end) { | if (entry->start == entry->end) { | ||||
/* Workaround for some AMI (?) BIOSes */ | /* Workaround for some AMI (?) BIOSes */ | ||||
if (bootverbose) { | if (bootverbose) { | ||||
if (dev != NULL) | if (dev != NULL) | ||||
device_printf(dev, ""); | device_printf(dev, ""); | ||||
printf("pci%d:%d:%d ", bus, slot, func); | printf("pci%d:%d:%d ", bus, slot, func); | ||||
printf("BIOS bug: dmar%d RMRR " | printf("BIOS bug: dmar%d RMRR " | ||||
"region (%jx, %jx) corrected\n", | "region (%jx, %jx) corrected\n", | ||||
domain->dmar->unit, start, end); | domain->iodom.iommu->unit, start, end); | ||||
} | } | ||||
entry->end += DMAR_PAGE_SIZE * 0x20; | entry->end += DMAR_PAGE_SIZE * 0x20; | ||||
} | } | ||||
size = OFF_TO_IDX(entry->end - entry->start); | size = OFF_TO_IDX(entry->end - entry->start); | ||||
ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK); | ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK); | ||||
for (i = 0; i < size; i++) { | for (i = 0; i < size; i++) { | ||||
ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i, | ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i, | ||||
VM_MEMATTR_DEFAULT); | VM_MEMATTR_DEFAULT); | ||||
} | } | ||||
error1 = dmar_gas_map_region(domain, entry, | error1 = dmar_gas_map_region(&domain->iodom, entry, | ||||
DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE, | IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE, | ||||
DMAR_GM_CANWAIT | DMAR_GM_RMRR, ma); | IOMMU_MF_CANWAIT | IOMMU_MF_RMRR, ma); | ||||
/* | /* | ||||
* Non-failed RMRR entries are owned by context rb | * Non-failed RMRR entries are owned by context rb | ||||
* tree. Get rid of the failed entry, but do not stop | * tree. Get rid of the failed entry, but do not stop | ||||
* the loop. Rest of the parsed RMRR entries are | * the loop. Rest of the parsed RMRR entries are | ||||
* loaded and removed on the context destruction. | * loaded and removed on the context destruction. | ||||
*/ | */ | ||||
if (error1 == 0 && entry->end != entry->start) { | if (error1 == 0 && entry->end != entry->start) { | ||||
DMAR_LOCK(domain->dmar); | IOMMU_LOCK(domain->iodom.iommu); | ||||
domain->refs++; /* XXXKIB prevent free */ | domain->refs++; /* XXXKIB prevent free */ | ||||
domain->flags |= DMAR_DOMAIN_RMRR; | domain->flags |= IOMMU_DOMAIN_RMRR; | ||||
DMAR_UNLOCK(domain->dmar); | IOMMU_UNLOCK(domain->iodom.iommu); | ||||
} else { | } else { | ||||
if (error1 != 0) { | if (error1 != 0) { | ||||
if (dev != NULL) | if (dev != NULL) | ||||
device_printf(dev, ""); | device_printf(dev, ""); | ||||
printf("pci%d:%d:%d ", bus, slot, func); | printf("pci%d:%d:%d ", bus, slot, func); | ||||
printf( | printf( | ||||
"dmar%d failed to map RMRR region (%jx, %jx) %d\n", | "dmar%d failed to map RMRR region (%jx, %jx) %d\n", | ||||
domain->dmar->unit, start, end, | domain->iodom.iommu->unit, start, end, | ||||
error1); | error1); | ||||
error = error1; | error = error1; | ||||
} | } | ||||
TAILQ_REMOVE(&rmrr_entries, entry, unroll_link); | TAILQ_REMOVE(&rmrr_entries, entry, unroll_link); | ||||
dmar_gas_free_entry(domain, entry); | dmar_gas_free_entry(&domain->iodom, entry); | ||||
} | } | ||||
for (i = 0; i < size; i++) | for (i = 0; i < size; i++) | ||||
vm_page_putfake(ma[i]); | vm_page_putfake(ma[i]); | ||||
free(ma, M_TEMP); | free(ma, M_TEMP); | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
static struct dmar_domain * | static struct dmar_domain * | ||||
dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped) | dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped) | ||||
{ | { | ||||
struct dmar_domain *domain; | struct dmar_domain *domain; | ||||
int error, id, mgaw; | int error, id, mgaw; | ||||
id = alloc_unr(dmar->domids); | id = alloc_unr(dmar->domids); | ||||
if (id == -1) | if (id == -1) | ||||
return (NULL); | return (NULL); | ||||
domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO); | domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO); | ||||
domain->domain = id; | domain->domain = id; | ||||
LIST_INIT(&domain->contexts); | LIST_INIT(&domain->contexts); | ||||
RB_INIT(&domain->rb_root); | RB_INIT(&domain->rb_root); | ||||
TAILQ_INIT(&domain->unload_entries); | TAILQ_INIT(&domain->iodom.unload_entries); | ||||
TASK_INIT(&domain->unload_task, 0, dmar_domain_unload_task, domain); | TASK_INIT(&domain->iodom.unload_task, 0, dmar_domain_unload_task, | ||||
mtx_init(&domain->lock, "dmardom", NULL, MTX_DEF); | domain); | ||||
domain->dmar = dmar; | mtx_init(&domain->iodom.lock, "dmardom", NULL, MTX_DEF); | ||||
domain->iodom.iommu = &dmar->iommu; | |||||
/* | /* | ||||
* For now, use the maximal usable physical address of the | * For now, use the maximal usable physical address of the | ||||
* installed memory to calculate the mgaw on id_mapped domain. | * installed memory to calculate the mgaw on id_mapped domain. | ||||
* It is useful for the identity mapping, and less so for the | * It is useful for the identity mapping, and less so for the | ||||
* virtualized bus address space. | * virtualized bus address space. | ||||
*/ | */ | ||||
domain->end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR; | domain->end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR; | ||||
mgaw = dmar_maxaddr2mgaw(dmar, domain->end, !id_mapped); | mgaw = dmar_maxaddr2mgaw(dmar, domain->end, !id_mapped); | ||||
error = domain_set_agaw(domain, mgaw); | error = domain_set_agaw(domain, mgaw); | ||||
if (error != 0) | if (error != 0) | ||||
goto fail; | goto fail; | ||||
if (!id_mapped) | if (!id_mapped) | ||||
/* Use all supported address space for remapping. */ | /* Use all supported address space for remapping. */ | ||||
domain->end = 1ULL << (domain->agaw - 1); | domain->end = 1ULL << (domain->agaw - 1); | ||||
dmar_gas_init_domain(domain); | dmar_gas_init_domain(domain); | ||||
if (id_mapped) { | if (id_mapped) { | ||||
if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) { | if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) { | ||||
domain->pgtbl_obj = domain_get_idmap_pgtbl(domain, | domain->pgtbl_obj = domain_get_idmap_pgtbl(domain, | ||||
domain->end); | domain->end); | ||||
} | } | ||||
domain->flags |= DMAR_DOMAIN_IDMAP; | domain->flags |= IOMMU_DOMAIN_IDMAP; | ||||
} else { | } else { | ||||
error = domain_alloc_pgtbl(domain); | error = domain_alloc_pgtbl(domain); | ||||
if (error != 0) | if (error != 0) | ||||
goto fail; | goto fail; | ||||
/* Disable local apic region access */ | /* Disable local apic region access */ | ||||
error = dmar_gas_reserve_region(domain, 0xfee00000, | error = dmar_gas_reserve_region(domain, 0xfee00000, | ||||
0xfeefffff + 1); | 0xfeefffff + 1); | ||||
if (error != 0) | if (error != 0) | ||||
goto fail; | goto fail; | ||||
} | } | ||||
return (domain); | return (domain); | ||||
fail: | fail: | ||||
dmar_domain_destroy(domain); | dmar_domain_destroy(domain); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
static struct dmar_ctx * | static struct dmar_ctx * | ||||
dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid) | dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid) | ||||
{ | { | ||||
struct dmar_ctx *ctx; | struct dmar_ctx *ctx; | ||||
ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO); | ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO); | ||||
ctx->domain = domain; | ctx->device.domain = (struct iommu_domain *)domain; | ||||
ctx->rid = rid; | ctx->rid = rid; | ||||
ctx->refs = 1; | ctx->refs = 1; | ||||
return (ctx); | return (ctx); | ||||
} | } | ||||
static void | static void | ||||
dmar_ctx_link(struct dmar_ctx *ctx) | dmar_ctx_link(struct dmar_ctx *ctx) | ||||
{ | { | ||||
struct dmar_domain *domain; | struct dmar_domain *domain; | ||||
domain = ctx->domain; | domain = (struct dmar_domain *)ctx->device.domain; | ||||
DMAR_ASSERT_LOCKED(domain->dmar); | IOMMU_ASSERT_LOCKED(domain->iodom.iommu); | ||||
KASSERT(domain->refs >= domain->ctx_cnt, | KASSERT(domain->refs >= domain->ctx_cnt, | ||||
("dom %p ref underflow %d %d", domain, domain->refs, | ("dom %p ref underflow %d %d", domain, domain->refs, | ||||
domain->ctx_cnt)); | domain->ctx_cnt)); | ||||
domain->refs++; | domain->refs++; | ||||
domain->ctx_cnt++; | domain->ctx_cnt++; | ||||
LIST_INSERT_HEAD(&domain->contexts, ctx, link); | LIST_INSERT_HEAD(&domain->contexts, ctx, link); | ||||
} | } | ||||
static void | static void | ||||
dmar_ctx_unlink(struct dmar_ctx *ctx) | dmar_ctx_unlink(struct dmar_ctx *ctx) | ||||
{ | { | ||||
struct dmar_domain *domain; | struct dmar_domain *domain; | ||||
domain = ctx->domain; | domain = (struct dmar_domain *)ctx->device.domain; | ||||
DMAR_ASSERT_LOCKED(domain->dmar); | IOMMU_ASSERT_LOCKED(domain->iodom.iommu); | ||||
KASSERT(domain->refs > 0, | KASSERT(domain->refs > 0, | ||||
("domain %p ctx dtr refs %d", domain, domain->refs)); | ("domain %p ctx dtr refs %d", domain, domain->refs)); | ||||
KASSERT(domain->ctx_cnt >= domain->refs, | KASSERT(domain->ctx_cnt >= domain->refs, | ||||
("domain %p ctx dtr refs %d ctx_cnt %d", domain, | ("domain %p ctx dtr refs %d ctx_cnt %d", domain, | ||||
domain->refs, domain->ctx_cnt)); | domain->refs, domain->ctx_cnt)); | ||||
domain->refs--; | domain->refs--; | ||||
domain->ctx_cnt--; | domain->ctx_cnt--; | ||||
LIST_REMOVE(ctx, link); | LIST_REMOVE(ctx, link); | ||||
} | } | ||||
static void | static void | ||||
dmar_domain_destroy(struct dmar_domain *domain) | dmar_domain_destroy(struct dmar_domain *domain) | ||||
{ | { | ||||
struct dmar_unit *dmar; | |||||
KASSERT(TAILQ_EMPTY(&domain->unload_entries), | KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries), | ||||
("unfinished unloads %p", domain)); | ("unfinished unloads %p", domain)); | ||||
KASSERT(LIST_EMPTY(&domain->contexts), | KASSERT(LIST_EMPTY(&domain->contexts), | ||||
("destroying dom %p with contexts", domain)); | ("destroying dom %p with contexts", domain)); | ||||
KASSERT(domain->ctx_cnt == 0, | KASSERT(domain->ctx_cnt == 0, | ||||
("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt)); | ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt)); | ||||
KASSERT(domain->refs == 0, | KASSERT(domain->refs == 0, | ||||
("destroying dom %p with refs %d", domain, domain->refs)); | ("destroying dom %p with refs %d", domain, domain->refs)); | ||||
if ((domain->flags & DMAR_DOMAIN_GAS_INITED) != 0) { | if ((domain->flags & IOMMU_DOMAIN_GAS_INITED) != 0) { | ||||
DMAR_DOMAIN_LOCK(domain); | DMAR_DOMAIN_LOCK(domain); | ||||
dmar_gas_fini_domain(domain); | dmar_gas_fini_domain(domain); | ||||
DMAR_DOMAIN_UNLOCK(domain); | DMAR_DOMAIN_UNLOCK(domain); | ||||
} | } | ||||
if ((domain->flags & DMAR_DOMAIN_PGTBL_INITED) != 0) { | if ((domain->flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) { | ||||
if (domain->pgtbl_obj != NULL) | if (domain->pgtbl_obj != NULL) | ||||
DMAR_DOMAIN_PGLOCK(domain); | IOMMU_DOMAIN_PGLOCK(domain); | ||||
domain_free_pgtbl(domain); | domain_free_pgtbl(domain); | ||||
} | } | ||||
mtx_destroy(&domain->lock); | mtx_destroy(&domain->iodom.lock); | ||||
free_unr(domain->dmar->domids, domain->domain); | dmar = (struct dmar_unit *)domain->iodom.iommu; | ||||
free_unr(dmar->domids, domain->domain); | |||||
free(domain, M_DMAR_DOMAIN); | free(domain, M_DMAR_DOMAIN); | ||||
} | } | ||||
static struct dmar_ctx * | static struct dmar_ctx * | ||||
dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid, | dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid, | ||||
int dev_domain, int dev_busno, const void *dev_path, int dev_path_len, | int dev_domain, int dev_busno, const void *dev_path, int dev_path_len, | ||||
bool id_mapped, bool rmrr_init) | bool id_mapped, bool rmrr_init) | ||||
{ | { | ||||
Show All 12 Lines | if (dev != NULL) { | ||||
bus = PCI_RID2BUS(rid); | bus = PCI_RID2BUS(rid); | ||||
slot = PCI_RID2SLOT(rid); | slot = PCI_RID2SLOT(rid); | ||||
func = PCI_RID2FUNC(rid); | func = PCI_RID2FUNC(rid); | ||||
} | } | ||||
enable = false; | enable = false; | ||||
TD_PREP_PINNED_ASSERT; | TD_PREP_PINNED_ASSERT; | ||||
DMAR_LOCK(dmar); | DMAR_LOCK(dmar); | ||||
KASSERT(!dmar_is_buswide_ctx(dmar, bus) || (slot == 0 && func == 0), | KASSERT(!dmar_is_buswide_ctx(dmar, bus) || (slot == 0 && func == 0), | ||||
("dmar%d pci%d:%d:%d get_ctx for buswide", dmar->unit, bus, | ("dmar%d pci%d:%d:%d get_ctx for buswide", dmar->iommu.unit, bus, | ||||
slot, func)); | slot, func)); | ||||
ctx = dmar_find_ctx_locked(dmar, rid); | ctx = dmar_find_ctx_locked(dmar, rid); | ||||
error = 0; | error = 0; | ||||
if (ctx == NULL) { | if (ctx == NULL) { | ||||
/* | /* | ||||
* Perform the allocations which require sleep or have | * Perform the allocations which require sleep or have | ||||
* higher chance to succeed if the sleep is allowed. | * higher chance to succeed if the sleep is allowed. | ||||
*/ | */ | ||||
Show All 22 Lines | if (ctx == NULL) { | ||||
* Recheck the contexts, other thread might have | * Recheck the contexts, other thread might have | ||||
* already allocated needed one. | * already allocated needed one. | ||||
*/ | */ | ||||
ctx = dmar_find_ctx_locked(dmar, rid); | ctx = dmar_find_ctx_locked(dmar, rid); | ||||
if (ctx == NULL) { | if (ctx == NULL) { | ||||
domain = domain1; | domain = domain1; | ||||
ctx = ctx1; | ctx = ctx1; | ||||
dmar_ctx_link(ctx); | dmar_ctx_link(ctx); | ||||
ctx->ctx_tag.owner = dev; | ctx->device.tag.owner = dev; | ||||
ctx_tag_init(ctx, dev); | device_tag_init(ctx, dev); | ||||
/* | /* | ||||
* This is the first activated context for the | * This is the first activated context for the | ||||
* DMAR unit. Enable the translation after | * DMAR unit. Enable the translation after | ||||
* everything is set up. | * everything is set up. | ||||
*/ | */ | ||||
if (LIST_EMPTY(&dmar->domains)) | if (LIST_EMPTY(&dmar->domains)) | ||||
enable = true; | enable = true; | ||||
LIST_INSERT_HEAD(&dmar->domains, domain, link); | LIST_INSERT_HEAD(&dmar->domains, domain, link); | ||||
ctx_id_entry_init(ctx, ctxp, false, bus); | ctx_id_entry_init(ctx, ctxp, false, bus); | ||||
if (dev != NULL) { | if (dev != NULL) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d " | "dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d " | ||||
"agaw %d %s-mapped\n", | "agaw %d %s-mapped\n", | ||||
dmar->unit, dmar->segment, bus, slot, | dmar->iommu.unit, dmar->segment, bus, slot, | ||||
func, rid, domain->domain, domain->mgaw, | func, rid, domain->domain, domain->mgaw, | ||||
domain->agaw, id_mapped ? "id" : "re"); | domain->agaw, id_mapped ? "id" : "re"); | ||||
} | } | ||||
dmar_unmap_pgtbl(sf); | dmar_unmap_pgtbl(sf); | ||||
} else { | } else { | ||||
dmar_unmap_pgtbl(sf); | dmar_unmap_pgtbl(sf); | ||||
dmar_domain_destroy(domain1); | dmar_domain_destroy(domain1); | ||||
/* Nothing needs to be done to destroy ctx1. */ | /* Nothing needs to be done to destroy ctx1. */ | ||||
free(ctx1, M_DMAR_CTX); | free(ctx1, M_DMAR_CTX); | ||||
domain = ctx->domain; | domain = (struct dmar_domain *)ctx->device.domain; | ||||
ctx->refs++; /* tag referenced us */ | ctx->refs++; /* tag referenced us */ | ||||
} | } | ||||
} else { | } else { | ||||
domain = ctx->domain; | domain = (struct dmar_domain *)ctx->device.domain; | ||||
if (ctx->ctx_tag.owner == NULL) | if (ctx->device.tag.owner == NULL) | ||||
ctx->ctx_tag.owner = dev; | ctx->device.tag.owner = dev; | ||||
ctx->refs++; /* tag referenced us */ | ctx->refs++; /* tag referenced us */ | ||||
} | } | ||||
error = dmar_flush_for_ctx_entry(dmar, enable); | error = dmar_flush_for_ctx_entry(dmar, enable); | ||||
if (error != 0) { | if (error != 0) { | ||||
dmar_free_ctx_locked(dmar, ctx); | dmar_free_ctx_locked(&dmar->iommu, (struct iommu_device *)ctx); | ||||
TD_PINNED_ASSERT; | TD_PINNED_ASSERT; | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
/* | /* | ||||
* The dmar lock was potentially dropped between check for the | * The dmar lock was potentially dropped between check for the | ||||
* empty context list and now. Recheck the state of GCMD_TE | * empty context list and now. Recheck the state of GCMD_TE | ||||
* to avoid unneeded command. | * to avoid unneeded command. | ||||
*/ | */ | ||||
if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) { | if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) { | ||||
error = dmar_enable_translation(dmar); | error = dmar_enable_translation(dmar); | ||||
if (error == 0) { | if (error == 0) { | ||||
if (bootverbose) { | if (bootverbose) { | ||||
printf("dmar%d: enabled translation\n", | printf("dmar%d: enabled translation\n", | ||||
dmar->unit); | dmar->iommu.unit); | ||||
} | } | ||||
} else { | } else { | ||||
printf("dmar%d: enabling translation failed, " | printf("dmar%d: enabling translation failed, " | ||||
"error %d\n", dmar->unit, error); | "error %d\n", dmar->iommu.unit, error); | ||||
dmar_free_ctx_locked(dmar, ctx); | dmar_free_ctx_locked(&dmar->iommu, | ||||
(struct iommu_device *)ctx); | |||||
TD_PINNED_ASSERT; | TD_PINNED_ASSERT; | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
} | } | ||||
DMAR_UNLOCK(dmar); | DMAR_UNLOCK(dmar); | ||||
TD_PINNED_ASSERT; | TD_PINNED_ASSERT; | ||||
return (ctx); | return (ctx); | ||||
} | } | ||||
Show All 27 Lines | |||||
dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx) | dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx) | ||||
{ | { | ||||
struct dmar_unit *dmar; | struct dmar_unit *dmar; | ||||
struct dmar_domain *old_domain; | struct dmar_domain *old_domain; | ||||
dmar_ctx_entry_t *ctxp; | dmar_ctx_entry_t *ctxp; | ||||
struct sf_buf *sf; | struct sf_buf *sf; | ||||
int error; | int error; | ||||
dmar = domain->dmar; | dmar = (struct dmar_unit *)domain->iodom.iommu; | ||||
old_domain = ctx->domain; | old_domain = (struct dmar_domain *)ctx->device.domain; | ||||
if (domain == old_domain) | if (domain == old_domain) | ||||
return (0); | return (0); | ||||
KASSERT(old_domain->dmar == dmar, | KASSERT(old_domain->iodom.iommu == domain->iodom.iommu, | ||||
("domain %p %u moving between dmars %u %u", domain, | ("domain %p %u moving between dmars %u %u", domain, | ||||
domain->domain, old_domain->dmar->unit, domain->dmar->unit)); | domain->domain, old_domain->iodom.iommu->unit, | ||||
domain->iodom.iommu->unit)); | |||||
TD_PREP_PINNED_ASSERT; | TD_PREP_PINNED_ASSERT; | ||||
ctxp = dmar_map_ctx_entry(ctx, &sf); | ctxp = dmar_map_ctx_entry(ctx, &sf); | ||||
DMAR_LOCK(dmar); | DMAR_LOCK(dmar); | ||||
dmar_ctx_unlink(ctx); | dmar_ctx_unlink(ctx); | ||||
ctx->domain = domain; | ctx->device.domain = &domain->iodom; | ||||
dmar_ctx_link(ctx); | dmar_ctx_link(ctx); | ||||
ctx_id_entry_init(ctx, ctxp, true, PCI_BUSMAX + 100); | ctx_id_entry_init(ctx, ctxp, true, PCI_BUSMAX + 100); | ||||
dmar_unmap_pgtbl(sf); | dmar_unmap_pgtbl(sf); | ||||
error = dmar_flush_for_ctx_entry(dmar, true); | error = dmar_flush_for_ctx_entry(dmar, true); | ||||
/* If flush failed, rolling back would not work as well. */ | /* If flush failed, rolling back would not work as well. */ | ||||
printf("dmar%d rid %x domain %d->%d %s-mapped\n", | printf("dmar%d rid %x domain %d->%d %s-mapped\n", | ||||
dmar->unit, ctx->rid, old_domain->domain, domain->domain, | dmar->iommu.unit, ctx->rid, old_domain->domain, domain->domain, | ||||
(domain->flags & DMAR_DOMAIN_IDMAP) != 0 ? "id" : "re"); | (domain->flags & IOMMU_DOMAIN_IDMAP) != 0 ? "id" : "re"); | ||||
dmar_unref_domain_locked(dmar, old_domain); | dmar_unref_domain_locked(dmar, old_domain); | ||||
TD_PINNED_ASSERT; | TD_PINNED_ASSERT; | ||||
return (error); | return (error); | ||||
} | } | ||||
static void | static void | ||||
dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain) | dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain) | ||||
{ | { | ||||
DMAR_ASSERT_LOCKED(dmar); | DMAR_ASSERT_LOCKED(dmar); | ||||
KASSERT(domain->refs >= 1, | KASSERT(domain->refs >= 1, | ||||
("dmar %d domain %p refs %u", dmar->unit, domain, domain->refs)); | ("dmar %d domain %p refs %u", dmar->iommu.unit, domain, | ||||
domain->refs)); | |||||
KASSERT(domain->refs > domain->ctx_cnt, | KASSERT(domain->refs > domain->ctx_cnt, | ||||
("dmar %d domain %p refs %d ctx_cnt %d", dmar->unit, domain, | ("dmar %d domain %p refs %d ctx_cnt %d", dmar->iommu.unit, domain, | ||||
domain->refs, domain->ctx_cnt)); | domain->refs, domain->ctx_cnt)); | ||||
if (domain->refs > 1) { | if (domain->refs > 1) { | ||||
domain->refs--; | domain->refs--; | ||||
DMAR_UNLOCK(dmar); | DMAR_UNLOCK(dmar); | ||||
return; | return; | ||||
} | } | ||||
KASSERT((domain->flags & DMAR_DOMAIN_RMRR) == 0, | KASSERT((domain->flags & IOMMU_DOMAIN_RMRR) == 0, | ||||
("lost ref on RMRR domain %p", domain)); | ("lost ref on RMRR domain %p", domain)); | ||||
LIST_REMOVE(domain, link); | LIST_REMOVE(domain, link); | ||||
DMAR_UNLOCK(dmar); | DMAR_UNLOCK(dmar); | ||||
taskqueue_drain(dmar->delayed_taskqueue, &domain->unload_task); | taskqueue_drain(dmar->iommu.delayed_taskqueue, | ||||
&domain->iodom.unload_task); | |||||
dmar_domain_destroy(domain); | dmar_domain_destroy(domain); | ||||
} | } | ||||
void | void | ||||
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx) | dmar_free_ctx_locked(struct iommu_unit *unit, struct iommu_device *device) | ||||
{ | { | ||||
struct dmar_ctx *ctx; | |||||
struct dmar_unit *dmar; | |||||
struct sf_buf *sf; | struct sf_buf *sf; | ||||
dmar_ctx_entry_t *ctxp; | dmar_ctx_entry_t *ctxp; | ||||
struct dmar_domain *domain; | struct dmar_domain *domain; | ||||
ctx = (struct dmar_ctx *)device; | |||||
dmar = (struct dmar_unit *)unit; | |||||
DMAR_ASSERT_LOCKED(dmar); | DMAR_ASSERT_LOCKED(dmar); | ||||
KASSERT(ctx->refs >= 1, | KASSERT(ctx->refs >= 1, | ||||
("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs)); | ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs)); | ||||
/* | /* | ||||
* If our reference is not last, only the dereference should | * If our reference is not last, only the dereference should | ||||
* be performed. | * be performed. | ||||
*/ | */ | ||||
if (ctx->refs > 1) { | if (ctx->refs > 1) { | ||||
ctx->refs--; | ctx->refs--; | ||||
DMAR_UNLOCK(dmar); | DMAR_UNLOCK(dmar); | ||||
return; | return; | ||||
} | } | ||||
KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0, | KASSERT((ctx->device.flags & DMAR_CTX_DISABLED) == 0, | ||||
("lost ref on disabled ctx %p", ctx)); | ("lost ref on disabled ctx %p", ctx)); | ||||
/* | /* | ||||
* Otherwise, the context entry must be cleared before the | * Otherwise, the context entry must be cleared before the | ||||
* page table is destroyed. The mapping of the context | * page table is destroyed. The mapping of the context | ||||
* entries page could require sleep, unlock the dmar. | * entries page could require sleep, unlock the dmar. | ||||
*/ | */ | ||||
DMAR_UNLOCK(dmar); | DMAR_UNLOCK(dmar); | ||||
Show All 10 Lines | dmar_free_ctx_locked(struct iommu_unit *unit, struct iommu_device *device) | ||||
if (ctx->refs > 1) { | if (ctx->refs > 1) { | ||||
ctx->refs--; | ctx->refs--; | ||||
DMAR_UNLOCK(dmar); | DMAR_UNLOCK(dmar); | ||||
dmar_unmap_pgtbl(sf); | dmar_unmap_pgtbl(sf); | ||||
TD_PINNED_ASSERT; | TD_PINNED_ASSERT; | ||||
return; | return; | ||||
} | } | ||||
KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0, | KASSERT((ctx->device.flags & DMAR_CTX_DISABLED) == 0, | ||||
("lost ref on disabled ctx %p", ctx)); | ("lost ref on disabled ctx %p", ctx)); | ||||
/* | /* | ||||
* Clear the context pointer and flush the caches. | * Clear the context pointer and flush the caches. | ||||
* XXXKIB: cannot do this if any RMRR entries are still present. | * XXXKIB: cannot do this if any RMRR entries are still present. | ||||
*/ | */ | ||||
dmar_pte_clear(&ctxp->ctx1); | dmar_pte_clear(&ctxp->ctx1); | ||||
ctxp->ctx2 = 0; | ctxp->ctx2 = 0; | ||||
dmar_flush_ctx_to_ram(dmar, ctxp); | dmar_flush_ctx_to_ram(dmar, ctxp); | ||||
dmar_inv_ctx_glob(dmar); | dmar_inv_ctx_glob(dmar); | ||||
if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) { | if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) { | ||||
if (dmar->qi_enabled) | if (dmar->qi_enabled) | ||||
dmar_qi_invalidate_iotlb_glob_locked(dmar); | dmar_qi_invalidate_iotlb_glob_locked(dmar); | ||||
else | else | ||||
dmar_inv_iotlb_glob(dmar); | dmar_inv_iotlb_glob(dmar); | ||||
} | } | ||||
dmar_unmap_pgtbl(sf); | dmar_unmap_pgtbl(sf); | ||||
domain = ctx->domain; | domain = (struct dmar_domain *)ctx->device.domain; | ||||
dmar_ctx_unlink(ctx); | dmar_ctx_unlink(ctx); | ||||
free(ctx, M_DMAR_CTX); | free(ctx, M_DMAR_CTX); | ||||
dmar_unref_domain_locked(dmar, domain); | dmar_unref_domain_locked(dmar, domain); | ||||
TD_PINNED_ASSERT; | TD_PINNED_ASSERT; | ||||
} | } | ||||
void | void | ||||
dmar_free_ctx(struct dmar_ctx *ctx) | dmar_free_ctx(struct iommu_device *device) | ||||
{ | { | ||||
struct dmar_ctx *ctx; | |||||
struct dmar_unit *dmar; | struct dmar_unit *dmar; | ||||
dmar = ctx->domain->dmar; | ctx = (struct dmar_ctx *)device; | ||||
dmar = (struct dmar_unit *)ctx->device.domain->iommu; | |||||
DMAR_LOCK(dmar); | DMAR_LOCK(dmar); | ||||
dmar_free_ctx_locked(dmar, ctx); | dmar_free_ctx_locked(&dmar->iommu, (struct iommu_device *)ctx); | ||||
} | } | ||||
/* | /* | ||||
* Returns with the domain locked. | * Returns with the domain locked. | ||||
*/ | */ | ||||
struct dmar_ctx * | struct dmar_ctx * | ||||
dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid) | dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid) | ||||
{ | { | ||||
struct dmar_domain *domain; | struct dmar_domain *domain; | ||||
struct dmar_ctx *ctx; | struct dmar_ctx *ctx; | ||||
DMAR_ASSERT_LOCKED(dmar); | DMAR_ASSERT_LOCKED(dmar); | ||||
LIST_FOREACH(domain, &dmar->domains, link) { | LIST_FOREACH(domain, &dmar->domains, link) { | ||||
LIST_FOREACH(ctx, &domain->contexts, link) { | LIST_FOREACH(ctx, &domain->contexts, link) { | ||||
if (ctx->rid == rid) | if (ctx->rid == rid) | ||||
return (ctx); | return (ctx); | ||||
} | } | ||||
} | } | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
void | void | ||||
dmar_domain_free_entry(struct dmar_map_entry *entry, bool free) | dmar_domain_free_entry(struct iommu_map_entry *entry, bool free) | ||||
{ | { | ||||
struct dmar_domain *domain; | struct dmar_domain *domain; | ||||
domain = entry->domain; | domain = entry->domain; | ||||
DMAR_DOMAIN_LOCK(domain); | DMAR_DOMAIN_LOCK(domain); | ||||
if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0) | if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) | ||||
dmar_gas_free_region(domain, entry); | dmar_gas_free_region(domain, entry); | ||||
else | else | ||||
dmar_gas_free_space(domain, entry); | dmar_gas_free_space(domain, entry); | ||||
DMAR_DOMAIN_UNLOCK(domain); | DMAR_DOMAIN_UNLOCK(domain); | ||||
if (free) | if (free) | ||||
dmar_gas_free_entry(domain, entry); | dmar_gas_free_entry(&domain->iodom, entry); | ||||
else | else | ||||
entry->flags = 0; | entry->flags = 0; | ||||
} | } | ||||
/*
 * Invalidate the IOTLB for an unmapped entry and dispose of the entry.
 * With queued invalidation enabled, the invalidation request is issued
 * under the unit lock and the entry is parked on tlb_flush_entries;
 * disposition then happens later, presumably when the QI completion is
 * processed (TODO(review): confirm against the QI handler, not visible
 * here).  IOMMU_MAP_ENTRY_QI_NF records that the deferred path must
 * only reset the entry instead of freeing it.  Without QI, the flush
 * is synchronous and the entry is disposed of immediately.
 */
void
dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free)
{
	struct dmar_unit *unit;

	unit = (struct dmar_unit *)entry->domain->iodom.iommu;
	if (unit->qi_enabled) {
		/* Queue the invalidation; the entry stays alive on the
		 * unit's flush list until the request completes. */
		DMAR_LOCK(unit);
		dmar_qi_invalidate_locked(entry->domain, entry->start,
		    entry->end - entry->start, &entry->gseq, true);
		if (!free)
			entry->flags |= IOMMU_MAP_ENTRY_QI_NF;
		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
		DMAR_UNLOCK(unit);
	} else {
		/* Legacy registers: wait for the flush, then free or
		 * reset the entry right away. */
		domain_flush_iotlb_sync(entry->domain, entry->start,
		    entry->end - entry->start);
		dmar_domain_free_entry(entry, free);
	}
}
static bool | static bool | ||||
dmar_domain_unload_emit_wait(struct dmar_domain *domain, | dmar_domain_unload_emit_wait(struct dmar_domain *domain, | ||||
struct dmar_map_entry *entry) | struct iommu_map_entry *entry) | ||||
{ | { | ||||
if (TAILQ_NEXT(entry, dmamap_link) == NULL) | if (TAILQ_NEXT(entry, dmamap_link) == NULL) | ||||
return (true); | return (true); | ||||
return (domain->batch_no++ % dmar_batch_coalesce == 0); | return (domain->batch_no++ % dmar_batch_coalesce == 0); | ||||
} | } | ||||
void | void | ||||
dmar_domain_unload(struct dmar_domain *domain, | dmar_domain_unload(struct dmar_domain *domain, | ||||
struct dmar_map_entries_tailq *entries, bool cansleep) | struct iommu_map_entries_tailq *entries, bool cansleep) | ||||
{ | { | ||||
struct dmar_unit *unit; | struct dmar_unit *unit; | ||||
struct dmar_map_entry *entry, *entry1; | struct iommu_map_entry *entry, *entry1; | ||||
int error; | int error; | ||||
unit = domain->dmar; | unit = (struct dmar_unit *)domain->iodom.iommu; | ||||
TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) { | TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) { | ||||
KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0, | KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0, | ||||
("not mapped entry %p %p", domain, entry)); | ("not mapped entry %p %p", domain, entry)); | ||||
error = domain_unmap_buf(domain, entry->start, entry->end - | error = domain_unmap_buf(domain, entry->start, entry->end - | ||||
entry->start, cansleep ? DMAR_PGF_WAITOK : 0); | entry->start, cansleep ? DMAR_PGF_WAITOK : 0); | ||||
KASSERT(error == 0, ("unmap %p error %d", domain, error)); | KASSERT(error == 0, ("unmap %p error %d", domain, error)); | ||||
if (!unit->qi_enabled) { | if (!unit->qi_enabled) { | ||||
domain_flush_iotlb_sync(domain, entry->start, | domain_flush_iotlb_sync(domain, entry->start, | ||||
entry->end - entry->start); | entry->end - entry->start); | ||||
TAILQ_REMOVE(entries, entry, dmamap_link); | TAILQ_REMOVE(entries, entry, dmamap_link); | ||||
Show All 13 Lines | dmar_domain_unload(struct dmar_domain *domain, | ||||
TAILQ_CONCAT(&unit->tlb_flush_entries, entries, dmamap_link); | TAILQ_CONCAT(&unit->tlb_flush_entries, entries, dmamap_link); | ||||
DMAR_UNLOCK(unit); | DMAR_UNLOCK(unit); | ||||
} | } | ||||
/*
 * Taskqueue callback that drains the domain's deferred-unload queue.
 * Repeatedly snatches the whole pending list under the domain lock,
 * then unmaps the batch with the lock dropped (cansleep == true), and
 * stops once the queue is observed empty.
 */
static void
dmar_domain_unload_task(void *arg, int pending)
{
	struct dmar_domain *domain;
	struct iommu_map_entries_tailq entries;

	domain = arg;
	TAILQ_INIT(&entries);
	for (;;) {
		/* Take everything queued so far in one O(1) swap. */
		DMAR_DOMAIN_LOCK(domain);
		TAILQ_SWAP(&domain->iodom.unload_entries, &entries,
		    iommu_map_entry, dmamap_link);
		DMAR_DOMAIN_UNLOCK(domain);
		if (TAILQ_EMPTY(&entries))
			break;
		dmar_domain_unload(domain, &entries, true);
	}
}
struct iommu_device * | |||||
iommu_get_device(struct iommu_unit *iommu, device_t dev, uint16_t rid, | |||||
bool id_mapped, bool rmrr_init) | |||||
{ | |||||
struct dmar_unit *dmar; | |||||
struct dmar_ctx *ret; | |||||
dmar = (struct dmar_unit *)iommu; | |||||
ret = dmar_get_ctx_for_dev(dmar, dev, rid, id_mapped, rmrr_init); | |||||
return ((struct iommu_device *)ret); | |||||
} | |||||
void | |||||
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free) | |||||
{ | |||||
dmar_domain_unload_entry(entry, free); | |||||
} | |||||
void | |||||
iommu_domain_unload(struct iommu_domain *iodom, | |||||
struct iommu_map_entries_tailq *entries, bool cansleep) | |||||
{ | |||||
struct dmar_domain *domain; | |||||
domain = (struct dmar_domain *)iodom; | |||||
dmar_domain_unload(domain, entries, cansleep); | |||||
} | } |