sys/x86/iommu/intel_ctx.c
[... 277 lines folded ...]
@@ TAILQ_FOREACH_SAFE(entry, &rmrr_entries, dmamap_link, entry1) { @@
         size = OFF_TO_IDX(entry->end - entry->start);
         ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK);
         for (i = 0; i < size; i++) {
             ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
                 VM_MEMATTR_DEFAULT);
         }
         error1 = iommu_gas_map_region(DOM2IODOM(domain), entry,
             IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE,
-            IOMMU_MF_CANWAIT | IOMMU_MF_RMRR, ma);
+            IOMMU_MF_CANWAIT | IOMMU_MF_CANTRIM | IOMMU_MF_RMRR, ma);
         /*
          * Non-failed RMRR entries are owned by context rb
          * tree.  Get rid of the failed entry, but do not stop
          * the loop.  Rest of the parsed RMRR entries are
          * loaded and removed on the context destruction.
          */
         if (error1 == 0 && entry->end != entry->start) {
             IOMMU_LOCK(domain->iodom.iommu);
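Note: the loop above covers each RMRR region with throwaway vm_page_t structures because iommu_gas_map_region() consumes a page array rather than raw physical addresses. A self-contained sketch of that fake-page idiom; map_range_cb() is a hypothetical stand-in for the real consumer, and the real code releases the fake pages later, after the mapping is entered:

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/* Sketch only, not part of this change. */
static int
map_phys_range_with_fake_pages(vm_paddr_t start, vm_paddr_t end,
    int (*map_range_cb)(vm_page_t *ma, vm_pindex_t cnt))
{
    vm_page_t *ma;
    vm_pindex_t i, size;
    int error;

    size = OFF_TO_IDX(end - start);
    ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK);
    for (i = 0; i < size; i++) {
        /* Fake pages carry a physical address but live on no queue. */
        ma[i] = vm_page_getfake(start + PAGE_SIZE * i, VM_MEMATTR_DEFAULT);
    }
    error = map_range_cb(ma, size);
    for (i = 0; i < size; i++)
        vm_page_putfake(ma[i]);
    free(ma, M_TEMP);
    return (error);
}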
[... 215 lines folded ...]
@@ dmar_domain_destroy(struct dmar_domain *domain) @@
     }
     iommu_domain_fini(iodom);
     dmar = DOM2DMAR(domain);
     free_unr(dmar->domids, domain->domain);
     free(domain, M_DMAR_DOMAIN);
 }
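Note: free_unr() above returns the domain id to FreeBSD's generic unit-number allocator (subr_unit.c). A minimal usage sketch; the 0..255 range is an assumption for illustration, not the value the dmar driver derives from hardware capabilities:

#include <sys/param.h>
#include <sys/systm.h>

static struct unrhdr *domids;

static void
domids_init(void)
{
    /* A NULL mutex makes the allocator use its library-provided lock. */
    domids = new_unrhdr(0, 255, NULL);
}

static int
domid_alloc(void)
{
    return (alloc_unr(domids));    /* -1 when the range is exhausted */
}

static void
domid_free(int id)
{
    free_unr(domids, id);
}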
 static struct dmar_ctx *
-dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
-    int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
-    bool id_mapped, bool rmrr_init)
+dmar_get_ctx_for_dev1(struct dmar_unit *dmar, struct dmar_domain *domain,
+    device_t dev, uint16_t rid, int dev_domain, int dev_busno,
+    const void *dev_path, int dev_path_len, bool id_mapped, bool rmrr_init)
 {
-    struct dmar_domain *domain, *domain1;
+    struct dmar_domain *domain1;
     struct dmar_ctx *ctx, *ctx1;
     struct iommu_unit *unit __diagused;
     dmar_ctx_entry_t *ctxp;
     struct sf_buf *sf;
     int bus, slot, func, error;
     bool enable;

     if (dev != NULL) {
         bus = pci_get_bus(dev);
         slot = pci_get_slot(dev);
         func = pci_get_function(dev);
     } else {
         bus = PCI_RID2BUS(rid);
         slot = PCI_RID2SLOT(rid);
         func = PCI_RID2FUNC(rid);
     }
     enable = false;
+    domain1 = NULL;
+    DMAR_ASSERT_LOCKED(dmar);
     TD_PREP_PINNED_ASSERT;
     unit = DMAR2IOMMU(dmar);
-    DMAR_LOCK(dmar);
     KASSERT(!iommu_is_buswide_ctx(unit, bus) || (slot == 0 && func == 0),
         ("iommu%d pci%d:%d:%d get_ctx for buswide", dmar->iommu.unit, bus,
         slot, func));
     ctx = dmar_find_ctx_locked(dmar, rid);
     error = 0;
     if (ctx == NULL) {
         /*
          * Perform the allocations which require sleep or have
          * higher chance to succeed if the sleep is allowed.
          */
         DMAR_UNLOCK(dmar);
+        if (domain == NULL) {
             dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid));
             domain1 = dmar_domain_alloc(dmar, id_mapped);
             if (domain1 == NULL) {
                 TD_PINNED_ASSERT;
                 return (NULL);
             }
             if (!id_mapped) {
                 error = domain_init_rmrr(domain1, dev, bus,
                     slot, func, dev_domain, dev_busno, dev_path,
                     dev_path_len);
-                if (error == 0 && dev != NULL)
-                    error = dmar_reserve_pci_regions(domain1, dev);
+                if (error == 0 && dev != NULL) {
+                    error = dmar_reserve_pci_regions(
+                        domain1, dev);
+                }
                 if (error != 0) {
                     dmar_domain_destroy(domain1);
                     TD_PINNED_ASSERT;
                     return (NULL);
                 }
             }
+        }
         ctx1 = dmar_ctx_alloc(domain1, rid);
         ctxp = dmar_map_ctx_entry(ctx1, &sf);
         DMAR_LOCK(dmar);
         /*
          * Recheck the contexts, other thread might have
          * already allocated needed one.
          */
         ctx = dmar_find_ctx_locked(dmar, rid);
         if (ctx == NULL) {
+            if (LIST_EMPTY(&dmar->domains)) {
+                MPASS(domain == NULL);
+                enable = true;
+            }
+            if (domain == NULL) {
                 domain = domain1;
+                domain1 = NULL;
+                LIST_INSERT_HEAD(&dmar->domains, domain, link);
+            }
             ctx = ctx1;
             dmar_ctx_link(ctx);
             ctx->context.tag->owner = dev;
             device_tag_init(ctx, dev);
             /*
              * This is the first activated context for the
              * DMAR unit.  Enable the translation after
              * everything is set up.
              */
-            if (LIST_EMPTY(&dmar->domains))
-                enable = true;
-            LIST_INSERT_HEAD(&dmar->domains, domain, link);
             ctx_id_entry_init(ctx, ctxp, false, bus);
             if (dev != NULL) {
                 device_printf(dev,
                     "dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d "
                     "agaw %d %s-mapped\n",
                     dmar->iommu.unit, dmar->segment, bus, slot,
                     func, rid, domain->domain, domain->mgaw,
                     domain->agaw, id_mapped ? "id" : "re");
             }
             dmar_unmap_pgtbl(sf);
         } else {
-            dmar_unmap_pgtbl(sf);
-            dmar_domain_destroy(domain1);
             /* Nothing needs to be done to destroy ctx1. */
             free(ctx1, M_DMAR_CTX);
             domain = CTX2DOM(ctx);
             ctx->refs++; /* tag referenced us */
         }
+        if (domain1 != NULL) {
+            dmar_unmap_pgtbl(sf);
+            dmar_domain_destroy(domain1);
+        }
     } else {
+        MPASS(domain == NULL);
         domain = CTX2DOM(ctx);
         if (ctx->context.tag->owner == NULL)
             ctx->context.tag->owner = dev;
         ctx->refs++; /* tag referenced us */
     }
     error = dmar_flush_for_ctx_entry(dmar, enable);
     if (error != 0) {
[... 21 lines folded ...]
@@ if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) { @@
         } else {
             printf("dmar%d: enabling translation failed, "
                 "error %d\n", dmar->iommu.unit, error);
             dmar_free_ctx_locked(dmar, ctx);
             TD_PINNED_ASSERT;
             return (NULL);
         }
     }
-    DMAR_UNLOCK(dmar);
     TD_PINNED_ASSERT;
     return (ctx);
 }
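Note: dmar_get_ctx_for_dev1() is built around a classic unlock/allocate/recheck shape: sleepable allocations run with the dmar lock dropped, and the lookup is repeated after relocking because another thread may have installed the context in that window. A skeleton of the pattern with placeholder names (obj_lookup() and friends are not kernel APIs):

struct obj *
get_or_create(struct unit *u, uint16_t key)
{
    struct obj *o, *o1;

    UNIT_ASSERT_LOCKED(u);
    o = obj_lookup(u, key);      /* first lookup, lock held */
    if (o != NULL)
        return (o);
    UNIT_UNLOCK(u);
    o1 = obj_alloc(key);         /* may sleep (M_WAITOK allocations) */
    UNIT_LOCK(u);
    o = obj_lookup(u, key);      /* recheck: did another thread win? */
    if (o == NULL) {
        obj_install(u, o1);      /* we won; publish our object */
        return (o1);
    }
    obj_free(o1);                /* we lost; discard the spare */
    return (o);
}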
+#if 0
+/*
+ * XXXKIB need to figure out devpath from rid
+ */
 struct dmar_ctx *
 dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid,
     bool id_mapped, bool rmrr_init)
 {
+    struct dmar_ctx *ctx;
+
+    DMAR_LOCK(dmar);
+    ctx = dmar_get_ctx_for_dev1(dmar, NULL, dev, rid, id_mapped,
+        rmrr_init);
+    if (ctx != NULL) {
+        MPASS((ctx->domain->flags & DMAR_DOMAIN_VMM) == 0);
+        ctx->domain->flags |= DMAR_DOMAIN_BUSDMA;
+    }
+    DMAR_UNLOCK(dmar);
+    return (ctx);
+}
+#endif
+
+struct dmar_ctx *
+dmar_get_ctx_for_dev(struct dmar_unit *dmar, struct dmar_domain *domain,
+    device_t dev, uint16_t rid, bool id_mapped, bool rmrr_init)
+{
     int dev_domain, dev_path_len, dev_busno;

     dev_domain = pci_get_domain(dev);
     dev_path_len = dmar_dev_depth(dev);
     ACPI_DMAR_PCI_PATH dev_path[dev_path_len];

     dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len);
-    return (dmar_get_ctx_for_dev1(dmar, dev, rid, dev_domain, dev_busno,
-        dev_path, dev_path_len, id_mapped, rmrr_init));
+    return (dmar_get_ctx_for_dev1(dmar, domain, dev, rid, dev_domain,
+        dev_busno, dev_path, dev_path_len, id_mapped, rmrr_init));
 }

 struct dmar_ctx *
 dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
     int dev_domain, int dev_busno,
     const void *dev_path, int dev_path_len,
     bool id_mapped, bool rmrr_init)
 {
-    return (dmar_get_ctx_for_dev1(dmar, NULL, rid, dev_domain, dev_busno,
-        dev_path, dev_path_len, id_mapped, rmrr_init));
+    return (dmar_get_ctx_for_dev1(dmar, NULL, NULL, rid, dev_domain,
+        dev_busno, dev_path, dev_path_len, id_mapped, rmrr_init));
 }
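Note: both wrappers use a measure-then-fill idiom: dmar_dev_depth() sizes a C99 variable-length array and dmar_dev_path() fills it, keeping the short PCI path off the heap. The shape, with placeholder types and functions:

struct path_step { int bus, slot, func; };
struct devinfo;

int  path_depth(const struct devinfo *di);
void path_fill(const struct devinfo *di, struct path_step *steps, int n);

static void
walk_path(const struct devinfo *di)
{
    int n;

    n = path_depth(di);           /* pass 1: measure */
    struct path_step steps[n];    /* C99 VLA, no malloc()/free() */
    path_fill(di, steps, n);      /* pass 2: fill */
    /* ... consume steps[0..n-1] ... */
}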
-int
+static struct dmar_domain *
 dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx)
 {
     struct dmar_unit *dmar;
     struct dmar_domain *old_domain;
     dmar_ctx_entry_t *ctxp;
     struct sf_buf *sf;
-    int error;

     dmar = domain->dmar;
+    DMAR_ASSERT_LOCKED(dmar);
     old_domain = CTX2DOM(ctx);
     if (domain == old_domain)
-        return (0);
+        return (domain);
     KASSERT(old_domain->iodom.iommu == domain->iodom.iommu,
         ("domain %p %u moving between dmars %u %u", domain,
-        domain->domain, old_domain->iodom.iommu->unit,
-        domain->iodom.iommu->unit));
+        domain->domain, old_domain->dmar->iommu.unit,
+        domain->dmar->iommu.unit));
+    if ((old_domain->iodom.flags & IOMMU_DOMAIN_RMRR) != 0)
+        return (old_domain);
     TD_PREP_PINNED_ASSERT;

     ctxp = dmar_map_ctx_entry(ctx, &sf);
-    DMAR_LOCK(dmar);
     dmar_ctx_unlink(ctx);
     ctx->context.domain = &domain->iodom;
     dmar_ctx_link(ctx);
     ctx_id_entry_init(ctx, ctxp, true, PCI_BUSMAX + 100);
     dmar_unmap_pgtbl(sf);
-    error = dmar_flush_for_ctx_entry(dmar, true);
+    (void)dmar_flush_for_ctx_entry(dmar, true);
     /* If flush failed, rolling back would not work as well. */
     printf("dmar%d rid %x domain %d->%d %s-mapped\n",
         dmar->iommu.unit, ctx->context.rid, old_domain->domain,
         domain->domain, (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 ?
         "id" : "re");
     dmar_unref_domain_locked(dmar, old_domain);
     TD_PINNED_ASSERT;
-    return (error);
+    return (domain);
 }
+/*
+ * Create a VMM domain for the given device.  Keep it on a private
+ * domain if the device needs RMRR.  Otherwise coalesce VMM domains to
+ * reduce the number of maintained page tables.  If this is the first
+ * domain on this dmar for this VM (domain == NULL), reuse the already
+ * created busdma domain if possible.
+ */
+struct dmar_domain *
+dmar_vmm_domain_add_dev(struct iommu_unit *iommu, struct dmar_domain *domain,
+    device_t dev, uint16_t rid)
+{
+    struct dmar_unit *dmar;
+    struct dmar_domain *rdomain;
+    struct dmar_ctx *ctx;
+    bool drain;
+
+    dmar = (struct dmar_unit *)iommu;
+    MPASS(domain == NULL || domain->dmar == dmar);
+    rdomain = NULL;
+    drain = false;
+
+    DMAR_LOCK(dmar);
+    ctx = dmar_find_ctx_locked(dmar, rid);
+    if (ctx != NULL) {
+        rdomain = domain != NULL ? dmar_move_ctx_to_domain(domain,
+            ctx) : (struct dmar_domain *)ctx->context.domain;
+    } else {
+        ctx = dmar_get_ctx_for_dev(dmar, domain, dev, rid, false,
+            true);
+        if (ctx != NULL) {
+            rdomain = (struct dmar_domain *)ctx->context.domain;
+            MPASS(domain == NULL || rdomain == domain);
+        }
+    }
+    if (rdomain != NULL) {
+        MPASS((rdomain->iodom.flags & (DMAR_DOMAIN_BUSDMA |
+            DMAR_DOMAIN_VMM)) != (DMAR_DOMAIN_BUSDMA |
+            DMAR_DOMAIN_VMM));
+        if ((rdomain->iodom.flags & DMAR_DOMAIN_BUSDMA) != 0) {
+            rdomain->iodom.flags &= ~DMAR_DOMAIN_BUSDMA;
+            drain = true;
+        }
+        rdomain->iodom.flags |= DMAR_DOMAIN_VMM;
+    }
+    DMAR_UNLOCK(dmar);
+    if (drain) {
+        taskqueue_drain(dmar->iommu.delayed_taskqueue,
+            &rdomain->iodom.unload_task);
+    }
+    return (rdomain);
+}
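Note: a hypothetical caller sketch (not part of this change) showing how a VMM might accumulate passthrough devices with the new function: the first call passes domain == NULL, later calls feed the returned domain back so devices share page tables, and a different return value signals that RMRR kept a device on its private domain:

static struct dmar_domain *
vmm_attach_devs(struct iommu_unit *iommu, device_t *devs, uint16_t *rids,
    int ndev)
{
    struct dmar_domain *domain, *rd;
    int i;

    domain = NULL;
    for (i = 0; i < ndev; i++) {
        rd = dmar_vmm_domain_add_dev(iommu, domain, devs[i], rids[i]);
        if (rd == NULL)
            return (NULL);    /* caller must detach devs[0..i-1] */
        if (domain == NULL)
            domain = rd;      /* coalesce subsequent devices here */
        /* rd != domain would mean an RMRR-private domain for devs[i]. */
    }
    return (domain);
}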
 static void
 dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
 {
     DMAR_ASSERT_LOCKED(dmar);
     KASSERT(domain->refs >= 1,
         ("dmar %d domain %p refs %u", dmar->iommu.unit, domain,
         domain->refs));
[... 97 lines folded ...]
 {
     struct dmar_unit *dmar;

     dmar = CTX2DMAR(ctx);
     DMAR_LOCK(dmar);
     dmar_free_ctx_locked(dmar, ctx);
 }

-/*
- * Returns with the domain locked.
- */
 struct dmar_ctx *
 dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid)
 {
     struct dmar_domain *domain;
     struct dmar_ctx *ctx;

     DMAR_ASSERT_LOCKED(dmar);
[... 101 lines folded ...]
 iommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid,
     bool id_mapped, bool rmrr_init)
 {
     struct dmar_unit *dmar;
     struct dmar_ctx *ret;

     dmar = IOMMU2DMAR(iommu);
-    ret = dmar_get_ctx_for_dev(dmar, dev, rid, id_mapped, rmrr_init);
+    ret = dmar_get_ctx_for_dev(dmar, NULL, dev, rid, id_mapped, rmrr_init);
     return (CTX2IOCTX(ret));
 }

 void
 iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *context)
 {
     struct dmar_unit *dmar;
[... 17 lines folded ...]