Changeset View
Changeset View
Standalone View
Standalone View
sys/x86/iommu/busdma_dmar.c
Show First 20 Lines • Show All 68 Lines • ▼ Show 20 Lines | |||||
#include <x86/iommu/intel_dmar.h> | #include <x86/iommu/intel_dmar.h> | ||||
/* | /* | ||||
* busdma_dmar.c, the implementation of the busdma(9) interface using | * busdma_dmar.c, the implementation of the busdma(9) interface using | ||||
* DMAR units from Intel VT-d. | * DMAR units from Intel VT-d. | ||||
*/ | */ | ||||
static bool | static bool | ||||
dmar_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func) | iommu_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func) | ||||
{ | { | ||||
char str[128], *env; | char str[128], *env; | ||||
int default_bounce; | int default_bounce; | ||||
bool ret; | bool ret; | ||||
static const char bounce_str[] = "bounce"; | static const char bounce_str[] = "bounce"; | ||||
static const char dmar_str[] = "dmar"; | static const char dmar_str[] = "dmar"; | ||||
default_bounce = 0; | default_bounce = 0; | ||||
Show All 26 Lines | |||||
* the DMAR unit and used for page table lookup. PCI bridges may take | * the DMAR unit and used for page table lookup. PCI bridges may take | ||||
* ownership of transactions from downstream devices, so it may not be | * ownership of transactions from downstream devices, so it may not be | ||||
* the same as the BSF of the target device. In those cases, all | * the same as the BSF of the target device. In those cases, all | ||||
* devices downstream of the bridge must share a single mapping | * devices downstream of the bridge must share a single mapping | ||||
* domain, and must collectively be assigned to use either DMAR or | * domain, and must collectively be assigned to use either DMAR or | ||||
* bounce mapping. | * bounce mapping. | ||||
*/ | */ | ||||
device_t | device_t | ||||
dmar_get_requester(device_t dev, uint16_t *rid) | iommu_get_requester(device_t dev, uint16_t *rid) | ||||
{ | { | ||||
devclass_t pci_class; | devclass_t pci_class; | ||||
device_t l, pci, pcib, pcip, pcibp, requester; | device_t l, pci, pcib, pcip, pcibp, requester; | ||||
int cap_offset; | int cap_offset; | ||||
uint16_t pcie_flags; | uint16_t pcie_flags; | ||||
bool bridge_is_pcie; | bool bridge_is_pcie; | ||||
pci_class = devclass_find("pci"); | pci_class = devclass_find("pci"); | ||||
l = requester = dev; | l = requester = dev; | ||||
*rid = pci_get_rid(dev); | *rid = pci_get_rid(dev); | ||||
/* | /* | ||||
* Walk the bridge hierarchy from the target device to the | * Walk the bridge hierarchy from the target device to the | ||||
* host port to find the translating bridge nearest the DMAR | * host port to find the translating bridge nearest the DMAR | ||||
* unit. | * unit. | ||||
*/ | */ | ||||
for (;;) { | for (;;) { | ||||
pci = device_get_parent(l); | pci = device_get_parent(l); | ||||
KASSERT(pci != NULL, ("dmar_get_requester(%s): NULL parent " | KASSERT(pci != NULL, ("iommu_get_requester(%s): NULL parent " | ||||
"for %s", device_get_name(dev), device_get_name(l))); | "for %s", device_get_name(dev), device_get_name(l))); | ||||
KASSERT(device_get_devclass(pci) == pci_class, | KASSERT(device_get_devclass(pci) == pci_class, | ||||
("dmar_get_requester(%s): non-pci parent %s for %s", | ("iommu_get_requester(%s): non-pci parent %s for %s", | ||||
device_get_name(dev), device_get_name(pci), | device_get_name(dev), device_get_name(pci), | ||||
device_get_name(l))); | device_get_name(l))); | ||||
pcib = device_get_parent(pci); | pcib = device_get_parent(pci); | ||||
KASSERT(pcib != NULL, ("dmar_get_requester(%s): NULL bridge " | KASSERT(pcib != NULL, ("iommu_get_requester(%s): NULL bridge " | ||||
"for %s", device_get_name(dev), device_get_name(pci))); | "for %s", device_get_name(dev), device_get_name(pci))); | ||||
/* | /* | ||||
* The parent of our "bridge" isn't another PCI bus, | * The parent of our "bridge" isn't another PCI bus, | ||||
* so pcib isn't a PCI->PCI bridge but rather a host | * so pcib isn't a PCI->PCI bridge but rather a host | ||||
* port, and the requester ID won't be translated | * port, and the requester ID won't be translated | ||||
* further. | * further. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 66 Lines • ▼ Show 20 Lines | if (pci_find_cap(l, PCIY_EXPRESS, &cap_offset) == 0) { | ||||
*rid = pci_get_rid(pcib); | *rid = pci_get_rid(pcib); | ||||
l = pcib; | l = pcib; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
return (requester); | return (requester); | ||||
} | } | ||||
struct dmar_ctx * | struct iommu_device * | ||||
dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr) | iommu_instantiate_device(struct iommu_unit *unit, device_t dev, bool rmrr) | ||||
{ | { | ||||
device_t requester; | device_t requester; | ||||
struct dmar_ctx *ctx; | struct iommu_device *ctx; | ||||
bool disabled; | bool disabled; | ||||
uint16_t rid; | uint16_t rid; | ||||
requester = dmar_get_requester(dev, &rid); | requester = iommu_get_requester(dev, &rid); | ||||
/* | /* | ||||
* If the user requested the IOMMU disabled for the device, we | * If the user requested the IOMMU disabled for the device, we | ||||
* cannot disable the DMAR, due to possibility of other | * cannot disable the DMAR, due to possibility of other | ||||
* devices on the same DMAR still requiring translation. | * devices on the same DMAR still requiring translation. | ||||
* Instead provide the identity mapping for the device | * Instead provide the identity mapping for the device | ||||
* context. | * context. | ||||
*/ | */ | ||||
disabled = dmar_bus_dma_is_dev_disabled(pci_get_domain(requester), | disabled = iommu_bus_dma_is_dev_disabled(pci_get_domain(requester), | ||||
pci_get_bus(requester), pci_get_slot(requester), | pci_get_bus(requester), pci_get_slot(requester), | ||||
pci_get_function(requester)); | pci_get_function(requester)); | ||||
ctx = dmar_get_ctx_for_dev(dmar, requester, rid, disabled, rmrr); | ctx = iommu_get_device(unit, requester, rid, disabled, rmrr); | ||||
if (ctx == NULL) | if (ctx == NULL) | ||||
return (NULL); | return (NULL); | ||||
if (disabled) { | if (disabled) { | ||||
/* | /* | ||||
* Keep the first reference on context, release the | * Keep the first reference on context, release the | ||||
* later refs. | * later refs. | ||||
*/ | */ | ||||
DMAR_LOCK(dmar); | IOMMU_LOCK(unit); | ||||
if ((ctx->flags & DMAR_CTX_DISABLED) == 0) { | if ((ctx->flags & DMAR_CTX_DISABLED) == 0) { | ||||
ctx->flags |= DMAR_CTX_DISABLED; | ctx->flags |= DMAR_CTX_DISABLED; | ||||
DMAR_UNLOCK(dmar); | IOMMU_UNLOCK(unit); | ||||
} else { | } else { | ||||
dmar_free_ctx_locked(dmar, ctx); | dmar_free_ctx_locked(unit, ctx); | ||||
} | } | ||||
ctx = NULL; | ctx = NULL; | ||||
} | } | ||||
return (ctx); | return (ctx); | ||||
} | } | ||||
bus_dma_tag_t | bus_dma_tag_t | ||||
acpi_iommu_get_dma_tag(device_t dev, device_t child) | acpi_iommu_get_dma_tag(device_t dev, device_t child) | ||||
{ | { | ||||
struct dmar_unit *dmar; | struct iommu_unit *unit; | ||||
struct dmar_ctx *ctx; | struct iommu_device *ctx; | ||||
bus_dma_tag_t res; | bus_dma_tag_t res; | ||||
dmar = dmar_find(child, bootverbose); | unit = iommu_find(child, bootverbose); | ||||
/* Not in scope of any DMAR ? */ | /* Not in scope of any DMAR ? */ | ||||
if (dmar == NULL) | if (unit == NULL) | ||||
return (NULL); | return (NULL); | ||||
if (!dmar->dma_enabled) | if (!unit->dma_enabled) | ||||
return (NULL); | return (NULL); | ||||
dmar_quirks_pre_use(dmar); | dmar_quirks_pre_use(unit); | ||||
dmar_instantiate_rmrr_ctxs(dmar); | dmar_instantiate_rmrr_ctxs(unit); | ||||
ctx = dmar_instantiate_ctx(dmar, child, false); | ctx = iommu_instantiate_device(unit, child, false); | ||||
res = ctx == NULL ? NULL : (bus_dma_tag_t)&ctx->ctx_tag; | res = ctx == NULL ? NULL : (bus_dma_tag_t)&ctx->tag; | ||||
return (res); | return (res); | ||||
} | } | ||||
bool | bool | ||||
bus_dma_dmar_set_buswide(device_t dev) | bus_dma_dmar_set_buswide(device_t dev) | ||||
{ | { | ||||
struct dmar_unit *dmar; | struct iommu_unit *unit; | ||||
device_t parent; | device_t parent; | ||||
u_int busno, slot, func; | u_int busno, slot, func; | ||||
parent = device_get_parent(dev); | parent = device_get_parent(dev); | ||||
if (device_get_devclass(parent) != devclass_find("pci")) | if (device_get_devclass(parent) != devclass_find("pci")) | ||||
return (false); | return (false); | ||||
dmar = dmar_find(dev, bootverbose); | unit = iommu_find(dev, bootverbose); | ||||
if (dmar == NULL) | if (unit == NULL) | ||||
return (false); | return (false); | ||||
busno = pci_get_bus(dev); | busno = pci_get_bus(dev); | ||||
slot = pci_get_slot(dev); | slot = pci_get_slot(dev); | ||||
func = pci_get_function(dev); | func = pci_get_function(dev); | ||||
if (slot != 0 || func != 0) { | if (slot != 0 || func != 0) { | ||||
if (bootverbose) { | if (bootverbose) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"dmar%d pci%d:%d:%d requested buswide busdma\n", | "dmar%d pci%d:%d:%d requested buswide busdma\n", | ||||
dmar->unit, busno, slot, func); | unit->unit, busno, slot, func); | ||||
} | } | ||||
return (false); | return (false); | ||||
} | } | ||||
dmar_set_buswide_ctx(dmar, busno); | dmar_set_buswide_ctx(unit, busno); | ||||
return (true); | return (true); | ||||
} | } | ||||
static MALLOC_DEFINE(M_DMAR_DMAMAP, "dmar_dmamap", "Intel DMAR DMA Map"); | static MALLOC_DEFINE(M_DMAR_DMAMAP, "dmar_dmamap", "Intel DMAR DMA Map"); | ||||
static void dmar_bus_schedule_dmamap(struct dmar_unit *unit, | static void iommu_bus_schedule_dmamap(struct iommu_unit *unit, | ||||
struct bus_dmamap_dmar *map); | struct bus_dmamap_iommu *map); | ||||
static int | static int | ||||
dmar_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, | iommu_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, | ||||
bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, | bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, | ||||
bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, | bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, | ||||
int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, | int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, | ||||
void *lockfuncarg, bus_dma_tag_t *dmat) | void *lockfuncarg, bus_dma_tag_t *dmat) | ||||
{ | { | ||||
struct bus_dma_tag_dmar *newtag, *oldtag; | struct bus_dma_tag_iommu *newtag, *oldtag; | ||||
int error; | int error; | ||||
*dmat = NULL; | *dmat = NULL; | ||||
error = common_bus_dma_tag_create(parent != NULL ? | error = common_bus_dma_tag_create(parent != NULL ? | ||||
&((struct bus_dma_tag_dmar *)parent)->common : NULL, alignment, | &((struct bus_dma_tag_iommu *)parent)->common : NULL, alignment, | ||||
boundary, lowaddr, highaddr, filter, filterarg, maxsize, | boundary, lowaddr, highaddr, filter, filterarg, maxsize, | ||||
nsegments, maxsegsz, flags, lockfunc, lockfuncarg, | nsegments, maxsegsz, flags, lockfunc, lockfuncarg, | ||||
sizeof(struct bus_dma_tag_dmar), (void **)&newtag); | sizeof(struct bus_dma_tag_iommu), (void **)&newtag); | ||||
if (error != 0) | if (error != 0) | ||||
goto out; | goto out; | ||||
oldtag = (struct bus_dma_tag_dmar *)parent; | oldtag = (struct bus_dma_tag_iommu *)parent; | ||||
newtag->common.impl = &bus_dma_dmar_impl; | newtag->common.impl = &bus_dma_iommu_impl; | ||||
newtag->ctx = oldtag->ctx; | newtag->ctx = oldtag->ctx; | ||||
newtag->owner = oldtag->owner; | newtag->owner = oldtag->owner; | ||||
*dmat = (bus_dma_tag_t)newtag; | *dmat = (bus_dma_tag_t)newtag; | ||||
out: | out: | ||||
CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", | CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", | ||||
__func__, newtag, (newtag != NULL ? newtag->common.flags : 0), | __func__, newtag, (newtag != NULL ? newtag->common.flags : 0), | ||||
error); | error); | ||||
return (error); | return (error); | ||||
} | } | ||||
static int | static int | ||||
dmar_bus_dma_tag_set_domain(bus_dma_tag_t dmat) | iommu_bus_dma_tag_set_domain(bus_dma_tag_t dmat) | ||||
{ | { | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1) | iommu_bus_dma_tag_destroy(bus_dma_tag_t dmat1) | ||||
{ | { | ||||
struct bus_dma_tag_dmar *dmat, *dmat_copy, *parent; | struct bus_dma_tag_iommu *dmat, *dmat_copy, *parent; | ||||
int error; | int error; | ||||
error = 0; | error = 0; | ||||
dmat_copy = dmat = (struct bus_dma_tag_dmar *)dmat1; | dmat_copy = dmat = (struct bus_dma_tag_iommu *)dmat1; | ||||
if (dmat != NULL) { | if (dmat != NULL) { | ||||
if (dmat->map_count != 0) { | if (dmat->map_count != 0) { | ||||
error = EBUSY; | error = EBUSY; | ||||
goto out; | goto out; | ||||
} | } | ||||
while (dmat != NULL) { | while (dmat != NULL) { | ||||
parent = (struct bus_dma_tag_dmar *)dmat->common.parent; | parent = (struct bus_dma_tag_iommu *)dmat->common.parent; | ||||
if (atomic_fetchadd_int(&dmat->common.ref_count, -1) == | if (atomic_fetchadd_int(&dmat->common.ref_count, -1) == | ||||
1) { | 1) { | ||||
if (dmat == &dmat->ctx->ctx_tag) | if (dmat == &dmat->ctx->tag) | ||||
dmar_free_ctx(dmat->ctx); | dmar_free_ctx(dmat->ctx); | ||||
free_domain(dmat->segments, M_DMAR_DMAMAP); | free_domain(dmat->segments, M_DMAR_DMAMAP); | ||||
free(dmat, M_DEVBUF); | free(dmat, M_DEVBUF); | ||||
dmat = parent; | dmat = parent; | ||||
} else | } else | ||||
dmat = NULL; | dmat = NULL; | ||||
} | } | ||||
} | } | ||||
out: | out: | ||||
CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); | CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); | ||||
return (error); | return (error); | ||||
} | } | ||||
static bool | static bool | ||||
dmar_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen) | iommu_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen) | ||||
{ | { | ||||
return (false); | return (false); | ||||
} | } | ||||
static int | static int | ||||
dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) | iommu_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) | ||||
{ | { | ||||
struct bus_dma_tag_dmar *tag; | struct bus_dma_tag_iommu *tag; | ||||
struct bus_dmamap_dmar *map; | struct bus_dmamap_iommu *map; | ||||
tag = (struct bus_dma_tag_dmar *)dmat; | tag = (struct bus_dma_tag_iommu *)dmat; | ||||
map = malloc_domainset(sizeof(*map), M_DMAR_DMAMAP, | map = malloc_domainset(sizeof(*map), M_DMAR_DMAMAP, | ||||
DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO); | DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO); | ||||
if (map == NULL) { | if (map == NULL) { | ||||
*mapp = NULL; | *mapp = NULL; | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
if (tag->segments == NULL) { | if (tag->segments == NULL) { | ||||
tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) * | tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) * | ||||
Show All 11 Lines | iommu_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) | ||||
map->cansleep = false; | map->cansleep = false; | ||||
tag->map_count++; | tag->map_count++; | ||||
*mapp = (bus_dmamap_t)map; | *mapp = (bus_dmamap_t)map; | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1) | iommu_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1) | ||||
{ | { | ||||
struct bus_dma_tag_dmar *tag; | struct bus_dma_tag_iommu *tag; | ||||
struct bus_dmamap_dmar *map; | struct bus_dmamap_iommu *map; | ||||
struct dmar_domain *domain; | struct iommu_domain *domain; | ||||
tag = (struct bus_dma_tag_dmar *)dmat; | tag = (struct bus_dma_tag_iommu *)dmat; | ||||
map = (struct bus_dmamap_dmar *)map1; | map = (struct bus_dmamap_iommu *)map1; | ||||
if (map != NULL) { | if (map != NULL) { | ||||
domain = tag->ctx->domain; | domain = tag->ctx->domain; | ||||
DMAR_DOMAIN_LOCK(domain); | IOMMU_DOMAIN_LOCK(domain); | ||||
if (!TAILQ_EMPTY(&map->map_entries)) { | if (!TAILQ_EMPTY(&map->map_entries)) { | ||||
DMAR_DOMAIN_UNLOCK(domain); | IOMMU_DOMAIN_UNLOCK(domain); | ||||
return (EBUSY); | return (EBUSY); | ||||
} | } | ||||
DMAR_DOMAIN_UNLOCK(domain); | IOMMU_DOMAIN_UNLOCK(domain); | ||||
free_domain(map, M_DMAR_DMAMAP); | free_domain(map, M_DMAR_DMAMAP); | ||||
} | } | ||||
tag->map_count--; | tag->map_count--; | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, | iommu_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, | ||||
bus_dmamap_t *mapp) | bus_dmamap_t *mapp) | ||||
{ | { | ||||
struct bus_dma_tag_dmar *tag; | struct bus_dma_tag_iommu *tag; | ||||
struct bus_dmamap_dmar *map; | struct bus_dmamap_iommu *map; | ||||
int error, mflags; | int error, mflags; | ||||
vm_memattr_t attr; | vm_memattr_t attr; | ||||
error = dmar_bus_dmamap_create(dmat, flags, mapp); | error = iommu_bus_dmamap_create(dmat, flags, mapp); | ||||
if (error != 0) | if (error != 0) | ||||
return (error); | return (error); | ||||
mflags = (flags & BUS_DMA_NOWAIT) != 0 ? M_NOWAIT : M_WAITOK; | mflags = (flags & BUS_DMA_NOWAIT) != 0 ? M_NOWAIT : M_WAITOK; | ||||
mflags |= (flags & BUS_DMA_ZERO) != 0 ? M_ZERO : 0; | mflags |= (flags & BUS_DMA_ZERO) != 0 ? M_ZERO : 0; | ||||
attr = (flags & BUS_DMA_NOCACHE) != 0 ? VM_MEMATTR_UNCACHEABLE : | attr = (flags & BUS_DMA_NOCACHE) != 0 ? VM_MEMATTR_UNCACHEABLE : | ||||
VM_MEMATTR_DEFAULT; | VM_MEMATTR_DEFAULT; | ||||
tag = (struct bus_dma_tag_dmar *)dmat; | tag = (struct bus_dma_tag_iommu *)dmat; | ||||
map = (struct bus_dmamap_dmar *)*mapp; | map = (struct bus_dmamap_iommu *)*mapp; | ||||
if (tag->common.maxsize < PAGE_SIZE && | if (tag->common.maxsize < PAGE_SIZE && | ||||
tag->common.alignment <= tag->common.maxsize && | tag->common.alignment <= tag->common.maxsize && | ||||
attr == VM_MEMATTR_DEFAULT) { | attr == VM_MEMATTR_DEFAULT) { | ||||
*vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF, | *vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF, | ||||
DOMAINSET_PREF(tag->common.domain), mflags); | DOMAINSET_PREF(tag->common.domain), mflags); | ||||
map->flags |= BUS_DMAMAP_DMAR_MALLOC; | map->flags |= BUS_DMAMAP_IOMMU_MALLOC; | ||||
} else { | } else { | ||||
*vaddr = (void *)kmem_alloc_attr_domainset( | *vaddr = (void *)kmem_alloc_attr_domainset( | ||||
DOMAINSET_PREF(tag->common.domain), tag->common.maxsize, | DOMAINSET_PREF(tag->common.domain), tag->common.maxsize, | ||||
mflags, 0ul, BUS_SPACE_MAXADDR, attr); | mflags, 0ul, BUS_SPACE_MAXADDR, attr); | ||||
map->flags |= BUS_DMAMAP_DMAR_KMEM_ALLOC; | map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC; | ||||
} | } | ||||
if (*vaddr == NULL) { | if (*vaddr == NULL) { | ||||
dmar_bus_dmamap_destroy(dmat, *mapp); | iommu_bus_dmamap_destroy(dmat, *mapp); | ||||
*mapp = NULL; | *mapp = NULL; | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
static void | static void | ||||
dmar_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1) | iommu_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1) | ||||
{ | { | ||||
struct bus_dma_tag_dmar *tag; | struct bus_dma_tag_iommu *tag; | ||||
struct bus_dmamap_dmar *map; | struct bus_dmamap_iommu *map; | ||||
tag = (struct bus_dma_tag_dmar *)dmat; | tag = (struct bus_dma_tag_iommu *)dmat; | ||||
map = (struct bus_dmamap_dmar *)map1; | map = (struct bus_dmamap_iommu *)map1; | ||||
if ((map->flags & BUS_DMAMAP_DMAR_MALLOC) != 0) { | if ((map->flags & BUS_DMAMAP_IOMMU_MALLOC) != 0) { | ||||
free_domain(vaddr, M_DEVBUF); | free_domain(vaddr, M_DEVBUF); | ||||
map->flags &= ~BUS_DMAMAP_DMAR_MALLOC; | map->flags &= ~BUS_DMAMAP_IOMMU_MALLOC; | ||||
} else { | } else { | ||||
KASSERT((map->flags & BUS_DMAMAP_DMAR_KMEM_ALLOC) != 0, | KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0, | ||||
("dmar_bus_dmamem_free for non alloced map %p", map)); | ("iommu_bus_dmamem_free for non alloced map %p", map)); | ||||
kmem_free((vm_offset_t)vaddr, tag->common.maxsize); | kmem_free((vm_offset_t)vaddr, tag->common.maxsize); | ||||
map->flags &= ~BUS_DMAMAP_DMAR_KMEM_ALLOC; | map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC; | ||||
} | } | ||||
dmar_bus_dmamap_destroy(dmat, map1); | iommu_bus_dmamap_destroy(dmat, map1); | ||||
} | } | ||||
static int | static int | ||||
dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag, | iommu_bus_dmamap_load_something1(struct bus_dma_tag_iommu *tag, | ||||
struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen, | struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen, | ||||
int flags, bus_dma_segment_t *segs, int *segp, | int flags, bus_dma_segment_t *segs, int *segp, | ||||
struct dmar_map_entries_tailq *unroll_list) | struct iommu_map_entries_tailq *unroll_list) | ||||
{ | { | ||||
struct dmar_ctx *ctx; | struct iommu_device *ctx; | ||||
struct dmar_domain *domain; | struct iommu_domain *domain; | ||||
struct dmar_map_entry *entry; | struct iommu_map_entry *entry; | ||||
dmar_gaddr_t size; | dmar_gaddr_t size; | ||||
bus_size_t buflen1; | bus_size_t buflen1; | ||||
int error, idx, gas_flags, seg; | int error, idx, gas_flags, seg; | ||||
KASSERT(offset < DMAR_PAGE_SIZE, ("offset %d", offset)); | KASSERT(offset < DMAR_PAGE_SIZE, ("offset %d", offset)); | ||||
if (segs == NULL) | if (segs == NULL) | ||||
segs = tag->segments; | segs = tag->segments; | ||||
ctx = tag->ctx; | ctx = tag->ctx; | ||||
Show All 10 Lines | while (buflen > 0) { | ||||
buflen1 = buflen > tag->common.maxsegsz ? | buflen1 = buflen > tag->common.maxsegsz ? | ||||
tag->common.maxsegsz : buflen; | tag->common.maxsegsz : buflen; | ||||
size = round_page(offset + buflen1); | size = round_page(offset + buflen1); | ||||
/* | /* | ||||
* (Too) optimistically allow split if there are more | * (Too) optimistically allow split if there are more | ||||
* then one segments left. | * then one segments left. | ||||
*/ | */ | ||||
gas_flags = map->cansleep ? DMAR_GM_CANWAIT : 0; | gas_flags = map->cansleep ? IOMMU_MF_CANWAIT : 0; | ||||
if (seg + 1 < tag->common.nsegments) | if (seg + 1 < tag->common.nsegments) | ||||
gas_flags |= DMAR_GM_CANSPLIT; | gas_flags |= IOMMU_MF_CANSPLIT; | ||||
error = dmar_gas_map(domain, &tag->common, size, offset, | error = dmar_gas_map(domain, &tag->common, size, offset, | ||||
DMAR_MAP_ENTRY_READ | | IOMMU_MAP_ENTRY_READ | | ||||
((flags & BUS_DMA_NOWRITE) == 0 ? DMAR_MAP_ENTRY_WRITE : 0), | ((flags & BUS_DMA_NOWRITE) == 0 ? IOMMU_MAP_ENTRY_WRITE : 0), | ||||
gas_flags, ma + idx, &entry); | gas_flags, ma + idx, &entry); | ||||
if (error != 0) | if (error != 0) | ||||
break; | break; | ||||
if ((gas_flags & DMAR_GM_CANSPLIT) != 0) { | if ((gas_flags & IOMMU_MF_CANSPLIT) != 0) { | ||||
KASSERT(size >= entry->end - entry->start, | KASSERT(size >= entry->end - entry->start, | ||||
("split increased entry size %jx %jx %jx", | ("split increased entry size %jx %jx %jx", | ||||
(uintmax_t)size, (uintmax_t)entry->start, | (uintmax_t)size, (uintmax_t)entry->start, | ||||
(uintmax_t)entry->end)); | (uintmax_t)entry->end)); | ||||
size = entry->end - entry->start; | size = entry->end - entry->start; | ||||
if (buflen1 > size) | if (buflen1 > size) | ||||
buflen1 = size; | buflen1 = size; | ||||
} else { | } else { | ||||
Show All 14 Lines | KASSERT(((entry->start + offset) & (tag->common.alignment - 1)) | ||||
(uintmax_t)tag->common.alignment)); | (uintmax_t)tag->common.alignment)); | ||||
KASSERT(entry->end <= tag->common.lowaddr || | KASSERT(entry->end <= tag->common.lowaddr || | ||||
entry->start >= tag->common.highaddr, | entry->start >= tag->common.highaddr, | ||||
("entry placement failed: ctx %p start 0x%jx end 0x%jx " | ("entry placement failed: ctx %p start 0x%jx end 0x%jx " | ||||
"lowaddr 0x%jx highaddr 0x%jx", ctx, | "lowaddr 0x%jx highaddr 0x%jx", ctx, | ||||
(uintmax_t)entry->start, (uintmax_t)entry->end, | (uintmax_t)entry->start, (uintmax_t)entry->end, | ||||
(uintmax_t)tag->common.lowaddr, | (uintmax_t)tag->common.lowaddr, | ||||
(uintmax_t)tag->common.highaddr)); | (uintmax_t)tag->common.highaddr)); | ||||
KASSERT(dmar_test_boundary(entry->start + offset, buflen1, | KASSERT(iommu_test_boundary(entry->start + offset, buflen1, | ||||
tag->common.boundary), | tag->common.boundary), | ||||
("boundary failed: ctx %p start 0x%jx end 0x%jx " | ("boundary failed: ctx %p start 0x%jx end 0x%jx " | ||||
"boundary 0x%jx", ctx, (uintmax_t)entry->start, | "boundary 0x%jx", ctx, (uintmax_t)entry->start, | ||||
(uintmax_t)entry->end, (uintmax_t)tag->common.boundary)); | (uintmax_t)entry->end, (uintmax_t)tag->common.boundary)); | ||||
KASSERT(buflen1 <= tag->common.maxsegsz, | KASSERT(buflen1 <= tag->common.maxsegsz, | ||||
("segment too large: ctx %p start 0x%jx end 0x%jx " | ("segment too large: ctx %p start 0x%jx end 0x%jx " | ||||
"buflen1 0x%jx maxsegsz 0x%jx", ctx, | "buflen1 0x%jx maxsegsz 0x%jx", ctx, | ||||
(uintmax_t)entry->start, (uintmax_t)entry->end, | (uintmax_t)entry->start, (uintmax_t)entry->end, | ||||
(uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz)); | (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz)); | ||||
DMAR_DOMAIN_LOCK(domain); | IOMMU_DOMAIN_LOCK(domain); | ||||
TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link); | TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link); | ||||
entry->flags |= DMAR_MAP_ENTRY_MAP; | entry->flags |= IOMMU_MAP_ENTRY_MAP; | ||||
DMAR_DOMAIN_UNLOCK(domain); | IOMMU_DOMAIN_UNLOCK(domain); | ||||
TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link); | TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link); | ||||
segs[seg].ds_addr = entry->start + offset; | segs[seg].ds_addr = entry->start + offset; | ||||
segs[seg].ds_len = buflen1; | segs[seg].ds_len = buflen1; | ||||
idx += OFF_TO_IDX(trunc_page(offset + buflen1)); | idx += OFF_TO_IDX(trunc_page(offset + buflen1)); | ||||
offset += buflen1; | offset += buflen1; | ||||
offset &= DMAR_PAGE_MASK; | offset &= DMAR_PAGE_MASK; | ||||
buflen -= buflen1; | buflen -= buflen1; | ||||
} | } | ||||
if (error == 0) | if (error == 0) | ||||
*segp = seg; | *segp = seg; | ||||
return (error); | return (error); | ||||
} | } | ||||
static int | static int | ||||
dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag, | iommu_bus_dmamap_load_something(struct bus_dma_tag_iommu *tag, | ||||
struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen, | struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen, | ||||
int flags, bus_dma_segment_t *segs, int *segp) | int flags, bus_dma_segment_t *segs, int *segp) | ||||
{ | { | ||||
struct dmar_ctx *ctx; | struct iommu_device *ctx; | ||||
struct dmar_domain *domain; | struct iommu_domain *domain; | ||||
struct dmar_map_entry *entry, *entry1; | struct iommu_map_entry *entry, *entry1; | ||||
struct dmar_map_entries_tailq unroll_list; | struct iommu_map_entries_tailq unroll_list; | ||||
int error; | int error; | ||||
ctx = tag->ctx; | ctx = tag->ctx; | ||||
domain = ctx->domain; | domain = ctx->domain; | ||||
atomic_add_long(&ctx->loads, 1); | atomic_add_long(&ctx->loads, 1); | ||||
TAILQ_INIT(&unroll_list); | TAILQ_INIT(&unroll_list); | ||||
error = dmar_bus_dmamap_load_something1(tag, map, ma, offset, | error = iommu_bus_dmamap_load_something1(tag, map, ma, offset, | ||||
buflen, flags, segs, segp, &unroll_list); | buflen, flags, segs, segp, &unroll_list); | ||||
if (error != 0) { | if (error != 0) { | ||||
/* | /* | ||||
* The busdma interface does not allow us to report | * The busdma interface does not allow us to report | ||||
* partial buffer load, so unfortunately we have to | * partial buffer load, so unfortunately we have to | ||||
* revert all work done. | * revert all work done. | ||||
*/ | */ | ||||
DMAR_DOMAIN_LOCK(domain); | IOMMU_DOMAIN_LOCK(domain); | ||||
TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link, | TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link, | ||||
entry1) { | entry1) { | ||||
/* | /* | ||||
* No entries other than what we have created | * No entries other than what we have created | ||||
* during the failed run might have been | * during the failed run might have been | ||||
* inserted there in between, since we own ctx | * inserted there in between, since we own ctx | ||||
* pglock. | * pglock. | ||||
*/ | */ | ||||
TAILQ_REMOVE(&map->map_entries, entry, dmamap_link); | TAILQ_REMOVE(&map->map_entries, entry, dmamap_link); | ||||
TAILQ_REMOVE(&unroll_list, entry, unroll_link); | TAILQ_REMOVE(&unroll_list, entry, unroll_link); | ||||
TAILQ_INSERT_TAIL(&domain->unload_entries, entry, | TAILQ_INSERT_TAIL(&domain->unload_entries, entry, | ||||
dmamap_link); | dmamap_link); | ||||
} | } | ||||
DMAR_DOMAIN_UNLOCK(domain); | IOMMU_DOMAIN_UNLOCK(domain); | ||||
taskqueue_enqueue(domain->dmar->delayed_taskqueue, | taskqueue_enqueue(domain->iommu->delayed_taskqueue, | ||||
&domain->unload_task); | &domain->unload_task); | ||||
} | } | ||||
if (error == ENOMEM && (flags & BUS_DMA_NOWAIT) == 0 && | if (error == ENOMEM && (flags & BUS_DMA_NOWAIT) == 0 && | ||||
!map->cansleep) | !map->cansleep) | ||||
error = EINPROGRESS; | error = EINPROGRESS; | ||||
if (error == EINPROGRESS) | if (error == EINPROGRESS) | ||||
dmar_bus_schedule_dmamap(domain->dmar, map); | iommu_bus_schedule_dmamap(domain->iommu, map); | ||||
return (error); | return (error); | ||||
} | } | ||||
static int | static int | ||||
dmar_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1, | iommu_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1, | ||||
struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, | struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, | ||||
bus_dma_segment_t *segs, int *segp) | bus_dma_segment_t *segs, int *segp) | ||||
{ | { | ||||
struct bus_dma_tag_dmar *tag; | struct bus_dma_tag_iommu *tag; | ||||
struct bus_dmamap_dmar *map; | struct bus_dmamap_iommu *map; | ||||
tag = (struct bus_dma_tag_dmar *)dmat; | tag = (struct bus_dma_tag_iommu *)dmat; | ||||
map = (struct bus_dmamap_dmar *)map1; | map = (struct bus_dmamap_iommu *)map1; | ||||
return (dmar_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen, | return (iommu_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen, | ||||
flags, segs, segp)); | flags, segs, segp)); | ||||
} | } | ||||
static int | static int | ||||
dmar_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1, | iommu_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1, | ||||
vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, | vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, | ||||
int *segp) | int *segp) | ||||
{ | { | ||||
struct bus_dma_tag_dmar *tag; | struct bus_dma_tag_iommu *tag; | ||||
struct bus_dmamap_dmar *map; | struct bus_dmamap_iommu *map; | ||||
vm_page_t *ma, fma; | vm_page_t *ma, fma; | ||||
vm_paddr_t pstart, pend, paddr; | vm_paddr_t pstart, pend, paddr; | ||||
int error, i, ma_cnt, mflags, offset; | int error, i, ma_cnt, mflags, offset; | ||||
tag = (struct bus_dma_tag_dmar *)dmat; | tag = (struct bus_dma_tag_iommu *)dmat; | ||||
map = (struct bus_dmamap_dmar *)map1; | map = (struct bus_dmamap_iommu *)map1; | ||||
pstart = trunc_page(buf); | pstart = trunc_page(buf); | ||||
pend = round_page(buf + buflen); | pend = round_page(buf + buflen); | ||||
offset = buf & PAGE_MASK; | offset = buf & PAGE_MASK; | ||||
ma_cnt = OFF_TO_IDX(pend - pstart); | ma_cnt = OFF_TO_IDX(pend - pstart); | ||||
mflags = map->cansleep ? M_WAITOK : M_NOWAIT; | mflags = map->cansleep ? M_WAITOK : M_NOWAIT; | ||||
ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, mflags); | ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, mflags); | ||||
if (ma == NULL) | if (ma == NULL) | ||||
return (ENOMEM); | return (ENOMEM); | ||||
Show All 15 Lines | if (ma[i] == NULL || VM_PAGE_TO_PHYS(ma[i]) != paddr) { | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
} | } | ||||
vm_page_initfake(&fma[i], pstart + ptoa(i), | vm_page_initfake(&fma[i], pstart + ptoa(i), | ||||
VM_MEMATTR_DEFAULT); | VM_MEMATTR_DEFAULT); | ||||
ma[i] = &fma[i]; | ma[i] = &fma[i]; | ||||
} | } | ||||
} | } | ||||
error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen, | error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen, | ||||
flags, segs, segp); | flags, segs, segp); | ||||
free(fma, M_DEVBUF); | free(fma, M_DEVBUF); | ||||
free(ma, M_DEVBUF); | free(ma, M_DEVBUF); | ||||
return (error); | return (error); | ||||
} | } | ||||
static int | static int | ||||
dmar_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf, | iommu_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf, | ||||
bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, | bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, | ||||
int *segp) | int *segp) | ||||
{ | { | ||||
struct bus_dma_tag_dmar *tag; | struct bus_dma_tag_iommu *tag; | ||||
struct bus_dmamap_dmar *map; | struct bus_dmamap_iommu *map; | ||||
vm_page_t *ma, fma; | vm_page_t *ma, fma; | ||||
vm_paddr_t pstart, pend, paddr; | vm_paddr_t pstart, pend, paddr; | ||||
int error, i, ma_cnt, mflags, offset; | int error, i, ma_cnt, mflags, offset; | ||||
tag = (struct bus_dma_tag_dmar *)dmat; | tag = (struct bus_dma_tag_iommu *)dmat; | ||||
map = (struct bus_dmamap_dmar *)map1; | map = (struct bus_dmamap_iommu *)map1; | ||||
pstart = trunc_page((vm_offset_t)buf); | pstart = trunc_page((vm_offset_t)buf); | ||||
pend = round_page((vm_offset_t)buf + buflen); | pend = round_page((vm_offset_t)buf + buflen); | ||||
offset = (vm_offset_t)buf & PAGE_MASK; | offset = (vm_offset_t)buf & PAGE_MASK; | ||||
ma_cnt = OFF_TO_IDX(pend - pstart); | ma_cnt = OFF_TO_IDX(pend - pstart); | ||||
mflags = map->cansleep ? M_WAITOK : M_NOWAIT; | mflags = map->cansleep ? M_WAITOK : M_NOWAIT; | ||||
ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, mflags); | ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, mflags); | ||||
if (ma == NULL) | if (ma == NULL) | ||||
return (ENOMEM); | return (ENOMEM); | ||||
Show All 17 Lines | if (ma[i] == NULL || VM_PAGE_TO_PHYS(ma[i]) != paddr) { | ||||
free(ma, M_DEVBUF); | free(ma, M_DEVBUF); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
} | } | ||||
vm_page_initfake(&fma[i], paddr, VM_MEMATTR_DEFAULT); | vm_page_initfake(&fma[i], paddr, VM_MEMATTR_DEFAULT); | ||||
ma[i] = &fma[i]; | ma[i] = &fma[i]; | ||||
} | } | ||||
} | } | ||||
error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen, | error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen, | ||||
flags, segs, segp); | flags, segs, segp); | ||||
free(ma, M_DEVBUF); | free(ma, M_DEVBUF); | ||||
free(fma, M_DEVBUF); | free(fma, M_DEVBUF); | ||||
return (error); | return (error); | ||||
} | } | ||||
static void | static void | ||||
dmar_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1, | iommu_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1, | ||||
struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) | struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) | ||||
{ | { | ||||
struct bus_dmamap_dmar *map; | struct bus_dmamap_iommu *map; | ||||
if (map1 == NULL) | if (map1 == NULL) | ||||
return; | return; | ||||
map = (struct bus_dmamap_dmar *)map1; | map = (struct bus_dmamap_iommu *)map1; | ||||
map->mem = *mem; | map->mem = *mem; | ||||
map->tag = (struct bus_dma_tag_dmar *)dmat; | map->tag = (struct bus_dma_tag_iommu *)dmat; | ||||
map->callback = callback; | map->callback = callback; | ||||
map->callback_arg = callback_arg; | map->callback_arg = callback_arg; | ||||
} | } | ||||
static bus_dma_segment_t * | static bus_dma_segment_t * | ||||
dmar_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1, | iommu_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1, | ||||
bus_dma_segment_t *segs, int nsegs, int error) | bus_dma_segment_t *segs, int nsegs, int error) | ||||
{ | { | ||||
struct bus_dma_tag_dmar *tag; | struct bus_dma_tag_iommu *tag; | ||||
struct bus_dmamap_dmar *map; | struct bus_dmamap_iommu *map; | ||||
tag = (struct bus_dma_tag_dmar *)dmat; | tag = (struct bus_dma_tag_iommu *)dmat; | ||||
map = (struct bus_dmamap_dmar *)map1; | map = (struct bus_dmamap_iommu *)map1; | ||||
if (!map->locked) { | if (!map->locked) { | ||||
KASSERT(map->cansleep, | KASSERT(map->cansleep, | ||||
("map not locked and not sleepable context %p", map)); | ("map not locked and not sleepable context %p", map)); | ||||
/* | /* | ||||
* We are called from the delayed context. Relock the | * We are called from the delayed context. Relock the | ||||
* driver. | * driver. | ||||
Show All 13 Lines | |||||
* from the delayed context on i386, since page table page mapping | * from the delayed context on i386, since page table page mapping | ||||
* might require a sleep to be successfull. The unfortunate | * might require a sleep to be successfull. The unfortunate | ||||
* consequence is that the DMA requests can be served some time after | * consequence is that the DMA requests can be served some time after | ||||
* the bus_dmamap_unload() call returned. | * the bus_dmamap_unload() call returned. | ||||
* | * | ||||
* On amd64, we assume that sf allocation cannot fail. | * On amd64, we assume that sf allocation cannot fail. | ||||
*/ | */ | ||||
static void
iommu_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
{
	struct bus_dma_tag_iommu *tag;
	struct bus_dmamap_iommu *map;
	struct iommu_device *ctx;
	struct iommu_domain *domain;
#if defined(__amd64__)
	struct iommu_map_entries_tailq entries;
#endif

	tag = (struct bus_dma_tag_iommu *)dmat;
	map = (struct bus_dmamap_iommu *)map1;
	ctx = tag->ctx;
	domain = ctx->domain;
	/* Statistics only; read via the ctx, no lock needed for the add. */
	atomic_add_long(&ctx->unloads, 1);

#if defined(__i386__)
	/*
	 * Hand the map's entries to the per-domain deferred-unload list
	 * and let the taskqueue thread do the unmapping, since it may
	 * need to sleep (see the comment above this function).
	 */
	IOMMU_DOMAIN_LOCK(domain);
	TAILQ_CONCAT(&domain->unload_entries, &map->map_entries, dmamap_link);
	IOMMU_DOMAIN_UNLOCK(domain);
	taskqueue_enqueue(domain->iommu->delayed_taskqueue,
	    &domain->unload_task);
#else /* defined(__amd64__) */
	/*
	 * Detach the entries from the map under the domain lock, then
	 * unload them synchronously; THREAD_NO_SLEEPING() asserts that
	 * the unload path does not sleep here.
	 */
	TAILQ_INIT(&entries);
	IOMMU_DOMAIN_LOCK(domain);
	TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
	IOMMU_DOMAIN_UNLOCK(domain);
	THREAD_NO_SLEEPING();
	iommu_domain_unload(domain, &entries, false);
	THREAD_SLEEPING_OK();
	KASSERT(TAILQ_EMPTY(&entries), ("lazy iommu_device_unload %p", ctx));
#endif
}
static void
iommu_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dmasync_op_t op)
{
	/*
	 * Intentionally empty: this implementation performs no copying
	 * at sync time.  NOTE(review): presumably DMA through the IOMMU
	 * is cache-coherent on x86, so no flush is needed -- confirm.
	 */
}
/* busdma(9) method table for IOMMU (DMAR)-backed DMA tags. */
struct bus_dma_impl bus_dma_iommu_impl = {
	.tag_create = iommu_bus_dma_tag_create,
	.tag_destroy = iommu_bus_dma_tag_destroy,
	.tag_set_domain = iommu_bus_dma_tag_set_domain,
	.id_mapped = iommu_bus_dma_id_mapped,
	.map_create = iommu_bus_dmamap_create,
	.map_destroy = iommu_bus_dmamap_destroy,
	.mem_alloc = iommu_bus_dmamem_alloc,
	.mem_free = iommu_bus_dmamem_free,
	.load_phys = iommu_bus_dmamap_load_phys,
	.load_buffer = iommu_bus_dmamap_load_buffer,
	.load_ma = iommu_bus_dmamap_load_ma,
	.map_waitok = iommu_bus_dmamap_waitok,
	.map_complete = iommu_bus_dmamap_complete,
	.map_unload = iommu_bus_dmamap_unload,
	.map_sync = iommu_bus_dmamap_sync,
};
static void | static void | ||||
dmar_bus_task_dmamap(void *arg, int pending) | iommu_bus_task_dmamap(void *arg, int pending) | ||||
{ | { | ||||
struct bus_dma_tag_dmar *tag; | struct bus_dma_tag_iommu *tag; | ||||
struct bus_dmamap_dmar *map; | struct bus_dmamap_iommu *map; | ||||
struct dmar_unit *unit; | struct iommu_unit *unit; | ||||
unit = arg; | unit = arg; | ||||
DMAR_LOCK(unit); | IOMMU_LOCK(unit); | ||||
while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) { | while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) { | ||||
TAILQ_REMOVE(&unit->delayed_maps, map, delay_link); | TAILQ_REMOVE(&unit->delayed_maps, map, delay_link); | ||||
DMAR_UNLOCK(unit); | IOMMU_UNLOCK(unit); | ||||
tag = map->tag; | tag = map->tag; | ||||
map->cansleep = true; | map->cansleep = true; | ||||
map->locked = false; | map->locked = false; | ||||
bus_dmamap_load_mem((bus_dma_tag_t)tag, (bus_dmamap_t)map, | bus_dmamap_load_mem((bus_dma_tag_t)tag, (bus_dmamap_t)map, | ||||
&map->mem, map->callback, map->callback_arg, | &map->mem, map->callback, map->callback_arg, | ||||
BUS_DMA_WAITOK); | BUS_DMA_WAITOK); | ||||
map->cansleep = false; | map->cansleep = false; | ||||
if (map->locked) { | if (map->locked) { | ||||
(tag->common.lockfunc)(tag->common.lockfuncarg, | (tag->common.lockfunc)(tag->common.lockfuncarg, | ||||
BUS_DMA_UNLOCK); | BUS_DMA_UNLOCK); | ||||
} else | } else | ||||
map->locked = true; | map->locked = true; | ||||
map->cansleep = false; | map->cansleep = false; | ||||
DMAR_LOCK(unit); | IOMMU_LOCK(unit); | ||||
} | } | ||||
DMAR_UNLOCK(unit); | IOMMU_UNLOCK(unit); | ||||
} | } | ||||
/*
 * Queue a map whose load could not be completed in the caller's
 * context onto the unit's delayed-load list and kick the taskqueue
 * task that services it (iommu_bus_task_dmamap).
 */
static void
iommu_bus_schedule_dmamap(struct iommu_unit *unit, struct bus_dmamap_iommu *map)
{
	map->locked = false;	/* The delayed load runs without the driver lock. */
	IOMMU_LOCK(unit);
	TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);
	IOMMU_UNLOCK(unit);
	taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task);
}
/*
 * Initialize the busdma state of an IOMMU unit: DMA remapping is
 * enabled by default (overridable with the hw.dmar.dma tunable), and
 * a single taskqueue thread is created to service delayed map loads.
 * Always returns 0.
 */
int
iommu_init_busdma(struct iommu_unit *unit)
{
	unit->dma_enabled = 1;
	TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
	TAILQ_INIT(&unit->delayed_maps);
	TASK_INIT(&unit->dmamap_load_task, 0, iommu_bus_task_dmamap, unit);
	/* M_WAITOK allocation, so the taskqueue pointer is expected valid. */
	unit->delayed_taskqueue = taskqueue_create("dmar", M_WAITOK,
	    taskqueue_thread_enqueue, &unit->delayed_taskqueue);
	taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK,
	    "dmar%d busdma taskq", unit->unit);
	return (0);
}
void | void | ||||
dmar_fini_busdma(struct dmar_unit *unit) | iommu_fini_busdma(struct iommu_unit *unit) | ||||
{ | { | ||||
if (unit->delayed_taskqueue == NULL) | if (unit->delayed_taskqueue == NULL) | ||||
return; | return; | ||||
taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task); | taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task); | ||||
taskqueue_free(unit->delayed_taskqueue); | taskqueue_free(unit->delayed_taskqueue); | ||||
unit->delayed_taskqueue = NULL; | unit->delayed_taskqueue = NULL; | ||||
} | } | ||||
int | int | ||||
bus_dma_dmar_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map1, | bus_dma_dmar_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map1, | ||||
vm_paddr_t start, vm_size_t length, int flags) | vm_paddr_t start, vm_size_t length, int flags) | ||||
{ | { | ||||
struct bus_dma_tag_common *tc; | struct bus_dma_tag_common *tc; | ||||
struct bus_dma_tag_dmar *tag; | struct bus_dma_tag_iommu *tag; | ||||
struct bus_dmamap_dmar *map; | struct bus_dmamap_iommu *map; | ||||
struct dmar_ctx *ctx; | struct iommu_device *ctx; | ||||
struct dmar_domain *domain; | struct iommu_domain *domain; | ||||
struct dmar_map_entry *entry; | struct iommu_map_entry *entry; | ||||
vm_page_t *ma; | vm_page_t *ma; | ||||
vm_size_t i; | vm_size_t i; | ||||
int error; | int error; | ||||
bool waitok; | bool waitok; | ||||
MPASS((start & PAGE_MASK) == 0); | MPASS((start & PAGE_MASK) == 0); | ||||
MPASS((length & PAGE_MASK) == 0); | MPASS((length & PAGE_MASK) == 0); | ||||
MPASS(length > 0); | MPASS(length > 0); | ||||
MPASS(start + length >= start); | MPASS(start + length >= start); | ||||
MPASS((flags & ~(BUS_DMA_NOWAIT | BUS_DMA_NOWRITE)) == 0); | MPASS((flags & ~(BUS_DMA_NOWAIT | BUS_DMA_NOWRITE)) == 0); | ||||
tc = (struct bus_dma_tag_common *)dmat; | tc = (struct bus_dma_tag_common *)dmat; | ||||
if (tc->impl != &bus_dma_dmar_impl) | if (tc->impl != &bus_dma_iommu_impl) | ||||
return (0); | return (0); | ||||
tag = (struct bus_dma_tag_dmar *)dmat; | tag = (struct bus_dma_tag_iommu *)dmat; | ||||
ctx = tag->ctx; | ctx = tag->ctx; | ||||
domain = ctx->domain; | domain = ctx->domain; | ||||
map = (struct bus_dmamap_dmar *)map1; | map = (struct bus_dmamap_iommu *)map1; | ||||
waitok = (flags & BUS_DMA_NOWAIT) != 0; | waitok = (flags & BUS_DMA_NOWAIT) != 0; | ||||
entry = dmar_gas_alloc_entry(domain, waitok ? 0 : DMAR_PGF_WAITOK); | entry = dmar_gas_alloc_entry(domain, waitok ? 0 : DMAR_PGF_WAITOK); | ||||
if (entry == NULL) | if (entry == NULL) | ||||
return (ENOMEM); | return (ENOMEM); | ||||
entry->start = start; | entry->start = start; | ||||
entry->end = start + length; | entry->end = start + length; | ||||
ma = malloc(sizeof(vm_page_t) * atop(length), M_TEMP, waitok ? | ma = malloc(sizeof(vm_page_t) * atop(length), M_TEMP, waitok ? | ||||
M_WAITOK : M_NOWAIT); | M_WAITOK : M_NOWAIT); | ||||
if (ma == NULL) { | if (ma == NULL) { | ||||
dmar_gas_free_entry(domain, entry); | dmar_gas_free_entry(domain, entry); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
for (i = 0; i < atop(length); i++) { | for (i = 0; i < atop(length); i++) { | ||||
ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i, | ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i, | ||||
VM_MEMATTR_DEFAULT); | VM_MEMATTR_DEFAULT); | ||||
} | } | ||||
error = dmar_gas_map_region(domain, entry, DMAR_MAP_ENTRY_READ | | error = dmar_gas_map_region(domain, entry, IOMMU_MAP_ENTRY_READ | | ||||
((flags & BUS_DMA_NOWRITE) ? 0 : DMAR_MAP_ENTRY_WRITE), | ((flags & BUS_DMA_NOWRITE) ? 0 : IOMMU_MAP_ENTRY_WRITE), | ||||
waitok ? DMAR_GM_CANWAIT : 0, ma); | waitok ? IOMMU_MF_CANWAIT : 0, ma); | ||||
if (error == 0) { | if (error == 0) { | ||||
DMAR_DOMAIN_LOCK(domain); | IOMMU_DOMAIN_LOCK(domain); | ||||
TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link); | TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link); | ||||
entry->flags |= DMAR_MAP_ENTRY_MAP; | entry->flags |= IOMMU_MAP_ENTRY_MAP; | ||||
DMAR_DOMAIN_UNLOCK(domain); | IOMMU_DOMAIN_UNLOCK(domain); | ||||
} else { | } else { | ||||
dmar_domain_unload_entry(entry, true); | iommu_domain_unload_entry(entry, true); | ||||
} | } | ||||
for (i = 0; i < atop(length); i++) | for (i = 0; i < atop(length); i++) | ||||
vm_page_putfake(ma[i]); | vm_page_putfake(ma[i]); | ||||
free(ma, M_TEMP); | free(ma, M_TEMP); | ||||
return (error); | return (error); | ||||
} | } |