sys/compat/linuxkpi/common/src/linux_pci.c
Show All 32 Lines
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <linux/kobject.h>
Show All 21 Lines
static device_method_t pci_methods[] = {
        DEVMETHOD(device_attach, linux_pci_attach),
        DEVMETHOD(device_detach, linux_pci_detach),
        DEVMETHOD(device_suspend, linux_pci_suspend),
        DEVMETHOD(device_resume, linux_pci_resume),
        DEVMETHOD(device_shutdown, linux_pci_shutdown),
        DEVMETHOD_END
};
struct linux_dma_priv {
        uint64_t        dma_mask;
        bus_dma_tag_t   dmat;
        struct mtx      ptree_lock;
        struct pctrie   ptree;
};

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
        struct linux_dma_priv *priv;

        priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
        pdev->dev.dma_priv = priv;
        mtx_init(&priv->ptree_lock, "linux_dma_ptree", NULL, MTX_DEF);
        pctrie_init(&priv->ptree);
        return (0);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
        struct linux_dma_priv *priv;

        priv = pdev->dev.dma_priv;
        if (priv->dmat)
                bus_dma_tag_destroy(priv->dmat);
        mtx_destroy(&priv->ptree_lock);
        free(priv, M_DEVBUF);
        pdev->dev.dma_priv = NULL;
        return (0);
}
int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
        struct linux_dma_priv *priv;
        int error;

        priv = dev->dma_priv;
        if (priv->dmat) {

kib: Perhaps compare the old and new mask, and only destroy the old tag if they are non-equal?

                if (priv->dma_mask == dma_mask)
                        return (0);
                bus_dma_tag_destroy(priv->dmat);
        }
        priv->dma_mask = dma_mask;

        error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
            1, 0,                       /* alignment, boundary */
            dma_mask,                   /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filtfunc, filtfuncarg */
            BUS_SPACE_MAXADDR,          /* maxsize */
            1,                          /* nsegments */
            BUS_SPACE_MAXADDR,          /* maxsegsz */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockfuncarg */
            &priv->dmat);

kib: You create the tag with NULL lockfunc. I believe it means that all loads and allocations must specify NOWAIT then, otherwise bounce busdma becomes unhappy when it really bounces and has to delay allocation into an swi thread. And we cannot operate in a callback anyway.

tychon: I'm not all that familiar with deferred loads, but I'm now passing busdma_lock_mutex and a mutex (which, as I write this, I realized I forgot to initialize -- onto the TO-DO list), so I think this should now work.

kib: It is much more complicated, unfortunately. Waitable busdma requests require callbacks. The callbacks are called from the swi context, and only in the callback do you get the segments prepared for loading into the device's DMA engine. I am not even sure if some handshake would work, where the original requester sleeps until the callback wakes him up and somehow marshals the segments back. IMO it is enough to use non-sleepable allocs for now.

        return (-error);

hselasky: Linux errors are negative. I think this should be: return (-error);
}
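As context for the lockfunc thread above: with a NULL lockfunc, busdma cannot defer a load to the swi thread, which is why every load in this patch passes BUS_DMA_NOWAIT. A minimal sketch of the alternative that the review discussed and set aside, reusing the tag-creation arguments above, would hand busdma_lock_mutex and a mutex to the tag:

/*
 * Hypothetical variant discussed (and rejected) in the review: give
 * the tag a lockfunc so deferred busdma callbacks can be serialized.
 * The patch instead keeps lockfunc NULL and uses BUS_DMA_NOWAIT
 * throughout, since waitable requests would require callback plumbing.
 */
error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
    1, 0,                       /* alignment, boundary */
    dma_mask,                   /* lowaddr */
    BUS_SPACE_MAXADDR,          /* highaddr */
    NULL, NULL,                 /* filtfunc, filtfuncarg */
    BUS_SPACE_MAXADDR,          /* maxsize */
    1,                          /* nsegments */
    BUS_SPACE_MAXADDR,          /* maxsegsz */
    0,                          /* flags */
    busdma_lock_mutex,          /* lockfunc */
    &priv->ptree_lock,          /* lockfuncarg */
    &priv->dmat);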
static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
        const struct pci_device_id *id;
        struct pci_driver *pdrv;
        uint16_t vendor;
        uint16_t device;
        uint16_t subvendor;
        uint16_t subdevice;

        vendor = pci_get_vendor(dev);
        device = pci_get_device(dev);
Show All 63 Lines
linux_pci_attach(device_t dev)
        INIT_LIST_HEAD(&pdev->dev.irqents);
        pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
        pdev->device = dinfo->cfg.device;
        pdev->vendor = dinfo->cfg.vendor;
        pdev->subsystem_vendor = dinfo->cfg.subvendor;
        pdev->subsystem_device = dinfo->cfg.subdevice;
        pdev->class = pci_get_class(dev);
        pdev->revision = pci_get_revid(dev);
        pdev->dev.dma_mask = &pdev->dma_mask;
        pdev->pdrv = pdrv;
        kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
        kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
        kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
            kobject_name(&pdev->dev.kobj));
        rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
        if (rle != NULL)
                pdev->dev.irq = rle->start;
        else
                pdev->dev.irq = LINUX_IRQ_INVALID;
        pdev->irq = pdev->dev.irq;
        error = linux_pdev_dma_init(pdev);
        if (error)
                goto out;
        if (pdev->bus == NULL) {
                pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
                pbus->self = pdev;
                pbus->number = pci_get_bus(dev);
                pdev->bus = pbus;
        }
        spin_lock(&pci_lock);
        list_add(&pdev->links, &pci_devices);
        spin_unlock(&pci_lock);
        error = pdrv->probe(pdev, id);
out:
        if (error) {
                spin_lock(&pci_lock);
                list_del(&pdev->links);
                spin_unlock(&pci_lock);
                put_device(&pdev->dev);
                error = -error;
        }
        return (error);
}
static int
linux_pci_detach(device_t dev)
{
        struct pci_dev *pdev;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        pdev->pdrv->remove(pdev);
        linux_pdev_dma_uninit(pdev);
        spin_lock(&pci_lock);
        list_del(&pdev->links);
        spin_unlock(&pci_lock);
        device_set_desc(dev, NULL);
        put_device(&pdev->dev);
        return (0);
Show All 111 Lines
linux_pci_unregister_driver(struct pci_driver *pdrv)
        spin_lock(&pci_lock);
        list_del(&pdrv->links);
        spin_unlock(&pci_lock);
        mtx_lock(&Giant);
        if (bus != NULL)
                devclass_delete_driver(bus, &pdrv->bsddriver);
        mtx_unlock(&Giant);
}
struct linux_dma_obj {
        void            *vaddr;
        dma_addr_t      dma_addr;
        bus_dmamap_t    dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

        linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
            pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
            UMA_ALIGN_PTR, 0);
        linux_dma_obj_zone = uma_zcreate("linux_dma_object",
            sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

        uma_zdestroy(linux_dma_obj_zone);
        uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

        return (uma_zalloc(linux_dma_trie_zone, 0));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

        uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);
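/*
 * PCTRIE_DEFINE() above expands to the typed trie operations used in
 * the rest of this file -- LINUX_DMA_PCTRIE_INSERT(),
 * LINUX_DMA_PCTRIE_LOOKUP() and LINUX_DMA_PCTRIE_REMOVE() -- keyed on
 * the dma_addr member of struct linux_dma_obj, with trie nodes
 * allocated and freed through the two callbacks above.
 */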
void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
        struct linux_dma_priv *priv;
        vm_paddr_t high;
        size_t align;
        void *mem;

        if (dev == NULL || dev->dma_priv == NULL) {
                *dma_handle = 0;
                return (NULL);
        }
        priv = dev->dma_priv;
        if (priv->dma_mask)
                high = priv->dma_mask;
        else if (flag & GFP_DMA32)
                high = BUS_SPACE_MAXADDR_32BIT;
        else
                high = BUS_SPACE_MAXADDR;

kib: Should high be min(mask, BUS_SPACE_MAXADDR_32BIT)? I am not sure.

tychon: I'm not sure either. I think GFP_DMA32 is an older flag which pre-dates the mask code. This mimics the existing code, so I'm apt to leave it alone. Minimally it shouldn't be a regression.

        align = PAGE_SIZE << get_order(size);
        mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
            VM_MEMATTR_DEFAULT);
        if (mem)
                *dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
        else
                *dma_handle = 0;
        return (mem);
}
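A minimal caller's-eye sketch may make the GFP_DMA32 discussion above concrete. The device and buffer names are hypothetical, and a real driver would normally reach this function through the LinuxKPI dma_alloc_coherent() wrapper rather than calling it directly:

/*
 * Hypothetical usage: allocate one page of coherent DMA memory below
 * 4 GB.  Since no dma_mask has been set in this sketch, GFP_DMA32
 * selects BUS_SPACE_MAXADDR_32BIT as the upper bound above.
 */
dma_addr_t ring_busaddr;
void *ring;

ring = linux_dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_busaddr,
    GFP_KERNEL | GFP_DMA32);
if (ring == NULL)
        return (-ENOMEM);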
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;
        int nseg;
        bus_dma_segment_t seg;

        priv = dev->dma_priv;
        obj = uma_zalloc(linux_dma_obj_zone, 0);
        if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
                uma_zfree(linux_dma_obj_zone, obj);
                return (0);
        }
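        /*
         * _bus_dmamap_load_phys() treats *segp as the index of the
         * last segment written: priming nseg with -1 fills seg[] from
         * index 0, and on success ++nseg is the number of segments
         * produced, which the KASSERT below checks is exactly one.
         */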
        nseg = -1;
        if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
            BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
                bus_dmamap_destroy(priv->dmat, obj->dmamap);
                uma_zfree(linux_dma_obj_zone, obj);
                return (0);
        }

kib: It is still waitable.

hselasky: This function has shared resources on the BUSDMA tag, if I'm not mistaken. Please serialize all calls to _bus_dmamap_load_phys().

tychon: That was an interesting find! Looking at bounce and dmar, indeed several of those API functions aren't reentrant with the same bus_dma tag. Seems like many drivers get this for free -- probably quite accidentally -- while locking rings and things like that. I've added the prerequisite locking.

kib: Perhaps assert that nseg == 1?

        KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
        obj->dma_addr = seg.ds_addr;

        mtx_lock(&priv->ptree_lock);
        if (LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj) != 0) {
                mtx_unlock(&priv->ptree_lock);
                bus_dmamap_unload(priv->dmat, obj->dmamap);
                bus_dmamap_destroy(priv->dmat, obj->dmamap);
                uma_zfree(linux_dma_obj_zone, obj);
                return (0);
        }
        mtx_unlock(&priv->ptree_lock);
        return (obj->dma_addr);
}
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;

        priv = dev->dma_priv;

        mtx_lock(&priv->ptree_lock);
        obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
        if (obj == NULL) {
                mtx_unlock(&priv->ptree_lock);
                return;
        }
        LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
        mtx_unlock(&priv->ptree_lock);

        bus_dmamap_unload(priv->dmat, obj->dmamap);
        bus_dmamap_destroy(priv->dmat, obj->dmamap);
        uma_zfree(linux_dma_obj_zone, obj);
}
int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;
        struct scatterlist *dma_sg, *sg;
        int dma_nents, nseg;
        size_t seg_len;
        vm_paddr_t seg_phys, prev_phys_end;
        bus_dma_segment_t seg;

        priv = dev->dma_priv;
        obj = uma_zalloc(linux_dma_obj_zone, 0);
        if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
                uma_zfree(linux_dma_obj_zone, obj);
                return (0);
        }

kib: If the sgl is de-facto physically contiguous, this load loop would create a per-segment mapping regardless of the optimization possibility. The Linux guide notes the optimization of gluing adjacent sg segments, so maybe translate the Linux sg into a FreeBSD sg and then load that, instead of doing it manually?

kib: This is still not handled, right? I suggest at least adding a comment noting this issue.

kib: And this one.

        sg = sgl;
        dma_sg = sg;
        dma_nents = 0;
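        /*
         * Glue physically contiguous sg entries together, per the
         * review suggestion above, so that a de-facto contiguous list
         * is loaded with one _bus_dmamap_load_phys() call per merged
         * run rather than one mapping per entry.
         */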
        while (nents > 0) {
                seg_phys = sg_phys(sg);
                seg_len = sg->length;
                while (--nents > 0) {
                        prev_phys_end = sg_phys(sg) + sg->length;
                        sg = sg_next(sg);
                        if (prev_phys_end != sg_phys(sg))
                                break;
                        seg_len += sg->length;
                }

                nseg = -1;
                if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap,
                    seg_phys, seg_len, BUS_DMA_NOWAIT,
                    &seg, &nseg) != 0) {
                        bus_dmamap_unload(priv->dmat, obj->dmamap);
                        bus_dmamap_destroy(priv->dmat, obj->dmamap);
                        uma_zfree(linux_dma_obj_zone, obj);
                        return (0);
                }

kib: Assert nseg == 1?

                KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));

                sg_dma_address(dma_sg) = seg.ds_addr;
                sg_dma_len(dma_sg) = seg.ds_len;

                dma_sg = sg_next(dma_sg);
                dma_nents++;
        }
        obj->dma_addr = sg_dma_address(sgl);

        mtx_lock(&priv->ptree_lock);
        if (LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj) != 0) {
                mtx_unlock(&priv->ptree_lock);
                bus_dmamap_unload(priv->dmat, obj->dmamap);
                bus_dmamap_destroy(priv->dmat, obj->dmamap);
                uma_zfree(linux_dma_obj_zone, obj);
                return (0);
        }
        mtx_unlock(&priv->ptree_lock);

        return (dma_nents);
}
void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;

        priv = dev->dma_priv;

        mtx_lock(&priv->ptree_lock);
        obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, sg_dma_address(sgl));
        if (obj == NULL) {
                mtx_unlock(&priv->ptree_lock);
                return;
        }
        LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, sg_dma_address(sgl));
        mtx_unlock(&priv->ptree_lock);

        bus_dmamap_unload(priv->dmat, obj->dmamap);
        bus_dmamap_destroy(priv->dmat, obj->dmamap);
        uma_zfree(linux_dma_obj_zone, obj);
}
kib: Why load in the ctor, instead of at import time?

tychon: If the load occurs at import time it may go into the IOMMU before it's intended and remain there longer than intended too. By doing the load in the ctor and the invalidate in the dtor, the IOMMU reflects the pool allocation/free calls.

kib: See the other comment, WAITOK seems to not work.

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
        struct linux_dma_obj *obj = mem;
        struct dma_pool *pool = arg;
        int error, nseg;
        bus_dma_segment_t seg;
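        /*
         * Loading here in the ctor (with the matching unload in the
         * dtor) rather than at import/release time means any IOMMU
         * mapping is established on dma_pool_alloc() and torn down on
         * dma_pool_free(), as explained in the review thread above.
         */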
        nseg = -1;
        error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
            vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
            &seg, &nseg);
        if (error != 0)
                return (error);
        KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));

        obj->dma_addr = seg.ds_addr;

        return (0);
}
static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
        struct linux_dma_obj *obj = mem;
        struct dma_pool *pool = arg;

        bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
        struct dma_pool *pool = arg;
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;
        int i;

        priv = pool->pool_pdev->dev.dma_priv;
        for (i = 0; i < count; i++) {
                obj = uma_zalloc(linux_dma_obj_zone, flags);
                if (obj == NULL)
                        break;
                if (bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
                    BUS_DMA_NOWAIT, &obj->dmamap) != 0) {
                        uma_zfree(linux_dma_obj_zone, obj);
                        break;
                }
                store[i] = obj;
        }
        return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
        struct dma_pool *pool = arg;
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;
        int i;

        priv = pool->pool_pdev->dev.dma_priv;
        for (i = 0; i < count; i++) {
                obj = store[i];
                bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
                uma_zfree(linux_dma_obj_zone, obj);
        }
}
struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
        struct linux_dma_priv *priv;
        struct dma_pool *pool;

        priv = dev->dma_priv;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        pool->pool_pdev = to_pci_dev(dev);
        pool->pool_entry_size = size;

        if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
            align, boundary,            /* alignment, boundary */
            priv->dma_mask,             /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filtfunc, filtfuncarg */
            size,                       /* maxsize */
            1,                          /* nsegments */
            size,                       /* maxsegsz */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockfuncarg */
            &pool->pool_dmat)) {
                kfree(pool);
                return (NULL);
        }

        pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
            dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
            dma_pool_obj_release, pool, 0);

        mtx_init(&pool->pool_ptree_lock, "linux_dma_pool_ptree", NULL,
            MTX_DEF);
        pctrie_init(&pool->pool_ptree);

        return (pool);
}
void
linux_dma_pool_destroy(struct dma_pool *pool)
{

        mtx_destroy(&pool->pool_ptree_lock);
        uma_zdestroy(pool->pool_zone);
        bus_dma_tag_destroy(pool->pool_dmat);
        kfree(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
        struct linux_dma_obj *obj;

        obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags);
        if (obj == NULL)
                return (NULL);

        mtx_lock(&pool->pool_ptree_lock);
        if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
                mtx_unlock(&pool->pool_ptree_lock);
                uma_zfree_arg(pool->pool_zone, obj, pool);
                return (NULL);
        }
        mtx_unlock(&pool->pool_ptree_lock);

        *handle = obj->dma_addr;
        return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
        struct linux_dma_obj *obj;

        mtx_lock(&pool->pool_ptree_lock);
        obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
        if (obj == NULL) {
                mtx_unlock(&pool->pool_ptree_lock);
                return;
        }
        LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
        mtx_unlock(&pool->pool_ptree_lock);

        uma_zfree_arg(pool->pool_zone, obj, pool);
}
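As a usage note, here is a minimal sketch of the pool lifecycle implemented above, from a hypothetical LinuxKPI driver. The pool name, sizes and pdev are illustrative, and a real consumer would normally reach these functions through the dma_pool_create()/dma_pool_alloc()/dma_pool_free() wrappers:

/*
 * Hypothetical lifecycle: a cache-backed pool of 64-byte,
 * 64-byte-aligned descriptors.  Error handling is abbreviated.
 */
struct dma_pool *pool;
dma_addr_t desc_busaddr;
void *desc;

pool = linux_dma_pool_create("descpool", &pdev->dev, 64, 64, 0);
if (pool == NULL)
        return (-ENOMEM);

desc = linux_dma_pool_alloc(pool, GFP_KERNEL, &desc_busaddr);
if (desc != NULL)
        linux_dma_pool_free(pool, desc, desc_busaddr);

linux_dma_pool_destroy(pool);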