Index: head/sys/compat/linuxkpi/common/include/linux/device.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/device.h
+++ head/sys/compat/linuxkpi/common/include/linux/device.h
@@ -105,7 +105,7 @@
 	struct class	*class;
 	void		(*release)(struct device *dev);
 	struct kobject	kobj;
-	uint64_t	*dma_mask;
+	void		*dma_priv;
 	void		*driver_data;
 	unsigned int	irq;
 #define	LINUX_IRQ_INVALID	65535
Index: head/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
+++ head/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
@@ -90,6 +90,16 @@
 
 #define	DMA_BIT_MASK(n)	((2ULL << ((n) - 1)) - 1ULL)
 
+int linux_dma_tag_init(struct device *dev, u64 mask);
+void *linux_dma_alloc_coherent(struct device *dev, size_t size,
+    dma_addr_t *dma_handle, gfp_t flag);
+dma_addr_t linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len);
+void linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t size);
+int linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+    int nents, enum dma_data_direction dir, struct dma_attrs *attrs);
+void linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+    int nents, enum dma_data_direction dir, struct dma_attrs *attrs);
+
 static inline int
 dma_supported(struct device *dev, u64 mask)
 {
@@ -102,11 +112,10 @@
 dma_set_mask(struct device *dev, u64 dma_mask)
 {
 
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+	if (!dev->dma_priv || !dma_supported(dev, dma_mask))
 		return -EIO;
 
-	*dev->dma_mask = dma_mask;
-	return (0);
+	return (linux_dma_tag_init(dev, dma_mask));
 }
 
 static inline int
@@ -134,24 +143,7 @@
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
     gfp_t flag)
 {
-	vm_paddr_t high;
-	size_t align;
-	void *mem;
-
-	if (dev != NULL && dev->dma_mask)
-		high = *dev->dma_mask;
-	else if (flag & GFP_DMA32)
-		high = BUS_SPACE_MAXADDR_32BIT;
-	else
-		high = BUS_SPACE_MAXADDR;
-	align = PAGE_SIZE << get_order(size);
-	mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
-	    VM_MEMATTR_DEFAULT);
-	if (mem)
-		*dma_handle = vtophys(mem);
-	else
-		*dma_handle = 0;
-	return (mem);
+	return (linux_dma_alloc_coherent(dev, size, dma_handle, flag));
 }
 
 static inline void *
@@ -164,25 +156,27 @@
 static inline void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-    dma_addr_t dma_handle)
+    dma_addr_t dma_addr)
 {
 
+	linux_dma_unmap(dev, dma_addr, size);
 	kmem_free((vm_offset_t)cpu_addr, size);
 }
 
-/* XXX This only works with no iommu. */
 static inline dma_addr_t
 dma_map_single_attrs(struct device *dev, void *ptr, size_t size,
     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 
-	return vtophys(ptr);
+	return (linux_dma_map_phys(dev, vtophys(ptr), size));
 }
 
 static inline void
-dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size,
+dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, size_t size,
     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+
+	linux_dma_unmap(dev, dma_addr, size);
 }
 
 static inline dma_addr_t
@@ -190,26 +184,23 @@
     size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 
-	return (VM_PAGE_TO_PHYS(page) + offset);
+	return (linux_dma_map_phys(dev, VM_PAGE_TO_PHYS(page) + offset, size));
 }
 
 static inline int
 dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-	struct scatterlist *sg;
-	int i;
 
-	for_each_sg(sgl, sg, nents, i)
-		sg_dma_address(sg) = sg_phys(sg);
-
-	return (nents);
+	return (linux_dma_map_sg_attrs(dev, sgl, nents, dir, attrs));
 }
 
 static inline void
 dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+
+	linux_dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
 }
 
 static inline dma_addr_t
@@ -217,13 +208,15 @@
     unsigned long offset, size_t size, enum dma_data_direction direction)
 {
 
-	return VM_PAGE_TO_PHYS(page) + offset;
+	return (linux_dma_map_phys(dev, VM_PAGE_TO_PHYS(page) + offset, size));
 }
 
 static inline void
 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
     enum dma_data_direction direction)
 {
+
+	linux_dma_unmap(dev, dma_address, size);
 }
 
 static inline void
@@ -273,7 +266,7 @@
 dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 
-	return (0);
+	return (dma_addr == 0);
 }
 
 static inline unsigned int
 dma_set_max_seg_size(struct device *dev,
Index: head/sys/compat/linuxkpi/common/include/linux/dmapool.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/dmapool.h
+++ head/sys/compat/linuxkpi/common/include/linux/dmapool.h
@@ -37,44 +37,44 @@
 #include
 #include
 
+struct dma_pool *linux_dma_pool_create(char *name, struct device *dev,
+    size_t size, size_t align, size_t boundary);
+void linux_dma_pool_destroy(struct dma_pool *pool);
+void *linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+    dma_addr_t *handle);
+void linux_dma_pool_free(struct dma_pool *pool, void *vaddr,
+    dma_addr_t dma_addr);
+
 struct dma_pool {
+	struct pci_dev	*pool_pdev;
 	uma_zone_t	pool_zone;
+	struct mtx	pool_dma_lock;
+	bus_dma_tag_t	pool_dmat;
+	size_t		pool_entry_size;
+	struct mtx	pool_ptree_lock;
+	struct pctrie	pool_ptree;
 };
 
 static inline struct dma_pool *
 dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
 {
-	struct dma_pool *pool;
 
-	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
-	align--;
-	/*
-	 * XXX Eventually this could use a separate allocf to honor boundary
-	 * and physical address requirements of the device.
-	 */
-	pool->pool_zone = uma_zcreate(name, size, NULL, NULL, NULL, NULL,
-	    align, UMA_ZONE_OFFPAGE|UMA_ZONE_HASH);
-
-	return (pool);
+	return (linux_dma_pool_create(name, dev, size, align, boundary));
 }
 
 static inline void
 dma_pool_destroy(struct dma_pool *pool)
 {
-	uma_zdestroy(pool->pool_zone);
-	kfree(pool);
+
+	linux_dma_pool_destroy(pool);
 }
 
 static inline void *
 dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
 {
-	void *vaddr;
 
-	vaddr = uma_zalloc(pool->pool_zone, mem_flags);
-	if (vaddr)
-		*handle = vtophys(vaddr);
-	return (vaddr);
+	return (linux_dma_pool_alloc(pool, mem_flags, handle));
 }
 
 static inline void *
@@ -85,9 +85,10 @@
 }
 
 static inline void
-dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr)
+dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
 {
-	uma_zfree(pool->pool_zone, vaddr);
+
+	linux_dma_pool_free(pool, vaddr, dma_addr);
 }
 
Index: head/sys/compat/linuxkpi/common/include/linux/pci.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/pci.h
+++ head/sys/compat/linuxkpi/common/include/linux/pci.h
@@ -219,7 +219,6 @@
 	struct list_head	links;
 	struct pci_driver	*pdrv;
 	struct pci_bus		*bus;
-	uint64_t		dma_mask;
 	uint16_t		device;
 	uint16_t		vendor;
 	uint16_t		subsystem_vendor;
Index: head/sys/compat/linuxkpi/common/include/linux/scatterlist.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/scatterlist.h
+++ head/sys/compat/linuxkpi/common/include/linux/scatterlist.h
@@ -43,7 +43,8 @@
 #define	SG_PAGE_LINK_MASK	0x3UL
 	unsigned int	offset;
 	unsigned int	length;
-	dma_addr_t	address;
+	dma_addr_t	dma_address;
+	unsigned int	dma_length;
 };
 CTASSERT((sizeof(struct scatterlist) & SG_PAGE_LINK_MASK) == 0);
 
@@ -77,8 +78,8 @@
 #define	sg_chain_ptr(sg)	\
 	((struct scatterlist *) ((sg)->page_link & ~SG_PAGE_LINK_MASK))
 
-#define	sg_dma_address(sg)	(sg)->address
-#define	sg_dma_len(sg)		(sg)->length
+#define	sg_dma_address(sg)	(sg)->dma_address
+#define	sg_dma_len(sg)		(sg)->dma_length
 
 #define	for_each_sg_page(sgl, iter, nents, pgoffset)			\
 	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
@@ -444,7 +445,7 @@
 static inline dma_addr_t
 sg_page_iter_dma_address(struct sg_page_iter *spi)
 {
-	return (spi->sg->address + (spi->sg_pgoffset << PAGE_SHIFT));
+	return (spi->sg->dma_address + (spi->sg_pgoffset << PAGE_SHIFT));
 }
 
 static inline struct page *
Index: head/sys/compat/linuxkpi/common/src/linux_pci.c
===================================================================
--- head/sys/compat/linuxkpi/common/src/linux_pci.c
+++ head/sys/compat/linuxkpi/common/src/linux_pci.c
@@ -38,6 +38,7 @@
 #include
 #include
 #include
+#include <sys/pctrie.h>
 #include
 #include
 
@@ -75,6 +76,76 @@
 	DEVMETHOD_END
 };
 
+struct linux_dma_priv {
+	uint64_t	dma_mask;
+	struct mtx	dma_lock;
+	bus_dma_tag_t	dmat;
+	struct mtx	ptree_lock;
+	struct pctrie	ptree;
+};
+
+static int
+linux_pdev_dma_init(struct pci_dev *pdev)
+{
+	struct linux_dma_priv *priv;
+
+	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
+	pdev->dev.dma_priv = priv;
+
+	mtx_init(&priv->dma_lock, "linux_dma", NULL, MTX_DEF);
+
+	mtx_init(&priv->ptree_lock, "linux_dma_ptree", NULL, MTX_DEF);
+	pctrie_init(&priv->ptree);
+
+	return (0);
+}
+
+static int
+linux_pdev_dma_uninit(struct pci_dev *pdev)
+{
+	struct linux_dma_priv *priv;
+
+	priv = pdev->dev.dma_priv;
+	if (priv->dmat)
+		bus_dma_tag_destroy(priv->dmat);
+	mtx_destroy(&priv->dma_lock);
+	mtx_destroy(&priv->ptree_lock);
+	free(priv, M_DEVBUF);
+	pdev->dev.dma_priv = NULL;
+	return (0);
+}
+
+int
+linux_dma_tag_init(struct device *dev, u64 dma_mask)
+{
+	struct linux_dma_priv *priv;
+	int error;
+
+	priv = dev->dma_priv;
+
+	if (priv->dmat) {
+		if (priv->dma_mask == dma_mask)
+			return (0);
+
+		bus_dma_tag_destroy(priv->dmat);
+	}
+
+	priv->dma_mask = dma_mask;
+
+	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
+	    1, 0,			/* alignment, boundary */
+	    dma_mask,			/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filtfunc, filtfuncarg */
+	    BUS_SPACE_MAXADDR,		/* maxsize */
+	    1,				/* nsegments */
+	    BUS_SPACE_MAXADDR,		/* maxsegsz */
+	    0,				/* flags */
+	    NULL, NULL,			/* lockfunc, lockfuncarg */
+	    &priv->dmat);
+	return (-error);
+}
+
 static struct pci_driver *
 linux_pci_find(device_t dev, const struct pci_device_id **idp)
 {
@@ -158,7 +229,6 @@
 	pdev->subsystem_device = dinfo->cfg.subdevice;
 	pdev->class = pci_get_class(dev);
 	pdev->revision = pci_get_revid(dev);
-	pdev->dev.dma_mask = &pdev->dma_mask;
 	pdev->pdrv = pdrv;
 	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
 	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
@@ -170,6 +240,9 @@
 	else
 		pdev->dev.irq = LINUX_IRQ_INVALID;
 	pdev->irq = pdev->dev.irq;
+	error = linux_pdev_dma_init(pdev);
+	if (error)
+		goto out;
 
 	if (pdev->bus == NULL) {
 		pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
@@ -183,6 +256,7 @@
 	spin_unlock(&pci_lock);
 
 	error = pdrv->probe(pdev, id);
+out:
 	if (error) {
 		spin_lock(&pci_lock);
 		list_del(&pdev->links);
@@ -202,6 +276,7 @@
 
 	pdev = device_get_softc(dev);
 	pdev->pdrv->remove(pdev);
+	linux_pdev_dma_uninit(pdev);
 
 	spin_lock(&pci_lock);
 	list_del(&pdev->links);
@@ -329,4 +404,423 @@
 	if (bus != NULL)
 		devclass_delete_driver(bus, &pdrv->bsddriver);
 	mtx_unlock(&Giant);
+}
+
+struct linux_dma_obj {
+	void		*vaddr;
+	dma_addr_t	dma_addr;
+	bus_dmamap_t	dmamap;
+};
+
+static uma_zone_t linux_dma_trie_zone;
+static uma_zone_t linux_dma_obj_zone;
+
+static void
+linux_dma_init(void *arg)
+{
+
+	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
+	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
+	    UMA_ALIGN_PTR, 0);
+	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
+	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
+	    UMA_ALIGN_PTR, 0);
+}
+SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);
+
+static void
+linux_dma_uninit(void *arg)
+{
+
+	uma_zdestroy(linux_dma_obj_zone);
+	uma_zdestroy(linux_dma_trie_zone);
+}
+SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);
+
+static void *
+linux_dma_trie_alloc(struct pctrie *ptree)
+{
+
+	return (uma_zalloc(linux_dma_trie_zone, 0));
+}
+
+static void
+linux_dma_trie_free(struct pctrie *ptree, void *node)
+{
+
+	uma_zfree(linux_dma_trie_zone, node);
+}
+
+PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
+    linux_dma_trie_free);
+
+void *
+linux_dma_alloc_coherent(struct device *dev, size_t size,
+    dma_addr_t *dma_handle, gfp_t flag)
+{
+	struct linux_dma_priv *priv;
+	vm_paddr_t high;
+	size_t align;
+	void *mem;
+
+	if (dev == NULL || dev->dma_priv == NULL) {
+		*dma_handle = 0;
+		return (NULL);
+	}
+	priv = dev->dma_priv;
+	if (priv->dma_mask)
+		high = priv->dma_mask;
+	else if (flag & GFP_DMA32)
+		high = BUS_SPACE_MAXADDR_32BIT;
+	else
+		high = BUS_SPACE_MAXADDR;
+	align = PAGE_SIZE << get_order(size);
+	mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
+	    VM_MEMATTR_DEFAULT);
+	if (mem)
+		*dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
+	else
+		*dma_handle = 0;
(mem); +} + +dma_addr_t +linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len) +{ + struct linux_dma_priv *priv; + struct linux_dma_obj *obj; + int error, nseg; + bus_dma_segment_t seg; + + priv = dev->dma_priv; + + obj = uma_zalloc(linux_dma_obj_zone, 0); + + if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) { + uma_zfree(linux_dma_obj_zone, obj); + return (0); + } + + nseg = -1; + mtx_lock(&priv->dma_lock); + if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len, + BUS_DMA_NOWAIT, &seg, &nseg) != 0) { + bus_dmamap_destroy(priv->dmat, obj->dmamap); + mtx_unlock(&priv->dma_lock); + uma_zfree(linux_dma_obj_zone, obj); + return (0); + } + mtx_unlock(&priv->dma_lock); + + KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); + obj->dma_addr = seg.ds_addr; + + mtx_lock(&priv->ptree_lock); + error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj); + mtx_unlock(&priv->ptree_lock); + if (error != 0) { + mtx_lock(&priv->dma_lock); + bus_dmamap_unload(priv->dmat, obj->dmamap); + bus_dmamap_destroy(priv->dmat, obj->dmamap); + mtx_unlock(&priv->dma_lock); + uma_zfree(linux_dma_obj_zone, obj); + return (0); + } + + return (obj->dma_addr); +} + +void +linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) +{ + struct linux_dma_priv *priv; + struct linux_dma_obj *obj; + + priv = dev->dma_priv; + + mtx_lock(&priv->ptree_lock); + obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); + if (obj == NULL) { + mtx_unlock(&priv->ptree_lock); + return; + } + LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr); + mtx_unlock(&priv->ptree_lock); + + mtx_lock(&priv->dma_lock); + bus_dmamap_unload(priv->dmat, obj->dmamap); + bus_dmamap_destroy(priv->dmat, obj->dmamap); + mtx_unlock(&priv->dma_lock); + + uma_zfree(linux_dma_obj_zone, obj); +} + +int +linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, struct dma_attrs *attrs) +{ + struct linux_dma_priv *priv; + struct linux_dma_obj *obj; + struct scatterlist *dma_sg, *sg; + int dma_nents, error, nseg; + size_t seg_len; + vm_paddr_t seg_phys, prev_phys_end; + bus_dma_segment_t seg; + + priv = dev->dma_priv; + + obj = uma_zalloc(linux_dma_obj_zone, 0); + + if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) { + uma_zfree(linux_dma_obj_zone, obj); + return (0); + } + + sg = sgl; + dma_sg = sg; + dma_nents = 0; + while (nents > 0) { + seg_phys = sg_phys(sg); + seg_len = sg->length; + while (--nents > 0) { + prev_phys_end = sg_phys(sg) + sg->length; + sg = sg_next(sg); + if (prev_phys_end != sg_phys(sg)) + break; + seg_len += sg->length; + } + + nseg = -1; + mtx_lock(&priv->dma_lock); + if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, + seg_phys, seg_len, BUS_DMA_NOWAIT, + &seg, &nseg) != 0) { + bus_dmamap_unload(priv->dmat, obj->dmamap); + bus_dmamap_destroy(priv->dmat, obj->dmamap); + mtx_unlock(&priv->dma_lock); + uma_zfree(linux_dma_obj_zone, obj); + return (0); + } + mtx_unlock(&priv->dma_lock); + KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); + + sg_dma_address(dma_sg) = seg.ds_addr; + sg_dma_len(dma_sg) = seg.ds_len; + + dma_sg = sg_next(dma_sg); + dma_nents++; + } + + obj->dma_addr = sg_dma_address(sgl); + + mtx_lock(&priv->ptree_lock); + error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj); + mtx_unlock(&priv->ptree_lock); + if (error != 0) { + mtx_lock(&priv->dma_lock); + bus_dmamap_unload(priv->dmat, obj->dmamap); + bus_dmamap_destroy(priv->dmat, obj->dmamap); + mtx_unlock(&priv->dma_lock); + uma_zfree(linux_dma_obj_zone, obj); + return 
(0); + } + + return (dma_nents); +} + +void +linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, struct dma_attrs *attrs) +{ + struct linux_dma_priv *priv; + struct linux_dma_obj *obj; + + priv = dev->dma_priv; + + mtx_lock(&priv->ptree_lock); + obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, sg_dma_address(sgl)); + if (obj == NULL) { + mtx_unlock(&priv->ptree_lock); + return; + } + LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, sg_dma_address(sgl)); + mtx_unlock(&priv->ptree_lock); + + mtx_lock(&priv->dma_lock); + bus_dmamap_unload(priv->dmat, obj->dmamap); + bus_dmamap_destroy(priv->dmat, obj->dmamap); + mtx_unlock(&priv->dma_lock); + + uma_zfree(linux_dma_obj_zone, obj); +} + +static inline int +dma_pool_obj_ctor(void *mem, int size, void *arg, int flags) +{ + struct linux_dma_obj *obj = mem; + struct dma_pool *pool = arg; + int error, nseg; + bus_dma_segment_t seg; + + nseg = -1; + mtx_lock(&pool->pool_dma_lock); + error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap, + vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT, + &seg, &nseg); + mtx_unlock(&pool->pool_dma_lock); + if (error != 0) { + return (error); + } + KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); + obj->dma_addr = seg.ds_addr; + + return (0); +} + +static void +dma_pool_obj_dtor(void *mem, int size, void *arg) +{ + struct linux_dma_obj *obj = mem; + struct dma_pool *pool = arg; + + mtx_lock(&pool->pool_dma_lock); + bus_dmamap_unload(pool->pool_dmat, obj->dmamap); + mtx_unlock(&pool->pool_dma_lock); +} + +static int +dma_pool_obj_import(void *arg, void **store, int count, int domain __unused, + int flags) +{ + struct dma_pool *pool = arg; + struct linux_dma_priv *priv; + struct linux_dma_obj *obj; + int error, i; + + priv = pool->pool_pdev->dev.dma_priv; + for (i = 0; i < count; i++) { + obj = uma_zalloc(linux_dma_obj_zone, flags); + if (obj == NULL) + break; + + error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr, + BUS_DMA_NOWAIT, &obj->dmamap); + if (error!= 0) { + uma_zfree(linux_dma_obj_zone, obj); + break; + } + + store[i] = obj; + } + + return (i); +} + +static void +dma_pool_obj_release(void *arg, void **store, int count) +{ + struct dma_pool *pool = arg; + struct linux_dma_priv *priv; + struct linux_dma_obj *obj; + int i; + + priv = pool->pool_pdev->dev.dma_priv; + for (i = 0; i < count; i++) { + obj = store[i]; + bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap); + uma_zfree(linux_dma_obj_zone, obj); + } +} + +struct dma_pool * +linux_dma_pool_create(char *name, struct device *dev, size_t size, + size_t align, size_t boundary) +{ + struct linux_dma_priv *priv; + struct dma_pool *pool; + + priv = dev->dma_priv; + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + pool->pool_pdev = to_pci_dev(dev); + pool->pool_entry_size = size; + + if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), + align, boundary, /* alignment, boundary */ + priv->dma_mask, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filtfunc, filtfuncarg */ + size, /* maxsize */ + 1, /* nsegments */ + size, /* maxsegsz */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockfuncarg */ + &pool->pool_dmat)) { + kfree(pool); + return (NULL); + } + + pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor, + dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import, + dma_pool_obj_release, pool, 0); + + mtx_init(&pool->pool_dma_lock, "linux_dma_pool", NULL, MTX_DEF); + + mtx_init(&pool->pool_ptree_lock, "linux_dma_pool_ptree", NULL, + MTX_DEF); + 
+	pctrie_init(&pool->pool_ptree);
+
+	return (pool);
+}
+
+void
+linux_dma_pool_destroy(struct dma_pool *pool)
+{
+
+	uma_zdestroy(pool->pool_zone);
+	bus_dma_tag_destroy(pool->pool_dmat);
+	mtx_destroy(&pool->pool_ptree_lock);
+	mtx_destroy(&pool->pool_dma_lock);
+	kfree(pool);
+}
+
+void *
+linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+    dma_addr_t *handle)
+{
+	struct linux_dma_obj *obj;
+
+	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags);
+	if (obj == NULL)
+		return (NULL);
+
+	mtx_lock(&pool->pool_ptree_lock);
+	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
+		mtx_unlock(&pool->pool_ptree_lock);
+		uma_zfree_arg(pool->pool_zone, obj, pool);
+		return (NULL);
+	}
+	mtx_unlock(&pool->pool_ptree_lock);
+
+	*handle = obj->dma_addr;
+	return (obj->vaddr);
+}
+
+void
+linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
+{
+	struct linux_dma_obj *obj;
+
+	mtx_lock(&pool->pool_ptree_lock);
+	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
+	if (obj == NULL) {
+		mtx_unlock(&pool->pool_ptree_lock);
+		return;
+	}
+	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
+	mtx_unlock(&pool->pool_ptree_lock);
+
+	uma_zfree_arg(pool->pool_zone, obj, pool);
 }
Index: head/sys/sys/param.h
===================================================================
--- head/sys/sys/param.h
+++ head/sys/sys/param.h
@@ -60,7 +60,7 @@
  * in the range 5 to 9.
  */
 #undef __FreeBSD_version
-#define __FreeBSD_version 1300020	/* Master, propagated to newvers */
+#define __FreeBSD_version 1300021	/* Master, propagated to newvers */
 
 /*
  * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
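
For reference, a driver-side sketch of the reworked path follows. This fragment is illustrative only and is not part of the commit; the function name and ring details are hypothetical, but the calls are the LinuxKPI entry points changed above. With this diff, dma_set_mask() builds the per-device busdma tag via linux_dma_tag_init(), dma_alloc_coherent() returns a handle loaded through that tag rather than a raw vtophys() address, and dma_mapping_error() reports a zero cookie as failure, so a consumer sets the mask before allocating.

	/*
	 * Illustrative sketch only, not part of the commit.  Assumes a
	 * probed pci_dev, i.e. linux_pdev_dma_init() has populated
	 * dev.dma_priv.  Names are hypothetical.
	 */
	static int
	example_setup_ring(struct pci_dev *pdev)
	{
		dma_addr_t ring_dma;
		void *ring;
		int error;

		/* Creates (or replaces) the per-device busdma tag. */
		error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (error != 0)
			return (error);

		/* Handle is loaded through the tag, not raw vtophys(). */
		ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
		    GFP_KERNEL);
		if (ring == NULL)
			return (-ENOMEM);

		/* A zero cookie now signals a failed mapping. */
		if (dma_mapping_error(&pdev->dev, ring_dma)) {
			dma_free_coherent(&pdev->dev, PAGE_SIZE, ring,
			    ring_dma);
			return (-ENOMEM);
		}

		return (0);
	}

Note the ordering: the mask must be set before allocating, since linux_dma_alloc_coherent() consults priv->dma_mask and linux_dma_map_phys() loads through the tag that linux_dma_tag_init() created.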