sys/compat/linuxkpi/common/src/linux_pci.c
[first 404 lines not shown; the hunk begins at the end of linux_pci_unregister_driver(struct pci_driver *pdrv)]
    mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
    void *vaddr;
    uint64_t dma_addr;
    bus_dmamap_t dma_map;
    size_t dma_length;
    ssize_t dma_refcount;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{
[53 lines not shown]
    if (priv->dma_mask)
        high = priv->dma_mask;
    else if (flag & GFP_DMA32)
        high = BUS_SPACE_MAXADDR_32BIT;
    else
        high = BUS_SPACE_MAXADDR;
    align = PAGE_SIZE << get_order(size);
    mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
        VM_MEMATTR_DEFAULT);
    if (mem != NULL) {
[kib] mem == NULL
        *dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
        if (*dma_handle == 0) {
            kmem_free((vm_offset_t)mem, size);
            mem = NULL;
        }
    } else {
        *dma_handle = 0;
    }
    return (mem);
}
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
    struct linux_dma_priv *priv;
    struct linux_dma_obj *obj;
    struct linux_dma_obj *old;
[kib, Done] Why not write "struct linux_dma_obj *obj, *old;"?
[hselasky, Done] I prefer one declaration per line.
    int error, nseg;
    bus_dma_segment_t seg;

    priv = dev->dma_priv;

    obj = uma_zalloc(linux_dma_obj_zone, 0);

    DMA_PRIV_LOCK(priv);
    if (bus_dmamap_create(priv->dmat, 0, &obj->dma_map) != 0) {
        DMA_PRIV_UNLOCK(priv);
        uma_zfree(linux_dma_obj_zone, obj);
        return (0);
    }

    nseg = -1;
    if (_bus_dmamap_load_phys(priv->dmat, obj->dma_map, phys, len,
        BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
        bus_dmamap_destroy(priv->dmat, obj->dma_map);
        DMA_PRIV_UNLOCK(priv);
        uma_zfree(linux_dma_obj_zone, obj);
        return (0);
    }
    KASSERT(nseg == 0, ("More than one segment (nseg=%d)", nseg + 1));
    obj->dma_addr = seg.ds_addr;
    obj->dma_length = seg.ds_len;
    obj->dma_refcount = 1;

    /* check if there is an existing mapping */
[kib, Not Done] What if the new and old requests differ in length? You should keep around the one with the greater size. Also, wouldn't dma_unmap() from one request kill the pctrie entry for another one? I believe you need some sort of refcount.
[slavash, Not Done] (replying to the above) But what if we then remove the one with the greater size?
[kib, Not Done] The compat layer should only keep the largest mapping, refcounted. The mapping is only removed when the last reference goes away.
(A driver-side sketch of this overlap scenario follows the function below.)
    old = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, obj->dma_addr);
    if (unlikely(old != NULL)) {
[kib, Done] You still do not compare the length of the old and new requests. You still do not ref-count the duplicated entry.
[hselasky, Done] Fixed in next version.
        /* check if old mapping is sufficient */
        if (obj->dma_length > old->dma_length) {
[kib, Done] old->dma_refcount == 0 should be an assert.
            obj->dma_refcount = old->dma_refcount + 1;
            KASSERT(old->dma_refcount >= 1, ("Invalid refcount %zd", old->dma_refcount));
            KASSERT(obj->dma_refcount >= 1, ("Invalid refcount %zd", obj->dma_refcount));
            LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, old->dma_addr);
            bus_dmamap_unload(priv->dmat, old->dma_map);
            bus_dmamap_destroy(priv->dmat, old->dma_map);
            uma_zfree(linux_dma_obj_zone, old);
        } else {
            old->dma_refcount++;
            KASSERT(old->dma_refcount >= 1, ("Invalid refcount %zd", old->dma_refcount));
            bus_dmamap_unload(priv->dmat, obj->dma_map);
            bus_dmamap_destroy(priv->dmat, obj->dma_map);
            DMA_PRIV_UNLOCK(priv);
            uma_zfree(linux_dma_obj_zone, obj);
            return (old->dma_addr);
        }
    }
    /* insert new mapping */
    error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
    if (error != 0) {
        bus_dmamap_unload(priv->dmat, obj->dma_map);
        bus_dmamap_destroy(priv->dmat, obj->dma_map);
        DMA_PRIV_UNLOCK(priv);
        uma_zfree(linux_dma_obj_zone, obj);
        return (0);
    }
    DMA_PRIV_UNLOCK(priv);
    return (obj->dma_addr);
}
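To make the overlap scenario from the inline discussion above concrete, here is a minimal, hypothetical driver-side sketch. The names example_double_map, my_dev and buf are illustrative and not part of this change; it assumes, as in LinuxKPI's dma-mapping.h, that dma_map_single()/dma_unmap_single() resolve to linux_dma_map_phys()/linux_dma_unmap(), and that the loaded bus address equals the buffer's physical address, so both calls collide on the same pctrie key.

```
#include <linux/dma-mapping.h>  /* hypothetical Linux-style consumer */

static int
example_double_map(struct device *my_dev, void *buf)
{
    dma_addr_t a, b;

    /* First mapping of the buffer. */
    a = dma_map_single(my_dev, buf, PAGE_SIZE, DMA_TO_DEVICE);
    if (dma_mapping_error(my_dev, a))
        return -ENOMEM;

    /*
     * Second, shorter mapping starting at the same address.  With the
     * refcounted entry above, this bumps dma_refcount on the existing
     * (larger) mapping instead of replacing it in the pctrie.
     */
    b = dma_map_single(my_dev, buf, 128, DMA_TO_DEVICE);
    if (dma_mapping_error(my_dev, b)) {
        dma_unmap_single(my_dev, a, PAGE_SIZE, DMA_TO_DEVICE);
        return -ENOMEM;
    }

    /* ... device DMA would happen here ... */

    dma_unmap_single(my_dev, b, 128, DMA_TO_DEVICE);
    /* The pctrie entry and bus_dmamap only go away on the last unmap. */
    dma_unmap_single(my_dev, a, PAGE_SIZE, DMA_TO_DEVICE);
    return 0;
}
```

In this sketch the first map creates the entry with dma_refcount == 1, the second bumps it to 2, and only the final dma_unmap_single() reaches the unload/destroy path, which is the behaviour kib asks for.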
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
    struct linux_dma_priv *priv;
    struct linux_dma_obj *obj;

    /* check if DMA address is not mapped */
    if (unlikely(dma_addr == 0))
        return;

    priv = dev->dma_priv;

    DMA_PRIV_LOCK(priv);
    obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
    if (unlikely(obj == NULL)) {
        pr_debug("linux_dma_unmap: Bad DMA address 0x%zx\n", (size_t)dma_addr);
        DMA_PRIV_UNLOCK(priv);
        return;
    }
[kib, Done] Assert that dma_refcount >= 1. Explicitly compare with 0.
[hselasky, Done] Refcounts are unsigned and start at zero, so asserting doesn't make sense. I've renamed the variable to dma_share_count to make this clearer.
    KASSERT(obj->dma_refcount >= 1, ("Invalid refcount %zd", obj->dma_refcount));
    if (--(obj->dma_refcount) != 0) {
        DMA_PRIV_UNLOCK(priv);
        return;
    }
    LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
    bus_dmamap_unload(priv->dmat, obj->dma_map);
    bus_dmamap_destroy(priv->dmat, obj->dma_map);
    DMA_PRIV_UNLOCK(priv);
    uma_zfree(linux_dma_obj_zone, obj);
}
int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
    struct linux_dma_priv *priv;
    struct scatterlist *sg;
    int i, nseg;
    bus_dma_segment_t seg;

    priv = dev->dma_priv;

    DMA_PRIV_LOCK(priv);

    /* create common DMA map in the first S/G entry */
    if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
[kib, Done] I would be happier if we put some canary value in an unallocated dma_map, and assert that we only call bus_dmamap_unload/destroy on a proper dma_map.
[hselasky, Done] I'm not sure if we can cover all the cases for initializing S/G lists.
(A sketch of this canary idea follows the function below.)
        DMA_PRIV_UNLOCK(priv);
        return (0);
    }
    /* load all S/G list entries */
    for_each_sg(sgl, sg, nents, i) {
        nseg = -1;
        if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
            sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
            &seg, &nseg) != 0) {
            bus_dmamap_unload(priv->dmat, sgl->dma_map);
            bus_dmamap_destroy(priv->dmat, sgl->dma_map);
            DMA_PRIV_UNLOCK(priv);
            return (0);
        }
        KASSERT(nseg == 0, ("More than one segment (nseg=%d)", nseg + 1));
        sg_dma_address(sg) = seg.ds_addr;
    }
    DMA_PRIV_UNLOCK(priv);

    return (nents);
}
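As a side note on the inline exchange above, here is a minimal sketch of the canary idea kib suggests. It is not part of this change; the sentinel LINUX_DMA_MAP_UNMAPPED and the helper names are made up for illustration, and, as hselasky points out, it only helps if every S/G list initialization path can be made to set the canary first.

```
/*
 * Illustrative sketch only, not part of this change: mark a dma_map that
 * has never been created with a canary so the unmap path can assert it
 * is tearing down a map that really exists.
 */
#define LINUX_DMA_MAP_UNMAPPED  ((bus_dmamap_t)(uintptr_t)0xdeadc0de)

static inline void
linux_dma_sg_map_init(struct scatterlist *sgl)
{
    /* Would have to be called from every S/G list setup path. */
    sgl->dma_map = LINUX_DMA_MAP_UNMAPPED;
}

static inline void
linux_dma_sg_map_assert_mapped(struct scatterlist *sgl)
{
    KASSERT(sgl->dma_map != LINUX_DMA_MAP_UNMAPPED,
        ("bus_dmamap_unload/destroy on an S/G list that was never mapped"));
}
```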
void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
    struct linux_dma_priv *priv;

    priv = dev->dma_priv;

    DMA_PRIV_LOCK(priv);
    bus_dmamap_unload(priv->dmat, sgl->dma_map);
    bus_dmamap_destroy(priv->dmat, sgl->dma_map);
    DMA_PRIV_UNLOCK(priv);
}
struct dma_pool {
    struct device *pool_device;
    uma_zone_t pool_zone;
    struct mtx pool_lock;
    bus_dma_tag_t pool_dmat;
    size_t pool_entry_size;
    struct pctrie pool_ptree;
};

#define DMA_POOL_LOCK(pool)   mtx_lock(&(pool)->pool_lock)
#define DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)
static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
    struct linux_dma_obj *obj = mem;
    struct dma_pool *pool = arg;
    int error, nseg;
    bus_dma_segment_t seg;

    nseg = -1;
    DMA_POOL_LOCK(pool);
    error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dma_map,
        vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
        &seg, &nseg);
    DMA_POOL_UNLOCK(pool);
    if (error != 0) {
        return (error);
    }
    KASSERT(nseg == 0, ("More than one segment (nseg=%d)", nseg + 1));
    obj->dma_addr = seg.ds_addr;
    obj->dma_length = seg.ds_len;
    obj->dma_refcount = 1;

    return (0);
}
static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
    struct linux_dma_obj *obj = mem;
    struct dma_pool *pool = arg;

    DMA_POOL_LOCK(pool);
    bus_dmamap_unload(pool->pool_dmat, obj->dma_map);
    DMA_POOL_UNLOCK(pool);
}
static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
    struct dma_pool *pool = arg;
    struct linux_dma_priv *priv;
    struct linux_dma_obj *obj;
    int error, i;

    priv = pool->pool_device->dma_priv;
    for (i = 0; i < count; i++) {
        obj = uma_zalloc(linux_dma_obj_zone, flags);
        if (obj == NULL)
            break;
        error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
            BUS_DMA_NOWAIT, &obj->dma_map);
        if (error != 0) {
            uma_zfree(linux_dma_obj_zone, obj);
            break;
        }
        store[i] = obj;
    }
    return (i);
}
static void
dma_pool_obj_release(void *arg, void **store, int count)
{
    struct dma_pool *pool = arg;
    struct linux_dma_priv *priv;
    struct linux_dma_obj *obj;
    int i;

    priv = pool->pool_device->dma_priv;
    for (i = 0; i < count; i++) {
        obj = store[i];
        bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dma_map);
        uma_zfree(linux_dma_obj_zone, obj);
    }
}
struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
[63 lines not shown; the hunk continues at the end of linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,]
    return (obj->vaddr);
}
void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
    struct linux_dma_obj *obj;

    /* check if DMA address is not mapped */
    if (unlikely(dma_addr == 0))
        return;

    DMA_POOL_LOCK(pool);
    obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
    if (unlikely(obj == NULL)) {
        pr_debug("linux_dma_pool_free: Bad DMA address 0x%zx\n", (size_t)dma_addr);
        DMA_POOL_UNLOCK(pool);
        return;
    }
    KASSERT(obj->dma_refcount >= 1, ("Invalid refcount %zd", obj->dma_refcount));
    if (--(obj->dma_refcount) != 0) {
        DMA_POOL_UNLOCK(pool);
        return;
    }
    LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
    DMA_POOL_UNLOCK(pool);
    uma_zfree_arg(pool->pool_zone, obj, pool);
}