Index: sys/compat/linuxkpi/common/src/linux_pci.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_pci.c
+++ sys/compat/linuxkpi/common/src/linux_pci.c
@@ -410,7 +410,9 @@
 struct linux_dma_obj {
 	void *vaddr;
 	uint64_t dma_addr;
-	bus_dmamap_t dmamap;
+	bus_dmamap_t dma_map;
+	size_t dma_length;
+	uint32_t dma_refcount;
 };
 
 static uma_zone_t linux_dma_trie_zone;
@@ -497,6 +499,7 @@
 {
 	struct linux_dma_priv *priv;
 	struct linux_dma_obj *obj;
+	struct linux_dma_obj *old;
 	int error, nseg;
 	bus_dma_segment_t seg;
 
@@ -505,34 +508,67 @@
 	obj = uma_zalloc(linux_dma_obj_zone, 0);
 
 	DMA_PRIV_LOCK(priv);
-	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
+	if (bus_dmamap_create(priv->dmat, 0, &obj->dma_map) != 0) {
 		DMA_PRIV_UNLOCK(priv);
 		uma_zfree(linux_dma_obj_zone, obj);
 		return (0);
 	}
 
 	nseg = -1;
-	if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
+	if (_bus_dmamap_load_phys(priv->dmat, obj->dma_map, phys, len,
 	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
-		bus_dmamap_destroy(priv->dmat, obj->dmamap);
+		bus_dmamap_destroy(priv->dmat, obj->dma_map);
 		DMA_PRIV_UNLOCK(priv);
 		uma_zfree(linux_dma_obj_zone, obj);
 		return (0);
 	}
 
-	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
+	KASSERT(nseg == 0, ("More than one segment (nseg=%d)", nseg + 1));
 	obj->dma_addr = seg.ds_addr;
+	obj->dma_length = seg.ds_len;
+	obj->dma_refcount = 1;
 
+	/* check if there is an existing mapping */
+	old = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, obj->dma_addr);
+	if (unlikely(old != NULL)) {
+
+		KASSERT(old->dma_refcount >= 1,
+		    ("Invalid refcount %u", old->dma_refcount));
+
+		/* sanity check refcount */
+		if (unlikely(old->dma_refcount == UINT_MAX)) {
+			goto error_locked;
+		} else if (obj->dma_length > old->dma_length) {
+			/* new mapping is needed */
+			obj->dma_refcount = old->dma_refcount + 1;
+			LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, old->dma_addr);
+			bus_dmamap_unload(priv->dmat, old->dma_map);
+			bus_dmamap_destroy(priv->dmat, old->dma_map);
+			uma_zfree(linux_dma_obj_zone, old);
+		} else {
+			/* old mapping is sufficient */
+			old->dma_refcount++;
+			bus_dmamap_unload(priv->dmat, obj->dma_map);
+			bus_dmamap_destroy(priv->dmat, obj->dma_map);
+			DMA_PRIV_UNLOCK(priv);
+			uma_zfree(linux_dma_obj_zone, obj);
+			return (old->dma_addr);
+		}
+	}
+
+	/* insert new mapping */
 	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
-	if (error != 0) {
-		bus_dmamap_unload(priv->dmat, obj->dmamap);
-		bus_dmamap_destroy(priv->dmat, obj->dmamap);
-		DMA_PRIV_UNLOCK(priv);
-		uma_zfree(linux_dma_obj_zone, obj);
-		return (0);
-	}
+	if (error != 0)
+		goto error_locked;
 	DMA_PRIV_UNLOCK(priv);
 	return (obj->dma_addr);
+
+error_locked:
+	bus_dmamap_unload(priv->dmat, obj->dma_map);
+	bus_dmamap_destroy(priv->dmat, obj->dma_map);
+	DMA_PRIV_UNLOCK(priv);
+	uma_zfree(linux_dma_obj_zone, obj);
+	return (0);
 }
 
 void
@@ -541,17 +577,30 @@
 	struct linux_dma_priv *priv;
 	struct linux_dma_obj *obj;
 
+	/* check if DMA address is not mapped */
+	if (unlikely(dma_addr == 0))
+		return;
+
 	priv = dev->dma_priv;
 
 	DMA_PRIV_LOCK(priv);
 	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
-	if (obj == NULL) {
+	if (unlikely(obj == NULL)) {
+		pr_debug("linux_dma_unmap: Bad DMA address 0x%zx\n", (size_t)dma_addr);
 		DMA_PRIV_UNLOCK(priv);
 		return;
 	}
+
+	KASSERT(obj->dma_refcount >= 1,
+	    ("Invalid refcount %u", obj->dma_refcount));
+
+	if (--(obj->dma_refcount) != 0) {
+		DMA_PRIV_UNLOCK(priv);
+		return;
+	}
 	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
-	bus_dmamap_unload(priv->dmat, obj->dmamap);
-	bus_dmamap_destroy(priv->dmat, obj->dmamap);
+	bus_dmamap_unload(priv->dmat, obj->dma_map);
+	bus_dmamap_destroy(priv->dmat, obj->dma_map);
 	DMA_PRIV_UNLOCK(priv);
 
 	uma_zfree(linux_dma_obj_zone, obj);
@@ -633,15 +682,17 @@
 
 	nseg = -1;
 	DMA_POOL_LOCK(pool);
-	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
+	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dma_map,
 	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
 	    &seg, &nseg);
 	DMA_POOL_UNLOCK(pool);
 	if (error != 0) {
 		return (error);
 	}
-	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
+	KASSERT(nseg == 0, ("More than one segment (nseg=%d)", nseg + 1));
 	obj->dma_addr = seg.ds_addr;
+	obj->dma_length = seg.ds_len;
+	obj->dma_refcount = 1;
 
 	return (0);
 }
@@ -653,7 +704,7 @@
 	struct dma_pool *pool = arg;
 
 	DMA_POOL_LOCK(pool);
-	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
+	bus_dmamap_unload(pool->pool_dmat, obj->dma_map);
 	DMA_POOL_UNLOCK(pool);
 }
 
@@ -673,7 +724,7 @@
 			break;
 		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
-		    BUS_DMA_NOWAIT, &obj->dmamap);
+		    BUS_DMA_NOWAIT, &obj->dma_map);
 		if (error!= 0) {
 			uma_zfree(linux_dma_obj_zone, obj);
 			break;
 		}
@@ -696,7 +747,7 @@
 	priv = pool->pool_device->dma_priv;
 	for (i = 0; i < count; i++) {
 		obj = store[i];
-		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
+		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dma_map);
 		uma_zfree(linux_dma_obj_zone, obj);
 	}
 }
@@ -776,12 +827,25 @@
 {
 	struct linux_dma_obj *obj;
 
+	/* check if DMA address is not mapped */
+	if (unlikely(dma_addr == 0))
+		return;
+
 	DMA_POOL_LOCK(pool);
 	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
-	if (obj == NULL) {
+	if (unlikely(obj == NULL)) {
+		pr_debug("linux_dma_pool_free: Bad DMA address 0x%zx\n", (size_t)dma_addr);
 		DMA_POOL_UNLOCK(pool);
 		return;
 	}
+
+	KASSERT(obj->dma_refcount >= 1,
+	    ("Invalid refcount %u", obj->dma_refcount));
+
+	if (--(obj->dma_refcount) != 0) {
+		DMA_POOL_UNLOCK(pool);
+		return;
+	}
 	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
 	DMA_POOL_UNLOCK(pool);
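A minimal userland sketch of the reference-counting scheme the patch introduces, assuming a flat array in place of the LINUX_DMA_PCTRIE and omitting locking, uma(9) and the bus_dma(9) map handling. All demo_* names are hypothetical and exist only for illustration; the point is that mapping the same dma_addr twice bumps a refcount, and unmap only removes the entry and frees the object once the count drops back to zero.

/*
 * Illustration only -- simplified model of the dma_refcount logic above.
 * Compile with: cc -std=c99 demo.c
 */
#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>

struct demo_obj {
	uint64_t dma_addr;
	size_t dma_length;
	uint32_t dma_refcount;
};

#define	DEMO_SLOTS	16
static struct demo_obj *demo_slots[DEMO_SLOTS];

static struct demo_obj *
demo_lookup(uint64_t dma_addr)
{
	for (int i = 0; i < DEMO_SLOTS; i++) {
		if (demo_slots[i] != NULL &&
		    demo_slots[i]->dma_addr == dma_addr)
			return (demo_slots[i]);
	}
	return (NULL);
}

/* Map: reuse an existing entry for the same address, else insert one. */
static uint64_t
demo_map(uint64_t dma_addr, size_t len)
{
	struct demo_obj *obj;

	obj = demo_lookup(dma_addr);
	if (obj != NULL) {
		if (obj->dma_refcount == UINT_MAX)
			return (0);		/* refcount would overflow */
		if (len > obj->dma_length)
			obj->dma_length = len;	/* "new mapping is needed" */
		obj->dma_refcount++;		/* "old mapping is sufficient" */
		return (obj->dma_addr);
	}
	for (int i = 0; i < DEMO_SLOTS; i++) {
		if (demo_slots[i] != NULL)
			continue;
		obj = calloc(1, sizeof(*obj));
		if (obj == NULL)
			return (0);
		obj->dma_addr = dma_addr;
		obj->dma_length = len;
		obj->dma_refcount = 1;
		demo_slots[i] = obj;
		return (dma_addr);
	}
	return (0);				/* no free slot */
}

/* Unmap: tear the entry down only when the last reference is dropped. */
static void
demo_unmap(uint64_t dma_addr)
{
	struct demo_obj *obj;

	if (dma_addr == 0)
		return;				/* address was never mapped */
	obj = demo_lookup(dma_addr);
	if (obj == NULL)
		return;				/* bad DMA address */
	assert(obj->dma_refcount >= 1);
	if (--obj->dma_refcount != 0)
		return;				/* still referenced */
	for (int i = 0; i < DEMO_SLOTS; i++) {
		if (demo_slots[i] == obj)
			demo_slots[i] = NULL;
	}
	free(obj);
}

int
main(void)
{
	uint64_t a = demo_map(0x1000, 64);
	uint64_t b = demo_map(0x1000, 32);	/* duplicate address: refcount == 2 */

	assert(a == 0x1000 && b == a);
	demo_unmap(b);				/* entry survives; refcount drops to 1 */
	assert(demo_lookup(a) != NULL);
	demo_unmap(a);				/* last reference: entry is removed */
	assert(demo_lookup(a) == NULL);
	return (0);
}

The sketch collapses the patch's two collision branches into one path (grow the recorded length if needed, then bump the refcount); the patch itself instead replaces the trie entry when the new mapping is larger, carrying the old reference count over, because the underlying bus_dmamap must be reloaded for the larger size.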