diff --git a/sys/arm64/arm64/busdma_bounce.h b/sys/arm64/arm64/busdma_bounce.h
--- a/sys/arm64/arm64/busdma_bounce.h
+++ b/sys/arm64/arm64/busdma_bounce.h
@@ -402,11 +402,6 @@
 #endif
 	vm_offset_t datavaddr, tempvaddr;
 
-#ifndef BUS_DMA_NON_COHERENT_IMPL
-	/* The map should always have this flag set in the coherent busdma */
-	MPASS((map->flags & DMAMAP_COHERENT) != 0);
-#endif
-
 	if (op == BUS_DMASYNC_POSTWRITE)
 		return;
 
diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c
--- a/sys/arm64/arm64/busdma_bounce.c
+++ b/sys/arm64/arm64/busdma_bounce.c
@@ -102,7 +102,7 @@
 	__sbintime_t	queued_time;
 	STAILQ_ENTRY(bus_dmamap) links;
 	u_int			flags;
-#define	DMAMAP_COHERENT		(1 << 0)
+#define	DMAMAP_FLAG0		(1 << 0)
 #define	DMAMAP_FROM_DMAMEM	(1 << 1)
 #define	DMAMAP_MBUF		(1 << 2)
 	int			sync_count;
@@ -363,8 +363,6 @@
 
 	if (error == 0) {
 		dmat->map_count++;
-		if ((dmat->common.flags & BUS_DMA_COHERENT) != 0)
-			(*mapp)->flags |= DMAMAP_COHERENT;
 	} else {
 		free(*mapp, M_DEVBUF);
 	}
@@ -453,9 +451,10 @@
 	 * Mark the map as coherent if we used uncacheable memory or the
 	 * tag was already marked as coherent.
 	 */
-	if (attr == VM_MEMATTR_UNCACHEABLE ||
-	    (dmat->common.flags & BUS_DMA_COHERENT) != 0)
-		(*mapp)->flags |= DMAMAP_COHERENT;
+	if ((dmat->common.flags & BUS_DMA_COHERENT) == 0 &&
+	    attr == VM_MEMATTR_UNCACHEABLE)
+		/* Set the DMAMAP_COHERENT on the default busdma */
+		(*mapp)->flags |= DMAMAP_FLAG0;
 
 	(*mapp)->flags |= DMAMAP_FROM_DMAMEM;
 
@@ -687,6 +686,8 @@
 
 /* busdma support for non-coherent devices */
 
+#define	DMAMAP_COHERENT	DMAMAP_FLAG0
+
 /*
  * Return true if the DMA should bounce because the start or end does not fall
  * on a cacheline boundary (which would require a partial cacheline flush).
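
Not part of the patch: a minimal userland sketch of the bit-aliasing pattern the diff introduces, where the generic map-create code only knows the first map flag as DMAMAP_FLAG0 and the section guarded by the "busdma support for non-coherent devices" comment re-labels that same bit as DMAMAP_COHERENT. The struct fake_dmamap and main() below are simplified stand-ins, not kernel code.

```c
#include <assert.h>
#include <stdio.h>

/* Flag values as defined in the patched struct bus_dmamap. */
#define	DMAMAP_FLAG0		(1 << 0)	/* generic name used by common code */
#define	DMAMAP_FROM_DMAMEM	(1 << 1)
#define	DMAMAP_MBUF		(1 << 2)

/* Only the non-coherent busdma section gives FLAG0 a specific meaning. */
#define	DMAMAP_COHERENT		DMAMAP_FLAG0

/* Hypothetical stand-in for struct bus_dmamap. */
struct fake_dmamap {
	unsigned int flags;
};

int
main(void)
{
	struct fake_dmamap map = { .flags = 0 };

	/* The common map-create path sets the opaque bit... */
	map.flags |= DMAMAP_FLAG0;

	/* ...and the non-coherent sync path can test it under its own name. */
	assert((map.flags & DMAMAP_COHERENT) != 0);
	printf("FLAG0 and COHERENT are the same bit: %d\n",
	    DMAMAP_FLAG0 == DMAMAP_COHERENT);
	return (0);
}
```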