diff --git a/sys/arm64/arm64/busdma_bounce.h b/sys/arm64/arm64/busdma_bounce.h
--- a/sys/arm64/arm64/busdma_bounce.h
+++ b/sys/arm64/arm64/busdma_bounce.h
@@ -31,11 +31,13 @@
  * SUCH DAMAGE.
  */
 
-static bool _bus_dmamap_pagesneeded(bus_dma_tag_t, bus_dmamap_t,
+#define DMA_FUNC(f) f
+
+static bool DMA_FUNC(_bus_dmamap_pagesneeded)(bus_dma_tag_t, bus_dmamap_t,
     vm_paddr_t, bus_size_t, int *);
 
 static bool
-might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
+DMA_FUNC(might_bounce)(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
     bus_size_t size)
 {
 
@@ -56,7 +58,7 @@
 }
 
 static bool
-must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
+DMA_FUNC(must_bounce)(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
     bus_size_t size)
 {
 
@@ -71,17 +73,19 @@
 }
 
 static bool
-bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+DMA_FUNC(bounce_bus_dma_id_mapped)(bus_dma_tag_t dmat, vm_paddr_t buf,
+    bus_size_t buflen)
 {
 
-	if (!might_bounce(dmat, NULL, buf, buflen))
+	if (!DMA_FUNC(might_bounce)(dmat, NULL, buf, buflen))
 		return (true);
-	return (!_bus_dmamap_pagesneeded(dmat, NULL, buf, buflen, NULL));
+	return (!DMA_FUNC(_bus_dmamap_pagesneeded)(dmat, NULL, buf, buflen,
+	    NULL));
 }
 
 static bool
-_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
-    bus_size_t buflen, int *pagesneeded)
+DMA_FUNC(_bus_dmamap_pagesneeded)(bus_dma_tag_t dmat, bus_dmamap_t map,
+    vm_paddr_t buf, bus_size_t buflen, int *pagesneeded)
 {
 	bus_addr_t curaddr;
 	bus_size_t sgsize;
@@ -95,7 +99,7 @@
 	curaddr = buf;
 	while (buflen != 0) {
 		sgsize = buflen;
-		if (must_bounce(dmat, map, curaddr, sgsize)) {
+		if (DMA_FUNC(must_bounce)(dmat, map, curaddr, sgsize)) {
 			sgsize = MIN(sgsize,
 			    PAGE_SIZE - (curaddr & PAGE_MASK));
 			if (pagesneeded == NULL)
@@ -112,20 +116,20 @@
 }
 
 static void
-_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
-    bus_size_t buflen, int flags)
+DMA_FUNC(_bus_dmamap_count_phys)(bus_dma_tag_t dmat, bus_dmamap_t map,
+    vm_paddr_t buf, bus_size_t buflen, int flags)
 {
 
 	if (map->pagesneeded == 0) {
-		_bus_dmamap_pagesneeded(dmat, map, buf, buflen,
+		DMA_FUNC(_bus_dmamap_pagesneeded)(dmat, map, buf, buflen,
 		    &map->pagesneeded);
 		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
 	}
 }
 
 static void
-_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
-    void *buf, bus_size_t buflen, int flags)
+DMA_FUNC(_bus_dmamap_count_pages)(bus_dma_tag_t dmat, bus_dmamap_t map,
+    pmap_t pmap, void *buf, bus_size_t buflen, int flags)
 {
 	vm_offset_t vaddr;
 	vm_offset_t vendaddr;
@@ -153,7 +157,8 @@
 				paddr = pmap_kextract(vaddr);
 			else
 				paddr = pmap_extract(pmap, vaddr);
-			if (must_bounce(dmat, map, paddr, sg_len) != 0) {
+			if (DMA_FUNC(must_bounce)(dmat, map, paddr, sg_len) !=
+			    0) {
 				sg_len = roundup2(sg_len,
 				    dmat->common.alignment);
 				map->pagesneeded++;
@@ -169,7 +174,7 @@
  * the starting segment on entrace, and the ending segment on exit.
  */
 static int
-bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+DMA_FUNC(bounce_bus_dmamap_load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map,
     vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
     int *segp)
 {
@@ -181,8 +186,8 @@
 	if (segs == NULL)
 		segs = dmat->segments;
 
-	if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
-		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
+	if (DMA_FUNC(might_bounce)(dmat, map, (bus_addr_t)buf, buflen)) {
+		DMA_FUNC(_bus_dmamap_count_phys)(dmat, map, buf, buflen, flags);
 		if (map->pagesneeded != 0) {
 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
 			if (error)
@@ -197,7 +202,7 @@
 		curaddr = buf;
 		sgsize = buflen;
 		if (map->pagesneeded != 0 &&
-		    must_bounce(dmat, map, curaddr, sgsize)) {
+		    DMA_FUNC(must_bounce)(dmat, map, curaddr, sgsize)) {
 			/*
 			 * The attempt to split a physically continuous buffer
 			 * seems very controversial, it's unclear whether we
@@ -251,9 +256,9 @@
  * the starting segment on entrace, and the ending segment on exit.
  */
 static int
-bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
-    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
-    int *segp)
+DMA_FUNC(bounce_bus_dmamap_load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map,
+    void *buf, bus_size_t buflen, pmap_t pmap, int flags,
+    bus_dma_segment_t *segs, int *segp)
 {
 	struct sync_list *sl;
 	bus_size_t sgsize;
@@ -272,8 +277,9 @@
 	if (flags & BUS_DMA_LOAD_MBUF)
 		map->flags |= DMAMAP_MBUF;
 
-	if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
-		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
+	if (DMA_FUNC(might_bounce)(dmat, map, (bus_addr_t)buf, buflen)) {
+		DMA_FUNC(_bus_dmamap_count_pages)(dmat, map, pmap, buf, buflen,
+		    flags);
 		if (map->pagesneeded != 0) {
 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
 			if (error)
@@ -312,7 +318,7 @@
 
 		sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
 		if (map->pagesneeded != 0 &&
-		    must_bounce(dmat, map, curaddr, sgsize)) {
+		    DMA_FUNC(must_bounce)(dmat, map, curaddr, sgsize)) {
 			/* See comment in bounce_bus_dmamap_load_phys */
 			KASSERT(dmat->common.alignment <= PAGE_SIZE,
 			    ("bounced buffer cannot have alignment bigger "
@@ -364,7 +370,7 @@
 }
 
 static void
-bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
+DMA_FUNC(bounce_bus_dmamap_sync)(bus_dma_tag_t dmat, bus_dmamap_t map,
     bus_dmasync_op_t op)
 {
 	struct bounce_page *bpage;
@@ -461,3 +467,5 @@
 		kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
 	}
 }
+
+#undef DMA_FUNC
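As applied here, DMA_FUNC(f) expands to f, so every function keeps its original name and the generated object code is unchanged; the trailing #undef keeps the macro from leaking past this header. The apparent value of the indirection is that a file including busdma_bounce.h could redefine DMA_FUNC first and instantiate the same template under differently named symbols. No such consumer is part of this diff; the standalone sketch below only illustrates the token-pasting pattern, and the names count_pages, load_buffer and the iommu_ prefix are hypothetical, not taken from the change.

#include <stdio.h>

/*
 * First instantiation: DMA_FUNC is the identity, as in the patch, so the
 * functions below are defined under their plain names.
 */
#define DMA_FUNC(f) f

static int
DMA_FUNC(count_pages)(int len)
{

	return ((len + 4095) / 4096);	/* hypothetical 4 KiB pages */
}

static int
DMA_FUNC(load_buffer)(int len)
{

	/* Calls between template functions also go through DMA_FUNC. */
	return (DMA_FUNC(count_pages)(len));
}

#undef DMA_FUNC

/*
 * Second instantiation: redefine DMA_FUNC to paste a prefix, so the same
 * template text defines a parallel set of iommu_-prefixed functions.
 */
#define DMA_FUNC(f) iommu_ ## f

static int
DMA_FUNC(count_pages)(int len)
{

	return ((len + 65535) / 65536);	/* hypothetical 64 KiB pages */
}

static int
DMA_FUNC(load_buffer)(int len)
{

	return (DMA_FUNC(count_pages)(len));
}

#undef DMA_FUNC

int
main(void)
{

	/* Both instantiations coexist: load_buffer() and iommu_load_buffer(). */
	printf("%d %d\n", load_buffer(8192), iommu_load_buffer(8192));
	return (0);
}

In the patch itself the macro is only ever the identity, so the change is purely mechanical preparation; any renaming consumer is outside this diff.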