Page MenuHomeFreeBSD

D49555.diff
No OneTemporary

D49555.diff

diff --git a/sys/arm64/arm64/busdma_bounce.h b/sys/arm64/arm64/busdma_bounce.h
new file mode 100644
--- /dev/null
+++ b/sys/arm64/arm64/busdma_bounce.h
@@ -0,0 +1,463 @@
+/*-
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * Copyright (c) 2015-2016 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship of the FreeBSD Foundation.
+ *
+ * Portions of this software were developed by Semihalf
+ * under sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+static bool _bus_dmamap_pagesneeded(bus_dma_tag_t, bus_dmamap_t,
+ vm_paddr_t, bus_size_t, int *);
+
+/*
+ * Cheap, conservative test: could this transfer possibly require bounce
+ * buffering?  A false result is definitive (no bouncing); a true result
+ * only means the per-range must_bounce() checks still have to run.
+ */
+static bool
+might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
+ bus_size_t size)
+{
+
+ /* Memory allocated by bounce_bus_dmamem_alloc won't bounce */
+ if (map && (map->flags & DMAMAP_FROM_DMAMEM) != 0)
+ return (false);
+
+ if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0)
+ return (true);
+
+ /* cacheline_bounce()/alignment_bounce() are helpers in busdma_bounce.c. */
+ if (cacheline_bounce(dmat, map, paddr, size))
+ return (true);
+
+ if (alignment_bounce(dmat, paddr))
+ return (true);
+
+ return (false);
+}
+
+/*
+ * Definitive test: must the physical range [paddr, paddr + size) be
+ * bounced for this tag/map?  Used per segment once might_bounce() has
+ * indicated that bouncing is possible at all.
+ */
+static bool
+must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
+ bus_size_t size)
+{
+
+ if (cacheline_bounce(dmat, map, paddr, size))
+ return (true);
+
+ /* Only consult the tag's address restrictions if it can bounce at all. */
+ if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0 &&
+ addr_needs_bounce(dmat, paddr))
+ return (true);
+
+ return (false);
+}
+
+/*
+ * Return true when the physical range [buf, buf + buflen) would be loaded
+ * in place, i.e. without allocating any bounce pages.
+ */
+static bool
+bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+{
+
+ if (!might_bounce(dmat, NULL, buf, buflen))
+ return (true);
+ /* NULL pagesneeded: pure predicate, stops at the first bounced page. */
+ return (!_bus_dmamap_pagesneeded(dmat, NULL, buf, buflen, NULL));
+}
+
+/*
+ * Walk the physical range and determine how many bounce pages it needs.
+ * With pagesneeded == NULL this acts as a pure predicate and returns true
+ * as soon as any page must bounce; otherwise the full count is stored in
+ * *pagesneeded.  Returns true iff at least one bounce page is required.
+ */
+static bool
+_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+ bus_size_t buflen, int *pagesneeded)
+{
+ bus_addr_t curaddr;
+ bus_size_t sgsize;
+ int count;
+
+ /*
+ * Count the number of bounce pages needed in order to
+ * complete this transfer
+ */
+ count = 0;
+ curaddr = buf;
+ while (buflen != 0) {
+ sgsize = buflen;
+ if (must_bounce(dmat, map, curaddr, sgsize)) {
+ /* Clamp the step to the end of the current page. */
+ sgsize = MIN(sgsize,
+ PAGE_SIZE - (curaddr & PAGE_MASK));
+ if (pagesneeded == NULL)
+ return (true);
+ count++;
+ }
+ curaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ if (pagesneeded != NULL)
+ *pagesneeded = count;
+ return (count != 0);
+}
+
+/*
+ * Record in map->pagesneeded the number of bounce pages the physical
+ * buffer will require.  No-op if a count has already been established.
+ */
+static void
+_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+ bus_size_t buflen, int flags)
+{
+
+ if (map->pagesneeded == 0) {
+ _bus_dmamap_pagesneeded(dmat, map, buf, buflen,
+ &map->pagesneeded);
+ CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+ }
+}
+
+/*
+ * Record in map->pagesneeded the number of bounce pages a virtually
+ * addressed buffer will require, translating each page through pmap.
+ * No-op if a count has already been established.
+ */
+static void
+_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
+ void *buf, bus_size_t buflen, int flags)
+{
+ vm_offset_t vaddr;
+ vm_offset_t vendaddr;
+ bus_addr_t paddr;
+ bus_size_t sg_len;
+
+ if (map->pagesneeded == 0) {
+ CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
+ "alignment= %d", dmat->common.lowaddr,
+ ptoa((vm_paddr_t)Maxmem),
+ dmat->common.boundary, dmat->common.alignment);
+ CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
+ map->pagesneeded);
+ /*
+ * Count the number of bounce pages
+ * needed in order to complete this transfer
+ */
+ vaddr = (vm_offset_t)buf;
+ vendaddr = (vm_offset_t)buf + buflen;
+
+ while (vaddr < vendaddr) {
+ /* One step per page (or less, at the tail). */
+ sg_len = MIN(vendaddr - vaddr,
+ PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
+ if (pmap == kernel_pmap)
+ paddr = pmap_kextract(vaddr);
+ else
+ paddr = pmap_extract(pmap, vaddr);
+ if (must_bounce(dmat, map, paddr, sg_len) != 0) {
+ sg_len = roundup2(sg_len,
+ dmat->common.alignment);
+ map->pagesneeded++;
+ }
+ vaddr += sg_len;
+ }
+ CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+ }
+}
+
+/*
+ * Utility function to load a physical buffer. segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * Returns 0 on success or EFBIG if the buffer does not fit in the
+ * tag's segment limit.
+ */
+static int
+bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ struct sync_list *sl;
+ bus_size_t sgsize;
+ bus_addr_t curaddr, sl_end;
+ int error;
+
+ if (segs == NULL)
+ segs = dmat->segments;
+
+ /* Reserve bounce pages up front if this transfer may need them. */
+ if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
+ _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
+ if (map->pagesneeded != 0) {
+ error = _bus_dmamap_reserve_pages(dmat, map, flags);
+ if (error)
+ return (error);
+ }
+ }
+
+ /* sl points at the last active sync_list entry (or before slist[0]). */
+ sl = map->slist + map->sync_count - 1;
+ sl_end = 0;
+
+ while (buflen > 0) {
+ curaddr = buf;
+ sgsize = buflen;
+ if (map->pagesneeded != 0 &&
+ must_bounce(dmat, map, curaddr, sgsize)) {
+ /*
+ * The attempt to split a physically continuous buffer
+ * seems very controversial, it's unclear whether we
+ * can do this in all cases. Also, memory for bounced
+ * buffers is allocated as pages, so we cannot
+ * guarantee multipage alignment.
+ */
+ KASSERT(dmat->common.alignment <= PAGE_SIZE,
+ ("bounced buffer cannot have alignment bigger "
+ "than PAGE_SIZE: %lu", dmat->common.alignment));
+ sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
+ curaddr = add_bounce_page(dmat, map, 0, curaddr,
+ sgsize);
+ } else if ((map->flags & DMAMAP_COHERENT) == 0) {
+ /* Non-coherent map: remember the range for cache sync. */
+ if (map->sync_count > 0)
+ sl_end = sl->paddr + sl->datacount;
+
+ /* Extend the previous entry if physically contiguous. */
+ if (map->sync_count == 0 || curaddr != sl_end) {
+ if (++map->sync_count > dmat->common.nsegments)
+ break;
+ sl++;
+ sl->vaddr = 0;
+ sl->paddr = curaddr;
+ sl->pages = PHYS_TO_VM_PAGE(curaddr);
+ KASSERT(sl->pages != NULL,
+ ("%s: page at PA:0x%08lx is not in "
+ "vm_page_array", __func__, curaddr));
+ sl->datacount = sgsize;
+ } else
+ sl->datacount += sgsize;
+ }
+ if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
+ segp))
+ break;
+ buf += sgsize;
+ buflen -= sgsize;
+ }
+
+ /*
+ * Did we fit?
+ */
+ if (buflen != 0) {
+ bus_dmamap_unload(dmat, map);
+ return (EFBIG); /* XXX better return value here? */
+ }
+ return (0);
+}
+
+/*
+ * Utility function to load a linear buffer. segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * The buffer is virtually addressed; pmap translates each page
+ * (kernel_pmap buffers keep their kernel VA for bounce copies).
+ */
+static int
+bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ struct sync_list *sl;
+ bus_size_t sgsize;
+ bus_addr_t curaddr, sl_pend;
+ vm_offset_t kvaddr, vaddr, sl_vend;
+ int error;
+
+ KASSERT((map->flags & DMAMAP_FROM_DMAMEM) != 0 ||
+ dmat->common.alignment <= PAGE_SIZE,
+ ("loading user buffer with alignment bigger than PAGE_SIZE is not "
+ "supported"));
+
+ if (segs == NULL)
+ segs = dmat->segments;
+
+ if (flags & BUS_DMA_LOAD_MBUF)
+ map->flags |= DMAMAP_MBUF;
+
+ /* Reserve bounce pages up front if this transfer may need them. */
+ if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
+ _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
+ if (map->pagesneeded != 0) {
+ error = _bus_dmamap_reserve_pages(dmat, map, flags);
+ if (error)
+ return (error);
+ }
+ }
+
+ /*
+ * XXX Optimally we should parse input buffer for physically
+ * continuous segments first and then pass these segment into
+ * load loop.
+ */
+ sl = map->slist + map->sync_count - 1;
+ vaddr = (vm_offset_t)buf;
+ sl_pend = 0;
+ sl_vend = 0;
+
+ while (buflen > 0) {
+ /*
+ * Get the physical address for this segment.
+ */
+ if (__predict_true(pmap == kernel_pmap)) {
+ curaddr = pmap_kextract(vaddr);
+ kvaddr = vaddr;
+ } else {
+ curaddr = pmap_extract(pmap, vaddr);
+ kvaddr = 0;
+ }
+
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ sgsize = buflen;
+ if ((map->flags & DMAMAP_FROM_DMAMEM) == 0 ||
+ (dmat->bounce_flags & BF_KMEM_ALLOC_CONTIG) == 0)
+ sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
+
+ if (map->pagesneeded != 0 &&
+ must_bounce(dmat, map, curaddr, sgsize)) {
+ /* See comment in bounce_bus_dmamap_load_phys */
+ KASSERT(dmat->common.alignment <= PAGE_SIZE,
+ ("bounced buffer cannot have alignment bigger "
+ "than PAGE_SIZE: %lu", dmat->common.alignment));
+ curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
+ sgsize);
+ } else if ((map->flags & DMAMAP_COHERENT) == 0) {
+ /* Non-coherent map: remember the range for cache sync. */
+ if (map->sync_count > 0) {
+ sl_pend = sl->paddr + sl->datacount;
+ sl_vend = sl->vaddr + sl->datacount;
+ }
+
+ /* Extend the previous entry only if both VA and PA follow on. */
+ if (map->sync_count == 0 ||
+ (kvaddr != 0 && kvaddr != sl_vend) ||
+ (curaddr != sl_pend)) {
+ if (++map->sync_count > dmat->common.nsegments)
+ break;
+ sl++;
+ sl->vaddr = kvaddr;
+ sl->paddr = curaddr;
+ if (kvaddr != 0) {
+ sl->pages = NULL;
+ } else {
+ sl->pages = PHYS_TO_VM_PAGE(curaddr);
+ KASSERT(sl->pages != NULL,
+ ("%s: page at PA:0x%08lx is not "
+ "in vm_page_array", __func__,
+ curaddr));
+ }
+ sl->datacount = sgsize;
+ } else
+ sl->datacount += sgsize;
+ }
+ if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
+ segp))
+ break;
+ vaddr += sgsize;
+ buflen -= MIN(sgsize, buflen); /* avoid underflow */
+ }
+
+ /*
+ * Did we fit?
+ */
+ if (buflen != 0) {
+ bus_dmamap_unload(dmat, map);
+ return (EFBIG); /* XXX better return value here? */
+ }
+ return (0);
+}
+
+/*
+ * Perform the bounce copies and cache maintenance required by a
+ * bus_dmamap_sync() request.  POSTWRITE is a no-op; dsb(sy) barriers
+ * order completed DMA against the CPU copies and vice versa.
+ */
+static void
+bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dmasync_op_t op)
+{
+ struct bounce_page *bpage;
+ struct sync_list *sl, *end;
+ vm_offset_t datavaddr, tempvaddr;
+
+ /* Nothing to do after a write completes. */
+ if (op == BUS_DMASYNC_POSTWRITE)
+ return;
+
+ if ((op & BUS_DMASYNC_POSTREAD) != 0) {
+ /*
+ * Wait for any DMA operations to complete before the bcopy.
+ */
+ dsb(sy);
+ }
+
+ if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
+ "performing bounce", __func__, dmat, dmat->common.flags,
+ op);
+
+ /* PREWRITE: copy the caller's data into the bounce pages. */
+ if ((op & BUS_DMASYNC_PREWRITE) != 0) {
+ while (bpage != NULL) {
+ tempvaddr = 0;
+ datavaddr = bpage->datavaddr;
+ if (datavaddr == 0) {
+ /* No KVA on record; map the page temporarily. */
+ tempvaddr = pmap_quick_enter_page(
+ bpage->datapage);
+ datavaddr = tempvaddr | bpage->dataoffs;
+ }
+
+ bcopy((void *)datavaddr,
+ (void *)bpage->vaddr, bpage->datacount);
+ if (tempvaddr != 0)
+ pmap_quick_remove_page(tempvaddr);
+ if ((map->flags & DMAMAP_COHERENT) == 0)
+ cpu_dcache_wb_range((void *)bpage->vaddr,
+ bpage->datacount);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ dmat->bounce_zone->total_bounced++;
+ } else if ((op & BUS_DMASYNC_PREREAD) != 0) {
+ while (bpage != NULL) {
+ if ((map->flags & DMAMAP_COHERENT) == 0)
+ cpu_dcache_wbinv_range((void *)bpage->vaddr,
+ bpage->datacount);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ }
+
+ /* POSTREAD: copy DMA'd data from the bounce pages back out. */
+ if ((op & BUS_DMASYNC_POSTREAD) != 0) {
+ while (bpage != NULL) {
+ if ((map->flags & DMAMAP_COHERENT) == 0)
+ cpu_dcache_inv_range((void *)bpage->vaddr,
+ bpage->datacount);
+ tempvaddr = 0;
+ datavaddr = bpage->datavaddr;
+ if (datavaddr == 0) {
+ tempvaddr = pmap_quick_enter_page(
+ bpage->datapage);
+ datavaddr = tempvaddr | bpage->dataoffs;
+ }
+
+ bcopy((void *)bpage->vaddr,
+ (void *)datavaddr, bpage->datacount);
+
+ if (tempvaddr != 0)
+ pmap_quick_remove_page(tempvaddr);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ dmat->bounce_zone->total_bounced++;
+ }
+ }
+
+ /*
+ * Cache maintenance for normal (non-COHERENT non-bounce) buffers.
+ */
+ if (map->sync_count != 0) {
+ sl = &map->slist[0];
+ end = &map->slist[map->sync_count];
+ CTR3(KTR_BUSDMA, "%s: tag %p op 0x%x "
+ "performing sync", __func__, dmat, op);
+
+ for ( ; sl != end; ++sl)
+ dma_dcache_sync(sl, op);
+ }
+
+ if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0) {
+ /*
+ * Wait for the bcopy to complete before any DMA operations.
+ */
+ dsb(sy);
+ }
+
+ kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
+}
diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c
--- a/sys/arm64/arm64/busdma_bounce.c
+++ b/sys/arm64/arm64/busdma_bounce.c
@@ -112,13 +112,6 @@
struct sync_list slist[];
};
-static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, bus_dmamap_t map,
- vm_paddr_t buf, bus_size_t buflen, int *pagesneeded);
-static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
- pmap_t pmap, void *buf, bus_size_t buflen, int flags);
-static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
- vm_paddr_t buf, bus_size_t buflen, int flags);
-
static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
#define dmat_alignment(dmat) ((dmat)->common.alignment)
@@ -205,42 +198,6 @@
return (!vm_addr_align_ok(addr, dmat->common.alignment));
}
-static bool
-might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
- bus_size_t size)
-{
-
- /* Memory allocated by bounce_bus_dmamem_alloc won't bounce */
- if (map && (map->flags & DMAMAP_FROM_DMAMEM) != 0)
- return (false);
-
- if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0)
- return (true);
-
- if (cacheline_bounce(dmat, map, paddr, size))
- return (true);
-
- if (alignment_bounce(dmat, paddr))
- return (true);
-
- return (false);
-}
-
-static bool
-must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
- bus_size_t size)
-{
-
- if (cacheline_bounce(dmat, map, paddr, size))
- return (true);
-
- if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0 &&
- addr_needs_bounce(dmat, paddr))
- return (true);
-
- return (false);
-}
-
/*
* Allocate a device specific dma_tag.
*/
@@ -341,15 +298,6 @@
return (bounce_bus_dma_zone_setup(dmat));
}
-static bool
-bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
-{
-
- if (!might_bounce(dmat, NULL, buf, buflen))
- return (true);
- return (!_bus_dmamap_pagesneeded(dmat, NULL, buf, buflen, NULL));
-}
-
static bus_dmamap_t
alloc_dmamap(bus_dma_tag_t dmat, int flags)
{
@@ -622,290 +570,6 @@
dmat->bounce_flags);
}
-static bool
-_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
- bus_size_t buflen, int *pagesneeded)
-{
- bus_addr_t curaddr;
- bus_size_t sgsize;
- int count;
-
- /*
- * Count the number of bounce pages needed in order to
- * complete this transfer
- */
- count = 0;
- curaddr = buf;
- while (buflen != 0) {
- sgsize = buflen;
- if (must_bounce(dmat, map, curaddr, sgsize)) {
- sgsize = MIN(sgsize,
- PAGE_SIZE - (curaddr & PAGE_MASK));
- if (pagesneeded == NULL)
- return (true);
- count++;
- }
- curaddr += sgsize;
- buflen -= sgsize;
- }
-
- if (pagesneeded != NULL)
- *pagesneeded = count;
- return (count != 0);
-}
-
-static void
-_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
- bus_size_t buflen, int flags)
-{
-
- if (map->pagesneeded == 0) {
- _bus_dmamap_pagesneeded(dmat, map, buf, buflen,
- &map->pagesneeded);
- CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
- }
-}
-
-static void
-_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
- void *buf, bus_size_t buflen, int flags)
-{
- vm_offset_t vaddr;
- vm_offset_t vendaddr;
- bus_addr_t paddr;
- bus_size_t sg_len;
-
- if (map->pagesneeded == 0) {
- CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
- "alignment= %d", dmat->common.lowaddr,
- ptoa((vm_paddr_t)Maxmem),
- dmat->common.boundary, dmat->common.alignment);
- CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
- map->pagesneeded);
- /*
- * Count the number of bounce pages
- * needed in order to complete this transfer
- */
- vaddr = (vm_offset_t)buf;
- vendaddr = (vm_offset_t)buf + buflen;
-
- while (vaddr < vendaddr) {
- sg_len = MIN(vendaddr - vaddr,
- PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
- if (pmap == kernel_pmap)
- paddr = pmap_kextract(vaddr);
- else
- paddr = pmap_extract(pmap, vaddr);
- if (must_bounce(dmat, map, paddr, sg_len) != 0) {
- sg_len = roundup2(sg_len,
- dmat->common.alignment);
- map->pagesneeded++;
- }
- vaddr += sg_len;
- }
- CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
- }
-}
-
-/*
- * Utility function to load a physical buffer. segp contains
- * the starting segment on entrace, and the ending segment on exit.
- */
-static int
-bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
- vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
- int *segp)
-{
- struct sync_list *sl;
- bus_size_t sgsize;
- bus_addr_t curaddr, sl_end;
- int error;
-
- if (segs == NULL)
- segs = dmat->segments;
-
- if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
- _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
- if (map->pagesneeded != 0) {
- error = _bus_dmamap_reserve_pages(dmat, map, flags);
- if (error)
- return (error);
- }
- }
-
- sl = map->slist + map->sync_count - 1;
- sl_end = 0;
-
- while (buflen > 0) {
- curaddr = buf;
- sgsize = buflen;
- if (map->pagesneeded != 0 &&
- must_bounce(dmat, map, curaddr, sgsize)) {
- /*
- * The attempt to split a physically continuous buffer
- * seems very controversial, it's unclear whether we
- * can do this in all cases. Also, memory for bounced
- * buffers is allocated as pages, so we cannot
- * guarantee multipage alignment.
- */
- KASSERT(dmat->common.alignment <= PAGE_SIZE,
- ("bounced buffer cannot have alignment bigger "
- "than PAGE_SIZE: %lu", dmat->common.alignment));
- sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
- curaddr = add_bounce_page(dmat, map, 0, curaddr,
- sgsize);
- } else if ((map->flags & DMAMAP_COHERENT) == 0) {
- if (map->sync_count > 0)
- sl_end = sl->paddr + sl->datacount;
-
- if (map->sync_count == 0 || curaddr != sl_end) {
- if (++map->sync_count > dmat->common.nsegments)
- break;
- sl++;
- sl->vaddr = 0;
- sl->paddr = curaddr;
- sl->pages = PHYS_TO_VM_PAGE(curaddr);
- KASSERT(sl->pages != NULL,
- ("%s: page at PA:0x%08lx is not in "
- "vm_page_array", __func__, curaddr));
- sl->datacount = sgsize;
- } else
- sl->datacount += sgsize;
- }
- if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
- segp))
- break;
- buf += sgsize;
- buflen -= sgsize;
- }
-
- /*
- * Did we fit?
- */
- if (buflen != 0) {
- bus_dmamap_unload(dmat, map);
- return (EFBIG); /* XXX better return value here? */
- }
- return (0);
-}
-
-/*
- * Utility function to load a linear buffer. segp contains
- * the starting segment on entrace, and the ending segment on exit.
- */
-static int
-bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
- bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
- int *segp)
-{
- struct sync_list *sl;
- bus_size_t sgsize;
- bus_addr_t curaddr, sl_pend;
- vm_offset_t kvaddr, vaddr, sl_vend;
- int error;
-
- KASSERT((map->flags & DMAMAP_FROM_DMAMEM) != 0 ||
- dmat->common.alignment <= PAGE_SIZE,
- ("loading user buffer with alignment bigger than PAGE_SIZE is not "
- "supported"));
-
- if (segs == NULL)
- segs = dmat->segments;
-
- if (flags & BUS_DMA_LOAD_MBUF)
- map->flags |= DMAMAP_MBUF;
-
- if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
- _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
- if (map->pagesneeded != 0) {
- error = _bus_dmamap_reserve_pages(dmat, map, flags);
- if (error)
- return (error);
- }
- }
-
- /*
- * XXX Optimally we should parse input buffer for physically
- * continuous segments first and then pass these segment into
- * load loop.
- */
- sl = map->slist + map->sync_count - 1;
- vaddr = (vm_offset_t)buf;
- sl_pend = 0;
- sl_vend = 0;
-
- while (buflen > 0) {
- /*
- * Get the physical address for this segment.
- */
- if (__predict_true(pmap == kernel_pmap)) {
- curaddr = pmap_kextract(vaddr);
- kvaddr = vaddr;
- } else {
- curaddr = pmap_extract(pmap, vaddr);
- kvaddr = 0;
- }
-
- /*
- * Compute the segment size, and adjust counts.
- */
- sgsize = buflen;
- if ((map->flags & DMAMAP_FROM_DMAMEM) == 0 ||
- (dmat->bounce_flags & BF_KMEM_ALLOC_CONTIG) == 0)
- sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
-
- if (map->pagesneeded != 0 &&
- must_bounce(dmat, map, curaddr, sgsize)) {
- /* See comment in bounce_bus_dmamap_load_phys */
- KASSERT(dmat->common.alignment <= PAGE_SIZE,
- ("bounced buffer cannot have alignment bigger "
- "than PAGE_SIZE: %lu", dmat->common.alignment));
- curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
- sgsize);
- } else if ((map->flags & DMAMAP_COHERENT) == 0) {
- if (map->sync_count > 0) {
- sl_pend = sl->paddr + sl->datacount;
- sl_vend = sl->vaddr + sl->datacount;
- }
-
- if (map->sync_count == 0 ||
- (kvaddr != 0 && kvaddr != sl_vend) ||
- (curaddr != sl_pend)) {
- if (++map->sync_count > dmat->common.nsegments)
- break;
- sl++;
- sl->vaddr = kvaddr;
- sl->paddr = curaddr;
- if (kvaddr != 0) {
- sl->pages = NULL;
- } else {
- sl->pages = PHYS_TO_VM_PAGE(curaddr);
- KASSERT(sl->pages != NULL,
- ("%s: page at PA:0x%08lx is not "
- "in vm_page_array", __func__,
- curaddr));
- }
- sl->datacount = sgsize;
- } else
- sl->datacount += sgsize;
- }
- if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
- segp))
- break;
- vaddr += sgsize;
- buflen -= MIN(sgsize, buflen); /* avoid underflow */
- }
-
- /*
- * Did we fit?
- */
- if (buflen != 0) {
- bus_dmamap_unload(dmat, map);
- return (EFBIG); /* XXX better return value here? */
- }
- return (0);
-}
-
static void
bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
@@ -1013,105 +677,6 @@
}
}
-static void
-bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
- bus_dmasync_op_t op)
-{
- struct bounce_page *bpage;
- struct sync_list *sl, *end;
- vm_offset_t datavaddr, tempvaddr;
-
- if (op == BUS_DMASYNC_POSTWRITE)
- return;
-
- if ((op & BUS_DMASYNC_POSTREAD) != 0) {
- /*
- * Wait for any DMA operations to complete before the bcopy.
- */
- dsb(sy);
- }
-
- if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
- CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
- "performing bounce", __func__, dmat, dmat->common.flags,
- op);
-
- if ((op & BUS_DMASYNC_PREWRITE) != 0) {
- while (bpage != NULL) {
- tempvaddr = 0;
- datavaddr = bpage->datavaddr;
- if (datavaddr == 0) {
- tempvaddr = pmap_quick_enter_page(
- bpage->datapage);
- datavaddr = tempvaddr | bpage->dataoffs;
- }
-
- bcopy((void *)datavaddr,
- (void *)bpage->vaddr, bpage->datacount);
- if (tempvaddr != 0)
- pmap_quick_remove_page(tempvaddr);
- if ((map->flags & DMAMAP_COHERENT) == 0)
- cpu_dcache_wb_range((void *)bpage->vaddr,
- bpage->datacount);
- bpage = STAILQ_NEXT(bpage, links);
- }
- dmat->bounce_zone->total_bounced++;
- } else if ((op & BUS_DMASYNC_PREREAD) != 0) {
- while (bpage != NULL) {
- if ((map->flags & DMAMAP_COHERENT) == 0)
- cpu_dcache_wbinv_range((void *)bpage->vaddr,
- bpage->datacount);
- bpage = STAILQ_NEXT(bpage, links);
- }
- }
-
- if ((op & BUS_DMASYNC_POSTREAD) != 0) {
- while (bpage != NULL) {
- if ((map->flags & DMAMAP_COHERENT) == 0)
- cpu_dcache_inv_range((void *)bpage->vaddr,
- bpage->datacount);
- tempvaddr = 0;
- datavaddr = bpage->datavaddr;
- if (datavaddr == 0) {
- tempvaddr = pmap_quick_enter_page(
- bpage->datapage);
- datavaddr = tempvaddr | bpage->dataoffs;
- }
-
- bcopy((void *)bpage->vaddr,
- (void *)datavaddr, bpage->datacount);
-
- if (tempvaddr != 0)
- pmap_quick_remove_page(tempvaddr);
- bpage = STAILQ_NEXT(bpage, links);
- }
- dmat->bounce_zone->total_bounced++;
- }
- }
-
- /*
- * Cache maintenance for normal (non-COHERENT non-bounce) buffers.
- */
- if (map->sync_count != 0) {
- sl = &map->slist[0];
- end = &map->slist[map->sync_count];
- CTR3(KTR_BUSDMA, "%s: tag %p op 0x%x "
- "performing sync", __func__, dmat, op);
-
- for ( ; sl != end; ++sl)
- dma_dcache_sync(sl, op);
- }
-
- if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0) {
- /*
- * Wait for the bcopy to complete before any DMA operations.
- */
- dsb(sy);
- }
-
- kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
-}
-
#ifdef KMSAN
static void
bounce_bus_dmamap_load_kmsan(bus_dmamap_t map, struct memdesc *mem)
@@ -1122,6 +687,8 @@
}
#endif
+#include "busdma_bounce.h"
+
struct bus_dma_impl bus_dma_bounce_impl = {
.tag_create = bounce_bus_dma_tag_create,
.tag_destroy = bounce_bus_dma_tag_destroy,

File Metadata

Mime Type
text/plain
Expires
Sun, Apr 12, 10:13 PM (9 h, 53 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
31375379
Default Alt Text
D49555.diff (25 KB)

Event Timeline