D31823.id.diff

Index: sys/arm64/arm64/busdma_bounce.c
===================================================================
--- sys/arm64/arm64/busdma_bounce.c
+++ sys/arm64/arm64/busdma_bounce.c
@@ -131,6 +131,7 @@
struct bp_list bpages;
int pagesneeded;
int pagesreserved;
+ STAILQ_HEAD(bpagel, bounce_page) qbplist;
bus_dma_tag_t dmat;
struct memdesc mem;
bus_dmamap_callback_t *callback;
@@ -154,6 +155,8 @@
int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
+static bus_addr_t add_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, bus_dmamap_t map,
@@ -394,6 +397,7 @@
/* Initialize the new map */
STAILQ_INIT(&map->bpages);
+ STAILQ_INIT(&map->qbplist);
return (map);
}
@@ -496,6 +500,11 @@
CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY);
return (EBUSY);
}
+ if (!STAILQ_EMPTY(&map->qbplist)) {
+ /* Running a bounce_bus_dmamap_load_phys() operation. */
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY);
+ return (EBUSY);
+ }
if (dmat->bounce_zone)
dmat->bounce_zone->map_count--;
free(map, M_DEVBUF);
@@ -814,6 +823,7 @@
bus_size_t sgsize;
bus_addr_t curaddr, sl_end;
int error;
+ bool first;
if (segs == NULL)
segs = dmat->segments;
@@ -830,6 +840,7 @@
sl = map->slist + map->sync_count - 1;
sl_end = 0;
+ first = true;
while (buflen > 0) {
curaddr = buf;
sgsize = MIN(buflen, dmat->common.maxsegsz);
@@ -846,8 +857,13 @@
("bounced buffer cannot have alignment bigger "
"than PAGE_SIZE: %lu", dmat->common.alignment));
sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
- curaddr = add_bounce_page(dmat, map, 0, curaddr,
- sgsize);
+ if (first && dmat->common.nsegments == 1 &&
+ map->pagesneeded > 1)
+ curaddr = add_bounce_pages(dmat, map, 0,
+ curaddr, sgsize);
+ else
+ curaddr = add_bounce_page(dmat, map, 0, curaddr,
+ sgsize);
} else if ((map->flags & DMAMAP_COHERENT) == 0) {
if (map->sync_count > 0)
sl_end = sl->paddr + sl->datacount;
@@ -872,6 +888,7 @@
break;
buf += sgsize;
buflen -= sgsize;
+ first = false;
}
/*
@@ -1237,6 +1254,31 @@
return (bz->sysctl_tree_top);
}
+static void
+insert_bounce_page_sorted(struct bounce_zone *bz, struct bounce_page *bpage)
+{
+ struct bounce_page *bp, *bpp;
+
+ mtx_assert(&bounce_lock, MA_OWNED);
+
+ /* Sort by busaddr. */
+ bp = bpp = NULL;
+ STAILQ_FOREACH(bp, &bz->bounce_page_list, links) {
+ if (bpage->busaddr < bp->busaddr) {
+ if (bpp == NULL)
+ STAILQ_INSERT_HEAD(&bz->bounce_page_list,
+ bpage, links);
+ else
+ STAILQ_INSERT_AFTER(&bz->bounce_page_list, bpp,
+ bpage, links);
+ break;
+ }
+ bpp = bp;
+ }
+ if (bp == NULL)
+ STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
+}
+
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
@@ -1335,7 +1377,7 @@
}
bpage->busaddr = pmap_kextract(bpage->vaddr);
mtx_lock(&bounce_lock);
- STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
+ insert_bounce_page_sorted(bz, bpage);
total_bpages++;
bz->total_bpages++;
bz->free_bpages++;
@@ -1365,6 +1407,92 @@
return (pages);
}
+/*
+ * Special case of add_bounce_page(): when a single segment is requested,
+ * try to find a contiguous range of pages that can accommodate the entire
+ * request, return the first page, and queue the rest up in the map for
+ * add_bounce_page() to find on the following iterations.
+ */
+static bus_addr_t
+add_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
+ bus_addr_t addr, bus_size_t size)
+{
+ struct bounce_zone *bz;
+ struct bounce_page *bpage, *bp1, *bp2;
+ int seg;
+
+ KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
+
+ bz = dmat->bounce_zone;
+ KASSERT(map->pagesneeded > 0, ("%s: map doesn't need any pages: %d",
+ __func__, map->pagesneeded));
+ KASSERT(map->pagesreserved > 0, ("%s: map has no reserved pages: %d",
+ __func__, map->pagesreserved));
+ KASSERT(dmat->common.nsegments == 1, ("%s: tag nsegment != 1: %d",
+ __func__, dmat->common.nsegments));
+
+ /*
+ * Try to find enough contiguous pages to fulfill the
+ * nsegments == 1 constraint.
+ */
+ mtx_lock(&bounce_lock);
+ bpage = STAILQ_FIRST(&bz->bounce_page_list);
+ if (bpage == NULL)
+ panic("%s: free page list is empty", __func__);
+ seg = 1;
+ bp2 = bpage;
+ while (seg < map->pagesneeded) {
+ bp1 = STAILQ_NEXT(bp2, links);
+ if (bp1 == NULL) {
+ /*
+ * If we do not have enough contiguous pages on the
+ * bounce_page_list, return -1 here to force a
+ * non-contiguous address so that _bus_dmamap_addseg()
+ * fails. No need to do any more work here.
+ */
+ mtx_unlock(&bounce_lock);
+ return (-1);
+ }
+ if ((bp2->busaddr + PAGE_SIZE) != bp1->busaddr) {
+ bpage = bp2 = bp1;
+ seg = 1;
+ } else {
+ bp2 = bp1;
+ seg++;
+ }
+ }
+
+ map->pagesneeded--;
+ map->pagesreserved--;
+ for (; seg > 0; seg--) {
+ bp2 = STAILQ_NEXT(bpage, links);
+ KASSERT(bp2 != NULL, ("%s: bpage %p bp2 %p seg %d (bp1 %p)\n",
+ __func__, bpage, bp2, seg, bp1));
+ STAILQ_REMOVE(&bz->bounce_page_list, bpage, bounce_page, links);
+ STAILQ_INSERT_TAIL(&map->qbplist, bpage, links);
+ bz->reserved_bpages--;
+ bz->active_bpages++;
+ bpage = bp2;
+ }
+
+ bpage = STAILQ_FIRST(&map->qbplist);
+ KASSERT(bpage != NULL, ("%s:%d: bpage %p\n", __func__, __LINE__, bpage));
+ STAILQ_REMOVE_HEAD(&map->qbplist, links);
+ mtx_unlock(&bounce_lock);
+
+ if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+ /* Page offset needs to be preserved. */
+ bpage->vaddr |= addr & PAGE_MASK;
+ bpage->busaddr |= addr & PAGE_MASK;
+ }
+ bpage->datavaddr = vaddr;
+ bpage->datapage = PHYS_TO_VM_PAGE(addr);
+ bpage->dataoffs = addr & PAGE_MASK;
+ bpage->datacount = size;
+ STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
+ return (bpage->busaddr);
+}
+
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bus_addr_t addr, bus_size_t size)
@@ -1375,22 +1503,26 @@
KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
bz = dmat->bounce_zone;
- if (map->pagesneeded == 0)
- panic("add_bounce_page: map doesn't need any pages");
+ KASSERT(map->pagesneeded > 0, ("%s: map doesn't need any pages: %d",
+ __func__, map->pagesneeded));
+ KASSERT(map->pagesreserved > 0, ("%s: map has no reserved pages: %d",
+ __func__, map->pagesreserved));
map->pagesneeded--;
-
- if (map->pagesreserved == 0)
- panic("add_bounce_page: map doesn't need any pages");
map->pagesreserved--;
mtx_lock(&bounce_lock);
- bpage = STAILQ_FIRST(&bz->bounce_page_list);
- if (bpage == NULL)
- panic("add_bounce_page: free page list is empty");
- STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
- bz->reserved_bpages--;
- bz->active_bpages++;
+ if (!STAILQ_EMPTY(&map->qbplist)) {
+ bpage = STAILQ_FIRST(&map->qbplist);
+ STAILQ_REMOVE_HEAD(&map->qbplist, links);
+ } else {
+ bpage = STAILQ_FIRST(&bz->bounce_page_list);
+ if (bpage == NULL)
+ panic("add_bounce_page: free page list is empty");
+ STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
+ bz->reserved_bpages--;
+ bz->active_bpages++;
+ }
mtx_unlock(&bounce_lock);
if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
@@ -1426,7 +1558,7 @@
}
mtx_lock(&bounce_lock);
- STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
+ insert_bounce_page_sorted(bz, bpage);
bz->free_bpages++;
bz->active_bpages--;
if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
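
As a rough standalone illustration (not part of the patch) of the sorted
free-list idea behind insert_bounce_page_sorted(), the userland sketch below
keeps a list of hypothetical bounce pages ordered by ascending busaddr using
the same <sys/queue.h> STAILQ macros; the trimmed-down struct bounce_page,
insert_sorted(), and the addresses are made up for the example.

#include <sys/queue.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096UL
#endif

/* Hypothetical, trimmed-down bounce page: only what the sketch needs. */
struct bounce_page {
	unsigned long busaddr;			/* bus address of the page */
	STAILQ_ENTRY(bounce_page) links;	/* free-list linkage */
};

STAILQ_HEAD(bp_list, bounce_page);

/* Insert bpage so the list stays ordered by ascending busaddr. */
static void
insert_sorted(struct bp_list *list, struct bounce_page *bpage)
{
	struct bounce_page *bp, *prev;

	prev = NULL;
	STAILQ_FOREACH(bp, list, links) {
		if (bpage->busaddr < bp->busaddr) {
			if (prev == NULL)
				STAILQ_INSERT_HEAD(list, bpage, links);
			else
				STAILQ_INSERT_AFTER(list, prev, bpage, links);
			return;
		}
		prev = bp;
	}
	STAILQ_INSERT_TAIL(list, bpage, links);
}

int
main(void)
{
	struct bp_list list = STAILQ_HEAD_INITIALIZER(list);
	unsigned long addrs[] = { 3 * PAGE_SIZE, 1 * PAGE_SIZE, 2 * PAGE_SIZE };
	struct bounce_page *bp;
	size_t i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		if ((bp = calloc(1, sizeof(*bp))) == NULL)
			err(1, "calloc");
		bp->busaddr = addrs[i];
		insert_sorted(&list, bp);
	}
	/* The walk visits the pages in ascending busaddr order. */
	STAILQ_FOREACH(bp, &list, links)
		printf("%#lx\n", bp->busaddr);
	return (0);
}

Keeping the free list sorted this way is what allows add_bounce_pages() to
detect physically adjacent pages with a single forward walk.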

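Building on the same hypothetical structures, the sketch below shows the kind
of scan add_bounce_pages() performs over the busaddr-sorted free list: walk
the list looking for npages entries whose bus addresses are exactly PAGE_SIZE
apart, restarting the run whenever a gap is found. find_contig_run() and
npages are illustrative names only, not part of the patch.

/*
 * Sketch only: return the first page of a run of npages entries whose
 * bus addresses are exactly PAGE_SIZE apart, or NULL when no such run
 * exists.  Assumes the struct bounce_page / struct bp_list declarations
 * from the previous sketch and a list already sorted by ascending busaddr.
 */
static struct bounce_page *
find_contig_run(struct bp_list *list, int npages)
{
	struct bounce_page *start, *prev, *bp;
	int found;

	start = prev = STAILQ_FIRST(list);
	if (start == NULL)
		return (NULL);
	found = 1;
	while (found < npages) {
		bp = STAILQ_NEXT(prev, links);
		if (bp == NULL)
			return (NULL);	/* ran out of free pages: no run */
		if (prev->busaddr + PAGE_SIZE != bp->busaddr) {
			/* Gap in bus addresses: restart the run here. */
			start = prev = bp;
			found = 1;
		} else {
			prev = bp;
			found++;
		}
	}
	return (start);
}

In the patch itself the selected run is then moved onto map->qbplist, so that
the following add_bounce_page() calls consume the reserved pages in order
instead of drawing from the global bounce_page_list.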