Index: sys/x86/x86/busdma_bounce.c
===================================================================
--- sys/x86/x86/busdma_bounce.c
+++ sys/x86/x86/busdma_bounce.c
@@ -79,8 +79,10 @@
 	vm_offset_t	vaddr;		/* kva of bounce buffer */
 	bus_addr_t	busaddr;	/* Physical address */
 	vm_offset_t	datavaddr;	/* kva of client data */
-	bus_addr_t	dataaddr;	/* client physical address */
-	bus_size_t	datacount;	/* client data count */
+	bus_addr_t	dataaddr1;	/* first client physical address */
+	bus_addr_t	dataaddr2;	/* second client physical address */
+	bus_size_t	datacount1;	/* client data count */
+	bus_size_t	datacount2;	/* client data count (2nd page) */
 	STAILQ_ENTRY(bounce_page) links;
 };
 
@@ -134,8 +136,8 @@
 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
 				int commit);
 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
-				  vm_offset_t vaddr, bus_addr_t addr,
-				  bus_size_t size);
+				  vm_offset_t vaddr, bus_addr_t addr1,
+				  bus_addr_t addr2, bus_size_t size);
 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
 int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
@@ -530,6 +532,51 @@
 	}
 }
 
+static void
+_bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
+    int ma_offs, bus_size_t buflen, int flags)
+{
+	bus_size_t sg_len, max_sgsize;
+	int page_index;
+	vm_paddr_t paddr;
+
+	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
+		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
+		    "alignment= %d", dmat->common.lowaddr,
+		    ptoa((vm_paddr_t)Maxmem),
+		    dmat->common.boundary, dmat->common.alignment);
+		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
+		    map, &nobounce_dmamap, map->pagesneeded);
+
+		/*
+		 * Count the number of bounce pages
+		 * needed in order to complete this transfer
+		 */
+		page_index = 0;
+		while (buflen > 0) {
+			paddr = ma[page_index]->phys_addr + ma_offs;
+			sg_len = PAGE_SIZE - ma_offs;
+			max_sgsize = MIN(buflen, dmat->common.maxsegsz);
+			sg_len = MIN(sg_len, max_sgsize);
+			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
+				sg_len = roundup2(sg_len,
+				    dmat->common.alignment);
+				sg_len = MIN(sg_len, max_sgsize);
+				KASSERT((sg_len & (dmat->common.alignment - 1))
+				    == 0, ("Segment size is not aligned"));
+				map->pagesneeded++;
+			}
+			if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
+				page_index++;
+			ma_offs = (ma_offs + sg_len) & PAGE_MASK;
+			KASSERT(buflen >= sg_len,
+			    ("Segment length overruns original buffer"));
+			buflen -= sg_len;
+		}
+		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+	}
+}
+
 static int
 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
 {
@@ -635,7 +682,7 @@
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
 			sgsize = MIN(sgsize, PAGE_SIZE);
-			curaddr = add_bounce_page(dmat, map, 0, curaddr,
+			curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
 			    sgsize);
 		}
 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
@@ -701,7 +748,7 @@
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
 			sgsize = roundup2(sgsize, dmat->common.alignment);
 			sgsize = MIN(sgsize, max_sgsize);
-			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
+			curaddr = add_bounce_page(dmat, map, vaddr, curaddr, 0,
 			    sgsize);
 		} else {
 			sgsize = MIN(sgsize, max_sgsize);
@@ -720,6 +767,79 @@
 	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
 }
 
+static int
+bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
+    struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
+    bus_dma_segment_t *segs, int *segp)
+{
+	vm_paddr_t paddr, next_paddr;
+	int error, page_index;
+	struct vm_page *page;
+	bus_size_t sgsize, max_sgsize;
+
+	if (map == NULL)
+		map = &nobounce_dmamap;
+
+	if (segs == NULL)
+		segs = dmat->segments;
+
+	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
+		_bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags);
+		if (map->pagesneeded != 0) {
+			error = _bus_dmamap_reserve_pages(dmat, map, flags);
+			if (error)
+				return (error);
+		}
+	}
+
+	page_index = 0;
+	page = ma[0];
+	while (buflen > 0) {
+		/*
+		 * Compute the segment size, and adjust counts.
+		 */
+		page = ma[page_index];
+		paddr = page->phys_addr + ma_offs;
+		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
+		sgsize = PAGE_SIZE - ma_offs;
+		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+		    map->pagesneeded != 0 &&
+		    bus_dma_run_filter(&dmat->common, paddr)) {
+			sgsize = roundup2(sgsize, dmat->common.alignment);
+			sgsize = MIN(sgsize, max_sgsize);
+			KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
+			    ("Segment size is not aligned"));
+			/*
+			 * Check if two pages of the user provided buffer
+			 * are used.
+			 */
+			if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
+				next_paddr = ma[page_index + 1]->phys_addr;
+			else
+				next_paddr = 0;
+			paddr = add_bounce_page(dmat, map, 0, paddr,
+			    next_paddr, sgsize);
+		} else {
+			sgsize = MIN(sgsize, max_sgsize);
+		}
+		sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs,
+		    segp);
+		if (sgsize == 0)
+			break;
+		KASSERT(buflen >= sgsize,
+		    ("Segment length overruns original buffer"));
+		buflen -= sgsize;
+		if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
+			page_index++;
+		ma_offs = (ma_offs + sgsize) & PAGE_MASK;
+	}
+
+	/*
+	 * Did we fit?
+	 */
+	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
 static void
 bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
     struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
@@ -778,11 +898,17 @@
 				if (bpage->datavaddr != 0) {
 					bcopy((void *)bpage->datavaddr,
 					    (void *)bpage->vaddr,
-					    bpage->datacount);
+					    bpage->datacount1);
 				} else {
-					physcopyout(bpage->dataaddr,
+					physcopyout(bpage->dataaddr1,
 					    (void *)bpage->vaddr,
-					    bpage->datacount);
+					    bpage->datacount1);
+					if (bpage->dataaddr2 != 0) {
+						physcopyout(bpage->dataaddr2,
+						    (void *)(bpage->vaddr +
+						    bpage->datacount1),
+						    bpage->datacount2);
+					}
 				}
 				bpage = STAILQ_NEXT(bpage, links);
 			}
@@ -794,11 +920,18 @@
 				if (bpage->datavaddr != 0) {
 					bcopy((void *)bpage->vaddr,
 					    (void *)bpage->datavaddr,
-					    bpage->datacount);
+					    bpage->datacount1);
 				} else {
 					physcopyin((void *)bpage->vaddr,
-					    bpage->dataaddr,
-					    bpage->datacount);
+					    bpage->dataaddr1,
+					    bpage->datacount1);
+					if (bpage->dataaddr2 != 0) {
+						physcopyin(
+						    (void *)(bpage->vaddr +
+						    bpage->datacount1),
+						    bpage->dataaddr2,
+						    bpage->datacount2);
+					}
 				}
 				bpage = STAILQ_NEXT(bpage, links);
 			}
@@ -971,7 +1104,7 @@
 
 static bus_addr_t
 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
-		bus_addr_t addr, bus_size_t size)
+		bus_addr_t addr1, bus_addr_t addr2, bus_size_t size)
 {
 	struct bounce_zone *bz;
 	struct bounce_page *bpage;
@@ -1005,8 +1138,16 @@
 		bpage->busaddr |= vaddr & PAGE_MASK;
 	}
 	bpage->datavaddr = vaddr;
-	bpage->dataaddr = addr;
-	bpage->datacount = size;
+	bpage->dataaddr1 = addr1;
+	bpage->dataaddr2 = addr2;
+	if (addr2 == 0) {
+		bpage->datacount1 = size;
+	} else {
+		bpage->datacount1 = PAGE_SIZE - (addr1 & PAGE_MASK);
+		bpage->datacount2 = size - bpage->datacount1;
+		KASSERT(bpage->datacount2 <= PAGE_SIZE,
+		    ("Size is inconsistent with provided page offset"));
+	}
 	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
 	return (bpage->busaddr);
 }
@@ -1019,7 +1160,8 @@
 	bz = dmat->bounce_zone;
 
 	bpage->datavaddr = 0;
-	bpage->datacount = 0;
+	bpage->datacount1 = 0;
+	bpage->datacount2 = 0;
 	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
 		/*
 		 * Reset the bounce page to start at offset 0. Other uses
@@ -1077,7 +1219,7 @@
 	.mem_free = bounce_bus_dmamem_free,
 	.load_phys = bounce_bus_dmamap_load_phys,
 	.load_buffer = bounce_bus_dmamap_load_buffer,
-	.load_ma = bus_dmamap_load_ma_triv,
+	.load_ma = bounce_bus_dmamap_load_ma,
 	.map_waitok = bounce_bus_dmamap_waitok,
 	.map_complete = bounce_bus_dmamap_complete,
 	.map_unload = bounce_bus_dmamap_unload,
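For review context, below is a minimal standalone sketch of the two-page split that the patched add_bounce_page() performs when a bounced segment crosses a client page boundary (the addr2 != 0 case). It assumes 4 KB x86 pages; struct bp and split() are hypothetical stand-ins for struct bounce_page and add_bounce_page(), used only to show how datacount1/datacount2 are derived from addr1, addr2 and size.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	PAGE_SIZE	4096
#define	PAGE_MASK	(PAGE_SIZE - 1)

struct bp {
	uint64_t dataaddr1;	/* first client physical address */
	uint64_t dataaddr2;	/* second client physical address, 0 if unused */
	size_t	 datacount1;	/* bytes copied from/to the first page */
	size_t	 datacount2;	/* bytes copied from/to the second page */
};

/* Mirror of the datacount1/datacount2 computation in add_bounce_page(). */
static void
split(struct bp *bp, uint64_t addr1, uint64_t addr2, size_t size)
{
	bp->dataaddr1 = addr1;
	bp->dataaddr2 = addr2;
	if (addr2 == 0) {
		bp->datacount1 = size;
		bp->datacount2 = 0;
	} else {
		/* Tail of the page containing addr1 ... */
		bp->datacount1 = PAGE_SIZE - (addr1 & PAGE_MASK);
		/* ... and the remainder, starting at the page given by addr2. */
		bp->datacount2 = size - bp->datacount1;
		assert(bp->datacount2 <= PAGE_SIZE);
	}
}

int
main(void)
{
	struct bp bp;

	/* A 1024-byte segment that starts 512 bytes before a page boundary. */
	split(&bp, 0x10000e00, 0x20000000, 1024);
	printf("datacount1=%zu datacount2=%zu\n", bp.datacount1, bp.datacount2);
	/* Prints: datacount1=512 datacount2=512 */
	return (0);
}

The PREWRITE and POSTREAD sync paths in the patch then issue one physcopyout()/physcopyin() per non-zero dataaddr, which is why datacount2 must never exceed a page.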