Index: sys/kern/subr_bus_dma.c
===================================================================
--- sys/kern/subr_bus_dma.c
+++ sys/kern/subr_bus_dma.c
@@ -54,6 +54,41 @@
 #include
 
 /*
+ * Load a virtual address.
+ */
+static int
+_bus_dmamap_load_virt(bus_dma_tag_t dmat, bus_dmamap_t map,
+    void *addr, bus_size_t size, pmap_t pmap, int *nsegs,
+    int flags)
+{
+        bus_dma_segment_t seg;
+        struct memdesc mem;
+
+        seg.ds_addr = (bus_addr_t)addr;
+        seg.ds_len = size;
+        mem = memdesc_vlist(&seg, 1);
+
+        return (_bus_dmamap_load(dmat, map, &mem, pmap, flags, NULL, nsegs));
+}
+
+/*
+ * Load a physical address.
+ */
+static int
+_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+    vm_paddr_t addr, bus_size_t size, int *nsegs, int flags)
+{
+        bus_dma_segment_t seg;
+        struct memdesc mem;
+
+        seg.ds_addr = addr;
+        seg.ds_len = size;
+        mem = memdesc_plist(&seg, 1);
+
+        return (_bus_dmamap_load(dmat, map, &mem, NULL, flags, NULL, nsegs));
+}
+
+/*
  * Load a list of virtual addresses.
  */
 static int
@@ -61,17 +96,11 @@
     bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
     int flags)
 {
-        int error;
+        struct memdesc mem;
 
-        error = 0;
-        for (; sglist_cnt > 0; sglist_cnt--, list++) {
-                error = _bus_dmamap_load_buffer(dmat, map,
-                    (void *)(uintptr_t)list->ds_addr, list->ds_len, pmap,
-                    flags, NULL, nsegs);
-                if (error)
-                        break;
-        }
-        return (error);
+        mem = memdesc_vlist(list, sglist_cnt);
+
+        return (_bus_dmamap_load(dmat, map, &mem, pmap, flags, NULL, nsegs));
 }
 
 /*
@@ -81,17 +110,11 @@
 _bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
     bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
 {
-        int error;
+        struct memdesc mem;
 
-        error = 0;
-        for (; sglist_cnt > 0; sglist_cnt--, list++) {
-                error = _bus_dmamap_load_phys(dmat, map,
-                    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
-                    nsegs);
-                if (error)
-                        break;
-        }
-        return (error);
+        mem = memdesc_plist(list, sglist_cnt);
+
+        return (_bus_dmamap_load(dmat, map, &mem, NULL, flags, NULL, nsegs));
 }
 
 /*
@@ -101,15 +124,19 @@
 _bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
     bus_dma_segment_t *segs, int *nsegs, int flags)
 {
+        bus_dma_segment_t seg;
+        struct memdesc mem;
         struct mbuf *m;
         int error;
 
         error = 0;
         for (m = m0; m != NULL && error == 0; m = m->m_next) {
                 if (m->m_len > 0) {
-                        error = _bus_dmamap_load_buffer(dmat, map, m->m_data,
-                            m->m_len, kernel_pmap, flags | BUS_DMA_LOAD_MBUF,
-                            segs, nsegs);
+                        seg.ds_addr = (bus_addr_t)m->m_data;
+                        seg.ds_len = m->m_len;
+                        mem = memdesc_vlist(&seg, 1);
+                        error = _bus_dmamap_load(dmat, map, &mem, kernel_pmap,
+                            flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
                 }
         }
         CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
@@ -124,37 +151,53 @@
 _bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
     int *nsegs, int flags)
 {
-        int error;
+        struct memdesc mem;
+        bus_dma_segment_t *seg;
+        bus_size_t tlen, len;
+        int error, ma_offs, i;
+        size_t array_size;
+        pmap_t pmap;
+
+        error = 0;
         if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
-                error = _bus_dmamap_load_buffer(dmat, map, bio->bio_data,
-                    bio->bio_bcount, kernel_pmap, flags, NULL, nsegs);
+                array_size = 1;
         } else {
-                error = _bus_dmamap_load_ma(dmat, map, bio->bio_ma,
-                    bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs);
+                array_size = bio->bio_bcount / PAGE_SIZE + 2;
         }
-        return (error);
-}
 
-int
-bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
-    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
-    bus_dma_segment_t *segs, int *segp)
-{
-        vm_paddr_t paddr;
-        bus_size_t len;
-        int error, i;
+        seg = malloc(sizeof(bus_dma_segment_t) * array_size, M_DEVBUF,
+            (flags & BUS_DMA_NOWAIT) == 0 ? M_WAITOK : M_NOWAIT);
+        if (__predict_false(seg == NULL)) {
+                error = ENOMEM;
+                goto out;
+        }
 
-        error = 0;
-        for (i = 0; tlen > 0; i++, tlen -= len) {
-                len = min(PAGE_SIZE - ma_offs, tlen);
-                paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
-                error = _bus_dmamap_load_phys(dmat, map, paddr, len,
-                    flags, segs, segp);
-                if (error != 0)
-                        break;
-                ma_offs = 0;
+        pmap = NULL;
+        if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
+                seg[0].ds_addr = (bus_addr_t)bio->bio_data;
+                seg[0].ds_len = bio->bio_bcount;
+                mem = memdesc_vlist(seg, 1);
+                pmap = kernel_pmap;
+        } else {
+                tlen = bio->bio_bcount;
+                ma_offs = bio->bio_ma_offset;
+                for (i = 0; tlen > 0; i++, tlen -= len) {
+                        KASSERT(i < array_size,
+                            ("Calculated array size doesn't match"));
+                        len = min(PAGE_SIZE - ma_offs, tlen);
+                        seg[i].ds_addr =
+                            VM_PAGE_TO_PHYS(bio->bio_ma[i]) + ma_offs;
+                        seg[i].ds_len = len;
+                        ma_offs = 0;
+                }
+                mem = memdesc_plist(seg, i);
         }
+
+        error = _bus_dmamap_load(dmat, map, &mem, pmap, flags, NULL, nsegs);
+        free(seg, M_DEVBUF);
+
+out:
         return (error);
 }
@@ -208,13 +251,12 @@
         switch ((ccb_h->flags & CAM_DATA_MASK)) {
         case CAM_DATA_VADDR:
-                error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len,
-                    kernel_pmap, flags, NULL, nsegs);
+                error = _bus_dmamap_load_virt(dmat, map, data_ptr, dxfer_len,
+                    kernel_pmap, nsegs, flags);
                 break;
         case CAM_DATA_PADDR:
-                error = _bus_dmamap_load_phys(dmat, map,
-                    (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL,
-                    nsegs);
+                error = _bus_dmamap_load_phys(dmat, map,
+                    (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, nsegs, flags);
                 break;
         case CAM_DATA_SG:
                 error = _bus_dmamap_load_vlist(dmat, map,
@@ -243,12 +285,13 @@
 _bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
     int *nsegs, int flags)
 {
+        struct memdesc mem;
+        bus_dma_segment_t *seg;
         bus_size_t resid;
-        bus_size_t minlen;
+        bus_size_t len;
         struct iovec *iov;
         pmap_t pmap;
-        caddr_t addr;
-        int error, i;
+        int error, i, j;
 
         if (uio->uio_segflg == UIO_USERSPACE) {
                 KASSERT(uio->uio_td != NULL,
@@ -256,25 +299,35 @@
                 pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
         } else
                 pmap = kernel_pmap;
+
+        seg = malloc(uio->uio_iovcnt * sizeof(bus_dma_segment_t), M_DEVBUF,
+            (flags & BUS_DMA_NOWAIT) == 0 ? M_WAITOK : M_NOWAIT);
+        if (__predict_false(seg == NULL)) {
+                error = ENOMEM;
+                goto out;
+        }
+
         resid = uio->uio_resid;
         iov = uio->uio_iov;
-        error = 0;
-
-        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
+        for (i = 0, j = 0; i < uio->uio_iovcnt && resid != 0; i++) {
                 /*
                  * Now at the first iovec to load.  Load each iovec
                  * until we have exhausted the residual count.
                  */
-
-                addr = (caddr_t) iov[i].iov_base;
-                minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
-                if (minlen > 0) {
-                        error = _bus_dmamap_load_buffer(dmat, map, addr,
-                            minlen, pmap, flags, NULL, nsegs);
-                        resid -= minlen;
-                }
+                len = min(resid, iov[i].iov_len);
+                if (len == 0)
+                        continue;
+                seg[j].ds_addr = (bus_addr_t)iov[i].iov_base;
+                seg[j].ds_len = len;
+                resid -= len;
+                j++;
         }
+        mem = memdesc_vlist(seg, j);
+
+        error = _bus_dmamap_load(dmat, map, &mem, pmap, flags, NULL, nsegs);
+        free(seg, M_DEVBUF);
+out:
         return (error);
 }
@@ -297,8 +350,8 @@
         }
 
         nsegs = -1;
-        error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
-            flags, NULL, &nsegs);
+        error = _bus_dmamap_load_virt(dmat, map, buf, buflen, kernel_pmap,
+            &nsegs, flags);
         nsegs++;
 
         CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
@@ -475,8 +528,7 @@
     void *callback_arg, int flags)
 {
         bus_dma_segment_t *segs;
-        int error;
-        int nsegs;
+        int error, nsegs;
 
         if ((flags & BUS_DMA_NOWAIT) == 0)
                 _bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);
@@ -485,12 +537,12 @@
         error = 0;
         switch (mem->md_type) {
         case MEMDESC_VADDR:
-                error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
-                    mem->md_opaque, kernel_pmap, flags, NULL, &nsegs);
+                error = _bus_dmamap_load_virt(dmat, map, mem->u.md_vaddr,
+                    mem->md_opaque, kernel_pmap, &nsegs, flags);
                 break;
         case MEMDESC_PADDR:
                 error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
-                    mem->md_opaque, flags, NULL, &nsegs);
+                    mem->md_opaque, &nsegs, flags);
                 break;
         case MEMDESC_VLIST:
                 error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
Index: sys/sys/bus_dma.h
===================================================================
--- sys/sys/bus_dma.h
+++ sys/sys/bus_dma.h
@@ -240,15 +240,6 @@
             void *callback_arg, int flags);
 
 /*
- * Placeholder for use by busdma implementations which do not benefit
- * from optimized procedure to load an array of vm_page_t.  Falls back
- * to do _bus_dmamap_load_phys() in loop.
- */
-int bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
-    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
-    bus_dma_segment_t *segs, int *segp);
-
-/*
  * XXX sparc64 uses the same interface, but a much different implementation.
  * <machine/bus_dma.h> for the sparc64 arch contains the equivalent
  * declarations.
@@ -318,17 +309,9 @@
                            callback_arg);                      \
 } while (0);
 
-int _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
-                            void *buf, bus_size_t buflen, struct pmap *pmap,
-                            int flags, bus_dma_segment_t *segs, int *segp);
-
-int _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
-                          vm_paddr_t paddr, bus_size_t buflen,
-                          int flags, bus_dma_segment_t *segs, int *segp);
-
-int _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
-                        struct vm_page **ma, bus_size_t tlen, int ma_offs,
-                        int flags, bus_dma_segment_t *segs, int *segp);
+int _bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem,
+                     struct pmap *pmap, int flags, bus_dma_segment_t *segs,
+                     int *segp);
 
 bus_dma_segment_t *_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
Index: sys/x86/include/busdma_impl.h
===================================================================
--- sys/x86/include/busdma_impl.h
+++ sys/x86/include/busdma_impl.h
@@ -63,14 +63,8 @@
         int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags,
             bus_dmamap_t *mapp);
         void (*mem_free)(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map);
-        int (*load_ma)(bus_dma_tag_t dmat, bus_dmamap_t map,
-            struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
-            bus_dma_segment_t *segs, int *segp);
-        int (*load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map,
-            vm_paddr_t buf, bus_size_t buflen, int flags,
-            bus_dma_segment_t *segs, int *segp);
-        int (*load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map,
-            void *buf, bus_size_t buflen, pmap_t pmap, int flags,
+        int (*load)(bus_dma_tag_t dmat, bus_dmamap_t map,
+            struct memdesc *mem, pmap_t pmap, int flags,
             bus_dma_segment_t *segs, int *segp);
         void (*map_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map,
             struct memdesc *mem, bus_dmamap_callback_t *callback,
Index: sys/x86/iommu/busdma_dmar.c
===================================================================
--- sys/x86/iommu/busdma_dmar.c
+++ sys/x86/iommu/busdma_dmar.c
@@ -571,6 +571,7 @@
         return (error);
 }
 
+#if 0
 static int
 dmar_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1,
     struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
@@ -674,6 +675,17 @@
         free(fma, M_DEVBUF);
         return (error);
 }
+#endif
+
+static int
+dmar_bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map,
+    struct memdesc *mem, pmap_t pmap, int flags, bus_dma_segment_t *segs,
+    int *segp)
+{
+
+        panic("Not implemented");
+        return (0);
+}
 
 static void
 dmar_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1,
@@ -772,9 +784,12 @@
         .map_destroy = dmar_bus_dmamap_destroy,
         .mem_alloc = dmar_bus_dmamem_alloc,
         .mem_free = dmar_bus_dmamem_free,
+        .load = dmar_bus_dmamap_load,
+#if 0
         .load_phys = dmar_bus_dmamap_load_phys,
         .load_buffer = dmar_bus_dmamap_load_buffer,
         .load_ma = dmar_bus_dmamap_load_ma,
+#endif
         .map_waitok = dmar_bus_dmamap_waitok,
         .map_complete = dmar_bus_dmamap_complete,
         .map_unload = dmar_bus_dmamap_unload,
Index: sys/x86/x86/busdma_bounce.c
===================================================================
--- sys/x86/x86/busdma_bounce.c
+++ sys/x86/x86/busdma_bounce.c
@@ -78,9 +78,8 @@
 struct bounce_page {
         vm_offset_t     vaddr;          /* kva of bounce buffer */
         bus_addr_t      busaddr;        /* Physical address */
-        vm_offset_t     datavaddr;      /* kva of client data */
-        bus_addr_t      dataaddr;       /* client physical address */
-        bus_size_t      datacount;      /* client data count */
+        bus_dma_segment_t data[2];      /* Store client data */
+        boolean_t       physical;      /* Is client data a physical address? */
         STAILQ_ENTRY(bounce_page) links;
 };
@@ -134,16 +133,12 @@
 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
                                 int commit);
 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
-                                  vm_offset_t vaddr, bus_addr_t addr,
-                                  bus_size_t size);
+                                  bus_dma_segment_t *segs, boolean_t physical);
 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
 int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
-                                    pmap_t pmap, void *buf, bus_size_t buflen,
-                                    int flags);
-static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
-                                   vm_paddr_t buf, bus_size_t buflen,
-                                   int flags);
+                                    pmap_t pmap, struct memdesc *mem,
+                                    bus_size_t buflen, int flags);
 static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
                                      int flags);
@@ -464,40 +459,51 @@
             dmat->bounce_flags);
 }
 
-static void
-_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
-    bus_size_t buflen, int flags)
+static inline vm_paddr_t
+_bus_dmamap_seg_paddr(struct memdesc *mem, int index, pmap_t pmap,
+    bus_size_t offset)
 {
-        bus_addr_t curaddr;
-        bus_size_t sgsize;
+        vm_paddr_t paddr;
+        vm_offset_t vaddr;
 
-        if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
-                /*
-                 * Count the number of bounce pages
-                 * needed in order to complete this transfer
-                 */
-                curaddr = buf;
-                while (buflen != 0) {
-                        sgsize = MIN(buflen, dmat->common.maxsegsz);
-                        if (bus_dma_run_filter(&dmat->common, curaddr)) {
-                                sgsize = MIN(sgsize, PAGE_SIZE);
-                                map->pagesneeded++;
-                        }
-                        curaddr += sgsize;
-                        buflen -= sgsize;
-                }
-                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+        KASSERT(mem->md_type == MEMDESC_PLIST || mem->md_type == MEMDESC_VLIST,
+            ("Provided memdesc is not compatible"));
+        KASSERT(index < mem->md_opaque,
+            ("Index overruns bus_dma_segment array"));
+        if (mem->md_type == MEMDESC_PLIST) {
+                paddr = mem->u.md_list[index].ds_addr + offset;
+        } else {
+                KASSERT(pmap != NULL, ("No valid pmap provided"));
+                vaddr = mem->u.md_list[index].ds_addr + offset;
+                if (pmap == kernel_pmap)
+                        paddr = pmap_kextract(vaddr);
+                else
+                        paddr = pmap_extract(pmap, vaddr);
         }
+
+        return (paddr);
+}
+
+static inline vm_offset_t
+_bus_dmamap_seg_vaddr(struct memdesc *mem, int index, bus_size_t offset)
+{
+
+        KASSERT(mem->md_type == MEMDESC_VLIST,
+            ("Provided memdesc is not compatible"));
+        KASSERT(index < mem->md_opaque,
+            ("Index overruns bus_dma_segment array"));
+
+        return (mem->u.md_list[index].ds_addr + offset);
 }
 
 static void
 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
-    void *buf, bus_size_t buflen, int flags)
+    struct memdesc *mem, bus_size_t buflen, int flags)
 {
-        vm_offset_t vaddr;
-        vm_offset_t vendaddr;
-        bus_addr_t paddr;
-        bus_size_t sg_len;
+        bus_size_t sg_len, max_sgsize, offset;
+        vm_paddr_t paddr;
+        bus_dma_segment_t *seg;
+        int i;
 
         if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
                 CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
@@ -506,25 +512,39 @@
                     dmat->common.boundary, dmat->common.alignment);
                 CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
                     map, &nobounce_dmamap, map->pagesneeded);
+
                 /*
                  * Count the number of bounce pages
                  * needed in order to complete this transfer
                  */
-                vaddr = (vm_offset_t)buf;
-                vendaddr = (vm_offset_t)buf + buflen;
-
-                while (vaddr < vendaddr) {
-                        sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
-                        if (pmap == kernel_pmap)
-                                paddr = pmap_kextract(vaddr);
-                        else
-                                paddr = pmap_extract(pmap, vaddr);
+                i = 0;
+                offset = 0;
+                seg = mem->u.md_list;
+                while (buflen > 0) {
+                        paddr = _bus_dmamap_seg_paddr(mem, i, pmap, offset);
+                        sg_len = MIN(PAGE_SIZE - (paddr & PAGE_MASK),
+                            seg[i].ds_len - offset);
+                        max_sgsize = MIN(buflen, dmat->common.maxsegsz);
+                        sg_len = MIN(sg_len, max_sgsize);
                         if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
                                 sg_len = roundup2(sg_len,
                                     dmat->common.alignment);
+                                sg_len = MIN(sg_len, max_sgsize);
+                                KASSERT((sg_len & (dmat->common.alignment - 1))
+                                    == 0, ("Segment size is not aligned"));
                                 map->pagesneeded++;
                         }
-                        vaddr += sg_len;
+                        KASSERT(buflen >= sg_len,
+                            ("Segment length overruns original buffer"));
+                        buflen -= sg_len;
+                        if (offset + sg_len >= seg[i].ds_len) {
+                                offset = (offset + sg_len) % seg[i].ds_len;
+                                i++;
+                                KASSERT(buflen == 0 || offset < seg[i].ds_len,
+                                    ("Offset overruns next segment"));
+                        } else {
+                                offset += sg_len;
+                        }
                 }
                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
         }
@@ -600,71 +620,18 @@
         return (sgsize);
 }
 
-/*
- * Utility function to load a physical buffer.  segp contains
- * the starting segment on entrace, and the ending segment on exit.
- */
 static int
-bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
-    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
+bounce_bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map,
+    struct memdesc *mem, pmap_t pmap, int flags, bus_dma_segment_t *segs,
     int *segp)
 {
-        bus_size_t sgsize;
-        bus_addr_t curaddr;
-        int error;
-
-        if (map == NULL)
-                map = &nobounce_dmamap;
+        bus_size_t buflen, offset, max_sgsize, sgsize;
+        bus_dma_segment_t *seg, bounce[2];
+        vm_paddr_t paddr;
+        int i, error;
+        boolean_t phys_addr;
 
-        if (segs == NULL)
-                segs = dmat->segments;
-
-        if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
-                _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
-                if (map->pagesneeded != 0) {
-                        error = _bus_dmamap_reserve_pages(dmat, map, flags);
-                        if (error)
-                                return (error);
-                }
-        }
-
-        while (buflen > 0) {
-                curaddr = buf;
-                sgsize = MIN(buflen, dmat->common.maxsegsz);
-                if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
-                    map->pagesneeded != 0 &&
-                    bus_dma_run_filter(&dmat->common, curaddr)) {
-                        sgsize = MIN(sgsize, PAGE_SIZE);
-                        curaddr = add_bounce_page(dmat, map, 0, curaddr,
-                            sgsize);
-                }
-                sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
-                    segp);
-                if (sgsize == 0)
-                        break;
-                buf += sgsize;
-                buflen -= sgsize;
-        }
-
-        /*
-         * Did we fit?
-         */
-        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
-}
-
-/*
- * Utility function to load a linear buffer.  segp contains
- * the starting segment on entrace, and the ending segment on exit.
- */
-static int
-bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
-    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
-    int *segp)
-{
-        bus_size_t sgsize, max_sgsize;
-        bus_addr_t curaddr;
-        vm_offset_t vaddr;
-        int error;
+        phys_addr = mem->md_type == MEMDESC_PLIST ? TRUE : FALSE;
 
         if (map == NULL)
                 map = &nobounce_dmamap;
@@ -672,8 +639,13 @@
         if (segs == NULL)
                 segs = dmat->segments;
 
+        /* Calculate total buffer length. */
+        seg = mem->u.md_list;
+        for (i = 0, buflen = 0; i < mem->md_opaque; i++)
+                buflen += seg[i].ds_len;
+
         if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
-                _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
+                _bus_dmamap_count_pages(dmat, map, pmap, mem, buflen, flags);
                 if (map->pagesneeded != 0) {
                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
                         if (error)
@@ -681,43 +653,72 @@
                 }
         }
 
-        vaddr = (vm_offset_t)buf;
+        i = 0;
+        offset = 0;
         while (buflen > 0) {
                 /*
-                 * Get the physical address for this segment.
-                 */
-                if (pmap == kernel_pmap)
-                        curaddr = pmap_kextract(vaddr);
-                else
-                        curaddr = pmap_extract(pmap, vaddr);
-
-                /*
                  * Compute the segment size, and adjust counts.
                  */
+                paddr = _bus_dmamap_seg_paddr(mem, i, pmap, offset);
                 max_sgsize = MIN(buflen, dmat->common.maxsegsz);
-                sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
+                sgsize = MIN(PAGE_SIZE - (paddr & PAGE_MASK),
+                    seg[i].ds_len - offset);
                 if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
                     map->pagesneeded != 0 &&
-                    bus_dma_run_filter(&dmat->common, curaddr)) {
+                    bus_dma_run_filter(&dmat->common, paddr)) {
                         sgsize = roundup2(sgsize, dmat->common.alignment);
                         sgsize = MIN(sgsize, max_sgsize);
-                        curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
-                            sgsize);
+                        KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
+                            ("Segment size is not aligned"));
+
+                        bzero(bounce, sizeof(bounce));
+                        bounce[0].ds_addr = phys_addr ? paddr :
+                            _bus_dmamap_seg_vaddr(mem, i, offset);
+                        bounce[0].ds_len = sgsize;
+
+                        /*
+                         * Check whether the bounced region spans two
+                         * segments of the client buffer.
+                         */
+                        if ((offset + sgsize) > seg[i].ds_len) {
+                                KASSERT(offset + sgsize <
+                                    seg[i].ds_len + seg[i + 1].ds_len,
+                                    ("Cannot bounce more than two segments"));
+                                bounce[1].ds_addr = phys_addr ?
+                                    _bus_dmamap_seg_paddr(mem, i + 1, pmap, 0) :
+                                    _bus_dmamap_seg_vaddr(mem, i + 1, 0);
+                                bounce[1].ds_len = (offset + sgsize) %
+                                    seg[i].ds_len;
+                                bounce[0].ds_len -= bounce[1].ds_len;
+                        }
+                        paddr = add_bounce_page(dmat, map, bounce, phys_addr);
                 } else {
                         sgsize = MIN(sgsize, max_sgsize);
                 }
-                sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+
+                sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs,
                     segp);
                 if (sgsize == 0)
                         break;
-                vaddr += sgsize;
+
+                KASSERT(buflen >= sgsize,
+                    ("Segment length overruns original buffer"));
                 buflen -= sgsize;
+
+                if (offset + sgsize >= seg[i].ds_len) {
+                        offset = (offset + sgsize) % seg[i].ds_len;
+                        i++;
+                        KASSERT(buflen == 0 || offset < seg[i].ds_len,
+                            ("Offset overruns next segment"));
+                } else {
+                        offset += sgsize;
+                }
         }
 
         /*
         * Did we fit?
         */
-        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+        return (buflen != 0 ? EFBIG : 0);
 }
 
 static void
@@ -757,6 +758,43 @@
         }
 }
 
+/*
+ * Copy the client data into the bounce page (PREWRITE).
+ */
+static inline void
+_bus_dmamap_copyout(struct bounce_page *bpage)
+{
+        bus_dma_segment_t *seg;
+
+        seg = bpage->data;
+        if (bpage->physical) {
+                physcopyout(seg[0].ds_addr, (void *)bpage->vaddr,
+                    seg[0].ds_len);
+                physcopyout(seg[1].ds_addr,
+                    (void *)(bpage->vaddr + seg[0].ds_len), seg[1].ds_len);
+        } else {
+                bcopy((void *)seg[0].ds_addr, (void *)bpage->vaddr,
+                    seg[0].ds_len);
+                bcopy((void *)seg[1].ds_addr,
+                    (void *)(bpage->vaddr + seg[0].ds_len), seg[1].ds_len);
+        }
+}
+
+/*
+ * Copy the bounce page back to the client data (POSTREAD).
+ */
+static inline void
+_bus_dmamap_copyin(struct bounce_page *bpage)
+{
+        bus_dma_segment_t *seg;
+
+        seg = bpage->data;
+        if (bpage->physical) {
+                physcopyin((void *)bpage->vaddr, seg[0].ds_addr,
+                    seg[0].ds_len);
+                physcopyin((void *)(bpage->vaddr + seg[0].ds_len),
+                    seg[1].ds_addr, seg[1].ds_len);
+        } else {
+                bcopy((void *)bpage->vaddr, (void *)seg[0].ds_addr,
+                    seg[0].ds_len);
+                bcopy((void *)(bpage->vaddr + seg[0].ds_len),
+                    (void *)seg[1].ds_addr, seg[1].ds_len);
+        }
+}
+
 static void
 bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
     bus_dmasync_op_t op)
@@ -775,15 +813,7 @@
 
         if ((op & BUS_DMASYNC_PREWRITE) != 0) {
                 while (bpage != NULL) {
-                        if (bpage->datavaddr != 0) {
-                                bcopy((void *)bpage->datavaddr,
-                                    (void *)bpage->vaddr,
-                                    bpage->datacount);
-                        } else {
-                                physcopyout(bpage->dataaddr,
-                                    (void *)bpage->vaddr,
-                                    bpage->datacount);
-                        }
+                        _bus_dmamap_copyout(bpage);
                         bpage = STAILQ_NEXT(bpage, links);
                 }
                 dmat->bounce_zone->total_bounced++;
@@ -791,15 +821,7 @@
 
         if ((op & BUS_DMASYNC_POSTREAD) != 0) {
                 while (bpage != NULL) {
-                        if (bpage->datavaddr != 0) {
-                                bcopy((void *)bpage->vaddr,
-                                    (void *)bpage->datavaddr,
-                                    bpage->datacount);
-                        } else {
-                                physcopyin((void *)bpage->vaddr,
-                                    bpage->dataaddr,
-                                    bpage->datacount);
-                        }
+                        _bus_dmamap_copyin(bpage);
                         bpage = STAILQ_NEXT(bpage, links);
                 }
                 dmat->bounce_zone->total_bounced++;
@@ -963,8 +985,8 @@
 }
 
 static bus_addr_t
-add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
-    bus_addr_t addr, bus_size_t size)
+add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *segs,
+    boolean_t physical)
 {
         struct bounce_zone *bz;
         struct bounce_page *bpage;
@@ -994,12 +1016,12 @@
 
         if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
                 /* Page offset needs to be preserved. */
-                bpage->vaddr |= vaddr & PAGE_MASK;
-                bpage->busaddr |= vaddr & PAGE_MASK;
+                bpage->vaddr |= segs[0].ds_addr & PAGE_MASK;
+                bpage->busaddr |= segs[0].ds_addr & PAGE_MASK;
         }
-        bpage->datavaddr = vaddr;
-        bpage->dataaddr = addr;
-        bpage->datacount = size;
+        bpage->physical = physical;
+        bcopy(segs, bpage->data, sizeof(bpage->data));
+
         STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
         return (bpage->busaddr);
 }
@@ -1011,8 +1033,8 @@
         struct bounce_zone *bz;
 
         bz = dmat->bounce_zone;
-        bpage->datavaddr = 0;
-        bpage->datacount = 0;
+        bzero(bpage->data, sizeof(bpage->data));
+
         if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
                 /*
                  * Reset the bounce page to start at offset 0.  Other uses
@@ -1068,9 +1090,7 @@
         .map_destroy = bounce_bus_dmamap_destroy,
         .mem_alloc = bounce_bus_dmamem_alloc,
         .mem_free = bounce_bus_dmamem_free,
-        .load_phys = bounce_bus_dmamap_load_phys,
-        .load_buffer = bounce_bus_dmamap_load_buffer,
-        .load_ma = bus_dmamap_load_ma_triv,
+        .load = bounce_bus_dmamap_load,
         .map_waitok = bounce_bus_dmamap_waitok,
         .map_complete = bounce_bus_dmamap_complete,
         .map_unload = bounce_bus_dmamap_unload,
Index: sys/x86/x86/busdma_machdep.c
===================================================================
--- sys/x86/x86/busdma_machdep.c
+++ sys/x86/x86/busdma_machdep.c
@@ -275,47 +275,18 @@
 }
 
 /*
- * Utility function to load a physical buffer.  segp contains
- * the starting segment on entrace, and the ending segment on exit.
+ * Utility function to load a memdesc.
  */
 int
-_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
-    bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
+_bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem,
+    pmap_t pmap, int flags, bus_dma_segment_t *segs, int *segp)
 {
         struct bus_dma_tag_common *tc;
 
         tc = (struct bus_dma_tag_common *)dmat;
-        return (tc->impl->load_phys(dmat, map, buf, buflen, flags, segs,
-            segp));
+        return (tc->impl->load(dmat, map, mem, pmap, flags, segs, segp));
 }
 
-int
-_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
-    bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs,
-    int *segp)
-{
-        struct bus_dma_tag_common *tc;
-
-        tc = (struct bus_dma_tag_common *)dmat;
-        return (tc->impl->load_ma(dmat, map, ma, tlen, ma_offs, flags,
-            segs, segp));
-}
-
-/*
- * Utility function to load a linear buffer.  segp contains
- * the starting segment on entrace, and the ending segment on exit.
- */
-int
-_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
-    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
-    int *segp)
-{
-        struct bus_dma_tag_common *tc;
-
-        tc = (struct bus_dma_tag_common *)dmat;
-        return (tc->impl->load_buffer(dmat, map, buf, buflen, pmap, flags, segs,
-            segp));
-}
-
 void
 __bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
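Reviewer note: the (i, offset) segment walk shared by _bus_dmamap_count_pages()
and bounce_bus_dmamap_load() is the subtle part of this change. It advances
through the bus_dma_segment_t list in at-most-page-sized steps, and the modulo
carries an alignment overshoot into the following segment, which is why a
bounce page may describe up to two client segments (the bounce[2] array and
the bounce_page data[2] field). The program below is a minimal userspace model
of that walk, for review only and not part of the patch: the struct seg type,
the addresses, and the lengths are hypothetical stand-ins for the kernel
types, and the roundup2() alignment step is omitted, so every carry here lands
exactly on a segment boundary.

/*
 * Standalone model of the (i, offset) segment walk used above.
 * Illustrative only: plain integers stand in for bus_dma types.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define MIN(a, b) (((a) < (b)) ? (a) : (b))

struct seg {
        unsigned long ds_addr;
        unsigned long ds_len;
};

int
main(void)
{
        struct seg seg[3] = {
                { 0x10f80UL, 0x100UL },         /* crosses one page boundary */
                { 0x22f00UL, 0x2000UL },        /* crosses several pages */
                { 0x30000UL, 0x80UL },          /* page aligned, short */
        };
        unsigned long buflen, offset, paddr, sg_len;
        int i;

        /* Total length, as computed in bounce_bus_dmamap_load(). */
        buflen = 0;
        for (i = 0; i < 3; i++)
                buflen += seg[i].ds_len;

        i = 0;
        offset = 0;
        while (buflen > 0) {
                paddr = seg[i].ds_addr + offset;
                /* Clamp each step to the page and the segment remainder. */
                sg_len = MIN(PAGE_SIZE - (paddr & (PAGE_SIZE - 1)),
                    seg[i].ds_len - offset);
                printf("seg %d offset %#lx -> paddr %#lx len %#lx\n",
                    i, offset, paddr, sg_len);
                buflen -= sg_len;
                if (offset + sg_len >= seg[i].ds_len) {
                        /* Same carry as the patch: wrap into the next seg. */
                        offset = (offset + sg_len) % seg[i].ds_len;
                        i++;
                } else {
                        offset += sg_len;
                }
        }
        return (0);
}

Without alignment round-up, sg_len never exceeds the segment remainder and the
carry always computes to zero. In the kernel code, the roundup2() in the
filter branch can push sgsize past the end of seg[i]; that is exactly the case
the two-entry bounce[] array and the (offset + sgsize) % seg[i].ds_len
arithmetic are there to handle.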