Index: sys/compat/ndis/subr_ndis.c
===================================================================
--- sys/compat/ndis/subr_ndis.c
+++ sys/compat/ndis/subr_ndis.c
@@ -186,7 +186,6 @@
 static ndis_status NdisMAllocateMapRegisters(ndis_handle,
 	uint32_t, uint8_t, uint32_t, uint32_t);
 static void NdisMFreeMapRegisters(ndis_handle);
-static void ndis_mapshared_cb(void *, bus_dma_segment_t *, int, int);
 static void NdisMAllocateSharedMemory(ndis_handle, uint32_t,
 	uint8_t, void **, ndis_physaddr *);
 static void ndis_asyncmem_complete(device_object *, void *);
@@ -1387,23 +1386,6 @@
 	bus_dma_tag_destroy(sc->ndis_mtag);
 }
 
-static void
-ndis_mapshared_cb(arg, segs, nseg, error)
-	void			*arg;
-	bus_dma_segment_t	*segs;
-	int			nseg;
-	int			error;
-{
-	ndis_physaddr		*p;
-
-	if (error || nseg > 1)
-		return;
-
-	p = arg;
-
-	p->np_quad = segs[0].ds_addr;
-}
-
 /*
  * This maps to bus_dmamem_alloc().
  */
@@ -1415,6 +1397,7 @@
 	ndis_miniport_block	*block;
 	struct ndis_softc	*sc;
 	struct ndis_shmem	*sh;
+	struct bus_dmamem_args	args;
 	int			error;
 
 	if (adapter == NULL)
@@ -1443,34 +1426,19 @@
 	 * than 1GB of physical memory.
 	 */
 
-	error = bus_dma_tag_create(sc->ndis_parent_tag, 64,
-	    0, NDIS_BUS_SPACE_SHARED_MAXADDR, BUS_SPACE_MAXADDR, NULL,
-	    NULL, len, 1, len, BUS_DMA_ALLOCNOW, NULL, NULL,
-	    &sh->ndis_stag);
+	bus_dma_mem_args_init(&args);
+	args.dma_alignment = 64;
+	args.dma_lowaddr = NDIS_BUS_SPACE_SHARED_MAXADDR;
+	error = bus_dma_mem_alloc(sc->ndis_parent_tag, len, BUS_DMA_NOWAIT |
+	    BUS_DMA_ZERO, &args, &sh->ndis_mem);
 
 	if (error) {
 		free(sh, M_DEVBUF);
 		return;
 	}
 
-	error = bus_dmamem_alloc(sh->ndis_stag, vaddr,
-	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sh->ndis_smap);
-
-	if (error) {
-		bus_dma_tag_destroy(sh->ndis_stag);
-		free(sh, M_DEVBUF);
-		return;
-	}
-
-	error = bus_dmamap_load(sh->ndis_stag, sh->ndis_smap, *vaddr,
-	    len, ndis_mapshared_cb, (void *)paddr, BUS_DMA_NOWAIT);
-
-	if (error) {
-		bus_dmamem_free(sh->ndis_stag, *vaddr, sh->ndis_smap);
-		bus_dma_tag_destroy(sh->ndis_stag);
-		free(sh, M_DEVBUF);
-		return;
-	}
+	*vaddr = sh->ndis_mem.dma_vaddr;
+	paddr->np_quad = sh->ndis_mem.dma_baddr;
 
 	/*
 	 * Save the physical address along with the source address.
@@ -1482,8 +1450,6 @@
 	 */
 
 	NDIS_LOCK(sc);
-	sh->ndis_paddr.np_quad = paddr->np_quad;
-	sh->ndis_saddr = *vaddr;
 	InsertHeadList((&sc->ndis_shlist), (&sh->ndis_list));
 	NDIS_UNLOCK(sc);
 }
@@ -1581,13 +1547,13 @@
 	l = sc->ndis_shlist.nle_flink;
 	while (l != &sc->ndis_shlist) {
 		sh = CONTAINING_RECORD(l, struct ndis_shmem, ndis_list);
-		if (sh->ndis_saddr == vaddr)
+		if (sh->ndis_mem.dma_vaddr == vaddr)
 			break;
 		/*
 		 * Check the physaddr too, just in case the driver lied
 		 * about the virtual address.
 		 */
-		if (sh->ndis_paddr.np_quad == paddr.np_quad)
+		if (sh->ndis_mem.dma_baddr == paddr.np_quad)
 			break;
 		l = l->nle_flink;
 	}
@@ -1604,9 +1570,7 @@
 
 	NDIS_UNLOCK(sc);
 
-	bus_dmamap_unload(sh->ndis_stag, sh->ndis_smap);
-	bus_dmamem_free(sh->ndis_stag, sh->ndis_saddr, sh->ndis_smap);
-	bus_dma_tag_destroy(sh->ndis_stag);
+	bus_dma_mem_free(&sh->ndis_mem);
 	free(sh, M_DEVBUF);
 }
 
Index: sys/dev/if_ndis/if_ndisvar.h
===================================================================
--- sys/dev/if_ndis/if_ndisvar.h
+++ sys/dev/if_ndis/if_ndisvar.h
@@ -66,10 +66,7 @@
 
 struct ndis_shmem {
 	list_entry		ndis_list;
-	bus_dma_tag_t		ndis_stag;
-	bus_dmamap_t		ndis_smap;
-	void			*ndis_saddr;
-	ndis_physaddr		ndis_paddr;
+	struct bus_dmamem	ndis_mem;
 };
 
 struct ndis_cfglist {
Index: sys/dev/xl/if_xl.c
===================================================================
--- sys/dev/xl/if_xl.c
+++ sys/dev/xl/if_xl.c
@@ -275,7 +275,6 @@
 static void xl_mediacheck(struct xl_softc *);
 static void xl_choose_media(struct xl_softc *sc, int *media);
 static void xl_choose_xcvr(struct xl_softc *, int);
-static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int);
 #ifdef notdef
 static void xl_testpacket(struct xl_softc *);
 #endif
@@ -333,15 +332,6 @@
     SI_ORDER_ANY);
 DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, NULL, NULL);
 
-static void
-xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
-{
-	u_int32_t *paddr;
-
-	paddr = arg;
-	*paddr = segs->ds_addr;
-}
-
 /*
  * Murphy's law says that it's possible the chip can wedge and
  * the 'command in progress' bit may never clear. Hence, we wait
@@ -1065,6 +1055,7 @@
 {
 	u_char			eaddr[ETHER_ADDR_LEN];
 	u_int16_t		sinfo2, xcvr[2];
+	struct bus_dmamem_args	args;
 	struct xl_softc		*sc;
 	struct ifnet		*ifp;
 	int			media, pmcap;
@@ -1220,75 +1211,28 @@
 	TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
 
 	/*
-	 * Now allocate a tag for the DMA descriptor lists and a chunk
-	 * of DMA-able memory based on the tag.  Also obtain the DMA
-	 * addresses of the RX and TX ring, which we'll need later.
-	 * All of our lists are allocated as a contiguous block
-	 * of memory.
+	 * Now allocate a chunk of DMA-able memory for the DMA
+	 * descriptor lists.  All of our lists are allocated as a
+	 * contiguous block of memory.
 	 */
-	error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
-	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
-	    XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL,
-	    &sc->xl_ldata.xl_rx_tag);
+	bus_dma_mem_args_init(&args);
+	args.dma_alignment = 8;
+	args.dma_lowaddr = BUS_SPACE_MAXADDR_32BIT;
+	error = bus_dma_mem_alloc(bus_get_dma_tag(dev), XL_RX_LIST_SZ, 0, &args,
+	    &sc->xl_ldata.xl_rx_ring);
 	if (error) {
-		device_printf(dev, "failed to allocate rx dma tag\n");
+		device_printf(dev, "failed to allocate rx ring\n");
 		goto fail;
 	}
+	sc->xl_ldata.xl_rx_list = sc->xl_ldata.xl_rx_ring.dma_vaddr;
 
-	error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
-	    (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT |
-	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_rx_dmamap);
+	error = bus_dma_mem_alloc(bus_get_dma_tag(dev), XL_TX_LIST_SZ, 0, &args,
+	    &sc->xl_ldata.xl_tx_ring);
 	if (error) {
-		device_printf(dev, "no memory for rx list buffers!\n");
-		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
-		sc->xl_ldata.xl_rx_tag = NULL;
-		goto fail;
-	}
-
-	error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
-	    sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
-	    XL_RX_LIST_SZ, xl_dma_map_addr,
-	    &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
-	if (error) {
-		device_printf(dev, "cannot get dma address of the rx ring!\n");
-		bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
-		    sc->xl_ldata.xl_rx_dmamap);
-		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
-		sc->xl_ldata.xl_rx_tag = NULL;
-		goto fail;
-	}
-
-	error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
-	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
-	    XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL,
-	    &sc->xl_ldata.xl_tx_tag);
-	if (error) {
-		device_printf(dev, "failed to allocate tx dma tag\n");
-		goto fail;
-	}
-
-	error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
-	    (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT |
-	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_tx_dmamap);
-	if (error) {
-		device_printf(dev, "no memory for list buffers!\n");
-		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
-		sc->xl_ldata.xl_tx_tag = NULL;
-		goto fail;
-	}
-
-	error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
-	    sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
-	    XL_TX_LIST_SZ, xl_dma_map_addr,
-	    &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
-	if (error) {
-		device_printf(dev, "cannot get dma address of the tx ring!\n");
-		bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
-		    sc->xl_ldata.xl_tx_dmamap);
-		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
-		sc->xl_ldata.xl_tx_tag = NULL;
+		device_printf(dev, "failed to allocate tx ring\n");
 		goto fail;
 	}
+	sc->xl_ldata.xl_tx_list = sc->xl_ldata.xl_tx_ring.dma_vaddr;
 
 	/*
 	 * Allocate a DMA tag for the mapping of mbufs.
@@ -1606,20 +1550,8 @@
 		bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
 		bus_dma_tag_destroy(sc->xl_mtag);
 	}
-	if (sc->xl_ldata.xl_rx_tag) {
-		bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
-		    sc->xl_ldata.xl_rx_dmamap);
-		bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
-		    sc->xl_ldata.xl_rx_dmamap);
-		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
-	}
-	if (sc->xl_ldata.xl_tx_tag) {
-		bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
-		    sc->xl_ldata.xl_tx_dmamap);
-		bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
-		    sc->xl_ldata.xl_tx_dmamap);
-		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
-	}
+	bus_dma_mem_free(&sc->xl_ldata.xl_rx_ring);
+	bus_dma_mem_free(&sc->xl_ldata.xl_tx_ring);
 
 	mtx_destroy(&sc->xl_mtx);
 
@@ -1646,7 +1578,7 @@
 		    &cd->xl_tx_chain[i].xl_map);
 		if (error)
 			return (error);
-		cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
+		cd->xl_tx_chain[i].xl_phys = ld->xl_tx_ring.dma_baddr +
 		    i * sizeof(struct xl_list);
 		if (i == (XL_TX_LIST_CNT - 1))
 			cd->xl_tx_chain[i].xl_next = NULL;
@@ -1657,7 +1589,7 @@
 	cd->xl_tx_free = &cd->xl_tx_chain[0];
 	cd->xl_tx_tail = cd->xl_tx_head = NULL;
 
-	bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
+	bus_dma_mem_sync(&ld->xl_tx_ring, BUS_DMASYNC_PREWRITE);
 	return (0);
 }
 
@@ -1681,7 +1613,7 @@
 		    &cd->xl_tx_chain[i].xl_map);
 		if (error)
 			return (error);
-		cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
+		cd->xl_tx_chain[i].xl_phys = ld->xl_tx_ring.dma_baddr +
 		    i * sizeof(struct xl_list);
 		if (i == (XL_TX_LIST_CNT - 1))
 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
@@ -1702,7 +1634,7 @@
 	cd->xl_tx_cons = 1;
 	cd->xl_tx_cnt = 0;
 
-	bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
+	bus_dma_mem_sync(&ld->xl_tx_ring, BUS_DMASYNC_PREWRITE);
 	return (0);
 }
 
@@ -1737,13 +1669,13 @@
 			next = 0;
 		else
 			next = i + 1;
-		nextptr = ld->xl_rx_dmaaddr +
+		nextptr = ld->xl_rx_ring.dma_baddr +
 		    next * sizeof(struct xl_list_onefrag);
 		cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
 		ld->xl_rx_list[i].xl_next = htole32(nextptr);
 	}
 
-	bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
+	bus_dma_mem_sync(&ld->xl_rx_ring, BUS_DMASYNC_PREWRITE);
 	cd->xl_rx_head = &cd->xl_rx_chain[0];
 
 	return (0);
@@ -1836,8 +1768,7 @@
 	XL_LOCK_ASSERT(sc);
 
 again:
-	bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
-	    BUS_DMASYNC_POSTREAD);
+	bus_dma_mem_sync(&sc->xl_ldata.xl_rx_ring, BUS_DMASYNC_POSTREAD);
 	while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
 #ifdef DEVICE_POLLING
 		if (ifp->if_capenable & IFCAP_POLLING) {
@@ -1869,8 +1800,8 @@
 		if (rxstat & XL_RXSTAT_UP_ERROR) {
 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 			cur_rx->xl_ptr->xl_status = 0;
-			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
-			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
+			bus_dma_mem_sync(&sc->xl_ldata.xl_rx_ring,
+			    BUS_DMASYNC_PREWRITE);
 			continue;
 		}
 
@@ -1884,8 +1815,8 @@
 			    "bad receive status -- packet dropped\n");
 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 			cur_rx->xl_ptr->xl_status = 0;
-			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
-			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
+			bus_dma_mem_sync(&sc->xl_ldata.xl_rx_ring,
+			    BUS_DMASYNC_PREWRITE);
 			continue;
 		}
 
@@ -1904,12 +1835,12 @@
 		if (xl_newbuf(sc, cur_rx)) {
 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 			cur_rx->xl_ptr->xl_status = 0;
-			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
-			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
+			bus_dma_mem_sync(&sc->xl_ldata.xl_rx_ring,
+			    BUS_DMASYNC_PREWRITE);
 			continue;
 		}
-		bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
-		    sc->xl_ldata.xl_rx_dmamap,
-		    BUS_DMASYNC_PREWRITE);
+		bus_dma_mem_sync(&sc->xl_ldata.xl_rx_ring,
+		    BUS_DMASYNC_PREWRITE);
 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 		m->m_pkthdr.rcvif = ifp;
@@ -1960,7 +1891,8 @@
 	    CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
 		xl_wait(sc);
-		CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
+		CSR_WRITE_4(sc, XL_UPLIST_PTR,
+		    sc->xl_ldata.xl_rx_ring.dma_baddr);
 		sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
 		goto again;
@@ -2044,8 +1976,7 @@
 
 	XL_LOCK_ASSERT(sc);
 
-	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
-	    BUS_DMASYNC_POSTREAD);
+	bus_dma_mem_sync(&sc->xl_ldata.xl_tx_ring, BUS_DMASYNC_POSTREAD);
 	idx = sc->xl_cdata.xl_tx_cons;
 	while (idx != sc->xl_cdata.xl_tx_prod) {
 		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
@@ -2545,8 +2476,7 @@
 		sc->xl_cdata.xl_tx_head = start_tx;
 		sc->xl_cdata.xl_tx_tail = cur_tx;
 	}
-	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
-	    BUS_DMASYNC_PREWRITE);
+	bus_dma_mem_sync(&sc->xl_ldata.xl_tx_ring, BUS_DMASYNC_PREWRITE);
 
 	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
 		CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);
@@ -2653,8 +2583,7 @@
 	/* Start transmission */
 	sc->xl_cdata.xl_tx_prod = idx;
 	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);
-	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
-	    BUS_DMASYNC_PREWRITE);
+	bus_dma_mem_sync(&sc->xl_ldata.xl_tx_ring, BUS_DMASYNC_PREWRITE);
 
 	/*
 	 * Set a timeout in case the chip goes out to lunch.
@@ -2789,7 +2718,7 @@
 	 */
 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
 	xl_wait(sc);
-	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
+	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_ring.dma_baddr);
 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
 	xl_wait(sc);
 
Index: sys/dev/xl/if_xlreg.h
===================================================================
--- sys/dev/xl/if_xlreg.h
+++ sys/dev/xl/if_xlreg.h
@@ -480,12 +480,8 @@
 struct xl_list_data {
 	struct xl_list_onefrag	*xl_rx_list;
 	struct xl_list		*xl_tx_list;
-	u_int32_t		xl_rx_dmaaddr;
-	bus_dma_tag_t		xl_rx_tag;
-	bus_dmamap_t		xl_rx_dmamap;
-	u_int32_t		xl_tx_dmaaddr;
-	bus_dma_tag_t		xl_tx_tag;
-	bus_dmamap_t		xl_tx_dmamap;
+	struct bus_dmamem	xl_rx_ring;
+	struct bus_dmamem	xl_tx_ring;
 };
 
 struct xl_chain {
Index: sys/kern/subr_bus_dma.c
===================================================================
--- sys/kern/subr_bus_dma.c
+++ sys/kern/subr_bus_dma.c
@@ -557,3 +557,84 @@
 
 	return (0);
 }
+
+void
+bus_dma_mem_args_init_impl(struct bus_dmamem_args *args, size_t sz)
+{
+
+	bzero(args, sz);
+	args->dma_size = sz;
+	args->dma_alignment = 1;
+	args->dma_lowaddr = BUS_SPACE_MAXADDR;
+	args->dma_highaddr = BUS_SPACE_MAXADDR;
+}
+
+struct bus_dma_mem_cb_data {
+	struct bus_dmamem	*mem;
+	int			error;
+};
+
+static void
+bus_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	struct bus_dma_mem_cb_data *d;
+
+	d = arg;
+	d->error = error;
+	if (error)
+		return;
+	d->mem->dma_baddr = segs[0].ds_addr;
+}
+
+int
+bus_dma_mem_alloc(bus_dma_tag_t parent, bus_size_t len, int flags,
+    struct bus_dmamem_args *args1, struct bus_dmamem *mem)
+{
+	struct bus_dma_mem_cb_data d;
+	struct bus_dmamem_args args;
+	int error;
+
+	bus_dma_mem_args_init(&args);
+	if (args1 != NULL) {
+		if (sizeof(args) < args1->dma_size)
+			return (EINVAL);
+		bcopy(args1, &args, args1->dma_size);
+	}
+	bzero(mem, sizeof(*mem));
+	error = bus_dma_tag_create(parent, args.dma_alignment,
+	    args.dma_boundary, args.dma_lowaddr, args.dma_highaddr,
+	    NULL, NULL, len, 1, len, 0, NULL, NULL, &mem->dma_tag);
+	if (error) {
+		bus_dma_mem_free(mem);
+		return (error);
+	}
+	error = bus_dmamem_alloc(mem->dma_tag, &mem->dma_vaddr, flags,
+	    &mem->dma_map);
+	if (error) {
+		bus_dma_mem_free(mem);
+		return (error);
+	}
+	d.mem = mem;
+	error = bus_dmamap_load(mem->dma_tag, mem->dma_map, mem->dma_vaddr, len,
+	    bus_dma_mem_cb, &d, BUS_DMA_NOWAIT);
+	if (error == 0)
+		error = d.error;
+	if (error) {
+		bus_dma_mem_free(mem);
+		return (error);
+	}
+	return (0);
+}
+
+void
+bus_dma_mem_free(struct bus_dmamem *mem)
+{
+
+	if (mem->dma_baddr != 0)
+		bus_dmamap_unload(mem->dma_tag, mem->dma_map);
+	if (mem->dma_vaddr != NULL)
+		bus_dmamem_free(mem->dma_tag, mem->dma_vaddr, mem->dma_map);
+	if (mem->dma_tag != NULL)
+		bus_dma_tag_destroy(mem->dma_tag);
+	bzero(mem, sizeof(*mem));
+}
Index: sys/sys/bus_dma.h
===================================================================
--- sys/sys/bus_dma.h
+++ sys/sys/bus_dma.h
@@ -349,4 +349,48 @@
 
 #endif /* __sparc64__ */
 
+/*
+ * A wrapper API to simplify management of static mappings.
+ */
+
+struct bus_dmamem {
+	bus_dma_tag_t	dma_tag;
+	bus_dmamap_t	dma_map;
+	void		*dma_vaddr;
+	bus_addr_t	dma_baddr;
+};
+
+/* Optional properties of a DMA memory allocation request. */
+struct bus_dmamem_args {
+	size_t		dma_size;
+	bus_size_t	dma_alignment;
+	bus_addr_t	dma_boundary;
+	bus_addr_t	dma_lowaddr;
+	bus_addr_t	dma_highaddr;
+};
+
+void bus_dma_mem_args_init_impl(struct bus_dmamem_args *args, size_t sz);
+#define	bus_dma_mem_args_init(a)					\
+	bus_dma_mem_args_init_impl((a), sizeof(struct bus_dmamem_args))
+
+/*
+ * Allocates memory for DMA and maps it.  On success, zero is returned
+ * and the 'dma_vaddr' and 'dma_baddr' fields are populated with the
+ * virtual and bus addresses, respectively, of the mapping.
+ */
+int bus_dma_mem_alloc(bus_dma_tag_t parent, bus_size_t len, int flags,
+    struct bus_dmamem_args *args, struct bus_dmamem *mem);
+
+/*
+ * Wrapper for bus_dmamap_sync() for memory allocated via
+ * bus_dma_mem_alloc().
+ */
+#define	bus_dma_mem_sync(mem, op)					\
+	bus_dmamap_sync((mem)->dma_tag, (mem)->dma_map, (op))
+
+/*
+ * Release a mapping created by bus_dma_mem_alloc().
+ */
+void bus_dma_mem_free(struct bus_dmamem *mem);
+
 #endif /* _BUS_DMA_H_ */
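
Usage sketch (illustrative only, not part of the patch): the fragment below shows how a driver would consume the proposed wrapper. The foo(4) names (struct foo_softc, struct foo_desc, FOO_RING_SZ, foo_alloc_ring(), foo_sync_ring(), foo_free_ring()) are invented for this example; only the bus_dma_mem_* calls and flags come from the patch above.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>

#include <machine/bus.h>

/* Hypothetical descriptor layout, for illustration only. */
struct foo_desc {
	uint32_t	fd_addr;
	uint32_t	fd_len;
};

struct foo_softc {
	device_t		foo_dev;
	struct bus_dmamem	foo_ring;	/* replaces tag/map/vaddr/baddr */
	struct foo_desc		*foo_desc;	/* KVA of the descriptor ring */
};

#define	FOO_RING_SZ	(64 * sizeof(struct foo_desc))

static int
foo_alloc_ring(struct foo_softc *sc)
{
	struct bus_dmamem_args args;
	int error;

	/* Only the non-default constraints need to be filled in. */
	bus_dma_mem_args_init(&args);
	args.dma_alignment = 16;
	args.dma_lowaddr = BUS_SPACE_MAXADDR_32BIT;

	error = bus_dma_mem_alloc(bus_get_dma_tag(sc->foo_dev), FOO_RING_SZ,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &args, &sc->foo_ring);
	if (error) {
		device_printf(sc->foo_dev, "failed to allocate ring\n");
		return (error);
	}

	/* Virtual address for the driver, bus address for the hardware. */
	sc->foo_desc = sc->foo_ring.dma_vaddr;
	/* Program sc->foo_ring.dma_baddr into the device's ring base register. */
	return (0);
}

static void
foo_sync_ring(struct foo_softc *sc)
{

	/* Flush descriptor updates before handing them to the device. */
	bus_dma_mem_sync(&sc->foo_ring, BUS_DMASYNC_PREWRITE);
}

static void
foo_free_ring(struct foo_softc *sc)
{

	/*
	 * Unloads, frees and destroys in one call; safe even after a
	 * failed allocation because the structure is left zeroed.
	 */
	bus_dma_mem_free(&sc->foo_ring);
}

As the xl(4) and ndis(4) hunks show, the point of the wrapper is that one structure and three calls replace the separate tag/map/vaddr/baddr bookkeeping and the per-driver bus_dmamap_load() callback.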