Changeset View
Standalone View
sys/arm64/arm64/busdma_bounce.c
Show First 20 Lines • Show All 66 Lines • ▼ Show 20 Lines | enum { | ||||
BF_KMEM_ALLOC = 0x04, | BF_KMEM_ALLOC = 0x04, | ||||
BF_COHERENT = 0x10, | BF_COHERENT = 0x10, | ||||
}; | }; | ||||
struct bounce_zone; | struct bounce_zone; | ||||
struct bus_dma_tag { | struct bus_dma_tag { | ||||
struct bus_dma_tag_common common; | struct bus_dma_tag_common common; | ||||
size_t alloc_size; | |||||
size_t alloc_alignment; | |||||
int map_count; | int map_count; | ||||
int bounce_flags; | int bounce_flags; | ||||
bus_dma_segment_t *segments; | bus_dma_segment_t *segments; | ||||
struct bounce_zone *bounce_zone; | struct bounce_zone *bounce_zone; | ||||
}; | }; | ||||
struct bounce_page { | struct bounce_page { | ||||
vm_offset_t vaddr; /* kva of bounce buffer */ | vm_offset_t vaddr; /* kva of bounce buffer */ | ||||
▲ Show 20 Lines • Show All 120 Lines • ▼ Show 20 Lines | error = common_bus_dma_tag_create(parent != NULL ? &parent->common : | ||||
sizeof (struct bus_dma_tag), (void **)&newtag); | sizeof (struct bus_dma_tag), (void **)&newtag); | ||||
if (error != 0) | if (error != 0) | ||||
return (error); | return (error); | ||||
newtag->common.impl = &bus_dma_bounce_impl; | newtag->common.impl = &bus_dma_bounce_impl; | ||||
newtag->map_count = 0; | newtag->map_count = 0; | ||||
newtag->segments = NULL; | newtag->segments = NULL; | ||||
if ((flags & BUS_DMA_COHERENT) != 0) | if ((flags & BUS_DMA_COHERENT) != 0) { | ||||
newtag->bounce_flags |= BF_COHERENT; | newtag->bounce_flags |= BF_COHERENT; | ||||
newtag->alloc_alignment = newtag->common.alignment; | |||||
newtag->alloc_size = newtag->common.maxsize; | |||||
} else { | |||||
/* | |||||
* Ensure the buffer is aligned to a cacheline when allocating | |||||
* a non-coherent buffer. This is so we don't have any data | |||||
* that another CPU may be accessing around DMA buffer | |||||
* causing the cache to become dirty. | |||||
*/ | |||||
newtag->alloc_alignment = MAX(newtag->common.alignment, | |||||
dcache_line_size); | |||||
newtag->alloc_size = roundup2(newtag->common.maxsize, | |||||
dcache_line_size); | |||||
} | |||||
if (parent != NULL) { | if (parent != NULL) { | ||||
if ((newtag->common.filter != NULL || | if ((newtag->common.filter != NULL || | ||||
(parent->bounce_flags & BF_COULD_BOUNCE) != 0)) | (parent->bounce_flags & BF_COULD_BOUNCE) != 0)) | ||||
newtag->bounce_flags |= BF_COULD_BOUNCE; | newtag->bounce_flags |= BF_COULD_BOUNCE; | ||||
/* Copy some flags from the parent */ | /* Copy some flags from the parent */ | ||||
newtag->bounce_flags |= parent->bounce_flags & BF_COHERENT; | newtag->bounce_flags |= parent->bounce_flags & BF_COHERENT; | ||||
▲ Show 20 Lines • Show All 294 Lines • ▼ Show 20 Lines | bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, | ||||
* | * | ||||
* NOTE: The (dmat->common.alignment <= dmat->maxsize) check | * NOTE: The (dmat->common.alignment <= dmat->maxsize) check | ||||
* below is just a quick hack. The exact alignment guarantees | * below is just a quick hack. The exact alignment guarantees | ||||
* of malloc(9) need to be nailed down, and the code below | * of malloc(9) need to be nailed down, and the code below | ||||
* should be rewritten to take that into account. | * should be rewritten to take that into account. | ||||
* | * | ||||
* In the meantime warn the user if malloc gets it wrong. | * In the meantime warn the user if malloc gets it wrong. | ||||
*/ | */ | ||||
if ((dmat->common.maxsize <= PAGE_SIZE) && | if ((dmat->alloc_size <= PAGE_SIZE) && | ||||
(dmat->common.alignment <= dmat->common.maxsize) && | (dmat->alloc_alignment <= dmat->alloc_size) && | ||||
dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) && | dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) && | ||||
attr == VM_MEMATTR_DEFAULT) { | attr == VM_MEMATTR_DEFAULT) { | ||||
*vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags); | *vaddr = malloc(dmat->alloc_size, M_DEVBUF, mflags); | ||||
} else if (dmat->common.nsegments >= | } else if (dmat->common.nsegments >= | ||||
howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) && | howmany(dmat->alloc_size, MIN(dmat->common.maxsegsz, PAGE_SIZE)) && | ||||
dmat->common.alignment <= PAGE_SIZE && | dmat->alloc_alignment <= PAGE_SIZE && | ||||
(dmat->common.boundary % PAGE_SIZE) == 0) { | (dmat->common.boundary % PAGE_SIZE) == 0) { | ||||
/* Page-based multi-segment allocations allowed */ | /* Page-based multi-segment allocations allowed */ | ||||
*vaddr = (void *)kmem_alloc_attr(dmat->common.maxsize, mflags, | *vaddr = (void *)kmem_alloc_attr(dmat->alloc_size, mflags, | ||||
0ul, dmat->common.lowaddr, attr); | 0ul, dmat->common.lowaddr, attr); | ||||
dmat->bounce_flags |= BF_KMEM_ALLOC; | dmat->bounce_flags |= BF_KMEM_ALLOC; | ||||
} else { | } else { | ||||
*vaddr = (void *)kmem_alloc_contig(dmat->common.maxsize, mflags, | *vaddr = (void *)kmem_alloc_contig(dmat->alloc_size, mflags, | ||||
0ul, dmat->common.lowaddr, dmat->common.alignment != 0 ? | 0ul, dmat->common.lowaddr, dmat->alloc_alignment != 0 ? | ||||
dmat->common.alignment : 1ul, dmat->common.boundary, attr); | dmat->alloc_alignment : 1ul, dmat->common.boundary, attr); | ||||
dmat->bounce_flags |= BF_KMEM_ALLOC; | dmat->bounce_flags |= BF_KMEM_ALLOC; | ||||
} | } | ||||
if (*vaddr == NULL) { | if (*vaddr == NULL) { | ||||
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", | CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", | ||||
__func__, dmat, dmat->common.flags, ENOMEM); | __func__, dmat, dmat->common.flags, ENOMEM); | ||||
free(*mapp, M_DEVBUF); | free(*mapp, M_DEVBUF); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) { | } else if (vtophys(*vaddr) & (dmat->alloc_alignment - 1)) { | ||||
printf("bus_dmamem_alloc failed to align memory properly.\n"); | printf("bus_dmamem_alloc failed to align memory properly.\n"); | ||||
} | } | ||||
dmat->map_count++; | dmat->map_count++; | ||||
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", | CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", | ||||
__func__, dmat, dmat->common.flags, 0); | __func__, dmat, dmat->common.flags, 0); | ||||
return (0); | return (0); | ||||
} | } | ||||
Show All 10 Lines | bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) | ||||
* should be NULL and the BF_KMEM_ALLOC flag cleared if malloc() | * should be NULL and the BF_KMEM_ALLOC flag cleared if malloc() | ||||
* was used and set if kmem_alloc_contig() was used. | * was used and set if kmem_alloc_contig() was used. | ||||
*/ | */ | ||||
if ((map->flags & DMAMAP_FROM_DMAMEM) == 0) | if ((map->flags & DMAMAP_FROM_DMAMEM) == 0) | ||||
panic("bus_dmamem_free: Invalid map freed\n"); | panic("bus_dmamem_free: Invalid map freed\n"); | ||||
if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0) | if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0) | ||||
free(vaddr, M_DEVBUF); | free(vaddr, M_DEVBUF); | ||||
else | else | ||||
kmem_free((vm_offset_t)vaddr, dmat->common.maxsize); | kmem_free((vm_offset_t)vaddr, dmat->alloc_size); | ||||
free(map, M_DEVBUF); | free(map, M_DEVBUF); | ||||
dmat->map_count--; | dmat->map_count--; | ||||
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, | CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, | ||||
dmat->bounce_flags); | dmat->bounce_flags); | ||||
} | } | ||||
static bool | static bool | ||||
_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen, | _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen, | ||||
▲ Show 20 Lines • Show All 79 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
static int | static int | ||||
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) | _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) | ||||
{ | { | ||||
/* Reserve Necessary Bounce Pages */ | /* Reserve Necessary Bounce Pages */ | ||||
mtx_lock(&bounce_lock); | mtx_lock(&bounce_lock); | ||||
if (flags & BUS_DMA_NOWAIT) { | if (flags & BUS_DMA_NOWAIT) { | ||||
andrew: Extra change that sneaked in | |||||
if (reserve_bounce_pages(dmat, map, 0) != 0) { | if (reserve_bounce_pages(dmat, map, 0) != 0) { | ||||
mtx_unlock(&bounce_lock); | mtx_unlock(&bounce_lock); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
} else { | } else { | ||||
if (reserve_bounce_pages(dmat, map, 1) != 0) { | if (reserve_bounce_pages(dmat, map, 1) != 0) { | ||||
/* Queue us for resources */ | /* Queue us for resources */ | ||||
STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); | STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); | ||||
▲ Show 20 Lines • Show All 130 Lines • ▼ Show 20 Lines | bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, | ||||
int *segp) | int *segp) | ||||
{ | { | ||||
struct sync_list *sl; | struct sync_list *sl; | ||||
bus_size_t sgsize, max_sgsize; | bus_size_t sgsize, max_sgsize; | ||||
bus_addr_t curaddr, sl_pend; | bus_addr_t curaddr, sl_pend; | ||||
vm_offset_t kvaddr, vaddr, sl_vend; | vm_offset_t kvaddr, vaddr, sl_vend; | ||||
int error; | int error; | ||||
if (segs == NULL) | if (segs == NULL) | ||||
Done Inline ActionsExtra change that sneaked in andrew: Extra change that sneaked in | |||||
segs = dmat->segments; | segs = dmat->segments; | ||||
if (might_bounce(dmat)) { | if (might_bounce(dmat)) { | ||||
_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); | _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); | ||||
if (map->pagesneeded != 0) { | if (map->pagesneeded != 0) { | ||||
error = _bus_dmamap_reserve_pages(dmat, map, flags); | error = _bus_dmamap_reserve_pages(dmat, map, flags); | ||||
if (error) | if (error) | ||||
return (error); | return (error); | ||||
▲ Show 20 Lines • Show All 419 Lines • ▼ Show 20 Lines | |||||
static int | static int | ||||
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) | reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) | ||||
{ | { | ||||
struct bounce_zone *bz; | struct bounce_zone *bz; | ||||
int pages; | int pages; | ||||
mtx_assert(&bounce_lock, MA_OWNED); | mtx_assert(&bounce_lock, MA_OWNED); | ||||
bz = dmat->bounce_zone; | bz = dmat->bounce_zone; | ||||
pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); | pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); | ||||
if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) | if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) | ||||
Done Inline ActionsExtra change that sneaked in andrew: Extra change that sneaked in | |||||
return (map->pagesneeded - (map->pagesreserved + pages)); | return (map->pagesneeded - (map->pagesreserved + pages)); | ||||
bz->free_bpages -= pages; | bz->free_bpages -= pages; | ||||
bz->reserved_bpages += pages; | bz->reserved_bpages += pages; | ||||
map->pagesreserved += pages; | map->pagesreserved += pages; | ||||
pages = map->pagesneeded - map->pagesreserved; | pages = map->pagesneeded - map->pagesreserved; | ||||
return (pages); | return (pages); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 115 Lines • Show Last 20 Lines |
Extra change that sneaked in