diff --git a/sys/arm/arm/busdma_machdep.c b/sys/arm/arm/busdma_machdep.c index 758517323ff1..a754cb13ddc5 100644 --- a/sys/arm/arm/busdma_machdep.c +++ b/sys/arm/arm/busdma_machdep.c @@ -1,1442 +1,1407 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012-2015 Ian Lepore * Copyright (c) 2010 Mark Tinguely * Copyright (c) 2004 Olivier Houchard * Copyright (c) 2002 Peter Grehan * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include //#define ARM_BUSDMA_MAPLOAD_STATS #define BUSDMA_DCACHE_ALIGN cpuinfo.dcache_line_size #define BUSDMA_DCACHE_MASK cpuinfo.dcache_line_mask #define MAX_BPAGES 64 #define MAX_DMA_SEGMENTS 4096 #define BUS_DMA_EXCL_BOUNCE BUS_DMA_BUS2 #define BUS_DMA_ALIGN_BOUNCE BUS_DMA_BUS3 #define BUS_DMA_COULD_BOUNCE (BUS_DMA_EXCL_BOUNCE | BUS_DMA_ALIGN_BOUNCE) #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 struct bounce_page; struct bounce_zone; struct bus_dma_tag { bus_dma_tag_t parent; bus_size_t alignment; bus_addr_t boundary; bus_addr_t lowaddr; bus_addr_t highaddr; bus_dma_filter_t *filter; void *filterarg; bus_size_t maxsize; u_int nsegments; bus_size_t maxsegsz; int flags; int ref_count; int map_count; bus_dma_lock_t *lockfunc; void *lockfuncarg; struct bounce_zone *bounce_zone; }; struct sync_list { vm_offset_t vaddr; /* kva of client data */ bus_addr_t paddr; /* physical address */ vm_page_t pages; /* starting page of client data */ bus_size_t datacount; /* client data count */ }; static uint32_t tags_total; static uint32_t maps_total; static uint32_t maps_dmamem; static uint32_t maps_coherent; #ifdef ARM_BUSDMA_MAPLOAD_STATS static counter_u64_t maploads_total; static counter_u64_t maploads_bounced; static counter_u64_t maploads_coherent; static counter_u64_t maploads_dmamem; static counter_u64_t maploads_mbuf; static counter_u64_t maploads_physmem; #endif SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Busdma parameters"); SYSCTL_UINT(_hw_busdma, OID_AUTO, 
tags_total, CTLFLAG_RD, &tags_total, 0, "Number of active tags");
SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_total, CTLFLAG_RD, &maps_total, 0, "Number of active maps");
SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_dmamem, CTLFLAG_RD, &maps_dmamem, 0, "Number of active maps for bus_dmamem_alloc buffers");
SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_coherent, CTLFLAG_RD, &maps_coherent, 0, "Number of active maps with BUS_DMA_COHERENT flag set");
#ifdef ARM_BUSDMA_MAPLOAD_STATS
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD, &maploads_total, "Number of load operations performed");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_bounced, CTLFLAG_RD, &maploads_bounced, "Number of load operations that used bounce buffers");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_coherent, CTLFLAG_RD, &maploads_coherent, "Number of load operations on BUS_DMA_COHERENT memory");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_dmamem, CTLFLAG_RD, &maploads_dmamem, "Number of load operations on bus_dmamem_alloc buffers");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_mbuf, CTLFLAG_RD, &maploads_mbuf, "Number of load operations for mbufs");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_physmem, CTLFLAG_RD, &maploads_physmem, "Number of load operations on physical buffers");
#endif
struct bus_dmamap { STAILQ_HEAD(, bounce_page) bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; struct memdesc mem; bus_dmamap_callback_t *callback; void *callback_arg; int flags; #define DMAMAP_COHERENT (1 << 0) #define DMAMAP_DMAMEM_ALLOC (1 << 1) #define DMAMAP_MBUF (1 << 2) STAILQ_ENTRY(bus_dmamap) links; bus_dma_segment_t *segments; int sync_count; struct sync_list slist[]; };
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap, bus_dmamap_t map, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags);
static void dma_preread_safe(vm_offset_t va, vm_paddr_t pa, vm_size_t size);
static void dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op);
static busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */
MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
#define dmat_alignment(dmat) ((dmat)->alignment)
#define dmat_flags(dmat) ((dmat)->flags)
#define dmat_lowaddr(dmat) ((dmat)->lowaddr)
#define dmat_lockfunc(dmat) ((dmat)->lockfunc)
#define dmat_lockfuncarg(dmat) ((dmat)->lockfuncarg)
#include "../../kern/subr_busdma_bounce.c"
static void busdma_init(void *dummy) { int uma_flags; #ifdef ARM_BUSDMA_MAPLOAD_STATS maploads_total = counter_u64_alloc(M_WAITOK); maploads_bounced = counter_u64_alloc(M_WAITOK); maploads_coherent = counter_u64_alloc(M_WAITOK); maploads_dmamem = counter_u64_alloc(M_WAITOK); maploads_mbuf = counter_u64_alloc(M_WAITOK); maploads_physmem = counter_u64_alloc(M_WAITOK); #endif uma_flags = 0; /* Create a cache of buffers in standard (cacheable) memory. */ standard_allocator = busdma_bufalloc_create("buffer", BUSDMA_DCACHE_ALIGN,/* minimum_alignment */ NULL, /* uma_alloc func */ NULL, /* uma_free func */ uma_flags); /* uma_zcreate_flags */ #ifdef INVARIANTS /* * Force UMA zone to allocate service structures like * slabs using its own allocator.
uma_debug code performs * atomic ops on uma_slab_t fields and safety of this * operation is not guaranteed for write-back caches */ uma_flags = UMA_ZONE_NOTOUCH; #endif /* * Create a cache of buffers in uncacheable memory, to implement the * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag. */ coherent_allocator = busdma_bufalloc_create("coherent", BUSDMA_DCACHE_ALIGN,/* minimum_alignment */ busdma_bufalloc_alloc_uncacheable, busdma_bufalloc_free_uncacheable, uma_flags); /* uma_zcreate_flags */ } /* * This init historically used SI_SUB_VM, but now the init code requires * malloc(9) using M_BUSDMA memory and the pcpu zones for counter(9), which get * set up by SI_SUB_KMEM and SI_ORDER_LAST, so we'll go right after that by * using SI_SUB_KMEM+1. */ SYSINIT(busdma, SI_SUB_KMEM+1, SI_ORDER_FIRST, busdma_init, NULL); /* * This routine checks the exclusion zone constraints from a tag against the * physical RAM available on the machine. If a tag specifies an exclusion zone * but there's no RAM in that zone, then we avoid allocating resources to bounce * a request, and we can use any memory allocator (as opposed to needing * kmem_alloc_contig() just because it can allocate pages in an address range). * * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the * same value on 32-bit architectures) as their lowaddr constraint, and we can't * possibly have RAM at an address higher than the highest address we can * express, so we take a fast out. */ static int exclusion_bounce_check(vm_offset_t lowaddr, vm_offset_t highaddr) { int i; if (lowaddr >= BUS_SPACE_MAXADDR) return (0); for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { if ((lowaddr >= phys_avail[i] && lowaddr < phys_avail[i + 1]) || (lowaddr < phys_avail[i] && highaddr >= phys_avail[i])) return (1); } return (0); } /* * Return true if the tag has an exclusion zone that could lead to bouncing. */ static __inline int exclusion_bounce(bus_dma_tag_t dmat) { return (dmat->flags & BUS_DMA_EXCL_BOUNCE); } /* * Return true if the given address does not fall on the alignment boundary. */ static __inline int alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr) { return (!vm_addr_align_ok(addr, dmat->alignment)); } /* * Return true if the DMA should bounce because the start or end does not fall * on a cacheline boundary (which would require a partial cacheline flush). * COHERENT memory doesn't trigger cacheline flushes. Memory allocated by * bus_dmamem_alloc() is always aligned to cacheline boundaries, and there's a * strict rule that such memory cannot be accessed by the CPU while DMA is in * progress (or by multiple DMA engines at once), so that it's always safe to do * full cacheline flushes even if that affects memory outside the range of a * given DMA operation that doesn't involve the full allocated buffer. If we're * mapping an mbuf, that follows the same rules as a buffer we allocated. */ static __inline int cacheline_bounce(bus_dmamap_t map, bus_addr_t addr, bus_size_t size) { if (map->flags & (DMAMAP_DMAMEM_ALLOC | DMAMAP_COHERENT | DMAMAP_MBUF)) return (0); return ((addr | size) & BUSDMA_DCACHE_MASK); } /* * Return true if we might need to bounce the DMA described by addr and size. * * This is used to quick-check whether we need to do the more expensive work of * checking the DMA page-by-page looking for alignment and exclusion bounces. * * Note that the addr argument might be either virtual or physical. 
It doesn't
 * matter because we only look at the low-order bits, which are the same in both
 * address spaces, and the maximum alignment of a generic buffer is limited to
 * the page size.
 * Bouncing of buffers allocated by bus_dmamem_alloc() is not necessary; they
 * always comply with the required rules (alignment, boundary, and address
 * range).
 */ static __inline int might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr, bus_size_t size) { KASSERT(map->flags & DMAMAP_DMAMEM_ALLOC || dmat->alignment <= PAGE_SIZE, ("%s: unsupported alignment (0x%08lx) for buffer not " "allocated by bus_dmamem_alloc()", __func__, dmat->alignment)); return (!(map->flags & DMAMAP_DMAMEM_ALLOC) && ((dmat->flags & BUS_DMA_EXCL_BOUNCE) || alignment_bounce(dmat, addr) || cacheline_bounce(map, addr, size))); }
/* * Return true if we must bounce the DMA described by paddr and size. * * Bouncing can be triggered by DMA that doesn't begin and end on cacheline * boundaries, or doesn't begin on an alignment boundary, or falls within the * exclusion zone of any tag in the ancestry chain. * * For exclusions, walk the chain of tags comparing paddr to the exclusion zone * within each tag. If the tag has a filter function, use it to decide whether * the DMA needs to bounce, otherwise any DMA within the zone bounces. */ static int must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr, bus_size_t size) { if (cacheline_bounce(map, paddr, size)) return (1); /* * The tag already contains ancestors' alignment restrictions so this * check doesn't need to be inside the loop. */ if (alignment_bounce(dmat, paddr)) return (1); /* * Even though each tag has an exclusion zone that is a superset of its * own and all its ancestors' exclusions, the exclusion zone of each tag * up the chain must be checked within the loop, because the busdma * rules say the filter function is called only when the address lies * within the low-highaddr range of the tag that filterfunc belongs to. */ while (dmat != NULL && exclusion_bounce(dmat)) { if ((paddr >= dmat->lowaddr && paddr <= dmat->highaddr) && (dmat->filter == NULL || dmat->filter(dmat->filterarg, paddr) != 0)) return (1); dmat = dmat->parent; } return (0); }
-/* - * Convenience function for manipulating driver locks from busdma (during - * busdma_swi, for example). - */ -void -busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) -{ - struct mtx *dmtx; - - dmtx = (struct mtx *)arg; - switch (op) { - case BUS_DMA_LOCK: - mtx_lock(dmtx); - break; - case BUS_DMA_UNLOCK: - mtx_unlock(dmtx); - break; - default: - panic("Unknown operation 0x%x for busdma_lock_mutex!", op); - } -} - -/* - * dflt_lock should never get called. It gets put into the dma tag when - * lockfunc == NULL, which is only valid if the maps that are associated - * with the tag are meant to never be defered. - * XXX Should have a way to identify which driver is responsible here. - */ -static void -dflt_lock(void *arg, bus_dma_lock_op_t op) -{ - - panic("driver error: busdma dflt_lock called"); -} - /* * Allocate a device specific dma_tag. */ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat) { bus_dma_tag_t newtag; int error = 0; /* Basic sanity checking.
*/ KASSERT(boundary == 0 || powerof2(boundary), ("dma tag boundary %lu, must be a power of 2", boundary)); KASSERT(boundary == 0 || boundary >= maxsegsz, ("dma tag boundary %lu is < maxsegsz %lu\n", boundary, maxsegsz)); KASSERT(alignment != 0 && powerof2(alignment), ("dma tag alignment %lu, must be non-zero power of 2", alignment)); KASSERT(maxsegsz != 0, ("dma tag maxsegsz must not be zero")); /* Return a NULL tag on failure */ *dmat = NULL; newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_ZERO | M_NOWAIT); if (newtag == NULL) { CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, 0, error); return (ENOMEM); } newtag->parent = parent; newtag->alignment = alignment; newtag->boundary = boundary; newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); newtag->filter = filter; newtag->filterarg = filterarg; newtag->maxsize = maxsize; newtag->nsegments = nsegments; newtag->maxsegsz = maxsegsz; newtag->flags = flags; newtag->ref_count = 1; /* Count ourself */ newtag->map_count = 0; if (lockfunc != NULL) { newtag->lockfunc = lockfunc; newtag->lockfuncarg = lockfuncarg; } else { - newtag->lockfunc = dflt_lock; + newtag->lockfunc = _busdma_dflt_lock; newtag->lockfuncarg = NULL; } /* Take into account any restrictions imposed by our parent tag */ if (parent != NULL) { newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); newtag->alignment = MAX(parent->alignment, newtag->alignment); newtag->flags |= parent->flags & BUS_DMA_COULD_BOUNCE; newtag->flags |= parent->flags & BUS_DMA_COHERENT; if (newtag->boundary == 0) newtag->boundary = parent->boundary; else if (parent->boundary != 0) newtag->boundary = MIN(parent->boundary, newtag->boundary); if (newtag->filter == NULL) { /* * Short circuit to looking at our parent directly * since we have encapsulated all of its information */ newtag->filter = parent->filter; newtag->filterarg = parent->filterarg; newtag->parent = parent->parent; } if (newtag->parent != NULL) atomic_add_int(&parent->ref_count, 1); } if (exclusion_bounce_check(newtag->lowaddr, newtag->highaddr)) newtag->flags |= BUS_DMA_EXCL_BOUNCE; if (alignment_bounce(newtag, 1)) newtag->flags |= BUS_DMA_ALIGN_BOUNCE; /* * Any request can auto-bounce due to cacheline alignment, in addition * to any alignment or boundary specifications in the tag, so if the * ALLOCNOW flag is set, there's always work to do. */ if ((flags & BUS_DMA_ALLOCNOW) != 0) { struct bounce_zone *bz; /* * Round size up to a full page, and add one more page because * there can always be one more boundary crossing than the * number of pages in a transfer. */ maxsize = roundup2(maxsize, PAGE_SIZE) + PAGE_SIZE; if ((error = alloc_bounce_zone(newtag)) != 0) { free(newtag, M_BUSDMA); return (error); } bz = newtag->bounce_zone; if (ptoa(bz->total_bpages) < maxsize) { int pages; pages = atop(maxsize) - bz->total_bpages; /* Add pages to our bounce pool */ if (alloc_bounce_pages(newtag, pages) < pages) error = ENOMEM; } /* Performed initial allocation */ newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; } else newtag->bounce_zone = NULL; if (error != 0) { free(newtag, M_BUSDMA); } else { atomic_add_32(&tags_total, 1); *dmat = newtag; } CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, (newtag != NULL ? 
newtag->flags : 0), error); return (error); } void bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat) { if (t == NULL || dmat == NULL) return; t->parent = dmat->parent; t->alignment = dmat->alignment; t->boundary = dmat->boundary; t->lowaddr = dmat->lowaddr; t->highaddr = dmat->highaddr; t->maxsize = dmat->maxsize; t->nsegments = dmat->nsegments; t->maxsegsize = dmat->maxsegsz; t->flags = dmat->flags; t->lockfunc = dmat->lockfunc; t->lockfuncarg = dmat->lockfuncarg; } int bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain) { return (0); } int bus_dma_tag_destroy(bus_dma_tag_t dmat) { bus_dma_tag_t dmat_copy; int error; error = 0; dmat_copy = dmat; if (dmat != NULL) { if (dmat->map_count != 0) { error = EBUSY; goto out; } while (dmat != NULL) { bus_dma_tag_t parent; parent = dmat->parent; atomic_subtract_int(&dmat->ref_count, 1); if (dmat->ref_count == 0) { atomic_subtract_32(&tags_total, 1); free(dmat, M_BUSDMA); /* * Last reference count, so * release our reference * count on our parent. */ dmat = parent; } else dmat = NULL; } } out: CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); return (error); } static int allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp) { struct bounce_zone *bz; int maxpages; int error; if (dmat->bounce_zone == NULL) if ((error = alloc_bounce_zone(dmat)) != 0) return (error); bz = dmat->bounce_zone; /* Initialize the new map */ STAILQ_INIT(&(mapp->bpages)); /* * Attempt to add pages to our pool on a per-instance basis up to a sane * limit. Even if the tag isn't flagged as COULD_BOUNCE due to * alignment and boundary constraints, it could still auto-bounce due to * cacheline alignment, which requires at most two bounce pages. */ if (dmat->flags & BUS_DMA_COULD_BOUNCE) maxpages = MAX_BPAGES; else maxpages = 2 * bz->map_count; if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || (bz->map_count > 0 && bz->total_bpages < maxpages)) { int pages; pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1; pages = MIN(maxpages - bz->total_bpages, pages); pages = MAX(pages, 2); if (alloc_bounce_pages(dmat, pages) < pages) return (ENOMEM); if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; } bz->map_count++; return (0); } static bus_dmamap_t allocate_map(bus_dma_tag_t dmat, int mflags) { int mapsize, segsize; bus_dmamap_t map; /* * Allocate the map. The map structure ends with an embedded * variable-sized array of sync_list structures. Following that * we allocate enough extra space to hold the array of bus_dma_segments. */ KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS, ("cannot allocate %u dma segments (max is %u)", dmat->nsegments, MAX_DMA_SEGMENTS)); segsize = sizeof(struct bus_dma_segment) * dmat->nsegments; mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments; map = malloc(mapsize + segsize, M_BUSDMA, mflags | M_ZERO); if (map == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (NULL); } map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize); STAILQ_INIT(&map->bpages); return (map); } /* * Allocate a handle for mapping from kva/uva/physical * address space into bus device space. 
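 *
 * A hedged usage sketch (editor's addition, not part of this change; "sc",
 * its members, and my_callback are hypothetical driver state): a map created
 * here is typically cycled through load/sync/unload and finally destroyed:
 *
 *	error = bus_dmamap_create(sc->dmat, 0, &sc->map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->dmat, sc->map, buf, buflen,
 *		    my_callback, sc, BUS_DMA_NOWAIT);
 *	...
 *	bus_dmamap_unload(sc->dmat, sc->map);
 *	bus_dmamap_destroy(sc->dmat, sc->map);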
*/ int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { bus_dmamap_t map; int error = 0; *mapp = map = allocate_map(dmat, M_NOWAIT); if (map == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (ENOMEM); } /* * Bouncing might be required if the driver asks for an exclusion * region, a data alignment that is stricter than 1, or DMA that begins * or ends with a partial cacheline. Whether bouncing will actually * happen can't be known until mapping time, but we need to pre-allocate * resources now because we might not be allowed to at mapping time. */ error = allocate_bz_and_pages(dmat, map); if (error != 0) { free(map, M_BUSDMA); *mapp = NULL; return (error); } if (map->flags & DMAMAP_COHERENT) atomic_add_32(&maps_coherent, 1); atomic_add_32(&maps_total, 1); dmat->map_count++; return (0); } /* * Destroy a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY); return (EBUSY); } if (dmat->bounce_zone) dmat->bounce_zone->map_count--; if (map->flags & DMAMAP_COHERENT) atomic_subtract_32(&maps_coherent, 1); atomic_subtract_32(&maps_total, 1); free(map, M_BUSDMA); dmat->map_count--; CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); return (0); } /* * Allocate a piece of memory that can be efficiently mapped into bus device * space based on the constraints listed in the dma tag. Returns a pointer to * the allocated memory, and a pointer to an associated bus_dmamap. */ int bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags, bus_dmamap_t *mapp) { busdma_bufalloc_t ba; struct busdma_bufzone *bufzone; bus_dmamap_t map; vm_memattr_t memattr; int mflags; if (flags & BUS_DMA_NOWAIT) mflags = M_NOWAIT; else mflags = M_WAITOK; if (flags & BUS_DMA_ZERO) mflags |= M_ZERO; *mapp = map = allocate_map(dmat, mflags); if (map == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, ENOMEM); return (ENOMEM); } map->flags = DMAMAP_DMAMEM_ALLOC; /* For coherent memory, set the map flag that disables sync ops. */ if (flags & BUS_DMA_COHERENT) map->flags |= DMAMAP_COHERENT; /* * Choose a busdma buffer allocator based on memory type flags. * If the tag's COHERENT flag is set, that means normal memory * is already coherent, use the normal allocator. */ if ((flags & BUS_DMA_COHERENT) && ((dmat->flags & BUS_DMA_COHERENT) == 0)) { memattr = VM_MEMATTR_UNCACHEABLE; ba = coherent_allocator; } else { memattr = VM_MEMATTR_DEFAULT; ba = standard_allocator; } /* * Try to find a bufzone in the allocator that holds a cache of buffers * of the right size for this request. If the buffer is too big to be * held in the allocator cache, this returns NULL. */ bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); /* * Allocate the buffer from the uma(9) allocator if... * - It's small enough to be in the allocator (bufzone not NULL). * - The alignment constraint isn't larger than the allocation size * (the allocator aligns buffers to their size boundaries). * - There's no need to handle lowaddr/highaddr exclusion zones. * else allocate non-contiguous pages if... * - The page count that could get allocated doesn't exceed * nsegments also when the maximum segment size is less * than PAGE_SIZE. * - The alignment constraint isn't larger than a page boundary. * - There are no boundary-crossing constraints. 
* else allocate a block of contiguous pages because one or more of the * constraints is something that only the contig allocator can fulfill. */ if (bufzone != NULL && dmat->alignment <= bufzone->size && !exclusion_bounce(dmat)) { *vaddr = uma_zalloc(bufzone->umazone, mflags); } else if (dmat->nsegments >= howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) && dmat->alignment <= PAGE_SIZE && (dmat->boundary % PAGE_SIZE) == 0) { *vaddr = (void *)kmem_alloc_attr(dmat->maxsize, mflags, 0, dmat->lowaddr, memattr); } else { *vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary, memattr); } if (*vaddr == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, ENOMEM); free(map, M_BUSDMA); *mapp = NULL; return (ENOMEM); } if (map->flags & DMAMAP_COHERENT) atomic_add_32(&maps_coherent, 1); atomic_add_32(&maps_dmamem, 1); atomic_add_32(&maps_total, 1); dmat->map_count++; CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, 0); return (0); } /* * Free a piece of memory that was allocated via bus_dmamem_alloc, along with * its associated map. */ void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) { struct busdma_bufzone *bufzone; busdma_bufalloc_t ba; if ((map->flags & DMAMAP_COHERENT) && ((dmat->flags & BUS_DMA_COHERENT) == 0)) ba = coherent_allocator; else ba = standard_allocator; bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); if (bufzone != NULL && dmat->alignment <= bufzone->size && !exclusion_bounce(dmat)) uma_zfree(bufzone->umazone, vaddr); else kmem_free((vm_offset_t)vaddr, dmat->maxsize); dmat->map_count--; if (map->flags & DMAMAP_COHERENT) atomic_subtract_32(&maps_coherent, 1); atomic_subtract_32(&maps_total, 1); atomic_subtract_32(&maps_dmamem, 1); free(map, M_BUSDMA); CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); } static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags) { bus_addr_t curaddr; bus_size_t sgsize; if (map->pagesneeded == 0) { CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d" " map= %p, pagesneeded= %d", dmat->lowaddr, dmat->boundary, dmat->alignment, map, map->pagesneeded); /* * Count the number of bounce pages * needed in order to complete this transfer */ curaddr = buf; while (buflen != 0) { sgsize = MIN(buflen, dmat->maxsegsz); if (must_bounce(dmat, map, curaddr, sgsize) != 0) { sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); map->pagesneeded++; } curaddr += sgsize; buflen -= sgsize; } CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); } } static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap, bus_dmamap_t map, void *buf, bus_size_t buflen, int flags) { vm_offset_t vaddr; vm_offset_t vendaddr; bus_addr_t paddr; if (map->pagesneeded == 0) { CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d" " map= %p, pagesneeded= %d", dmat->lowaddr, dmat->boundary, dmat->alignment, map, map->pagesneeded); /* * Count the number of bounce pages * needed in order to complete this transfer */ vaddr = (vm_offset_t)buf; vendaddr = (vm_offset_t)buf + buflen; while (vaddr < vendaddr) { if (__predict_true(pmap == kernel_pmap)) paddr = pmap_kextract(vaddr); else paddr = pmap_extract(pmap, vaddr); if (must_bounce(dmat, map, paddr, min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK)))) != 0) { map->pagesneeded++; } vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK)); } CTR1(KTR_BUSDMA, "pagesneeded= 
%d", map->pagesneeded); } }
/* * Add a single contiguous physical range to the segment list. */ static int _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) { int seg; /* * Make sure we don't cross any boundaries. */ if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary)) sgsize = roundup2(curaddr, dmat->boundary) - curaddr; /* * Insert chunk into a segment, coalescing with * previous segment if possible. */ seg = *segp; if (seg == -1) { seg = 0; segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } else { if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len + sgsize, dmat->boundary)) segs[seg].ds_len += sgsize; else { if (++seg >= dmat->nsegments) return (0); segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } } *segp = seg; return (sgsize); }
/* * Utility function to load a physical buffer. segp contains * the starting segment on entrance, and the ending segment on exit. */ int _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { bus_addr_t curaddr; bus_addr_t sl_end = 0; bus_size_t sgsize; struct sync_list *sl; int error; if (segs == NULL) segs = map->segments; #ifdef ARM_BUSDMA_MAPLOAD_STATS counter_u64_add(maploads_total, 1); counter_u64_add(maploads_physmem, 1); #endif if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) { _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); if (map->pagesneeded != 0) { #ifdef ARM_BUSDMA_MAPLOAD_STATS counter_u64_add(maploads_bounced, 1); #endif error = _bus_dmamap_reserve_pages(dmat, map, flags); if (error) return (error); } } sl = map->slist + map->sync_count - 1; while (buflen > 0) { curaddr = buf; sgsize = MIN(buflen, dmat->maxsegsz); if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, sgsize)) { sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); curaddr = add_bounce_page(dmat, map, 0, curaddr, sgsize); } else if ((dmat->flags & BUS_DMA_COHERENT) == 0) { if (map->sync_count > 0) sl_end = sl->paddr + sl->datacount; if (map->sync_count == 0 || curaddr != sl_end) { if (++map->sync_count > dmat->nsegments) break; sl++; sl->vaddr = 0; sl->paddr = curaddr; sl->datacount = sgsize; sl->pages = PHYS_TO_VM_PAGE(curaddr); KASSERT(sl->pages != NULL, ("%s: page at PA:0x%08lx is not in " "vm_page_array", __func__, curaddr)); } else sl->datacount += sgsize; } sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, segp); if (sgsize == 0) break; buf += sgsize; buflen -= sgsize; } /* * Did we fit? */ if (buflen != 0) { bus_dmamap_unload(dmat, map); return (EFBIG); /* XXX better return value here? */ } return (0); }
int _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp) { return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags, segs, segp)); }
/* * Utility function to load a linear buffer. segp contains * the starting segment on entrance, and the ending segment on exit.
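 *
 * Hedged usage note (editor's addition, based on the callers in
 * subr_bus_dma.c): the first call passes *segp == -1, the function leaves
 * *segp at the index of the last segment it filled, and the caller turns
 * that final index into a segment count afterwards:
 *
 *	nsegs = -1;
 *	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
 *	    kernel_pmap, flags, NULL, &nsegs);
 *	nsegs++;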
*/ int _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, int *segp) { bus_size_t sgsize; bus_addr_t curaddr; bus_addr_t sl_pend = 0; vm_offset_t kvaddr, vaddr, sl_vend = 0; struct sync_list *sl; int error; #ifdef ARM_BUSDMA_MAPLOAD_STATS counter_u64_add(maploads_total, 1); if (map->flags & DMAMAP_COHERENT) counter_u64_add(maploads_coherent, 1); if (map->flags & DMAMAP_DMAMEM_ALLOC) counter_u64_add(maploads_dmamem, 1); #endif if (segs == NULL) segs = map->segments; if (flags & BUS_DMA_LOAD_MBUF) { #ifdef ARM_BUSDMA_MAPLOAD_STATS counter_u64_add(maploads_mbuf, 1); #endif map->flags |= DMAMAP_MBUF; } if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) { _bus_dmamap_count_pages(dmat, pmap, map, buf, buflen, flags); if (map->pagesneeded != 0) { #ifdef ARM_BUSDMA_MAPLOAD_STATS counter_u64_add(maploads_bounced, 1); #endif error = _bus_dmamap_reserve_pages(dmat, map, flags); if (error) return (error); } } sl = map->slist + map->sync_count - 1; vaddr = (vm_offset_t)buf; while (buflen > 0) { /* * Get the physical address for this segment. */ if (__predict_true(pmap == kernel_pmap)) { curaddr = pmap_kextract(vaddr); kvaddr = vaddr; } else { curaddr = pmap_extract(pmap, vaddr); kvaddr = 0; } /* * Compute the segment size, and adjust counts. */ sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); if (sgsize > dmat->maxsegsz) sgsize = dmat->maxsegsz; if (buflen < sgsize) sgsize = buflen; if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, sgsize)) { curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, sgsize); } else if ((dmat->flags & BUS_DMA_COHERENT) == 0) { if (map->sync_count > 0) { sl_pend = sl->paddr + sl->datacount; sl_vend = sl->vaddr + sl->datacount; } if (map->sync_count == 0 || (kvaddr != 0 && kvaddr != sl_vend) || (curaddr != sl_pend)) { if (++map->sync_count > dmat->nsegments) goto cleanup; sl++; sl->vaddr = kvaddr; sl->paddr = curaddr; if (kvaddr != 0) { sl->pages = NULL; } else { sl->pages = PHYS_TO_VM_PAGE(curaddr); KASSERT(sl->pages != NULL, ("%s: page at PA:0x%08lx is not " "in vm_page_array", __func__, curaddr)); } sl->datacount = sgsize; } else sl->datacount += sgsize; } sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, segp); if (sgsize == 0) break; vaddr += sgsize; buflen -= sgsize; } cleanup: /* * Did we fit? */ if (buflen != 0) { bus_dmamap_unload(dmat, map); return (EFBIG); /* XXX better return value here? */ } return (0); } void _bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) { map->mem = *mem; map->dmat = dmat; map->callback = callback; map->callback_arg = callback_arg; } bus_dma_segment_t * _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error) { if (segs == NULL) segs = map->segments; return (segs); } /* * Release the mapping held by map. 
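 *
 * (Editorial note: as the body below shows, this returns any bounce pages
 * to the zone, releases the map's reserved-page accounting, and resets the
 * sync list; the map itself stays allocated until bus_dmamap_destroy().)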
*/ void bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) { struct bounce_page *bpage; struct bounce_zone *bz; if ((bz = dmat->bounce_zone) != NULL) { while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { STAILQ_REMOVE_HEAD(&map->bpages, links); free_bounce_page(dmat, bpage); } bz = dmat->bounce_zone; bz->free_bpages += map->pagesreserved; bz->reserved_bpages -= map->pagesreserved; map->pagesreserved = 0; map->pagesneeded = 0; } map->sync_count = 0; map->flags &= ~DMAMAP_MBUF; } static void dma_preread_safe(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { /* * Write back any partial cachelines immediately before and * after the DMA region. We don't need to round the address * down to the nearest cacheline or specify the exact size, * as dcache_wb_poc() will do the rounding for us and works * at cacheline granularity. */ if (va & BUSDMA_DCACHE_MASK) dcache_wb_poc(va, pa, 1); if ((va + size) & BUSDMA_DCACHE_MASK) dcache_wb_poc(va + size, pa + size, 1); dcache_inv_poc_dma(va, pa, size); } static void dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op) { uint32_t len, offset; vm_page_t m; vm_paddr_t pa; vm_offset_t va, tempva; bus_size_t size; offset = sl->paddr & PAGE_MASK; m = sl->pages; size = sl->datacount; pa = sl->paddr; for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) { tempva = 0; if (sl->vaddr == 0) { len = min(PAGE_SIZE - offset, size); tempva = pmap_quick_enter_page(m); va = tempva | offset; KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset), ("unexpected vm_page_t phys: 0x%08x != 0x%08x", VM_PAGE_TO_PHYS(m) | offset, pa)); } else { len = sl->datacount; va = sl->vaddr; } switch (op) { case BUS_DMASYNC_PREWRITE: case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD: dcache_wb_poc(va, pa, len); break; case BUS_DMASYNC_PREREAD: /* * An mbuf may start in the middle of a cacheline. There * will be no cpu writes to the beginning of that line * (which contains the mbuf header) while dma is in * progress. Handle that case by doing a writeback of * just the first cacheline before invalidating the * overall buffer. Any mbuf in a chain may have this * misalignment. Buffers which are not mbufs bounce if * they are not aligned to a cacheline. */ dma_preread_safe(va, pa, len); break; case BUS_DMASYNC_POSTREAD: case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE: dcache_inv_poc(va, pa, len); break; default: panic("unsupported combination of sync operations: " "0x%08x\n", op); } if (tempva != 0) pmap_quick_remove_page(tempva); } } void bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { struct bounce_page *bpage; struct sync_list *sl, *end; vm_offset_t datavaddr, tempvaddr; if (op == BUS_DMASYNC_POSTWRITE) return; /* * If the buffer was from user space, it is possible that this is not * the same vm map, especially on a POST operation. It's not clear that * dma on userland buffers can work at all right now. To be safe, until * we're able to test direct userland dma, panic on a map mismatch. */ if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " "performing bounce", __func__, dmat, dmat->flags, op); /* * For PREWRITE do a writeback. Clean the caches from the * innermost to the outermost levels. 
*/ if (op & BUS_DMASYNC_PREWRITE) { while (bpage != NULL) { tempvaddr = 0; datavaddr = bpage->datavaddr; if (datavaddr == 0) { tempvaddr = pmap_quick_enter_page( bpage->datapage); datavaddr = tempvaddr | bpage->dataoffs; } bcopy((void *)datavaddr, (void *)bpage->vaddr, bpage->datacount); if (tempvaddr != 0) pmap_quick_remove_page(tempvaddr); if ((dmat->flags & BUS_DMA_COHERENT) == 0) dcache_wb_poc(bpage->vaddr, bpage->busaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; } /* * Do an invalidate for PREREAD unless a writeback was already * done above due to PREWRITE also being set. The reason for a * PREREAD invalidate is to prevent dirty lines currently in the * cache from being evicted during the DMA. If a writeback was * done due to PREWRITE also being set there will be no dirty * lines and the POSTREAD invalidate handles the rest. The * invalidate is done from the innermost to outermost level. If * L2 were done first, a dirty cacheline could be automatically * evicted from L1 before we invalidated it, re-dirtying the L2. */ if ((op & BUS_DMASYNC_PREREAD) && !(op & BUS_DMASYNC_PREWRITE)) { bpage = STAILQ_FIRST(&map->bpages); while (bpage != NULL) { if ((dmat->flags & BUS_DMA_COHERENT) == 0) dcache_inv_poc_dma(bpage->vaddr, bpage->busaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } } /* * Re-invalidate the caches on a POSTREAD, even though they were * already invalidated at PREREAD time. Aggressive prefetching * due to accesses to other data near the dma buffer could have * brought buffer data into the caches which is now stale. The * caches are invalidated from the outermost to innermost; the * prefetches could be happening right now, and if L1 were * invalidated first, stale L2 data could be prefetched into L1. */ if (op & BUS_DMASYNC_POSTREAD) { while (bpage != NULL) { if ((dmat->flags & BUS_DMA_COHERENT) == 0) dcache_inv_poc(bpage->vaddr, bpage->busaddr, bpage->datacount); tempvaddr = 0; datavaddr = bpage->datavaddr; if (datavaddr == 0) { tempvaddr = pmap_quick_enter_page( bpage->datapage); datavaddr = tempvaddr | bpage->dataoffs; } bcopy((void *)bpage->vaddr, (void *)datavaddr, bpage->datacount); if (tempvaddr != 0) pmap_quick_remove_page(tempvaddr); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; } } /* * For COHERENT memory no cache maintenance is necessary, but ensure all * writes have reached memory for the PREWRITE case. No action is * needed for a PREREAD without PREWRITE also set, because that would * imply that the cpu had written to the COHERENT buffer and expected * the dma device to see that change, and by definition a PREWRITE sync * is required to make that happen. */ if (map->flags & DMAMAP_COHERENT) { if (op & BUS_DMASYNC_PREWRITE) { dsb(); if ((dmat->flags & BUS_DMA_COHERENT) == 0) cpu_l2cache_drain_writebuf(); } return; } /* * Cache maintenance for normal (non-COHERENT non-bounce) buffers. All * the comments about the sequences for flushing cache levels in the * bounce buffer code above apply here as well. In particular, the fact * that the sequence is inner-to-outer for PREREAD invalidation and * outer-to-inner for POSTREAD invalidation is not a mistake. 
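 *
 * A hedged reference sketch (editor's addition) of the driver-side
 * protocol these sync ops implement, per bus_dmamap_sync(9); the device
 * start/wait steps are hypothetical:
 *
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREREAD |
 *	    BUS_DMASYNC_PREWRITE);
 *	(start the DMA transfer and wait for it to complete)
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD |
 *	    BUS_DMASYNC_POSTWRITE);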
*/ if (map->sync_count != 0) { sl = &map->slist[0]; end = &map->slist[map->sync_count]; CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " "performing sync", __func__, dmat, dmat->flags, op); for ( ; sl != end; ++sl) dma_dcache_sync(sl, op); } } diff --git a/sys/arm64/arm64/busdma_machdep.c b/sys/arm64/arm64/busdma_machdep.c index 5d42d4e58c22..4ea604ce474c 100644 --- a/sys/arm64/arm64/busdma_machdep.c +++ b/sys/arm64/arm64/busdma_machdep.c @@ -1,251 +1,216 @@ /*- * Copyright (c) 1997, 1998 Justin T. Gibbs. * Copyright (c) 2013, 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Portions of this software were developed by Semihalf * under sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -/* - * Convenience function for manipulating driver locks from busdma (during - * busdma_swi, for example). - */ -void -busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) -{ - struct mtx *dmtx; - - dmtx = (struct mtx *)arg; - switch (op) { - case BUS_DMA_LOCK: - mtx_lock(dmtx); - break; - case BUS_DMA_UNLOCK: - mtx_unlock(dmtx); - break; - default: - panic("Unknown operation 0x%x for busdma_lock_mutex!", op); - } -} - -/* - * dflt_lock should never get called. It gets put into the dma tag when - * lockfunc == NULL, which is only valid if the maps that are associated - * with the tag are meant to never be defered. - * XXX Should have a way to identify which driver is responsible here. - */ -void -bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op) -{ - - panic("driver error: busdma dflt_lock called"); -} - /* * Return true if a match is made. * * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. * * If paddr is within the bounds of the dma tag then call the filter callback * to check for a match, if there is no filter callback then assume a match. 
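 *
 * (Editorial note: a return value of 1 therefore means some tag in the
 * chain claims paddr and the address must be bounced; 0 means no tag
 * objects to DMA at that address.)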
*/ int bus_dma_run_filter(struct bus_dma_tag_common *tc, bus_addr_t paddr) { while (tc != NULL) { if ((paddr > tc->lowaddr && paddr <= tc->highaddr) && (tc->filter == NULL || (*tc->filter)(tc->filterarg, paddr) != 0)) return (1); tc = tc->parent; } return (0); } int common_bus_dma_tag_create(struct bus_dma_tag_common *parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, size_t sz, void **dmat) { void *newtag; struct bus_dma_tag_common *common; KASSERT(sz >= sizeof(struct bus_dma_tag_common), ("sz")); /* Return a NULL tag on failure */ *dmat = NULL; /* Basic sanity checking */ if (boundary != 0 && boundary < maxsegsz) maxsegsz = boundary; if (maxsegsz == 0) return (EINVAL); newtag = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT); if (newtag == NULL) { CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, 0, ENOMEM); return (ENOMEM); } common = newtag; common->impl = &bus_dma_bounce_impl; common->parent = parent; common->alignment = alignment; common->boundary = boundary; common->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); common->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); common->filter = filter; common->filterarg = filterarg; common->maxsize = maxsize; common->nsegments = nsegments; common->maxsegsz = maxsegsz; common->flags = flags; common->ref_count = 1; /* Count ourself */ if (lockfunc != NULL) { common->lockfunc = lockfunc; common->lockfuncarg = lockfuncarg; } else { - common->lockfunc = bus_dma_dflt_lock; + common->lockfunc = _busdma_dflt_lock; common->lockfuncarg = NULL; } /* Take into account any restrictions imposed by our parent tag */ if (parent != NULL) { common->impl = parent->impl; common->lowaddr = MIN(parent->lowaddr, common->lowaddr); common->highaddr = MAX(parent->highaddr, common->highaddr); common->alignment = MAX(parent->alignment, common->alignment); if (common->boundary == 0) common->boundary = parent->boundary; else if (parent->boundary != 0) { common->boundary = MIN(parent->boundary, common->boundary); } if (common->filter == NULL) { /* * Short circuit looking at our parent directly * since we have encapsulated all of its information */ common->filter = parent->filter; common->filterarg = parent->filterarg; common->parent = parent->parent; } atomic_add_int(&parent->ref_count, 1); } *dmat = common; return (0); } /* * Allocate a device specific dma_tag. 
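 *
 * A minimal illustrative call (editor's addition; "sc", its mutex, and the
 * 64KB sizes are hypothetical, not taken from this change), using the
 * busdma_lock_mutex helper consolidated by this change as the lockfunc:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev),
 *	    PAGE_SIZE, 0,			(alignment, boundary)
 *	    BUS_SPACE_MAXADDR_32BIT,		(lowaddr)
 *	    BUS_SPACE_MAXADDR,			(highaddr)
 *	    NULL, NULL,				(filter, filterarg)
 *	    65536, 1, 65536,			(maxsize, nsegments, maxsegsz)
 *	    0, busdma_lock_mutex, &sc->mtx,	(flags, lockfunc, lockfuncarg)
 *	    &sc->dmat);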
*/ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat) { struct bus_dma_tag_common *tc; int error; if (parent == NULL) { error = bus_dma_bounce_impl.tag_create(parent, alignment, boundary, lowaddr, highaddr, filter, filterarg, maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat); } else { tc = (struct bus_dma_tag_common *)parent; error = tc->impl->tag_create(parent, alignment, boundary, lowaddr, highaddr, filter, filterarg, maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat); } return (error); } void bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat) { struct bus_dma_tag_common *common; if (t == NULL || dmat == NULL) return; common = (struct bus_dma_tag_common *)dmat; t->parent = (bus_dma_tag_t)common->parent; t->alignment = common->alignment; t->boundary = common->boundary; t->lowaddr = common->lowaddr; t->highaddr = common->highaddr; t->maxsize = common->maxsize; t->nsegments = common->nsegments; t->maxsegsize = common->maxsegsz; t->flags = common->flags; t->lockfunc = common->lockfunc; t->lockfuncarg = common->lockfuncarg; } int bus_dma_tag_destroy(bus_dma_tag_t dmat) { struct bus_dma_tag_common *tc; tc = (struct bus_dma_tag_common *)dmat; return (tc->impl->tag_destroy(dmat)); } int bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain) { return (0); } diff --git a/sys/arm64/include/bus_dma_impl.h b/sys/arm64/include/bus_dma_impl.h index 1488bd3a55da..949850cc528e 100644 --- a/sys/arm64/include/bus_dma_impl.h +++ b/sys/arm64/include/bus_dma_impl.h @@ -1,98 +1,97 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _MACHINE_BUS_DMA_IMPL_H_ #define _MACHINE_BUS_DMA_IMPL_H_ struct bus_dma_tag_common { struct bus_dma_impl *impl; struct bus_dma_tag_common *parent; bus_size_t alignment; bus_addr_t boundary; bus_addr_t lowaddr; bus_addr_t highaddr; bus_dma_filter_t *filter; void *filterarg; bus_size_t maxsize; u_int nsegments; bus_size_t maxsegsz; int flags; bus_dma_lock_t *lockfunc; void *lockfuncarg; int ref_count; int domain; }; struct bus_dma_impl { int (*tag_create)(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat); int (*tag_destroy)(bus_dma_tag_t dmat); int (*tag_set_domain)(bus_dma_tag_t); bool (*id_mapped)(bus_dma_tag_t, vm_paddr_t, bus_size_t); int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp); int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map); int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp); void (*mem_free)(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map); int (*load_ma)(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp); int (*load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp); int (*load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs, int *segp); void (*map_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg); bus_dma_segment_t *(*map_complete)(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error); void (*map_unload)(bus_dma_tag_t dmat, bus_dmamap_t map); void (*map_sync)(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op); }; -void bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op); int bus_dma_run_filter(struct bus_dma_tag_common *dmat, bus_addr_t paddr); int common_bus_dma_tag_create(struct bus_dma_tag_common *parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, size_t sz, void **dmat); extern struct bus_dma_impl bus_dma_bounce_impl; #endif diff --git a/sys/kern/subr_bus_dma.c b/sys/kern/subr_bus_dma.c index 909750112a96..7ed012ba1a4f 100644 --- a/sys/kern/subr_bus_dma.c +++ b/sys/kern/subr_bus_dma.c @@ -1,865 +1,904 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012 EMC Corp. * All rights reserved. * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_bus.h" #include "opt_iommu.h" #include #include #include #include #include #include #include +#include #include #include +#include #include #include #include #include #include #include #include #include #include #include
+/* + * Convenience function for manipulating driver locks from busdma (during + * busdma_swi, for example). + */ +void +busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) +{ + struct mtx *dmtx; + + dmtx = (struct mtx *)arg; + switch (op) { + case BUS_DMA_LOCK: + mtx_lock(dmtx); + break; + case BUS_DMA_UNLOCK: + mtx_unlock(dmtx); + break; + default: + panic("Unknown operation 0x%x for busdma_lock_mutex!", op); + } +} + +/* + * dflt_lock should never get called. It gets put into the dma tag when + * lockfunc == NULL, which is only valid if the maps that are associated + * with the tag are meant to never be deferred. + * + * XXX Should have a way to identify which driver is responsible here. + */ +void +_busdma_dflt_lock(void *arg, bus_dma_lock_op_t op) +{ + + panic("driver error: _busdma_dflt_lock called"); +} + +
 /* * Load up data starting at offset within a region specified by a * list of virtual address ranges until either length or the region * is exhausted. */ static int _bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs, int flags, size_t offset, size_t length) { int error; error = 0; for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) { char *addr; size_t ds_len; KASSERT((offset < list->ds_len), ("Invalid mid-segment offset")); addr = (char *)(uintptr_t)list->ds_addr + offset; ds_len = list->ds_len - offset; offset = 0; if (ds_len > length) ds_len = length; length -= ds_len; KASSERT((ds_len != 0), ("Segment length is zero")); error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap, flags, NULL, nsegs); if (error) break; } return (error); }
/* * Load a list of physical addresses. */ static int _bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags) { int error; error = 0; for (; sglist_cnt > 0; sglist_cnt--, list++) { error = _bus_dmamap_load_phys(dmat, map, (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL, nsegs); if (error) break; } return (error); }
/* * Load an unmapped mbuf. */ static int _bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags) { int error, i, off, len, pglen, pgoff, seglen, segoff; M_ASSERTEXTPG(m); len = m->m_len; error = 0; /* Skip over any data removed from the front.
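 *
 * (Editorial note: an M_EXTPG mbuf carries its data as an optional
 * header, an array of unmapped pages, and an optional trailer; the code
 * below walks those regions in order, loading the header and trailer as
 * mapped kernel buffers and the pages by physical address.)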
*/ off = mtod(m, vm_offset_t); if (m->m_epg_hdrlen != 0) { if (off >= m->m_epg_hdrlen) { off -= m->m_epg_hdrlen; } else { seglen = m->m_epg_hdrlen - off; segoff = off; seglen = min(seglen, len); off = 0; len -= seglen; error = _bus_dmamap_load_buffer(dmat, map, &m->m_epg_hdr[segoff], seglen, kernel_pmap, flags, segs, nsegs); } } pgoff = m->m_epg_1st_off; for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) { pglen = m_epg_pagelen(m, i, pgoff); if (off >= pglen) { off -= pglen; pgoff = 0; continue; } seglen = pglen - off; segoff = pgoff + off; off = 0; seglen = min(seglen, len); len -= seglen; error = _bus_dmamap_load_phys(dmat, map, m->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs); pgoff = 0; }; if (len != 0 && error == 0) { KASSERT((off + len) <= m->m_epg_trllen, ("off + len > trail (%d + %d > %d)", off, len, m->m_epg_trllen)); error = _bus_dmamap_load_buffer(dmat, map, &m->m_epg_trail[off], len, kernel_pmap, flags, segs, nsegs); } return (error); } /* * Load a single mbuf. */ static int _bus_dmamap_load_single_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags) { int error; error = 0; if ((m->m_flags & M_EXTPG) != 0) error = _bus_dmamap_load_mbuf_epg(dmat, map, m, segs, nsegs, flags); else error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len, kernel_pmap, flags | BUS_DMA_LOAD_MBUF, segs, nsegs); CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, *nsegs); return (error); } /* * Load an mbuf chain. */ static int _bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags) { struct mbuf *m; int error; error = 0; for (m = m0; m != NULL && error == 0; m = m->m_next) { if (m->m_len > 0) { if ((m->m_flags & M_EXTPG) != 0) error = _bus_dmamap_load_mbuf_epg(dmat, map, m, segs, nsegs, flags); else error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len, kernel_pmap, flags | BUS_DMA_LOAD_MBUF, segs, nsegs); } } CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, *nsegs); return (error); } /* * Load from block io. */ static int _bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio, int *nsegs, int flags) { if ((bio->bio_flags & BIO_VLIST) != 0) { bus_dma_segment_t *segs = (bus_dma_segment_t *)bio->bio_data; return (_bus_dmamap_load_vlist(dmat, map, segs, bio->bio_ma_n, kernel_pmap, nsegs, flags, bio->bio_ma_offset, bio->bio_bcount)); } if ((bio->bio_flags & BIO_UNMAPPED) != 0) return (_bus_dmamap_load_ma(dmat, map, bio->bio_ma, bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs)); return (_bus_dmamap_load_buffer(dmat, map, bio->bio_data, bio->bio_bcount, kernel_pmap, flags, NULL, nsegs)); } int bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp) { vm_paddr_t paddr; bus_size_t len; int error, i; error = 0; for (i = 0; tlen > 0; i++, tlen -= len) { len = min(PAGE_SIZE - ma_offs, tlen); paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs; error = _bus_dmamap_load_phys(dmat, map, paddr, len, flags, segs, segp); if (error != 0) break; ma_offs = 0; } return (error); } /* * Load a cam control block. 
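 *
 * (Editorial note: the dispatch below is two-level: the CCB function code
 * picks where data_ptr/dxfer_len/sglist_cnt live, then CAM_DATA_MASK picks
 * whether data_ptr is a virtual buffer, a physical address, a virtual or
 * physical S/G list, or a struct bio.)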
*/ static int _bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb, int *nsegs, int flags) { struct ccb_hdr *ccb_h; void *data_ptr; int error; uint32_t dxfer_len; uint16_t sglist_cnt; error = 0; ccb_h = &ccb->ccb_h; switch (ccb_h->func_code) { case XPT_SCSI_IO: { struct ccb_scsiio *csio; csio = &ccb->csio; data_ptr = csio->data_ptr; dxfer_len = csio->dxfer_len; sglist_cnt = csio->sglist_cnt; break; } case XPT_CONT_TARGET_IO: { struct ccb_scsiio *ctio; ctio = &ccb->ctio; data_ptr = ctio->data_ptr; dxfer_len = ctio->dxfer_len; sglist_cnt = ctio->sglist_cnt; break; } case XPT_ATA_IO: { struct ccb_ataio *ataio; ataio = &ccb->ataio; data_ptr = ataio->data_ptr; dxfer_len = ataio->dxfer_len; sglist_cnt = 0; break; } case XPT_NVME_IO: case XPT_NVME_ADMIN: { struct ccb_nvmeio *nvmeio; nvmeio = &ccb->nvmeio; data_ptr = nvmeio->data_ptr; dxfer_len = nvmeio->dxfer_len; sglist_cnt = nvmeio->sglist_cnt; break; } default: panic("_bus_dmamap_load_ccb: Unsupported func code %d", ccb_h->func_code); } switch ((ccb_h->flags & CAM_DATA_MASK)) { case CAM_DATA_VADDR: error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len, kernel_pmap, flags, NULL, nsegs); break; case CAM_DATA_PADDR: error = _bus_dmamap_load_phys(dmat, map, (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL, nsegs); break; case CAM_DATA_SG: error = _bus_dmamap_load_vlist(dmat, map, (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap, nsegs, flags, 0, dxfer_len); break; case CAM_DATA_SG_PADDR: error = _bus_dmamap_load_plist(dmat, map, (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags); break; case CAM_DATA_BIO: error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr, nsegs, flags); break; default: panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented", ccb_h->flags); } return (error); } /* * Load a uio. */ static int _bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, int *nsegs, int flags) { bus_size_t resid; bus_size_t minlen; struct iovec *iov; pmap_t pmap; caddr_t addr; int error, i; if (uio->uio_segflg == UIO_USERSPACE) { KASSERT(uio->uio_td != NULL, ("bus_dmamap_load_uio: USERSPACE but no proc")); pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); } else pmap = kernel_pmap; resid = uio->uio_resid; iov = uio->uio_iov; error = 0; for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { /* * Now at the first iovec to load. Load each iovec * until we have exhausted the residual count. */ addr = (caddr_t) iov[i].iov_base; minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; if (minlen > 0) { error = _bus_dmamap_load_buffer(dmat, map, addr, minlen, pmap, flags, NULL, nsegs); resid -= minlen; } } return (error); } /* * Map the buffer buf into bus space using the dmamap map. 
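
Note how the uio loader above selects its pmap: a UIO_USERSPACE uio maps the issuing thread's address space, which is why bus_dmamap_load_uio() can target user buffers directly. A sketch of the callback2-style usage (foo_* names hypothetical; the public wrapper forces BUS_DMA_NOWAIT, so EINPROGRESS cannot be returned here):

#include <sys/param.h>
#include <sys/uio.h>
#include <machine/bus.h>

static void
foo_uio_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsize, int error)
{
	if (error != 0)
		return;
	/* ... program a transfer of mapsize bytes from segs[0..nsegs) ... */
}

static int
foo_start_io(bus_dma_tag_t dtag, bus_dmamap_t map, struct uio *uio)
{
	return (bus_dmamap_load_uio(dtag, map, uio, foo_uio_cb, NULL, 0));
}
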
*/ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; struct memdesc mem; int error; int nsegs; #ifdef KMSAN mem = memdesc_vaddr(buf, buflen); _bus_dmamap_load_kmsan(dmat, map, &mem); #endif if ((flags & BUS_DMA_NOWAIT) == 0) { mem = memdesc_vaddr(buf, buflen); _bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg); } nsegs = -1; error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap, flags, NULL, &nsegs); nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, 0); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. */ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; int nsegs, error; M_ASSERTPKTHDR(m0); #ifdef KMSAN struct memdesc mem = memdesc_mbuf(m0); _bus_dmamap_load_kmsan(dmat, map, &mem); #endif flags |= BUS_DMA_NOWAIT; nsegs = -1; error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags); ++nsegs; segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, 0, error); else (*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error); CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); return (error); } int bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags) { int error; #ifdef KMSAN struct memdesc mem = memdesc_mbuf(m0); _bus_dmamap_load_kmsan(dmat, map, &mem); #endif flags |= BUS_DMA_NOWAIT; *nsegs = -1; error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags); ++*nsegs; _bus_dmamap_complete(dmat, map, segs, *nsegs, error); return (error); } int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; int nsegs, error; #ifdef KMSAN struct memdesc mem = memdesc_uio(uio); _bus_dmamap_load_kmsan(dmat, map, &mem); #endif flags |= BUS_DMA_NOWAIT; nsegs = -1; error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags); nsegs++; segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, 0, error); else (*callback)(callback_arg, segs, nsegs, uio->uio_resid, error); CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); return (error); } int bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; struct ccb_hdr *ccb_h; struct memdesc mem; int error; int nsegs; #ifdef KMSAN mem = memdesc_ccb(ccb); _bus_dmamap_load_kmsan(dmat, map, &mem); #endif ccb_h = &ccb->ccb_h; if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) { callback(callback_arg, NULL, 0, 0); return (0); } if ((flags & BUS_DMA_NOWAIT) == 0) { mem = memdesc_ccb(ccb); _bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg); } nsegs = -1; error = _bus_dmamap_load_ccb(dmat, map, 
ccb, &nsegs, flags); nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, error); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. */ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; struct memdesc mem; int error; int nsegs; #ifdef KMSAN mem = memdesc_bio(bio); _bus_dmamap_load_kmsan(dmat, map, &mem); #endif if ((flags & BUS_DMA_NOWAIT) == 0) { mem = memdesc_bio(bio); _bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg); } nsegs = -1; error = _bus_dmamap_load_bio(dmat, map, bio, &nsegs, flags); nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, error); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. */ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; int error; int nsegs; #ifdef KMSAN _bus_dmamap_load_kmsan(dmat, map, mem); #endif if ((flags & BUS_DMA_NOWAIT) == 0) _bus_dmamap_waitok(dmat, map, mem, callback, callback_arg); nsegs = -1; error = 0; switch (mem->md_type) { case MEMDESC_VADDR: error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr, mem->md_opaque, kernel_pmap, flags, NULL, &nsegs); break; case MEMDESC_PADDR: error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr, mem->md_opaque, flags, NULL, &nsegs); break; case MEMDESC_VLIST: error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list, mem->md_opaque, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX); break; case MEMDESC_PLIST: error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list, mem->md_opaque, &nsegs, flags); break; case MEMDESC_BIO: error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio, &nsegs, flags); break; case MEMDESC_UIO: error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio, &nsegs, flags); break; case MEMDESC_MBUF: error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf, NULL, &nsegs, flags); break; case MEMDESC_CCB: error = _bus_dmamap_load_ccb(dmat, map, mem->u.md_ccb, &nsegs, flags); break; } nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, 0); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. 
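
The EINPROGRESS/ENOMEM split seen in these wrappers is the contract every loader in this file follows: 0 means the callback has already run, EINPROGRESS means a WAITOK load was deferred and the callback will run later from busdma_swi, and ENOMEM can only surface when BUS_DMA_NOWAIT disabled deferral. A sketch of a caller distinguishing the three outcomes (foo_* names and the per-transfer struct are hypothetical):

struct foo_xfer {			/* hypothetical per-transfer state */
	bus_addr_t	fx_busaddr;
	int		fx_error;
};

static void
foo_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct foo_xfer *fx = arg;

	fx->fx_error = error;
	if (error == 0)
		fx->fx_busaddr = segs[0].ds_addr;
}

static int
foo_submit(bus_dma_tag_t dtag, bus_dmamap_t map, void *buf, bus_size_t len,
    struct foo_xfer *fx)
{
	int error;

	error = bus_dmamap_load(dtag, map, buf, len, foo_load_cb, fx, 0);
	if (error == EINPROGRESS)
		return (0);		/* deferred; callback fires later */
	if (error != 0)
		return (error);		/* ENOMEM, NOWAIT loads only */
	return (fx->fx_error);		/* callback already completed */
}
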
*/ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, struct crypto_buffer *cb, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; int error; int nsegs; flags |= BUS_DMA_NOWAIT; nsegs = -1; error = 0; switch (cb->cb_type) { case CRYPTO_BUF_CONTIG: error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf, cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs); break; case CRYPTO_BUF_MBUF: error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf, NULL, &nsegs, flags); break; case CRYPTO_BUF_SINGLE_MBUF: error = _bus_dmamap_load_single_mbuf(dmat, map, cb->cb_mbuf, NULL, &nsegs, flags); break; case CRYPTO_BUF_UIO: error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs, flags); break; case CRYPTO_BUF_VMPAGE: error = _bus_dmamap_load_ma(dmat, map, cb->cb_vm_page, cb->cb_vm_page_len, cb->cb_vm_page_offset, flags, NULL, &nsegs); break; default: error = EINVAL; } nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, 0); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. */ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback, callback_arg, flags)); } void bus_dma_template_init(bus_dma_template_t *t, bus_dma_tag_t parent) { if (t == NULL) return; t->parent = parent; t->alignment = 1; t->boundary = 0; t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR; t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE; t->nsegments = BUS_SPACE_UNRESTRICTED; t->lockfunc = NULL; t->lockfuncarg = NULL; t->flags = 0; } int bus_dma_template_tag(bus_dma_template_t *t, bus_dma_tag_t *dmat) { if (t == NULL || dmat == NULL) return (EINVAL); return (bus_dma_tag_create(t->parent, t->alignment, t->boundary, t->lowaddr, t->highaddr, NULL, NULL, t->maxsize, t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg, dmat)); } void bus_dma_template_fill(bus_dma_template_t *t, bus_dma_param_t *kv, u_int count) { bus_dma_param_t *pkv; while (count) { pkv = &kv[--count]; switch (pkv->key) { case BD_PARAM_PARENT: t->parent = pkv->ptr; break; case BD_PARAM_ALIGNMENT: t->alignment = pkv->num; break; case BD_PARAM_BOUNDARY: t->boundary = pkv->num; break; case BD_PARAM_LOWADDR: t->lowaddr = pkv->pa; break; case BD_PARAM_HIGHADDR: t->highaddr = pkv->pa; break; case BD_PARAM_MAXSIZE: t->maxsize = pkv->num; break; case BD_PARAM_NSEGMENTS: t->nsegments = pkv->num; break; case BD_PARAM_MAXSEGSIZE: t->maxsegsize = pkv->num; break; case BD_PARAM_FLAGS: t->flags = pkv->num; break; case BD_PARAM_LOCKFUNC: t->lockfunc = pkv->ptr; break; case BD_PARAM_LOCKFUNCARG: t->lockfuncarg = pkv->ptr; break; case BD_PARAM_NAME: t->name = pkv->ptr; break; case BD_PARAM_INVALID: default: KASSERT(0, ("Invalid key %d\n", pkv->key)); break; } } return; } #ifndef IOMMU bool bus_dma_iommu_set_buswide(device_t dev); int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t start, vm_size_t length, int flags); bool bus_dma_iommu_set_buswide(device_t dev) { return (false); } int 
bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t start, vm_size_t length, int flags) { return (0); } #endif diff --git a/sys/powerpc/powerpc/busdma_machdep.c b/sys/powerpc/powerpc/busdma_machdep.c index ea3ab3dc7315..666f40d4edd7 100644 --- a/sys/powerpc/powerpc/busdma_machdep.c +++ b/sys/powerpc/powerpc/busdma_machdep.c @@ -1,919 +1,885 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * From amd64/busdma_machdep.c, r204214 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iommu_if.h" #define MAX_BPAGES MIN(8192, physmem/40) struct bounce_page; struct bounce_zone; struct bus_dma_tag { bus_dma_tag_t parent; bus_size_t alignment; bus_addr_t boundary; bus_addr_t lowaddr; bus_addr_t highaddr; bus_dma_filter_t *filter; void *filterarg; bus_size_t maxsize; bus_size_t maxsegsz; u_int nsegments; int flags; int ref_count; int map_count; bus_dma_lock_t *lockfunc; void *lockfuncarg; struct bounce_zone *bounce_zone; device_t iommu; void *iommu_cookie; }; static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Busdma parameters"); struct bus_dmamap { STAILQ_HEAD(, bounce_page) bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; struct memdesc mem; bus_dma_segment_t *segments; int nsegs; bus_dmamap_callback_t *callback; void *callback_arg; STAILQ_ENTRY(bus_dmamap) links; int contigalloc; }; static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata"); static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); #define dmat_alignment(dmat) ((dmat)->alignment) #define dmat_flags(dmat) ((dmat)->flags) #define dmat_lowaddr(dmat) ((dmat)->lowaddr) #define dmat_lockfunc(dmat) ((dmat)->lockfunc) #define dmat_lockfuncarg(dmat) ((dmat)->lockfuncarg) #include "../../kern/subr_busdma_bounce.c" /* * Return true if a match is made. * * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 
* * If paddr is within the bounds of the dma tag then call the filter callback * to check for a match, if there is no filter callback then assume a match. */ static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) { int retval; retval = 0; do { if (dmat->filter == NULL && dmat->iommu == NULL && paddr > dmat->lowaddr && paddr <= dmat->highaddr) retval = 1; if (dmat->filter == NULL && !vm_addr_align_ok(paddr, dmat->alignment)) retval = 1; if (dmat->filter != NULL && (*dmat->filter)(dmat->filterarg, paddr) != 0) retval = 1; dmat = dmat->parent; } while (retval == 0 && dmat != NULL); return (retval); } -/* - * Convenience function for manipulating driver locks from busdma (during - * busdma_swi, for example). - */ -void -busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) -{ - struct mtx *dmtx; - - dmtx = (struct mtx *)arg; - switch (op) { - case BUS_DMA_LOCK: - mtx_lock(dmtx); - break; - case BUS_DMA_UNLOCK: - mtx_unlock(dmtx); - break; - default: - panic("Unknown operation 0x%x for busdma_lock_mutex!", op); - } -} - -/* - * dflt_lock should never get called. It gets put into the dma tag when - * lockfunc == NULL, which is only valid if the maps that are associated - * with the tag are meant to never be defered. - * XXX Should have a way to identify which driver is responsible here. - */ -static void -dflt_lock(void *arg, bus_dma_lock_op_t op) -{ - panic("driver error: busdma dflt_lock called"); -} - #define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 /* * Allocate a device specific dma_tag. */ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat) { bus_dma_tag_t newtag; int error = 0; /* Basic sanity checking */ if (boundary != 0 && boundary < maxsegsz) maxsegsz = boundary; if (maxsegsz == 0) { return (EINVAL); } /* Return a NULL tag on failure */ *dmat = NULL; newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_ZERO | M_NOWAIT); if (newtag == NULL) { CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, 0, error); return (ENOMEM); } newtag->parent = parent; newtag->alignment = alignment; newtag->boundary = boundary; newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); newtag->filter = filter; newtag->filterarg = filterarg; newtag->maxsize = maxsize; newtag->nsegments = nsegments; newtag->maxsegsz = maxsegsz; newtag->flags = flags; newtag->ref_count = 1; /* Count ourself */ newtag->map_count = 0; if (lockfunc != NULL) { newtag->lockfunc = lockfunc; newtag->lockfuncarg = lockfuncarg; } else { - newtag->lockfunc = dflt_lock; + newtag->lockfunc = _busdma_dflt_lock; newtag->lockfuncarg = NULL; } /* Take into account any restrictions imposed by our parent tag */ if (parent != NULL) { newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); if (newtag->boundary == 0) newtag->boundary = parent->boundary; else if (parent->boundary != 0) newtag->boundary = MIN(parent->boundary, newtag->boundary); if (newtag->filter == NULL) { /* * Short circuit looking at our parent directly * since we have encapsulated all of its information */ newtag->filter = parent->filter; newtag->filterarg = parent->filterarg; newtag->parent = 
parent->parent; } if (newtag->parent != NULL) atomic_add_int(&parent->ref_count, 1); newtag->iommu = parent->iommu; newtag->iommu_cookie = parent->iommu_cookie; } if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL) newtag->flags |= BUS_DMA_COULD_BOUNCE; if (newtag->alignment > 1) newtag->flags |= BUS_DMA_COULD_BOUNCE; if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && (flags & BUS_DMA_ALLOCNOW) != 0) { struct bounce_zone *bz; /* Must bounce */ if ((error = alloc_bounce_zone(newtag)) != 0) { free(newtag, M_DEVBUF); return (error); } bz = newtag->bounce_zone; if (ptoa(bz->total_bpages) < maxsize) { int pages; pages = atop(maxsize) - bz->total_bpages; /* Add pages to our bounce pool */ if (alloc_bounce_pages(newtag, pages) < pages) error = ENOMEM; } /* Performed initial allocation */ newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; } if (error != 0) { free(newtag, M_DEVBUF); } else { *dmat = newtag; } CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); return (error); } void bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat) { if (t == NULL || dmat == NULL) return; t->parent = dmat->parent; t->alignment = dmat->alignment; t->boundary = dmat->boundary; t->lowaddr = dmat->lowaddr; t->highaddr = dmat->highaddr; t->maxsize = dmat->maxsize; t->nsegments = dmat->nsegments; t->maxsegsize = dmat->maxsegsz; t->flags = dmat->flags; t->lockfunc = dmat->lockfunc; t->lockfuncarg = dmat->lockfuncarg; } int bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain) { return (0); } int bus_dma_tag_destroy(bus_dma_tag_t dmat) { bus_dma_tag_t dmat_copy __unused; int error; error = 0; dmat_copy = dmat; if (dmat != NULL) { if (dmat->map_count != 0) { error = EBUSY; goto out; } while (dmat != NULL) { bus_dma_tag_t parent; parent = dmat->parent; atomic_subtract_int(&dmat->ref_count, 1); if (dmat->ref_count == 0) { free(dmat, M_DEVBUF); /* * Last reference count, so * release our reference * count on our parent. */ dmat = parent; } else dmat = NULL; } } out: CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); return (error); } /* * Allocate a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { int error; error = 0; *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO); if (*mapp == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (ENOMEM); } /* * Bouncing might be required if the driver asks for an active * exclusion region, a data alignment that is stricter than 1, and/or * an active address boundary. */ if (dmat->flags & BUS_DMA_COULD_BOUNCE) { /* Must bounce */ struct bounce_zone *bz; int maxpages; if (dmat->bounce_zone == NULL) { if ((error = alloc_bounce_zone(dmat)) != 0) return (error); } bz = dmat->bounce_zone; /* Initialize the new map */ STAILQ_INIT(&((*mapp)->bpages)); /* * Attempt to add pages to our pool on a per-instance * basis up to a sane limit. 
*/ if (dmat->alignment > 1) maxpages = MAX_BPAGES; else maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr)); if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || (bz->map_count > 0 && bz->total_bpages < maxpages)) { int pages; pages = MAX(atop(dmat->maxsize), 1); pages = MIN(maxpages - bz->total_bpages, pages); pages = MAX(pages, 1); if (alloc_bounce_pages(dmat, pages) < pages) error = ENOMEM; if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { if (error == 0) dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; } else { error = 0; } } bz->map_count++; } (*mapp)->nsegs = 0; (*mapp)->segments = (bus_dma_segment_t *)malloc( sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, M_NOWAIT); if ((*mapp)->segments == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (ENOMEM); } if (error == 0) dmat->map_count++; CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, error); return (error); } /* * Destroy a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { if (dmat->flags & BUS_DMA_COULD_BOUNCE) { if (STAILQ_FIRST(&map->bpages) != NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY); return (EBUSY); } if (dmat->bounce_zone) dmat->bounce_zone->map_count--; } free(map->segments, M_DEVBUF); free(map, M_DEVBUF); dmat->map_count--; CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); return (0); } /* * Allocate a piece of memory that can be efficiently mapped into * bus device space based on the constraints lited in the dma tag. * A dmamap to for use with dmamap_load is also allocated. */ int bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { vm_memattr_t attr; int mflags; if (flags & BUS_DMA_NOWAIT) mflags = M_NOWAIT; else mflags = M_WAITOK; bus_dmamap_create(dmat, flags, mapp); if (flags & BUS_DMA_ZERO) mflags |= M_ZERO; if (flags & BUS_DMA_NOCACHE) attr = VM_MEMATTR_UNCACHEABLE; else attr = VM_MEMATTR_DEFAULT; /* * XXX: * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact * alignment guarantees of malloc need to be nailed down, and the * code below should be rewritten to take that into account. * * In the meantime, we'll warn the user if malloc gets it wrong. */ if ((dmat->maxsize <= PAGE_SIZE) && (dmat->alignment <= dmat->maxsize) && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) && attr == VM_MEMATTR_DEFAULT) { *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); } else { /* * XXX Use Contigmalloc until it is merged into this facility * and handles multi-seg allocations. Nobody is doing * multi-seg allocations yet though. * XXX Certain AGP hardware does. */ *vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul, dmat->boundary, attr); (*mapp)->contigalloc = 1; } if (*vaddr == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, ENOMEM); return (ENOMEM); } else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alignment)) { printf("bus_dmamem_alloc failed to align memory properly.\n"); } CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, 0); return (0); } /* * Free a piece of memory and it's allociated dmamap, that was allocated * via bus_dmamem_alloc. Make the same choice for free/contigfree. 
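
bus_dmamem_alloc() above uses plain malloc() for small, weakly constrained tags and falls back to kmem_alloc_contig() otherwise; either way the returned map is loaded like any other buffer. A sketch of the usual descriptor-ring allocation (FOO_RING_SIZE, the foo_* names, and the single-segment tag are assumptions of the example):

#define	FOO_RING_SIZE	4096		/* hypothetical ring size */

static void
foo_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *pa = arg;

	if (error == 0 && nsegs == 1)
		*pa = segs[0].ds_addr;
}

static int
foo_alloc_ring(bus_dma_tag_t dtag, void **ringp, bus_addr_t *pa,
    bus_dmamap_t *mapp)
{
	int error;

	/* dtag is assumed to have been created with nsegments = 1. */
	error = bus_dmamem_alloc(dtag, ringp, BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    mapp);
	if (error != 0)
		return (error);
	return (bus_dmamap_load(dtag, *mapp, *ringp, FOO_RING_SIZE,
	    foo_ring_cb, pa, BUS_DMA_NOWAIT));
}
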
*/ void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) { if (!map->contigalloc) free(vaddr, M_DEVBUF); else kmem_free((vm_offset_t)vaddr, dmat->maxsize); bus_dmamap_destroy(dmat, map); CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); } static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags) { bus_addr_t curaddr; bus_size_t sgsize; if (map->pagesneeded == 0) { CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), dmat->boundary, dmat->alignment); CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded); /* * Count the number of bounce pages * needed in order to complete this transfer */ curaddr = buf; while (buflen != 0) { sgsize = MIN(buflen, dmat->maxsegsz); if (run_filter(dmat, curaddr) != 0) { sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); map->pagesneeded++; } curaddr += sgsize; buflen -= sgsize; } CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } } static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, void *buf, bus_size_t buflen, int flags) { vm_offset_t vaddr; vm_offset_t vendaddr; bus_addr_t paddr; if (map->pagesneeded == 0) { CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), dmat->boundary, dmat->alignment); CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded); /* * Count the number of bounce pages * needed in order to complete this transfer */ vaddr = (vm_offset_t)buf; vendaddr = (vm_offset_t)buf + buflen; while (vaddr < vendaddr) { bus_size_t sg_len; sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); if (pmap == kernel_pmap) paddr = pmap_kextract(vaddr); else paddr = pmap_extract(pmap, vaddr); if (run_filter(dmat, paddr) != 0) { sg_len = roundup2(sg_len, dmat->alignment); map->pagesneeded++; } vaddr += sg_len; } CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } } /* * Add a single contiguous physical range to the segment list. */ static int _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) { int seg; /* * Make sure we don't cross any boundaries. */ if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary)) sgsize = roundup2(curaddr, dmat->boundary) - curaddr; /* * Insert chunk into a segment, coalescing with * previous segment if possible. */ seg = *segp; if (seg == -1) { seg = 0; segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } else { if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len + sgsize, dmat->boundary)) segs[seg].ds_len += sgsize; else { if (++seg >= dmat->nsegments) return (0); segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } } *segp = seg; return (sgsize); } /* * Utility function to load a physical buffer. segp contains * the starting segment on entrace, and the ending segment on exit. 
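
With concrete numbers, the boundary clamp in _bus_dmamap_addseg() above: curaddr = 0x1f000, sgsize = 0x2000, boundary = 0x10000 would cross the 0x20000 line, so sgsize is clipped to roundup2(0x1f000, 0x10000) - 0x1f000 = 0x1000, and the remaining 0x1000 bytes start a new segment on the next pass. The same test restated as a stand-alone helper (a sketch, not the kernel's exact code; it relies on tag creation having already clamped maxsegsz to the boundary, so an aligned curaddr never crosses):

#include <stdint.h>

/* As in sys/param.h; y must be a power of 2. */
#define	roundup2(x, y)	(((x) + ((y) - 1)) & ~((uint64_t)(y) - 1))

/* Clip sgsize so [curaddr, curaddr + sgsize) stays within one boundary. */
static uint64_t
clip_to_boundary(uint64_t curaddr, uint64_t sgsize, uint64_t boundary)
{
	if (boundary != 0 &&
	    ((curaddr ^ (curaddr + sgsize - 1)) & ~(boundary - 1)) != 0)
		sgsize = roundup2(curaddr, boundary) - curaddr;
	return (sgsize);
}

/* clip_to_boundary(0x1f000, 0x2000, 0x10000) == 0x1000 */
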
*/ int _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { bus_addr_t curaddr; bus_size_t sgsize; int error; if (segs == NULL) segs = map->segments; if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); if (map->pagesneeded != 0) { error = _bus_dmamap_reserve_pages(dmat, map, flags); if (error) return (error); } } while (buflen > 0) { curaddr = buf; sgsize = MIN(buflen, dmat->maxsegsz); if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) { sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); curaddr = add_bounce_page(dmat, map, 0, curaddr, sgsize); } sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, segp); if (sgsize == 0) break; buf += sgsize; buflen -= sgsize; } /* * Did we fit? */ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } int _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp) { return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags, segs, segp)); } /* * Utility function to load a linear buffer. segp contains * the starting segment on entrance, and the ending segment on exit. */ int _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, int *segp) { bus_size_t sgsize; bus_addr_t curaddr; vm_offset_t kvaddr, vaddr; int error; if (segs == NULL) segs = map->segments; if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); if (map->pagesneeded != 0) { error = _bus_dmamap_reserve_pages(dmat, map, flags); if (error) return (error); } } vaddr = (vm_offset_t)buf; while (buflen > 0) { bus_size_t max_sgsize; /* * Get the physical address for this segment. */ if (pmap == kernel_pmap) { curaddr = pmap_kextract(vaddr); kvaddr = vaddr; } else { curaddr = pmap_extract(pmap, vaddr); kvaddr = 0; } /* * Compute the segment size, and adjust counts. */ max_sgsize = MIN(buflen, dmat->maxsegsz); sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) { sgsize = roundup2(sgsize, dmat->alignment); sgsize = MIN(sgsize, max_sgsize); curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, sgsize); } else { sgsize = MIN(sgsize, max_sgsize); } sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, segp); if (sgsize == 0) break; vaddr += sgsize; buflen -= sgsize; } /* * Did we fit? */ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } void _bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) { if (dmat->flags & BUS_DMA_COULD_BOUNCE) { map->dmat = dmat; map->mem = *mem; map->callback = callback; map->callback_arg = callback_arg; } } bus_dma_segment_t * _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error) { map->nsegs = nsegs; if (segs != NULL) memcpy(map->segments, segs, map->nsegs*sizeof(segs[0])); if (dmat->iommu != NULL) IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr, dmat->highaddr, dmat->alignment, dmat->boundary, dmat->iommu_cookie); if (segs != NULL) memcpy(segs, map->segments, map->nsegs*sizeof(segs[0])); else segs = map->segments; return (segs); } /* * Release the mapping held by map. 
*/ void bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) { struct bounce_page *bpage; if (dmat->iommu) { IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs, dmat->iommu_cookie); map->nsegs = 0; } while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { STAILQ_REMOVE_HEAD(&map->bpages, links); free_bounce_page(dmat, bpage); } } void bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { struct bounce_page *bpage; vm_offset_t datavaddr, tempvaddr; if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { /* * Handle data bouncing. We might also * want to add support for invalidating * the caches on broken hardware */ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " "performing bounce", __func__, dmat, dmat->flags, op); if (op & BUS_DMASYNC_PREWRITE) { while (bpage != NULL) { tempvaddr = 0; datavaddr = bpage->datavaddr; if (datavaddr == 0) { tempvaddr = pmap_quick_enter_page( bpage->datapage); datavaddr = tempvaddr | bpage->dataoffs; } bcopy((void *)datavaddr, (void *)bpage->vaddr, bpage->datacount); if (tempvaddr != 0) pmap_quick_remove_page(tempvaddr); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; } if (op & BUS_DMASYNC_POSTREAD) { while (bpage != NULL) { tempvaddr = 0; datavaddr = bpage->datavaddr; if (datavaddr == 0) { tempvaddr = pmap_quick_enter_page( bpage->datapage); datavaddr = tempvaddr | bpage->dataoffs; } bcopy((void *)bpage->vaddr, (void *)datavaddr, bpage->datacount); if (tempvaddr != 0) pmap_quick_remove_page(tempvaddr); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; } } powerpc_sync(); } int bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie) { tag->iommu = iommu; tag->iommu_cookie = cookie; return (0); } diff --git a/sys/riscv/include/bus_dma_impl.h b/sys/riscv/include/bus_dma_impl.h index e482fb76a716..fd741210ca29 100644 --- a/sys/riscv/include/bus_dma_impl.h +++ b/sys/riscv/include/bus_dma_impl.h @@ -1,95 +1,94 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
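
Since the powerpc bus_dmamap_sync() above copies data into bounce pages on PREWRITE and back out on POSTREAD, then issues powerpc_sync(), drivers must bracket every transfer with matching sync calls. The canonical ordering, as a sketch (foo_* names hypothetical):

static void
foo_tx_start(bus_dma_tag_t dtag, bus_dmamap_t map)
{
	/* CPU filled the buffer: sync before the device reads it. */
	bus_dmamap_sync(dtag, map, BUS_DMASYNC_PREWRITE);
	/* ... start the DMA engine ... */
}

static void
foo_rx_complete(bus_dma_tag_t dtag, bus_dmamap_t map)
{
	/* Device wrote the buffer: sync before the CPU reads it. */
	bus_dmamap_sync(dtag, map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(dtag, map);
}
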
* * $FreeBSD$ */ #ifndef _MACHINE_BUS_DMA_IMPL_H_ #define _MACHINE_BUS_DMA_IMPL_H_ struct bus_dma_tag_common { struct bus_dma_impl *impl; struct bus_dma_tag_common *parent; bus_size_t alignment; bus_addr_t boundary; bus_addr_t lowaddr; bus_addr_t highaddr; bus_dma_filter_t *filter; void *filterarg; bus_size_t maxsize; u_int nsegments; bus_size_t maxsegsz; int flags; bus_dma_lock_t *lockfunc; void *lockfuncarg; int ref_count; }; struct bus_dma_impl { int (*tag_create)(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat); int (*tag_destroy)(bus_dma_tag_t dmat); int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp); int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map); int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp); void (*mem_free)(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map); int (*load_ma)(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp); int (*load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp); int (*load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs, int *segp); void (*map_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg); bus_dma_segment_t *(*map_complete)(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error); void (*map_unload)(bus_dma_tag_t dmat, bus_dmamap_t map); void (*map_sync)(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op); }; -void bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op); int bus_dma_run_filter(struct bus_dma_tag_common *dmat, bus_addr_t paddr); int common_bus_dma_tag_create(struct bus_dma_tag_common *parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, size_t sz, void **dmat); extern struct bus_dma_impl bus_dma_bounce_impl; #endif diff --git a/sys/riscv/riscv/busdma_machdep.c b/sys/riscv/riscv/busdma_machdep.c index 5a80def489e9..11ff9aa6afa5 100644 --- a/sys/riscv/riscv/busdma_machdep.c +++ b/sys/riscv/riscv/busdma_machdep.c @@ -1,252 +1,217 @@ /*- * Copyright (c) 1997, 1998 Justin T. Gibbs. * Copyright (c) 2013, 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Portions of this software were developed by Semihalf * under sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -/* - * Convenience function for manipulating driver locks from busdma (during - * busdma_swi, for example). - */ -void -busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) -{ - struct mtx *dmtx; - - dmtx = (struct mtx *)arg; - switch (op) { - case BUS_DMA_LOCK: - mtx_lock(dmtx); - break; - case BUS_DMA_UNLOCK: - mtx_unlock(dmtx); - break; - default: - panic("Unknown operation 0x%x for busdma_lock_mutex!", op); - } -} - -/* - * dflt_lock should never get called. It gets put into the dma tag when - * lockfunc == NULL, which is only valid if the maps that are associated - * with the tag are meant to never be defered. - * XXX Should have a way to identify which driver is responsible here. - */ -void -bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op) -{ - - panic("driver error: busdma dflt_lock called"); -} - /* * Return true if a match is made. * * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. * * If paddr is within the bounds of the dma tag then call the filter callback * to check for a match, if there is no filter callback then assume a match. 
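
The sense of the window test that bus_dma_run_filter() implements just below is easy to invert by accident: lowaddr and highaddr delimit the excluded range, so paddr > lowaddr && paddr <= highaddr means the address is out of the device's reach and needs bouncing (or a filter's approval). Restated stand-alone (a sketch; for a 32-bit-limited tag, lowaddr is 0xffffffff and highaddr the top of memory):

#include <stdbool.h>
#include <stdint.h>

/* True if paddr lies in the tag's excluded window (lowaddr, highaddr]. */
static bool
addr_excluded(uint64_t paddr, uint64_t lowaddr, uint64_t highaddr)
{
	return (paddr > lowaddr && paddr <= highaddr);
}

/* addr_excluded(0x100000000, 0xffffffff, UINT64_MAX) == true */
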
*/ int bus_dma_run_filter(struct bus_dma_tag_common *tc, bus_addr_t paddr) { int retval; retval = 0; do { if (((paddr > tc->lowaddr && paddr <= tc->highaddr) || !vm_addr_align_ok(paddr, tc->alignment)) && (tc->filter == NULL || (*tc->filter)(tc->filterarg, paddr) != 0)) retval = 1; tc = tc->parent; } while (retval == 0 && tc != NULL); return (retval); } int common_bus_dma_tag_create(struct bus_dma_tag_common *parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, size_t sz, void **dmat) { void *newtag; struct bus_dma_tag_common *common; KASSERT(sz >= sizeof(struct bus_dma_tag_common), ("sz")); /* Return a NULL tag on failure */ *dmat = NULL; /* Basic sanity checking */ if (boundary != 0 && boundary < maxsegsz) maxsegsz = boundary; if (maxsegsz == 0) return (EINVAL); newtag = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT); if (newtag == NULL) { CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, 0, ENOMEM); return (ENOMEM); } common = newtag; common->impl = &bus_dma_bounce_impl; common->parent = parent; common->alignment = alignment; common->boundary = boundary; common->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); common->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); common->filter = filter; common->filterarg = filterarg; common->maxsize = maxsize; common->nsegments = nsegments; common->maxsegsz = maxsegsz; common->flags = flags; common->ref_count = 1; /* Count ourself */ if (lockfunc != NULL) { common->lockfunc = lockfunc; common->lockfuncarg = lockfuncarg; } else { - common->lockfunc = bus_dma_dflt_lock; + common->lockfunc = _busdma_dflt_lock; common->lockfuncarg = NULL; } /* Take into account any restrictions imposed by our parent tag */ if (parent != NULL) { common->impl = parent->impl; common->lowaddr = MIN(parent->lowaddr, common->lowaddr); common->highaddr = MAX(parent->highaddr, common->highaddr); if (common->boundary == 0) common->boundary = parent->boundary; else if (parent->boundary != 0) { common->boundary = MIN(parent->boundary, common->boundary); } if (common->filter == NULL) { /* * Short circuit looking at our parent directly * since we have encapsulated all of its information */ common->filter = parent->filter; common->filterarg = parent->filterarg; common->parent = parent->parent; } atomic_add_int(&parent->ref_count, 1); } *dmat = common; return (0); } /* * Allocate a device specific dma_tag. 
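
A point worth noting in common_bus_dma_tag_create() above: a child is always narrowed by its parent (lowaddr via MIN, highaddr via MAX, and a zero child boundary inherits the parent's), and a NULL lockfunc now installs the shared _busdma_dflt_lock. A sketch of the inheritance (sizes hypothetical; error handling trimmed):

#include <sys/param.h>
#include <machine/bus.h>

static void
foo_make_tags(bus_dma_tag_t *parentp, bus_dma_tag_t *childp)
{
	/* Parent: the platform restricts DMA to the low 4 GiB. */
	(void)bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, BUS_SPACE_UNRESTRICTED,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, parentp);

	/*
	 * The child asks for no restriction, but MIN(parent->lowaddr,
	 * child->lowaddr) leaves it 32-bit limited anyway.  The NULL
	 * lockfunc installs _busdma_dflt_lock.
	 */
	(void)bus_dma_tag_create(*parentp, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    PAGE_SIZE, 1, PAGE_SIZE, 0, NULL, NULL, childp);
}
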
*/ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat) { struct bus_dma_tag_common *tc; int error; if (parent == NULL) { error = bus_dma_bounce_impl.tag_create(parent, alignment, boundary, lowaddr, highaddr, filter, filterarg, maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat); } else { tc = (struct bus_dma_tag_common *)parent; error = tc->impl->tag_create(parent, alignment, boundary, lowaddr, highaddr, filter, filterarg, maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat); } return (error); } void bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat) { struct bus_dma_tag_common *common; if (t == NULL || dmat == NULL) return; common = (struct bus_dma_tag_common *)dmat; t->parent = (bus_dma_tag_t)common->parent; t->alignment = common->alignment; t->boundary = common->boundary; t->lowaddr = common->lowaddr; t->highaddr = common->highaddr; t->maxsize = common->maxsize; t->nsegments = common->nsegments; t->maxsegsize = common->maxsegsz; t->flags = common->flags; t->lockfunc = common->lockfunc; t->lockfuncarg = common->lockfuncarg; } int bus_dma_tag_destroy(bus_dma_tag_t dmat) { struct bus_dma_tag_common *tc; tc = (struct bus_dma_tag_common *)dmat; return (tc->impl->tag_destroy(dmat)); } int bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain) { return (0); } diff --git a/sys/sys/bus_dma.h b/sys/sys/bus_dma.h index 7eac96b5af1e..212d8e3af09d 100644 --- a/sys/sys/bus_dma.h +++ b/sys/sys/bus_dma.h @@ -1,400 +1,405 @@ /* $NetBSD: bus.h,v 1.12 1997/10/01 08:25:15 fvdl Exp $ */ /*- * SPDX-License-Identifier: (BSD-2-Clause-NetBSD AND BSD-4-Clause) * * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, * NASA Ames Research Center. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1996 Charles M. Hannum. All rights reserved. * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Christopher G. Demetriou * for the NetBSD Project. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* $FreeBSD$ */ #ifndef _BUS_DMA_H_ #define _BUS_DMA_H_ #ifdef _KERNEL #include #endif /* * Machine independent interface for mapping physical addresses to peripheral * bus 'physical' addresses, and assisting with DMA operations. * * XXX This file is always included from and should not * (yet) be included directly. */ /* * Flags used in various bus DMA methods. */ #define BUS_DMA_WAITOK 0x00 /* safe to sleep (pseudo-flag) */ #define BUS_DMA_NOWAIT 0x01 /* not safe to sleep */ #define BUS_DMA_ALLOCNOW 0x02 /* perform resource allocation now */ #define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */ #define BUS_DMA_ZERO 0x08 /* allocate zero'ed memory */ #define BUS_DMA_BUS1 0x10 /* placeholders for bus functions... */ #define BUS_DMA_BUS2 0x20 #define BUS_DMA_BUS3 0x40 #define BUS_DMA_BUS4 0x80 /* * The following two flags are non-standard or specific to only certain * architectures */ #define BUS_DMA_NOWRITE 0x100 #define BUS_DMA_NOCACHE 0x200 /* * The following flag is a DMA tag hint that the page offset of the * loaded kernel virtual address must be preserved in the first * physical segment address, when the KVA is loaded into DMA. */ #define BUS_DMA_KEEP_PG_OFFSET 0x400 #define BUS_DMA_LOAD_MBUF 0x800 /* Forwards needed by prototypes below. */ union ccb; struct bio; struct crypto_buffer; struct cryptop; struct mbuf; struct memdesc; struct pmap; struct uio; /* * Operations performed by bus_dmamap_sync(). */ #define BUS_DMASYNC_PREREAD 1 #define BUS_DMASYNC_POSTREAD 2 #define BUS_DMASYNC_PREWRITE 4 #define BUS_DMASYNC_POSTWRITE 8 /* * bus_dma_segment_t * * Describes a single contiguous DMA transaction. Values * are suitable for programming into DMA registers. */ typedef struct bus_dma_segment { bus_addr_t ds_addr; /* DMA address */ bus_size_t ds_len; /* length of transfer */ } bus_dma_segment_t; #ifdef _KERNEL /* * A function that returns 1 if the address cannot be accessed by * a device and 0 if it can be. 
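
A bus_dma_filter_t returns nonzero for addresses the device cannot reach; the per-arch run_filter() code earlier in this change consults it for addresses inside the tag's lowaddr/highaddr window. A hypothetical example (the hole addresses are invented for illustration):

#include <sys/param.h>
#include <machine/bus.h>

static int
foo_dma_filter(void *arg, bus_addr_t paddr)
{
	/* Hypothetical device that cannot reach 0xf0000000-0xf7ffffff. */
	return (paddr >= 0xf0000000 && paddr < 0xf8000000);
}

Such a function would be passed as the filtfunc/filtfuncarg pair to bus_dma_tag_create(); a nonzero return forces the range to be bounced.
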
*/ typedef int bus_dma_filter_t(void *, bus_addr_t); /* * Generic helper function for manipulating mutexes. */ void busdma_lock_mutex(void *arg, bus_dma_lock_op_t op); +/* + * Internal helper function used by tags that do not defer loads. + */ +void _busdma_dflt_lock(void *arg, bus_dma_lock_op_t op); + /* * Allocate a device specific dma_tag encapsulating the constraints of * the parent tag in addition to other restrictions specified: * * alignment: Alignment for segments. * boundary: Boundary that segments cannot cross. * lowaddr: Low restricted address that cannot appear in a mapping. * highaddr: High restricted address that cannot appear in a mapping. * filtfunc: An optional function to further test if an address * within the range of lowaddr and highaddr cannot appear * in a mapping. * filtfuncarg: An argument that will be passed to filtfunc in addition * to the address to test. * maxsize: Maximum mapping size supported by this tag. * nsegments: Number of discontinuities allowed in maps. * maxsegsz: Maximum size of a segment in the map. * flags: Bus DMA flags. * lockfunc: An optional function to handle driver-defined lock * operations. * lockfuncarg: An argument that will be passed to lockfunc in addition * to the lock operation. * dmat: A pointer to set to a valid dma tag should the return * value of this function indicate success. */ /* XXX Should probably allow specification of alignment */ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filtfunc, void *filtfuncarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat); /* * Functions for creating and cloning tags via a template, * * bus_dma_template_t is made avaialble publicly so it can be allocated * from the caller stack. Its contents should be considered private, and * should only be accessed via the documented APIs and macros */ typedef struct { bus_dma_tag_t parent; bus_size_t alignment; bus_addr_t boundary; bus_addr_t lowaddr; bus_addr_t highaddr; bus_size_t maxsize; int nsegments; bus_size_t maxsegsize; int flags; bus_dma_lock_t *lockfunc; void *lockfuncarg; const char *name; } bus_dma_template_t; /* * These enum values should not be re-ordered. BD_PARAM_INVALID is an * invalid key and will trigger a panic. 
*/ typedef enum { BD_PARAM_INVALID = 0, BD_PARAM_PARENT = 1, BD_PARAM_ALIGNMENT = 2, BD_PARAM_BOUNDARY = 3, BD_PARAM_LOWADDR = 4, BD_PARAM_HIGHADDR = 5, BD_PARAM_MAXSIZE = 6, BD_PARAM_NSEGMENTS = 7, BD_PARAM_MAXSEGSIZE = 8, BD_PARAM_FLAGS = 9, BD_PARAM_LOCKFUNC = 10, BD_PARAM_LOCKFUNCARG = 11, BD_PARAM_NAME = 12 } bus_dma_param_key_t; /* These contents should also be considered private */ typedef struct { bus_dma_param_key_t key; union { void *ptr; vm_paddr_t pa; uintmax_t num; }; } bus_dma_param_t; #define BD_PARENT(val) { BD_PARAM_PARENT, .ptr = val } #define BD_ALIGNMENT(val) { BD_PARAM_ALIGNMENT, .num = val } #define BD_BOUNDARY(val) { BD_PARAM_BOUNDARY, .num = val } #define BD_LOWADDR(val) { BD_PARAM_LOWADDR, .pa = val } #define BD_HIGHADDR(val) { BD_PARAM_HIGHADDR, .pa = val } #define BD_MAXSIZE(val) { BD_PARAM_MAXSIZE, .num = val } #define BD_NSEGMENTS(val) { BD_PARAM_NSEGMENTS, .num = val } #define BD_MAXSEGSIZE(val) { BD_PARAM_MAXSEGSIZE, .num = val } #define BD_FLAGS(val) { BD_PARAM_FLAGS, .num = val } #define BD_LOCKFUNC(val) { BD_PARAM_LOCKFUNC, .ptr = val } #define BD_LOCKFUNCARG(val) { BD_PARAM_LOCKFUNCARG, .ptr = val } #define BD_NAME(val) { BD_PARAM_NAME, .ptr = val } #define BUS_DMA_TEMPLATE_FILL(t, kv...) \ do { \ bus_dma_param_t pm[] = { kv }; \ bus_dma_template_fill(t, pm, howmany(sizeof(pm), sizeof(pm[0]))); \ } while (0) void bus_dma_template_init(bus_dma_template_t *t, bus_dma_tag_t parent); int bus_dma_template_tag(bus_dma_template_t *t, bus_dma_tag_t *dmat); void bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat); void bus_dma_template_fill(bus_dma_template_t *t, bus_dma_param_t *kv, u_int count); /* * Set the memory domain to be used for allocations. * * Automatic for PCI devices. Must be set prior to creating maps or * allocating memory. */ int bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain); int bus_dma_tag_destroy(bus_dma_tag_t dmat); /* * A function that processes a successfully loaded dma map or an error * from a delayed load map. */ typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int); /* * Like bus_dmamap_callback but includes map size in bytes. This is * defined as a separate interface to maintain compatibility for users * of bus_dmamap_callback_t--at some point these interfaces should be merged. */ typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int); /* * Map the buffer buf into bus space using the dmamap map. */ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg, int flags); /* * Like bus_dmamap_load but for mbufs. Note the use of the * bus_dmamap_callback2_t interface. */ int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *mbuf, bus_dmamap_callback2_t *callback, void *callback_arg, int flags); int bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *mbuf, bus_dma_segment_t *segs, int *nsegs, int flags); /* * Like bus_dmamap_load but for uios. Note the use of the * bus_dmamap_callback2_t interface. */ int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *ui, bus_dmamap_callback2_t *callback, void *callback_arg, int flags); /* * Like bus_dmamap_load but for cam control blocks. */ int bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb, bus_dmamap_callback_t *callback, void *callback_arg, int flags); /* * Like bus_dmamap_load but for bios. 
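
The template API above exists to replace the long bus_dma_tag_create() argument list: bus_dma_template_init() seeds permissive defaults (alignment 1, BUS_SPACE_MAXADDR limits, unrestricted segments) and BUS_DMA_TEMPLATE_FILL() overrides only the keys named. A sketch (sizes hypothetical):

static int
foo_make_tag(bus_dma_tag_t parent, bus_dma_tag_t *dtagp)
{
	bus_dma_template_t t;

	bus_dma_template_init(&t, parent);
	BUS_DMA_TEMPLATE_FILL(&t,
	    BD_LOWADDR(BUS_SPACE_MAXADDR_32BIT),
	    BD_MAXSIZE(65536),
	    BD_NSEGMENTS(1),
	    BD_MAXSEGSIZE(65536));
	return (bus_dma_template_tag(&t, dtagp));
}
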
*/ int bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio, bus_dmamap_callback_t *callback, void *callback_arg, int flags); /* * Like bus_dmamap_load but for crypto ops. */ int bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp, bus_dmamap_callback_t *callback, void *callback_arg, int flags); int bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, struct crypto_buffer *cb, bus_dmamap_callback_t *callback, void *callback_arg, int flags); /* * Loads any memory descriptor. */ int bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg, int flags); /* * Placeholder for use by busdma implementations that do not benefit * from an optimized procedure to load an array of vm_page_t. Falls back * to calling _bus_dmamap_load_phys() in a loop. */ int bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp); #ifdef WANT_INLINE_DMAMAP #define BUS_DMAMAP_OP static inline #else #define BUS_DMAMAP_OP #endif /* * Allocate a handle for mapping from kva/uva/physical * address space into bus device space. */ BUS_DMAMAP_OP int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp); /* * Destroy a handle for mapping from kva/uva/physical * address space into bus device space. */ BUS_DMAMAP_OP int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map); /* * Allocate a piece of memory that can be efficiently mapped into * bus device space based on the constraints listed in the dma tag. * A dmamap for use with bus_dmamap_load is also allocated. */ BUS_DMAMAP_OP int bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp); /* * Free a piece of memory and its associated dmamap that were allocated * via bus_dmamem_alloc. */ BUS_DMAMAP_OP void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map); /* * Perform a synchronization operation on the given map. If the map * is NULL, we have a fully IO-coherent system. */ BUS_DMAMAP_OP void bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t dmamap, bus_dmasync_op_t op); /* * Release the mapping held by map. */ BUS_DMAMAP_OP void bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t dmamap); #undef BUS_DMAMAP_OP #endif /* _KERNEL */ #endif /* _BUS_DMA_H_ */ diff --git a/sys/x86/include/busdma_impl.h b/sys/x86/include/busdma_impl.h index 9ae75095821a..577fd99f9496 100644 --- a/sys/x86/include/busdma_impl.h +++ b/sys/x86/include/busdma_impl.h @@ -1,103 +1,102 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 The FreeBSD Foundation * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __X86_BUSDMA_IMPL_H #define __X86_BUSDMA_IMPL_H struct bus_dma_tag_common { struct bus_dma_impl *impl; struct bus_dma_tag_common *parent; bus_size_t alignment; bus_addr_t boundary; bus_addr_t lowaddr; bus_addr_t highaddr; bus_dma_filter_t *filter; void *filterarg; bus_size_t maxsize; u_int nsegments; bus_size_t maxsegsz; int flags; bus_dma_lock_t *lockfunc; void *lockfuncarg; int ref_count; int domain; }; struct bus_dma_impl { int (*tag_create)(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat); int (*tag_destroy)(bus_dma_tag_t dmat); int (*tag_set_domain)(bus_dma_tag_t); bool (*id_mapped)(bus_dma_tag_t, vm_paddr_t, bus_size_t); int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp); int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map); int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp); void (*mem_free)(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map); int (*load_ma)(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp); int (*load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp); int (*load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs, int *segp); void (*map_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg); bus_dma_segment_t *(*map_complete)(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error); void (*map_unload)(bus_dma_tag_t dmat, bus_dmamap_t map); void (*map_sync)(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op); #ifdef KMSAN void (*load_kmsan)(bus_dmamap_t map, struct memdesc *mem); #endif }; -void bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op); int bus_dma_run_filter(struct bus_dma_tag_common *dmat, vm_paddr_t paddr); int common_bus_dma_tag_create(struct bus_dma_tag_common *parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, size_t sz, void **dmat); extern struct bus_dma_impl bus_dma_bounce_impl; #endif diff --git a/sys/x86/x86/busdma_machdep.c b/sys/x86/x86/busdma_machdep.c index ae62f83ea300..52bc765138c9 100644 --- a/sys/x86/x86/busdma_machdep.c +++ b/sys/x86/x86/busdma_machdep.c @@ -1,267 +1,232 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, 1998 Justin T. Gibbs. * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. 
* * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -/* - * Convenience function for manipulating driver locks from busdma (during - * busdma_swi, for example). - */ -void -busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) -{ - struct mtx *dmtx; - - dmtx = (struct mtx *)arg; - switch (op) { - case BUS_DMA_LOCK: - mtx_lock(dmtx); - break; - case BUS_DMA_UNLOCK: - mtx_unlock(dmtx); - break; - default: - panic("Unknown operation 0x%x for busdma_lock_mutex!", op); - } -} - -/* - * dflt_lock should never get called. It gets put into the dma tag when - * lockfunc == NULL, which is only valid if the maps that are associated - * with the tag are meant to never be defered. - * XXX Should have a way to identify which driver is responsible here. - */ -void -bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op) -{ - - panic("driver error: busdma dflt_lock called"); -} - /* * Return true if a match is made. * * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'. * * If paddr is within the bounds of the dma tag, call the filter callback * to check for a match; if there is no filter callback, assume a match.
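 *
 * For example (purely illustrative): with lowaddr == 0xffffff (a 16MB
 * ISA-style limit), highaddr == BUS_SPACE_MAXADDR, and no filter
 * installed, a paddr of 0x2000000 lies inside the exclusion window and
 * is reported as a match (it must bounce), while a paddr of 0x100000,
 * assuming it satisfies the alignment check, is not.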
*/ int bus_dma_run_filter(struct bus_dma_tag_common *tc, vm_paddr_t paddr) { int retval; retval = 0; do { if ((paddr >= BUS_SPACE_MAXADDR || (paddr > tc->lowaddr && paddr <= tc->highaddr) || !vm_addr_align_ok(paddr, tc->alignment)) && (tc->filter == NULL || (*tc->filter)(tc->filterarg, paddr) != 0)) retval = 1; tc = tc->parent; } while (retval == 0 && tc != NULL); return (retval); } int common_bus_dma_tag_create(struct bus_dma_tag_common *parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, size_t sz, void **dmat) { void *newtag; struct bus_dma_tag_common *common; KASSERT(sz >= sizeof(struct bus_dma_tag_common), ("sz")); /* Basic sanity checking */ if (boundary != 0 && boundary < maxsegsz) maxsegsz = boundary; if (maxsegsz == 0) return (EINVAL); /* Return a NULL tag on failure */ *dmat = NULL; newtag = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT); if (newtag == NULL) { CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, 0, ENOMEM); return (ENOMEM); } common = newtag; common->impl = &bus_dma_bounce_impl; common->parent = parent; common->alignment = alignment; common->boundary = boundary; common->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); common->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); common->filter = filter; common->filterarg = filterarg; common->maxsize = maxsize; common->nsegments = nsegments; common->maxsegsz = maxsegsz; common->flags = flags; common->ref_count = 1; /* Count ourself */ if (lockfunc != NULL) { common->lockfunc = lockfunc; common->lockfuncarg = lockfuncarg; } else { - common->lockfunc = bus_dma_dflt_lock; + common->lockfunc = _busdma_dflt_lock; common->lockfuncarg = NULL; } /* Take into account any restrictions imposed by our parent tag */ if (parent != NULL) { common->impl = parent->impl; common->lowaddr = MIN(parent->lowaddr, common->lowaddr); common->highaddr = MAX(parent->highaddr, common->highaddr); if (common->boundary == 0) common->boundary = parent->boundary; else if (parent->boundary != 0) { common->boundary = MIN(parent->boundary, common->boundary); } if (common->filter == NULL) { /* * Short circuit looking at our parent directly * since we have encapsulated all of its information */ common->filter = parent->filter; common->filterarg = parent->filterarg; common->parent = parent->parent; } common->domain = parent->domain; atomic_add_int(&parent->ref_count, 1); } common->domain = vm_phys_domain_match(common->domain, 0ul, common->lowaddr); *dmat = common; return (0); } int bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain) { struct bus_dma_tag_common *tc; tc = (struct bus_dma_tag_common *)dmat; domain = vm_phys_domain_match(domain, 0ul, tc->lowaddr); /* Only call the callback if it changes. */ if (domain == tc->domain) return (0); tc->domain = domain; return (tc->impl->tag_set_domain(dmat)); } /* * Allocate a device specific dma_tag. 
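 *
 * With no parent the request goes directly to the default bounce
 * implementation; otherwise it is dispatched through the parent's
 * impl, so e.g. children of an IOMMU-backed tag remain IOMMU-backed.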
*/ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat) { struct bus_dma_tag_common *tc; int error; if (parent == NULL) { error = bus_dma_bounce_impl.tag_create(parent, alignment, boundary, lowaddr, highaddr, filter, filterarg, maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat); } else { tc = (struct bus_dma_tag_common *)parent; error = tc->impl->tag_create(parent, alignment, boundary, lowaddr, highaddr, filter, filterarg, maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat); } return (error); } void bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat) { struct bus_dma_tag_common *common; if (t == NULL || dmat == NULL) return; common = (struct bus_dma_tag_common *)dmat; t->parent = (bus_dma_tag_t)common->parent; t->alignment = common->alignment; t->boundary = common->boundary; t->lowaddr = common->lowaddr; t->highaddr = common->highaddr; t->maxsize = common->maxsize; t->nsegments = common->nsegments; t->maxsegsize = common->maxsegsz; t->flags = common->flags; t->lockfunc = common->lockfunc; t->lockfuncarg = common->lockfuncarg; } int bus_dma_tag_destroy(bus_dma_tag_t dmat) { struct bus_dma_tag_common *tc; tc = (struct bus_dma_tag_common *)dmat; return (tc->impl->tag_destroy(dmat)); }
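For context, here is a minimal driver-side sketch of the lifecycle the interfaces above imply: create a tag, allocate DMA-able memory with a map, load it, and pick up the bus address in the callback. All xxx_* and "sc" names are hypothetical, and the constraint values are placeholders rather than anything this change prescribes.

/* Hypothetical usage sketch; xxx_* names are illustrative only. */
#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

struct xxx_softc {
	device_t	dev;
	bus_dma_tag_t	dmat;
	bus_dmamap_t	map;
	void		*buf;
	bus_addr_t	busaddr;
};

static void
xxx_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct xxx_softc *sc = arg;

	if (error != 0)
		return;
	sc->busaddr = segs[0].ds_addr;	/* nsegments == 1 below */
}

static int
xxx_dma_setup(struct xxx_softc *sc, bus_size_t size)
{
	int error;

	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size, 1, size,		/* maxsize, nsegments, maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc NULL is valid here:
					   the NOWAIT load below never
					   defers */
	    &sc->dmat);
	if (error != 0)
		return (error);
	error = bus_dmamem_alloc(sc->dmat, &sc->buf,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->map);
	if (error != 0) {
		bus_dma_tag_destroy(sc->dmat);
		return (error);
	}
	return (bus_dmamap_load(sc->dmat, sc->map, sc->buf, size,
	    xxx_dmamap_cb, sc, BUS_DMA_NOWAIT));
}

Teardown runs in the reverse order: bus_dmamap_unload(), then bus_dmamem_free() (which also releases the map allocated by bus_dmamem_alloc()), then bus_dma_tag_destroy().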