D55084.diff
D55084: powerpc64/busdma: Migrate bounce DMA to common framework
diff --git a/sys/conf/files.powerpc b/sys/conf/files.powerpc
--- a/sys/conf/files.powerpc
+++ b/sys/conf/files.powerpc
@@ -361,6 +361,7 @@
powerpc/powerpc/altivec.c optional !powerpcspe
powerpc/powerpc/autoconf.c standard
powerpc/powerpc/bus_machdep.c standard
+powerpc/powerpc/busdma_bounce.c standard
powerpc/powerpc/busdma_machdep.c standard
powerpc/powerpc/clock.c standard
powerpc/powerpc/copyinout.c optional aim
diff --git a/sys/powerpc/include/bus_dma.h b/sys/powerpc/include/bus_dma.h
--- a/sys/powerpc/include/bus_dma.h
+++ b/sys/powerpc/include/bus_dma.h
@@ -2,6 +2,7 @@
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2005 Scott Long
+ * Copyright (c) 2026 Raptor Engineering, LLC
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -26,12 +27,144 @@
* SUCH DAMAGE.
*/
-#ifndef _POWERPC_BUS_DMA_H_
-#define _POWERPC_BUS_DMA_H_
+#ifndef _MACHINE_BUS_DMA_H_
+#define _MACHINE_BUS_DMA_H_
+#define WANT_INLINE_DMAMAP
#include <sys/bus_dma.h>
-#include <sys/bus_dma_internal.h>
+
+#include <machine/bus_dma_impl.h>
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+static inline int
+bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->map_create(dmat, flags, mapp));
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+static inline int
+bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->map_destroy(dmat, map));
+}
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with dmamap_load is also allocated.
+ */
+static inline int
+bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->mem_alloc(dmat, vaddr, flags, mapp));
+}
+
+/*
+ * Free a piece of memory and its associated dmamap that was allocated
+ * via bus_dmamem_alloc.
+ */
+static inline void
+bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ tc->impl->mem_free(dmat, vaddr, map);
+}
+
+/*
+ * Release the mapping held by map.
+ */
+static inline void
+bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ tc->impl->map_unload(dmat, map);
+}
+
+static inline void
+bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ tc->impl->map_sync(dmat, map, op);
+}
+
+static inline int
+_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+ bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->load_phys(dmat, map, buf, buflen, flags, segs,
+ segp));
+}
+
+static inline int
+_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
+ bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->load_ma(dmat, map, ma, tlen, ma_offs, flags,
+ segs, segp));
+}
+
+static inline int
+_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->load_buffer(dmat, map, buf, buflen, pmap, flags, segs,
+ segp));
+}
+
+static inline void
+_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ tc->impl->map_waitok(dmat, map, mem, callback, callback_arg);
+}
+
+static inline bus_dma_segment_t *
+_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, int error)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->map_complete(dmat, map, segs, nsegs, error));
+}
int bus_dma_tag_set_iommu(bus_dma_tag_t, device_t iommu, void *cookie);
-#endif /* _POWERPC_BUS_DMA_H_ */
+#endif /* !_MACHINE_BUS_DMA_H_ */
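
For context (not part of the patch): with this header, the public bus_dmamap_*() entry points become static inline wrappers that dispatch through the tag's impl vtable, matching the amd64/arm64 arrangement. A minimal, hypothetical sketch of the driver-side calls these wrappers now forward -- the mydev_* names and the single-segment layout are invented for illustration, not taken from the patch:

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

/* Hypothetical per-device DMA state. */
struct mydev_dma {
    bus_dma_tag_t  tag;
    bus_dmamap_t   map;
    void          *buf;
    bus_addr_t     busaddr;
};

static void
mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    /* Single-segment load: remember the bus address for the device. */
    if (error == 0 && nseg == 1)
        *(bus_addr_t *)arg = segs[0].ds_addr;
}

static int
mydev_dma_setup(device_t dev, struct mydev_dma *d, bus_size_t size)
{
    int error;

    /* 32-bit addressable, single segment, coherent if possible. */
    error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
        size, 1, size, BUS_DMA_COHERENT, NULL, NULL, &d->tag);
    if (error != 0)
        return (error);

    /* bus_dmamem_alloc() is now an inline wrapper into impl->mem_alloc. */
    error = bus_dmamem_alloc(d->tag, &d->buf,
        BUS_DMA_WAITOK | BUS_DMA_ZERO, &d->map);
    if (error != 0)
        return (error);

    /* bus_dmamap_load() ends up in the backend's load_buffer hook. */
    return (bus_dmamap_load(d->tag, d->map, d->buf, size,
        mydev_load_cb, &d->busaddr, BUS_DMA_NOWAIT));
}
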
diff --git a/sys/powerpc/include/bus_dma_impl.h b/sys/powerpc/include/bus_dma_impl.h
new file mode 100644
--- /dev/null
+++ b/sys/powerpc/include/bus_dma_impl.h
@@ -0,0 +1,88 @@
+/*-
+ * Copyright (c) 2013 The FreeBSD Foundation
+ *
+ * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_BUS_DMA_IMPL_H_
+#define _MACHINE_BUS_DMA_IMPL_H_
+
+struct bus_dma_tag_common {
+ struct bus_dma_impl *impl;
+ bus_size_t alignment;
+ bus_addr_t boundary;
+ bus_addr_t lowaddr;
+ bus_addr_t highaddr;
+ bus_size_t maxsize;
+ u_int nsegments;
+ bus_size_t maxsegsz;
+ int flags;
+ bus_dma_lock_t *lockfunc;
+ void *lockfuncarg;
+ int domain;
+};
+
+struct bus_dma_impl {
+ int (*tag_create)(bus_dma_tag_t parent,
+ bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat);
+ int (*tag_destroy)(bus_dma_tag_t dmat);
+ int (*tag_set_domain)(bus_dma_tag_t);
+ bool (*id_mapped)(bus_dma_tag_t, vm_paddr_t, bus_size_t);
+ int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp);
+ int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map);
+ int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp);
+ void (*mem_free)(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map);
+ int (*load_ma)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
+ bus_dma_segment_t *segs, int *segp);
+ int (*load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_paddr_t buf, bus_size_t buflen, int flags,
+ bus_dma_segment_t *segs, int *segp);
+ int (*load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ void *buf, bus_size_t buflen, struct pmap *pmap, int flags,
+ bus_dma_segment_t *segs, int *segp);
+ void (*map_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct memdesc *mem, bus_dmamap_callback_t *callback,
+ void *callback_arg);
+ bus_dma_segment_t *(*map_complete)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, int error);
+ void (*map_unload)(bus_dma_tag_t dmat, bus_dmamap_t map);
+ void (*map_sync)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dmasync_op_t op);
+};
+
+int common_bus_dma_tag_create(struct bus_dma_tag_common *parent,
+ bus_size_t alignment,
+ bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+ bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags,
+ bus_dma_lock_t *lockfunc, void *lockfuncarg, size_t sz, void **dmat);
+
+extern struct bus_dma_impl bus_dma_bounce_impl;
+
+#endif
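
For context (not part of the patch): a backend embeds struct bus_dma_tag_common as the first member of its private tag, which is why the inline wrappers can recover the vtable with a plain cast and why common_bus_dma_tag_create() takes an explicit allocation size and returns the tag through a void pointer. A hypothetical backend illustrating that embedding pattern -- mydirect_dma_tag and mydirect_impl are invented names, and the impl hooks are left unfilled in this sketch:

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/bus_dma_impl.h>

static struct bus_dma_impl mydirect_impl;   /* hooks omitted in this sketch */

struct mydirect_dma_tag {
    struct bus_dma_tag_common common;       /* must come first */
    int nmaps;                              /* backend-private state */
};

static int
mydirect_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags,
    bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat)
{
    struct mydirect_dma_tag *newtag;
    int error;

    *dmat = NULL;
    /* The common code allocates sizeof(*newtag) and fills in .common. */
    error = common_bus_dma_tag_create(
        parent != NULL ? (struct bus_dma_tag_common *)parent : NULL,
        alignment, boundary, lowaddr, highaddr, maxsize, nsegments,
        maxsegsz, flags, lockfunc, lockfuncarg, sizeof(*newtag),
        (void **)&newtag);
    if (error != 0)
        return (error);
    newtag->common.impl = &mydirect_impl;
    newtag->nmaps = 0;
    *dmat = (bus_dma_tag_t)newtag;
    return (0);
}
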
diff --git a/sys/powerpc/powerpc/busdma_bounce.c b/sys/powerpc/powerpc/busdma_bounce.c
new file mode 100644
--- /dev/null
+++ b/sys/powerpc/powerpc/busdma_bounce.c
@@ -0,0 +1,1051 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * Copyright (c) 2026 Raptor Engineering, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * From amd64/busdma_machdep.c, r204214
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/domainset.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <sys/memdesc.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/cpufunc.h>
+#include <machine/md_var.h>
+#include <machine/bus_dma_impl.h>
+
+#include "iommu_if.h"
+
+#define MAX_BPAGES MIN(8192, physmem/40)
+
+enum {
+ BF_COULD_BOUNCE = 0x01,
+ BF_MIN_ALLOC_COMP = 0x02,
+ BF_KMEM_ALLOC = 0x04,
+ BF_COHERENT = 0x10,
+};
+
+struct bounce_page;
+struct bounce_zone;
+
+struct bus_dma_tag {
+ struct bus_dma_tag_common common;
+ bus_size_t maxsize;
+ int map_count;
+ int bounce_flags;
+ bus_dma_segment_t *segments;
+ struct bounce_zone *bounce_zone;
+ device_t iommu;
+ void *iommu_cookie;
+};
+
+static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+ "Busdma parameters");
+
+struct sync_list {
+ vm_offset_t vaddr; /* kva of client data */
+ bus_addr_t paddr; /* physical address */
+ vm_page_t pages; /* starting page of client data */
+ bus_size_t datacount; /* client data count */
+};
+
+struct bus_dmamap {
+ STAILQ_HEAD(, bounce_page) bpages;
+ int pagesneeded;
+ int pagesreserved;
+ bus_dma_tag_t dmat;
+ struct memdesc mem;
+
+ /* temporary glue for existing pSeries IOMMU setup */
+ int nsegs;
+
+ bus_dmamap_callback_t *callback;
+ void *callback_arg;
+ __sbintime_t queued_time;
+ STAILQ_ENTRY(bus_dmamap) links;
+ u_int flags;
+#define DMAMAP_COHERENT (1 << 0)
+#define DMAMAP_COULD_BOUNCE (1 << 1)
+#define DMAMAP_FROM_DMAMEM (1 << 2)
+#define DMAMAP_MBUF (1 << 3)
+ int sync_count;
+ struct sync_list slist[];
+};
+
+static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_paddr_t buf, bus_size_t buflen, int *pagesneeded);
+static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+ pmap_t pmap, void *buf, bus_size_t buflen, int flags);
+static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_paddr_t buf, bus_size_t buflen, int flags);
+
+static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
+
+#define dmat_alignment(dmat) ((dmat)->common.alignment)
+#define dmat_bounce_flags(dmat) ((dmat)->bounce_flags)
+#define dmat_boundary(dmat) ((dmat)->common.boundary)
+#define dmat_domain(dmat) ((dmat)->common.domain)
+#define dmat_flags(dmat) ((dmat)->common.flags)
+#define dmat_highaddr(dmat) ((dmat)->common.highaddr)
+#define dmat_lowaddr(dmat) ((dmat)->common.lowaddr)
+#define dmat_lockfunc(dmat) ((dmat)->common.lockfunc)
+#define dmat_lockfuncarg(dmat) ((dmat)->common.lockfuncarg)
+#define dmat_maxsegsz(dmat) ((dmat)->common.maxsegsz)
+#define dmat_nsegments(dmat) ((dmat)->common.nsegments)
+
+#include "../../kern/subr_busdma_bounce.c"
+
+static int
+bounce_bus_dma_zone_setup(bus_dma_tag_t dmat)
+{
+ struct bounce_zone *bz;
+ bus_size_t maxsize;
+ int error;
+
+ /*
+ * Round size up to a full page, and add one more page because
+ * there can always be one more boundary crossing than the
+ * number of pages in a transfer.
+ */
+ maxsize = roundup2(dmat->common.maxsize, PAGE_SIZE) + PAGE_SIZE;
+
+ /* Must bounce */
+ if ((error = alloc_bounce_zone(dmat)) != 0)
+ return (error);
+ bz = dmat->bounce_zone;
+
+ if (ptoa(bz->total_bpages) < maxsize) {
+ int pages;
+
+ pages = atop(maxsize) + 1 - bz->total_bpages;
+
+ /* Add pages to our bounce pool */
+ if (alloc_bounce_pages(dmat, pages) < pages)
+ return (ENOMEM);
+ }
+ /* Performed initial allocation */
+ dmat->bounce_flags |= BF_MIN_ALLOC_COMP;
+
+ return (error);
+}
+
+/*
+ * Return true if the DMA should bounce because the start or end does not fall
+ * on a cacheline boundary (which would require a partial cacheline flush).
+ * COHERENT memory doesn't trigger cacheline flushes. Memory allocated by
+ * bus_dmamem_alloc() is always aligned to cacheline boundaries, and there's a
+ * strict rule that such memory cannot be accessed by the CPU while DMA is in
+ * progress (or by multiple DMA engines at once), so that it's always safe to do
+ * full cacheline flushes even if that affects memory outside the range of a
+ * given DMA operation that doesn't involve the full allocated buffer. If we're
+ * mapping an mbuf, that follows the same rules as a buffer we allocated.
+ */
+static bool
+cacheline_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
+ bus_size_t size)
+{
+
+#define DMAMAP_CACHELINE_FLAGS \
+ (DMAMAP_FROM_DMAMEM | DMAMAP_COHERENT | DMAMAP_MBUF)
+ if ((dmat->bounce_flags & BF_COHERENT) != 0)
+ return (false);
+ if (map != NULL && (map->flags & DMAMAP_CACHELINE_FLAGS) != 0)
+ return (false);
+ return (((paddr | size) & (cacheline_size - 1)) != 0);
+#undef DMAMAP_CACHELINE_FLAGS
+}
+
+/*
+ * Return true if the given address does not fall on the alignment boundary.
+ */
+static bool
+alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
+{
+
+ return (!vm_addr_align_ok(addr, dmat->common.alignment));
+}
+
+static bool
+might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
+ bus_size_t size)
+{
+
+ /* Memory allocated by bounce_bus_dmamem_alloc won't bounce */
+ if (map && (map->flags & DMAMAP_FROM_DMAMEM) != 0)
+ return (false);
+
+ if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0)
+ return (true);
+
+ if (cacheline_bounce(dmat, map, paddr, size))
+ return (true);
+
+ if (alignment_bounce(dmat, paddr))
+ return (true);
+
+ return (false);
+}
+
+/*
+ * Returns true if the address falls within the tag's exclusion window, or
+ * fails to meet its alignment requirements.
+ */
+static bool
+must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
+ bus_size_t size)
+{
+ if (cacheline_bounce(dmat, map, paddr, size))
+ return (true);
+
+ if (dmat->iommu == NULL && paddr > dmat->common.lowaddr &&
+ paddr <= dmat->common.highaddr)
+ return (true);
+
+ if (!vm_addr_align_ok(paddr, dmat->common.alignment))
+ return (true);
+
+ if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0 &&
+ addr_needs_bounce(dmat, paddr))
+ return (true);
+
+ return (false);
+}
+
+/*
+ * Allocate a device specific dma_tag.
+ */
+static int
+bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_addr_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat)
+{
+ bus_dma_tag_t newtag;
+ int error = 0;
+
+ /* Basic sanity checking */
+ if (boundary != 0 && boundary < maxsegsz)
+ maxsegsz = boundary;
+
+ if (maxsegsz == 0) {
+ return (EINVAL);
+ }
+
+ /* Return a NULL tag on failure */
+ *dmat = NULL;
+ error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
+ NULL, alignment, boundary, lowaddr, highaddr, maxsize, nsegments,
+ maxsegsz, flags, lockfunc, lockfuncarg,
+ sizeof (struct bus_dma_tag), (void **)&newtag);
+ if (error != 0)
+ return (error);
+
+ newtag->common.impl = &bus_dma_bounce_impl;
+ newtag->map_count = 0;
+ newtag->segments = NULL;
+
+ if ((flags & BUS_DMA_COHERENT) != 0)
+ newtag->bounce_flags |= BF_COHERENT;
+
+ if (parent != NULL) {
+ newtag->iommu = parent->iommu;
+ newtag->iommu_cookie = parent->iommu_cookie;
+
+ if ((parent->bounce_flags & BF_COULD_BOUNCE) != 0)
+ newtag->bounce_flags |= BF_COULD_BOUNCE;
+
+ /* Copy some flags from the parent */
+ newtag->bounce_flags |= parent->bounce_flags & BF_COHERENT;
+ }
+
+ if ((newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
+ (newtag->common.alignment > 1)) && newtag->iommu == NULL)
+ newtag->bounce_flags |= BF_COULD_BOUNCE;
+
+ if ((flags & BUS_DMA_ALLOCNOW) != 0)
+ error = bounce_bus_dma_zone_setup(newtag);
+ else
+ error = 0;
+
+ if (error != 0) {
+ free(newtag, M_DEVBUF);
+ } else {
+ *dmat = newtag;
+ }
+ CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d\n",
+ __func__, newtag, (newtag != NULL ? newtag->common.flags : 0), error);
+ return (error);
+}
+
+static int
+bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
+{
+ int error = 0;
+
+ if (dmat != NULL) {
+ if (dmat->map_count != 0) {
+ error = EBUSY;
+ goto out;
+ }
+ if (dmat->segments != NULL)
+ free(dmat->segments, M_DEVBUF);
+ free(dmat, M_DEVBUF);
+ }
+out:
+ CTR3(KTR_BUSDMA, "%s tag %p error %d\n", __func__, dmat, error);
+ return (error);
+}
+
+/*
+ * Update the domain for the tag. We may need to reallocate the zone and
+ * bounce pages.
+ */
+static int
+bounce_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
+{
+
+ KASSERT(dmat->map_count == 0,
+ ("bounce_bus_dma_tag_set_domain: Domain set after use.\n"));
+ if ((dmat->bounce_flags & BF_COULD_BOUNCE) == 0 ||
+ dmat->bounce_zone == NULL)
+ return (0);
+ dmat->bounce_flags &= ~BF_MIN_ALLOC_COMP;
+ return (bounce_bus_dma_zone_setup(dmat));
+}
+
+static bool
+bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+{
+
+ if (!might_bounce(dmat, NULL, buf, buflen))
+ return (true);
+ return (!_bus_dmamap_pagesneeded(dmat, NULL, buf, buflen, NULL));
+}
+
+static bus_dmamap_t
+alloc_dmamap(bus_dma_tag_t dmat, int flags)
+{
+ u_long mapsize;
+ bus_dmamap_t map;
+
+ mapsize = sizeof(*map);
+ mapsize += sizeof(struct sync_list) * dmat->common.nsegments;
+ map = malloc(mapsize, M_DEVBUF, flags | M_ZERO);
+ if (map == NULL)
+ return (NULL);
+
+ /* Initialize the new map */
+ STAILQ_INIT(&map->bpages);
+
+ /* temporary glue for existing pSeries IOMMU setup */
+ map->nsegs = 0;
+
+ return (map);
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+static int
+bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+ int error;
+
+ error = 0;
+
+ if (dmat->segments == NULL) {
+ dmat->segments = mallocarray_domainset(dmat->common.nsegments,
+ sizeof(bus_dma_segment_t), M_DEVBUF,
+ DOMAINSET_PREF(dmat->common.domain), M_NOWAIT);
+ if (dmat->segments == NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d\n",
+ __func__, dmat, ENOMEM);
+ return (ENOMEM);
+ }
+ }
+
+ *mapp = alloc_dmamap(dmat, M_NOWAIT);
+ if (*mapp == NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d\n",
+ __func__, dmat, ENOMEM);
+ return (ENOMEM);
+ }
+
+ /*
+ * Bouncing might be required if the driver asks for an active
+ * exclusion region, a data alignment that is stricter than 1, and/or
+ * an active address boundary.
+ */
+ if (dmat->bounce_flags & BF_COULD_BOUNCE) {
+ /* Must bounce */
+ struct bounce_zone *bz;
+ int maxpages;
+
+ if (dmat->bounce_zone == NULL) {
+ if ((error = alloc_bounce_zone(dmat)) != 0)
+ return (error);
+ }
+ bz = dmat->bounce_zone;
+
+ (*mapp)->flags = DMAMAP_COULD_BOUNCE;
+
+ /*
+ * Attempt to add pages to our pool on a per-instance
+ * basis up to a sane limit.
+ */
+ if (dmat->common.alignment > 1)
+ maxpages = MAX_BPAGES;
+ else
+ maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->common.lowaddr));
+ if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP) == 0
+ || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
+ int pages;
+
+ pages = MAX(atop(dmat->common.maxsize), 1);
+ pages = MIN(maxpages - bz->total_bpages, pages);
+ pages = MAX(pages, 1);
+ if (alloc_bounce_pages(dmat, pages) < pages)
+ error = ENOMEM;
+
+ if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP) == 0) {
+ if (error == 0)
+ dmat->bounce_flags |= BF_MIN_ALLOC_COMP;
+ } else {
+ error = 0;
+ }
+ }
+ bz->map_count++;
+ }
+
+ if (error == 0) {
+ dmat->map_count++;
+ if ((dmat->bounce_flags & BF_COHERENT) != 0)
+ (*mapp)->flags |= DMAMAP_COHERENT;
+ } else {
+ free(*mapp, M_DEVBUF);
+ }
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d\n",
+ __func__, dmat, dmat->common.flags, error);
+ return (error);
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+static int
+bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ /* Check we are destroying the correct map type */
+ if ((map->flags & DMAMAP_FROM_DMAMEM) != 0)
+ panic("bounce_bus_dmamap_destroy: Invalid map freed\n");
+
+ if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d\n", __func__, dmat, EBUSY);
+ return (EBUSY);
+ }
+ if (dmat->bounce_zone) {
+ KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
+ ("%s: Bounce zone when cannot bounce", __func__));
+ dmat->bounce_zone->map_count--;
+ }
+ free(map, M_DEVBUF);
+ dmat->map_count--;
+ CTR2(KTR_BUSDMA, "%s: tag %p error 0\n", __func__, dmat);
+ return (0);
+}
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with dmamap_load is also allocated.
+ */
+static int
+bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp)
+{
+ vm_memattr_t attr;
+ int mflags;
+
+ if (flags & BUS_DMA_NOWAIT)
+ mflags = M_NOWAIT;
+ else
+ mflags = M_WAITOK;
+
+ if (dmat->segments == NULL) {
+ dmat->segments = mallocarray_domainset(dmat->common.nsegments,
+ sizeof(bus_dma_segment_t), M_DEVBUF,
+ DOMAINSET_PREF(dmat->common.domain), mflags);
+ if (dmat->segments == NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d\n",
+ __func__, dmat, dmat->common.flags, ENOMEM);
+ return (ENOMEM);
+ }
+ }
+ if (flags & BUS_DMA_ZERO)
+ mflags |= M_ZERO;
+ if (flags & BUS_DMA_NOCACHE)
+ attr = VM_MEMATTR_UNCACHEABLE;
+ else if ((flags & BUS_DMA_COHERENT) != 0 &&
+ (dmat->bounce_flags & BF_COHERENT) == 0)
+ /*
+ * If we have a non-coherent tag, and are trying to allocate
+ * a coherent block of memory it needs to be uncached.
+ */
+ attr = VM_MEMATTR_UNCACHEABLE;
+ else
+ attr = VM_MEMATTR_DEFAULT;
+
+ /*
+ * Create the map, but don't set the could bounce flag as
+ * this allocation should never bounce;
+ */
+ *mapp = alloc_dmamap(dmat, mflags);
+ if (*mapp == NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d\n",
+ __func__, dmat, dmat->common.flags, ENOMEM);
+ return (ENOMEM);
+ }
+
+ /*
+ * Mark the map as coherent if we used uncacheable memory or the
+ * tag was already marked as coherent.
+ */
+ if (attr == VM_MEMATTR_UNCACHEABLE ||
+ (dmat->bounce_flags & BF_COHERENT) != 0)
+ (*mapp)->flags |= DMAMAP_COHERENT;
+
+ (*mapp)->flags |= DMAMAP_FROM_DMAMEM;
+
+ /*
+ * XXX:
+ * (dmat->common.alignment <= dmat->common.maxsize) is just a quick hack; the exact
+ * alignment guarantees of malloc need to be nailed down, and the
+ * code below should be rewritten to take that into account.
+ *
+ * In the meantime, we'll warn the user if malloc gets it wrong.
+ */
+ if ((dmat->common.maxsize <= PAGE_SIZE) &&
+ (dmat->common.alignment <= dmat->common.maxsize) &&
+ dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
+ attr == VM_MEMATTR_DEFAULT) {
+ *vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
+ } else {
+ /*
+ * XXX Use Contigmalloc until it is merged into this facility
+ * and handles multi-seg allocations. Nobody is doing
+ * multi-seg allocations yet though.
+ * XXX Certain AGP hardware does.
+ */
+ *vaddr = kmem_alloc_contig(dmat->common.maxsize, mflags, 0ul,
+ dmat->common.lowaddr, dmat->common.alignment ? dmat->common.alignment : 1ul,
+ dmat->common.boundary, attr);
+ dmat->bounce_flags |= BF_KMEM_ALLOC;
+ }
+ if (*vaddr == NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d\n",
+ __func__, dmat, dmat->common.flags, ENOMEM);
+ return (ENOMEM);
+ } else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->common.alignment)) {
+ printf("bus_dmamem_alloc failed to align memory properly.\n");
+ }
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d\n",
+ __func__, dmat, dmat->common.flags, 0);
+ return (0);
+}
+
+/*
+ * Free a piece of memory and its associated dmamap that was allocated
+ * via bus_dmamem_alloc.
+ */
+static void
+bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+ /*
+ * Check the map came from bounce_bus_dmamem_alloc, so the map
+ * should be NULL and the BF_KMEM_ALLOC flag cleared if malloc()
+ * was used and set if kmem_alloc_contig() was used.
+ */
+ if ((map->flags & DMAMAP_FROM_DMAMEM) == 0)
+ panic("bus_dmamem_free: Invalid map freed\n");
+ if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0)
+ free(vaddr, M_DEVBUF);
+ else
+ kmem_free(vaddr, dmat->common.maxsize);
+ free(map, M_DEVBUF);
+ dmat->map_count--;
+ CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
+ dmat->bounce_flags);
+}
+
+static bool
+_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+ bus_size_t buflen, int *pagesneeded)
+{
+ bus_addr_t curaddr;
+ bus_size_t sgsize;
+ int count;
+
+ /*
+ * Count the number of bounce pages needed in order to
+ * complete this transfer
+ */
+ count = 0;
+ curaddr = buf;
+ while (buflen != 0) {
+ sgsize = buflen;
+ if (must_bounce(dmat, map, curaddr, sgsize)) {
+ sgsize = MIN(sgsize,
+ PAGE_SIZE - (curaddr & PAGE_MASK));
+ if (pagesneeded == NULL)
+ return (true);
+ count++;
+ }
+ curaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ if (pagesneeded != NULL)
+ *pagesneeded = count;
+ return (count != 0);
+}
+
+static void
+_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+ bus_size_t buflen, int flags)
+{
+
+ if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) {
+ CTR4(KTR_BUSDMA, "lowaddr= 0x%lx Maxmem= %ld, boundary= %ld, "
+ "alignment= %ld", dmat->common.lowaddr, ptoa((vm_paddr_t)Maxmem),
+ dmat->common.boundary, dmat->common.alignment);
+ CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
+ _bus_dmamap_pagesneeded(dmat, map, buf, buflen,
+ &map->pagesneeded);
+ CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+ }
+}
+
+static void
+_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
+ void *buf, bus_size_t buflen, int flags)
+{
+ vm_offset_t vaddr;
+ vm_offset_t vendaddr;
+ bus_addr_t paddr;
+
+ if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) {
+ CTR4(KTR_BUSDMA, "lowaddr= 0x%lx Maxmem= %ld, boundary= %ld, "
+ "alignment= %ld", dmat->common.lowaddr, ptoa((vm_paddr_t)Maxmem),
+ dmat->common.boundary, dmat->common.alignment);
+ CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
+ /*
+ * Count the number of bounce pages
+ * needed in order to complete this transfer
+ */
+ vaddr = (vm_offset_t)buf;
+ vendaddr = (vm_offset_t)buf + buflen;
+
+ while (vaddr < vendaddr) {
+ bus_size_t sg_len;
+
+ sg_len = MIN(vendaddr - vaddr,
+ PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
+ if (pmap == kernel_pmap)
+ paddr = pmap_kextract(vaddr);
+ else
+ paddr = pmap_extract(pmap, vaddr);
+ if (must_bounce(dmat, map, paddr, sg_len)) {
+ sg_len = roundup2(sg_len, dmat->common.alignment);
+ map->pagesneeded++;
+ }
+ vaddr += sg_len;
+ }
+ CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+ }
+}
+
+/*
+ * Utility function to load a physical buffer. segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ */
+static int
+bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat,
+ bus_dmamap_t map,
+ vm_paddr_t buf, bus_size_t buflen,
+ int flags,
+ bus_dma_segment_t *segs,
+ int *segp)
+{
+ struct sync_list *sl;
+ bus_size_t sgsize;
+ bus_addr_t curaddr, sl_end;
+ int error;
+
+ if (segs == NULL)
+ segs = dmat->segments;
+
+ if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) {
+ _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
+ if (map->pagesneeded != 0) {
+ error = _bus_dmamap_reserve_pages(dmat, map, flags);
+ if (error)
+ return (error);
+ }
+ }
+
+ sl = map->slist + map->sync_count - 1;
+ sl_end = 0;
+
+ while (buflen > 0) {
+ curaddr = buf;
+ sgsize = buflen;
+ if (map->pagesneeded != 0 &&
+ must_bounce(dmat, map, curaddr, sgsize)) {
+ sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
+ curaddr = add_bounce_page(dmat, map, 0, curaddr,
+ sgsize);
+ } else if ((map->flags & DMAMAP_COHERENT) == 0) {
+ if (map->sync_count > 0)
+ sl_end = sl->paddr + sl->datacount;
+
+ if (map->sync_count == 0 || curaddr != sl_end) {
+ if (++map->sync_count > dmat->common.nsegments)
+ break;
+ sl++;
+ sl->vaddr = 0;
+ sl->paddr = curaddr;
+ sl->datacount = sgsize;
+ sl->pages = PHYS_TO_VM_PAGE(curaddr);
+ KASSERT(sl->pages != NULL,
+ ("%s: page at PA:0x%08lx is not in "
+ "vm_page_array", __func__, curaddr));
+ } else
+ sl->datacount += sgsize;
+ }
+ if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
+ segp))
+ break;
+ buf += sgsize;
+ buflen -= sgsize;
+ }
+
+ /*
+ * Did we fit?
+ */
+ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+/*
+ * Utility function to load a linear buffer. segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ */
+static int
+bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ struct sync_list *sl;
+ bus_size_t sgsize;
+ bus_addr_t curaddr, sl_pend;
+ vm_offset_t kvaddr, vaddr, sl_vend;
+ int error;
+
+ if (segs == NULL)
+ segs = dmat->segments;
+
+ if (flags & BUS_DMA_LOAD_MBUF)
+ map->flags |= DMAMAP_MBUF;
+
+ if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) {
+ _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
+ if (map->pagesneeded != 0) {
+ error = _bus_dmamap_reserve_pages(dmat, map, flags);
+ if (error)
+ return (error);
+ }
+ }
+
+ sl = map->slist + map->sync_count - 1;
+ vaddr = (vm_offset_t)buf;
+ sl_pend = 0;
+ sl_vend = 0;
+
+ while (buflen > 0) {
+ /*
+ * Get the physical address for this segment.
+ */
+ if (pmap == kernel_pmap) {
+ curaddr = pmap_kextract(vaddr);
+ kvaddr = vaddr;
+ } else {
+ curaddr = pmap_extract(pmap, vaddr);
+ kvaddr = 0;
+ }
+
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ sgsize = MIN(buflen, PAGE_SIZE - (curaddr & PAGE_MASK));
+ if (map->pagesneeded != 0 &&
+ must_bounce(dmat, map, curaddr, sgsize)) {
+ sgsize = roundup2(sgsize, dmat->common.alignment);
+ sgsize = MIN(sgsize, buflen);
+ curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
+ sgsize);
+ } else if ((map->flags & DMAMAP_COHERENT) == 0) {
+ if (map->sync_count > 0) {
+ sl_pend = sl->paddr + sl->datacount;
+ sl_vend = sl->vaddr + sl->datacount;
+ }
+
+ if (map->sync_count == 0 ||
+ (kvaddr != 0 && kvaddr != sl_vend) ||
+ (curaddr != sl_pend)) {
+ if (++map->sync_count > dmat->common.nsegments)
+ goto cleanup;
+ sl++;
+ sl->vaddr = kvaddr;
+ sl->paddr = curaddr;
+ if (kvaddr != 0) {
+ sl->pages = NULL;
+ } else {
+ sl->pages = PHYS_TO_VM_PAGE(curaddr);
+ KASSERT(sl->pages != NULL,
+ ("%s: page at PA:0x%08lx is not "
+ "in vm_page_array", __func__,
+ curaddr));
+ }
+ sl->datacount = sgsize;
+ } else
+ sl->datacount += sgsize;
+ }
+
+ if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
+ segp))
+ break;
+ vaddr += sgsize;
+ buflen -= MIN(sgsize, buflen); /* avoid underflow */
+ }
+
+cleanup:
+ /*
+ * Did we fit?
+ */
+ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+static void
+bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct memdesc *mem, bus_dmamap_callback_t *callback,
+ void *callback_arg)
+{
+
+ if ((map->flags & DMAMAP_COULD_BOUNCE) == 0)
+ return;
+
+ map->mem = *mem;
+ map->dmat = dmat;
+ map->callback = callback;
+ map->callback_arg = callback_arg;
+}
+
+static bus_dma_segment_t *
+bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, int error)
+{
+ if (segs == NULL)
+ segs = dmat->segments;
+
+ /* temporary glue for existing pSeries IOMMU setup */
+ map->nsegs = nsegs;
+ if (dmat->iommu != NULL)
+ IOMMU_MAP(dmat->iommu, segs, &map->nsegs,
+ dmat->common.lowaddr, dmat->common.highaddr, dmat->common.alignment,
+ dmat->common.boundary, dmat->iommu_cookie);
+
+ return (segs);
+}
+
+/*
+ * Release the mapping held by map.
+ */
+static void
+bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ /* temporary glue for existing pSeries IOMMU setup */
+ if (dmat->iommu) {
+ IOMMU_UNMAP(dmat->iommu, dmat->segments, map->nsegs,
+ dmat->iommu_cookie);
+ map->nsegs = 0;
+ }
+
+ free_bounce_pages(dmat, map);
+ map->sync_count = 0;
+ map->flags &= ~DMAMAP_MBUF;
+}
+
+static void
+bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+{
+ struct bounce_page *bpage;
+ struct sync_list *sl, *end;
+ vm_offset_t datavaddr, tempvaddr;
+
+ if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+ /*
+ * Handle data bouncing. We might also
+ * want to add support for invalidating
+ * the caches on broken hardware
+ */
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
+ "performing bounce\n", __func__, dmat, dmat->common.flags, op);
+
+ if (op & BUS_DMASYNC_PREWRITE) {
+ while (bpage != NULL) {
+ tempvaddr = 0;
+ datavaddr = bpage->datavaddr;
+ if (datavaddr == 0) {
+ tempvaddr = pmap_quick_enter_page(
+ bpage->datapage);
+ datavaddr = tempvaddr |
+ bpage->dataoffs;
+ }
+
+ bcopy((void *)datavaddr,
+ (void *)bpage->vaddr, bpage->datacount);
+
+ if (tempvaddr != 0)
+ pmap_quick_remove_page(tempvaddr);
+ if ((map->flags & DMAMAP_COHERENT) == 0)
+ cpu_flush_dcache((void*)bpage->vaddr,
+ bpage->datacount);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ dmat->bounce_zone->total_bounced++;
+ } else if ((op & BUS_DMASYNC_PREREAD) != 0) {
+ while (bpage != NULL) {
+ if ((map->flags & DMAMAP_COHERENT) == 0)
+ cpu_flush_dcache((void*)bpage->vaddr,
+ bpage->datacount);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ }
+
+ if (op & BUS_DMASYNC_POSTREAD) {
+ while (bpage != NULL) {
+ if ((map->flags & DMAMAP_COHERENT) == 0)
+ cpu_flush_dcache((void*)bpage->vaddr,
+ bpage->datacount);
+ tempvaddr = 0;
+ datavaddr = bpage->datavaddr;
+ if (datavaddr == 0) {
+ tempvaddr = pmap_quick_enter_page(
+ bpage->datapage);
+ datavaddr = tempvaddr |
+ bpage->dataoffs;
+ }
+
+ bcopy((void *)bpage->vaddr,
+ (void *)datavaddr, bpage->datacount);
+
+ if (tempvaddr != 0)
+ pmap_quick_remove_page(tempvaddr);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ dmat->bounce_zone->total_bounced++;
+ }
+ }
+
+ /*
+ * Cache maintenance for normal (non-COHERENT non-bounce) buffers.
+ */
+ if (map->sync_count != 0) {
+ sl = &map->slist[0];
+ end = &map->slist[map->sync_count];
+ CTR3(KTR_BUSDMA, "%s: tag %p op 0x%x "
+ "performing sync\n", __func__, dmat, op);
+
+ for ( ; sl != end; ++sl)
+ cpu_flush_dcache((void*)sl, op);
+ }
+
+ if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0) {
+ powerpc_sync();
+ }
+}
+
+int
+bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie)
+{
+ tag->iommu = iommu;
+ tag->iommu_cookie = cookie;
+
+ return (0);
+}
+
+struct bus_dma_impl bus_dma_bounce_impl = {
+ .tag_create = bounce_bus_dma_tag_create,
+ .tag_destroy = bounce_bus_dma_tag_destroy,
+ .tag_set_domain = bounce_bus_dma_tag_set_domain,
+ .id_mapped = bounce_bus_dma_id_mapped,
+ .map_create = bounce_bus_dmamap_create,
+ .map_destroy = bounce_bus_dmamap_destroy,
+ .mem_alloc = bounce_bus_dmamem_alloc,
+ .mem_free = bounce_bus_dmamem_free,
+ .load_phys = bounce_bus_dmamap_load_phys,
+ .load_buffer = bounce_bus_dmamap_load_buffer,
+ .load_ma = bus_dmamap_load_ma_triv,
+ .map_waitok = bounce_bus_dmamap_waitok,
+ .map_complete = bounce_bus_dmamap_complete,
+ .map_unload = bounce_bus_dmamap_unload,
+ .map_sync = bounce_bus_dmamap_sync
+};
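
For context (not part of the patch): on a non-coherent map the sync hook above is what gives the PRE/POST operations their meaning -- PREWRITE copies client data into the bounce pages and flushes the data cache before the device reads, and POSTREAD copies bounced data back once the device has written. A hypothetical transmit/receive sequence, continuing the invented mydev_dma sketch from earlier, showing where a driver would place those calls:

/* Device is about to read (DMA from) the buffer: publish CPU writes. */
static void
mydev_start_tx(struct mydev_dma *d)
{
    bus_dmamap_sync(d->tag, d->map, BUS_DMASYNC_PREWRITE);
    /* ... hand d->busaddr to the (hypothetical) device here ... */
}

/* Device has written (DMA into) the buffer: make the data CPU-visible. */
static void
mydev_finish_rx(struct mydev_dma *d)
{
    bus_dmamap_sync(d->tag, d->map, BUS_DMASYNC_POSTREAD);
    /* The CPU may now read d->buf; bounced data has been copied back. */
}

static void
mydev_dma_teardown(struct mydev_dma *d)
{
    bus_dmamap_unload(d->tag, d->map);
    bus_dmamem_free(d->tag, d->buf, d->map);
    bus_dma_tag_destroy(d->tag);
}
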
diff --git a/sys/powerpc/powerpc/busdma_machdep.c b/sys/powerpc/powerpc/busdma_machdep.c
--- a/sys/powerpc/powerpc/busdma_machdep.c
+++ b/sys/powerpc/powerpc/busdma_machdep.c
@@ -1,9 +1,14 @@
/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
* Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * Copyright (c) 2013, 2015 The FreeBSD Foundation
* All rights reserved.
*
+ * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Portions of this software were developed by Semihalf
+ * under sponsorship of the FreeBSD Foundation.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -26,761 +31,147 @@
* SUCH DAMAGE.
*/
-/*
- * From amd64/busdma_machdep.c, r204214
- */
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
-#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
-#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
-#include <sys/sysctl.h>
#include <sys/uio.h>
-
#include <vm/vm.h>
#include <vm/vm_extern.h>
-#include <vm/vm_kern.h>
-#include <vm/vm_page.h>
-#include <vm/vm_map.h>
+#include <vm/pmap.h>
-#include <machine/atomic.h>
#include <machine/bus.h>
-#include <machine/cpufunc.h>
-#include <machine/md_var.h>
-
-#include "iommu_if.h"
-
-#define MAX_BPAGES MIN(8192, physmem/40)
-
-struct bounce_page;
-struct bounce_zone;
-
-struct bus_dma_tag {
- bus_size_t alignment;
- bus_addr_t boundary;
- bus_addr_t lowaddr;
- bus_addr_t highaddr;
- bus_size_t maxsize;
- bus_size_t maxsegsz;
- u_int nsegments;
- int flags;
- int map_count;
- bus_dma_lock_t *lockfunc;
- void *lockfuncarg;
- struct bounce_zone *bounce_zone;
- device_t iommu;
- void *iommu_cookie;
-};
-
-static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
- "Busdma parameters");
-
-struct bus_dmamap {
- STAILQ_HEAD(, bounce_page) bpages;
- int pagesneeded;
- int pagesreserved;
- bus_dma_tag_t dmat;
- struct memdesc mem;
- bus_dma_segment_t *segments;
- int nsegs;
- bus_dmamap_callback_t *callback;
- void *callback_arg;
- __sbintime_t queued_time;
- STAILQ_ENTRY(bus_dmamap) links;
- int contigalloc;
-};
-
-static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
+#include <machine/bus_dma_impl.h>
-#define dmat_alignment(dmat) ((dmat)->alignment)
-#define dmat_bounce_flags(dmat) (0)
-#define dmat_boundary(dmat) ((dmat)->boundary)
-#define dmat_flags(dmat) ((dmat)->flags)
-#define dmat_highaddr(dmat) ((dmat)->highaddr)
-#define dmat_lowaddr(dmat) ((dmat)->lowaddr)
-#define dmat_lockfunc(dmat) ((dmat)->lockfunc)
-#define dmat_lockfuncarg(dmat) ((dmat)->lockfuncarg)
-#define dmat_maxsegsz(dmat) ((dmat)->maxsegsz)
-#define dmat_nsegments(dmat) ((dmat)->nsegments)
-
-#include "../../kern/subr_busdma_bounce.c"
-
-/*
- * Returns true if the address falls within the tag's exclusion window, or
- * fails to meet its alignment requirements.
- */
-static __inline bool
-must_bounce(bus_dma_tag_t dmat, bus_addr_t paddr)
-{
-
- if (dmat->iommu == NULL && paddr > dmat->lowaddr &&
- paddr <= dmat->highaddr)
- return (true);
- if (!vm_addr_align_ok(paddr, dmat->alignment))
- return (true);
-
- return (false);
-}
-
-#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3
-#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
-/*
- * Allocate a device specific dma_tag.
- */
int
-bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
- bus_addr_t boundary, bus_addr_t lowaddr,
- bus_addr_t highaddr, bus_dma_filter_t *filter,
- void *filterarg, bus_size_t maxsize, int nsegments,
- bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
- void *lockfuncarg, bus_dma_tag_t *dmat)
+common_bus_dma_tag_create(struct bus_dma_tag_common *parent,
+ bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, size_t sz, void **dmat)
{
- bus_dma_tag_t newtag;
- int error = 0;
+ void *newtag;
+ struct bus_dma_tag_common *common;
+ KASSERT(sz >= sizeof(struct bus_dma_tag_common), ("sz"));
+ /* Return a NULL tag on failure */
+ *dmat = NULL;
/* Basic sanity checking */
if (boundary != 0 && boundary < maxsegsz)
maxsegsz = boundary;
-
- if (maxsegsz == 0) {
+ if (maxsegsz == 0)
return (EINVAL);
- }
- /* Filters are no longer supported. */
- if (filter != NULL || filterarg != NULL)
- return (EINVAL);
-
- /* Return a NULL tag on failure */
- *dmat = NULL;
-
- newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
- M_ZERO | M_NOWAIT);
+ newtag = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
if (newtag == NULL) {
CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
- __func__, newtag, 0, error);
+ __func__, newtag, 0, ENOMEM);
return (ENOMEM);
}
- newtag->alignment = alignment;
- newtag->boundary = boundary;
- newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
- newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
- newtag->maxsize = maxsize;
- newtag->nsegments = nsegments;
- newtag->maxsegsz = maxsegsz;
- newtag->flags = flags;
- newtag->map_count = 0;
+ common = newtag;
+ common->impl = &bus_dma_bounce_impl;
+ common->alignment = alignment;
+ common->boundary = boundary;
+ common->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
+ common->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
+ common->maxsize = maxsize;
+ common->nsegments = nsegments;
+ common->maxsegsz = maxsegsz;
+ common->flags = flags;
if (lockfunc != NULL) {
- newtag->lockfunc = lockfunc;
- newtag->lockfuncarg = lockfuncarg;
+ common->lockfunc = lockfunc;
+ common->lockfuncarg = lockfuncarg;
} else {
- newtag->lockfunc = _busdma_dflt_lock;
- newtag->lockfuncarg = NULL;
+ common->lockfunc = _busdma_dflt_lock;
+ common->lockfuncarg = NULL;
}
/* Take into account any restrictions imposed by our parent tag */
if (parent != NULL) {
- newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
- newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
- if (newtag->boundary == 0)
- newtag->boundary = parent->boundary;
- else if (parent->boundary != 0)
- newtag->boundary = MIN(parent->boundary,
- newtag->boundary);
-
- newtag->iommu = parent->iommu;
- newtag->iommu_cookie = parent->iommu_cookie;
- }
-
- if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
- newtag->flags |= BUS_DMA_COULD_BOUNCE;
-
- if (newtag->alignment > 1)
- newtag->flags |= BUS_DMA_COULD_BOUNCE;
-
- if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
- (flags & BUS_DMA_ALLOCNOW) != 0) {
- struct bounce_zone *bz;
-
- /* Must bounce */
-
- if ((error = alloc_bounce_zone(newtag)) != 0) {
- free(newtag, M_DEVBUF);
- return (error);
+ common->impl = parent->impl;
+ common->lowaddr = MIN(parent->lowaddr, common->lowaddr);
+ common->highaddr = MAX(parent->highaddr, common->highaddr);
+ if (common->boundary == 0)
+ common->boundary = parent->boundary;
+ else if (parent->boundary != 0) {
+ common->boundary = MIN(parent->boundary,
+ common->boundary);
}
- bz = newtag->bounce_zone;
-
- if (ptoa(bz->total_bpages) < maxsize) {
- int pages;
-
- pages = atop(maxsize) - bz->total_bpages;
-
- /* Add pages to our bounce pool */
- if (alloc_bounce_pages(newtag, pages) < pages)
- error = ENOMEM;
- }
- /* Performed initial allocation */
- newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
- }
-
- if (error != 0) {
- free(newtag, M_DEVBUF);
- } else {
- *dmat = newtag;
}
- CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
- __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
- return (error);
-}
-
-void
-bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat)
-{
-
- if (t == NULL || dmat == NULL)
- return;
-
- t->alignment = dmat->alignment;
- t->boundary = dmat->boundary;
- t->lowaddr = dmat->lowaddr;
- t->highaddr = dmat->highaddr;
- t->maxsize = dmat->maxsize;
- t->nsegments = dmat->nsegments;
- t->maxsegsize = dmat->maxsegsz;
- t->flags = dmat->flags;
- t->lockfunc = dmat->lockfunc;
- t->lockfuncarg = dmat->lockfuncarg;
-}
-
-int
-bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
-{
-
+ *dmat = common;
return (0);
}
-int
-bus_dma_tag_destroy(bus_dma_tag_t dmat)
-{
- int error = 0;
-
- if (dmat != NULL) {
- if (dmat->map_count != 0) {
- error = EBUSY;
- goto out;
- }
-
- free(dmat, M_DEVBUF);
- }
-out:
- CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat, error);
- return (error);
-}
-
/*
- * Allocate a handle for mapping from kva/uva/physical
- * address space into bus device space.
+ * Allocate a device specific dma_tag.
*/
int
-bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+ bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+ int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat)
{
+ struct bus_dma_tag_common *tc;
int error;
- error = 0;
-
- *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
- M_NOWAIT | M_ZERO);
- if (*mapp == NULL) {
- CTR3(KTR_BUSDMA, "%s: tag %p error %d",
- __func__, dmat, ENOMEM);
- return (ENOMEM);
- }
-
- /* Initialize the new map */
- STAILQ_INIT(&((*mapp)->bpages));
-
- /*
- * Bouncing might be required if the driver asks for an active
- * exclusion region, a data alignment that is stricter than 1, and/or
- * an active address boundary.
- */
- if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
- /* Must bounce */
- struct bounce_zone *bz;
- int maxpages;
-
- if (dmat->bounce_zone == NULL) {
- if ((error = alloc_bounce_zone(dmat)) != 0)
- return (error);
- }
- bz = dmat->bounce_zone;
-
- /*
- * Attempt to add pages to our pool on a per-instance
- * basis up to a sane limit.
- */
- if (dmat->alignment > 1)
- maxpages = MAX_BPAGES;
- else
- maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr));
- if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
- || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
- int pages;
-
- pages = MAX(atop(dmat->maxsize), 1);
- pages = MIN(maxpages - bz->total_bpages, pages);
- pages = MAX(pages, 1);
- if (alloc_bounce_pages(dmat, pages) < pages)
- error = ENOMEM;
-
- if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
- if (error == 0)
- dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
- } else {
- error = 0;
- }
- }
- bz->map_count++;
- }
-
- (*mapp)->nsegs = 0;
- (*mapp)->segments = (bus_dma_segment_t *)malloc(
- sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
- M_NOWAIT);
- if ((*mapp)->segments == NULL) {
- CTR3(KTR_BUSDMA, "%s: tag %p error %d",
- __func__, dmat, ENOMEM);
- return (ENOMEM);
- }
-
- if (error == 0)
- dmat->map_count++;
- CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
- __func__, dmat, dmat->flags, error);
- return (error);
-}
-
-/*
- * Destroy a handle for mapping from kva/uva/physical
- * address space into bus device space.
- */
-int
-bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
-{
- if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
- if (STAILQ_FIRST(&map->bpages) != NULL) {
- CTR3(KTR_BUSDMA, "%s: tag %p error %d",
- __func__, dmat, EBUSY);
- return (EBUSY);
- }
- if (dmat->bounce_zone)
- dmat->bounce_zone->map_count--;
- }
- free(map->segments, M_DEVBUF);
- free(map, M_DEVBUF);
- dmat->map_count--;
- CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
- return (0);
-}
-
-/*
- * Allocate a piece of memory that can be efficiently mapped into
- * bus device space based on the constraints lited in the dma tag.
- * A dmamap to for use with dmamap_load is also allocated.
- */
-int
-bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
- bus_dmamap_t *mapp)
-{
- vm_memattr_t attr;
- int mflags;
-
- if (flags & BUS_DMA_NOWAIT)
- mflags = M_NOWAIT;
- else
- mflags = M_WAITOK;
-
- bus_dmamap_create(dmat, flags, mapp);
-
- if (flags & BUS_DMA_ZERO)
- mflags |= M_ZERO;
- if (flags & BUS_DMA_NOCACHE)
- attr = VM_MEMATTR_UNCACHEABLE;
- else
- attr = VM_MEMATTR_DEFAULT;
+ /* Filters are no longer supported. */
+ if (filter != NULL || filterarg != NULL)
+ return (EINVAL);
- /*
- * XXX:
- * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
- * alignment guarantees of malloc need to be nailed down, and the
- * code below should be rewritten to take that into account.
- *
- * In the meantime, we'll warn the user if malloc gets it wrong.
- */
- if ((dmat->maxsize <= PAGE_SIZE) &&
- (dmat->alignment <= dmat->maxsize) &&
- dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
- attr == VM_MEMATTR_DEFAULT) {
- *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
+ if (parent == NULL) {
+ error = bus_dma_bounce_impl.tag_create(parent, alignment,
+ boundary, lowaddr, highaddr, maxsize, nsegments, maxsegsz,
+ flags, lockfunc, lockfuncarg, dmat);
} else {
- /*
- * XXX Use Contigmalloc until it is merged into this facility
- * and handles multi-seg allocations. Nobody is doing
- * multi-seg allocations yet though.
- * XXX Certain AGP hardware does.
- */
- *vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
- dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
- dmat->boundary, attr);
- (*mapp)->contigalloc = 1;
+ tc = (struct bus_dma_tag_common *)parent;
+ error = tc->impl->tag_create(parent, alignment,
+ boundary, lowaddr, highaddr, maxsize, nsegments, maxsegsz,
+ flags, lockfunc, lockfuncarg, dmat);
}
- if (*vaddr == NULL) {
- CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
- __func__, dmat, dmat->flags, ENOMEM);
- return (ENOMEM);
- } else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alignment)) {
- printf("bus_dmamem_alloc failed to align memory properly.\n");
- }
- CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
- __func__, dmat, dmat->flags, 0);
- return (0);
+ return (error);
}
-/*
- * Free a piece of memory and it's allociated dmamap, that was allocated
- * via bus_dmamem_alloc.
- */
void
-bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
-{
-
- if (!map->contigalloc)
- free(vaddr, M_DEVBUF);
- else
- kmem_free(vaddr, dmat->maxsize);
- bus_dmamap_destroy(dmat, map);
- CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
-}
-
-static void
-_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
- bus_size_t buflen, int flags)
-{
- bus_addr_t curaddr;
- bus_size_t sgsize;
-
- if (map->pagesneeded == 0) {
- CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
- "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
- dmat->boundary, dmat->alignment);
- CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
- /*
- * Count the number of bounce pages
- * needed in order to complete this transfer
- */
- curaddr = buf;
- while (buflen != 0) {
- sgsize = buflen;
- if (must_bounce(dmat, curaddr)) {
- sgsize = MIN(sgsize,
- PAGE_SIZE - (curaddr & PAGE_MASK));
- map->pagesneeded++;
- }
- curaddr += sgsize;
- buflen -= sgsize;
- }
- CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
- }
-}
-
-static void
-_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
- void *buf, bus_size_t buflen, int flags)
-{
- vm_offset_t vaddr;
- vm_offset_t vendaddr;
- bus_addr_t paddr;
-
- if (map->pagesneeded == 0) {
- CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
- "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
- dmat->boundary, dmat->alignment);
- CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
- /*
- * Count the number of bounce pages
- * needed in order to complete this transfer
- */
- vaddr = (vm_offset_t)buf;
- vendaddr = (vm_offset_t)buf + buflen;
-
- while (vaddr < vendaddr) {
- bus_size_t sg_len;
-
- sg_len = MIN(vendaddr - vaddr,
- PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
- if (pmap == kernel_pmap)
- paddr = pmap_kextract(vaddr);
- else
- paddr = pmap_extract(pmap, vaddr);
- if (must_bounce(dmat, paddr)) {
- sg_len = roundup2(sg_len, dmat->alignment);
- map->pagesneeded++;
- }
- vaddr += sg_len;
- }
- CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
- }
-}
-
-/*
- * Utility function to load a physical buffer. segp contains
- * the starting segment on entrace, and the ending segment on exit.
- */
-int
-_bus_dmamap_load_phys(bus_dma_tag_t dmat,
- bus_dmamap_t map,
- vm_paddr_t buf, bus_size_t buflen,
- int flags,
- bus_dma_segment_t *segs,
- int *segp)
+bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat)
{
- bus_addr_t curaddr;
- bus_size_t sgsize;
- int error;
+ struct bus_dma_tag_common *common;
- if (segs == NULL)
- segs = map->segments;
+ if (t == NULL || dmat == NULL)
+ return;
- if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
- _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
- if (map->pagesneeded != 0) {
- error = _bus_dmamap_reserve_pages(dmat, map, flags);
- if (error)
- return (error);
- }
- }
+ common = (struct bus_dma_tag_common *)dmat;
- while (buflen > 0) {
- curaddr = buf;
- sgsize = buflen;
- if (map->pagesneeded != 0 && must_bounce(dmat, curaddr)) {
- sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
- curaddr = add_bounce_page(dmat, map, 0, curaddr,
- sgsize);
- }
- if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
- segp))
- break;
- buf += sgsize;
- buflen -= sgsize;
- }
-
- /*
- * Did we fit?
- */
- return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+ t->alignment = common->alignment;
+ t->boundary = common->boundary;
+ t->lowaddr = common->lowaddr;
+ t->highaddr = common->highaddr;
+ t->maxsize = common->maxsize;
+ t->nsegments = common->nsegments;
+ t->maxsegsize = common->maxsegsz;
+ t->flags = common->flags;
+ t->lockfunc = common->lockfunc;
+ t->lockfuncarg = common->lockfuncarg;
}
int
-_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
- struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
- bus_dma_segment_t *segs, int *segp)
-{
-
- return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
- segs, segp));
-}
-
-/*
- * Utility function to load a linear buffer. segp contains
- * the starting segment on entrance, and the ending segment on exit.
- */
-int
-_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
- bus_dmamap_t map,
- void *buf, bus_size_t buflen,
- pmap_t pmap,
- int flags,
- bus_dma_segment_t *segs,
- int *segp)
-{
- bus_size_t sgsize;
- bus_addr_t curaddr;
- vm_offset_t kvaddr, vaddr;
- int error;
-
- if (segs == NULL)
- segs = map->segments;
-
- if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
- _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
- if (map->pagesneeded != 0) {
- error = _bus_dmamap_reserve_pages(dmat, map, flags);
- if (error)
- return (error);
- }
- }
-
- vaddr = (vm_offset_t)buf;
-
- while (buflen > 0) {
- /*
- * Get the physical address for this segment.
- */
- if (pmap == kernel_pmap) {
- curaddr = pmap_kextract(vaddr);
- kvaddr = vaddr;
- } else {
- curaddr = pmap_extract(pmap, vaddr);
- kvaddr = 0;
- }
-
- /*
- * Compute the segment size, and adjust counts.
- */
- sgsize = MIN(buflen, PAGE_SIZE - (curaddr & PAGE_MASK));
- if (map->pagesneeded != 0 && must_bounce(dmat, curaddr)) {
- sgsize = roundup2(sgsize, dmat->alignment);
- sgsize = MIN(sgsize, buflen);
- curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
- sgsize);
- }
-
- if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
- segp))
- break;
- vaddr += sgsize;
- buflen -= MIN(sgsize, buflen); /* avoid underflow */
- }
-
- /*
- * Did we fit?
- */
- return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
-}
-
-void
-_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
- struct memdesc *mem, bus_dmamap_callback_t *callback,
- void *callback_arg)
-{
-
- if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
- map->dmat = dmat;
- map->mem = *mem;
- map->callback = callback;
- map->callback_arg = callback_arg;
- }
-}
-
-bus_dma_segment_t *
-_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
- bus_dma_segment_t *segs, int nsegs, int error)
-{
-
- map->nsegs = nsegs;
- if (segs != NULL)
- memcpy(map->segments, segs, map->nsegs*sizeof(segs[0]));
- if (dmat->iommu != NULL)
- IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
- dmat->lowaddr, dmat->highaddr, dmat->alignment,
- dmat->boundary, dmat->iommu_cookie);
-
- if (segs != NULL)
- memcpy(segs, map->segments, map->nsegs*sizeof(segs[0]));
- else
- segs = map->segments;
-
- return (segs);
-}
-
-/*
- * Release the mapping held by map.
- */
-void
-bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
-{
- if (dmat->iommu) {
- IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs, dmat->iommu_cookie);
- map->nsegs = 0;
- }
-
- free_bounce_pages(dmat, map);
-}
-
-void
-bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
- struct bounce_page *bpage;
- vm_offset_t datavaddr, tempvaddr;
-
- if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
- /*
- * Handle data bouncing. We might also
- * want to add support for invalidating
- * the caches on broken hardware
- */
- CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
- "performing bounce", __func__, dmat, dmat->flags, op);
-
- if (op & BUS_DMASYNC_PREWRITE) {
- while (bpage != NULL) {
- tempvaddr = 0;
- datavaddr = bpage->datavaddr;
- if (datavaddr == 0) {
- tempvaddr = pmap_quick_enter_page(
- bpage->datapage);
- datavaddr = tempvaddr |
- bpage->dataoffs;
- }
-
- bcopy((void *)datavaddr,
- (void *)bpage->vaddr, bpage->datacount);
-
- if (tempvaddr != 0)
- pmap_quick_remove_page(tempvaddr);
- bpage = STAILQ_NEXT(bpage, links);
- }
- dmat->bounce_zone->total_bounced++;
- }
+ struct bus_dma_tag_common *tc;
- if (op & BUS_DMASYNC_POSTREAD) {
- while (bpage != NULL) {
- tempvaddr = 0;
- datavaddr = bpage->datavaddr;
- if (datavaddr == 0) {
- tempvaddr = pmap_quick_enter_page(
- bpage->datapage);
- datavaddr = tempvaddr |
- bpage->dataoffs;
- }
-
- bcopy((void *)bpage->vaddr,
- (void *)datavaddr, bpage->datacount);
-
- if (tempvaddr != 0)
- pmap_quick_remove_page(tempvaddr);
- bpage = STAILQ_NEXT(bpage, links);
- }
- dmat->bounce_zone->total_bounced++;
- }
- }
-
- powerpc_sync();
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->tag_destroy(dmat));
}
int
-bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie)
+bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{
- tag->iommu = iommu;
- tag->iommu_cookie = cookie;
return (0);
}
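
For context (not part of the patch): bus_dma_template_clone() above now reads the constraint fields out of the common header instead of the old flat tag, so templates keep working regardless of backend. A hypothetical use of the template API to derive a tag with one tightened constraint (mydev_clone_tag is an invented name):

static int
mydev_clone_tag(bus_dma_tag_t src, bus_dma_tag_t *out)
{
    bus_dma_template_t t;

    bus_dma_template_init(&t, NULL);
    bus_dma_template_clone(&t, src);    /* copy constraints from src */
    t.maxsegsize = PAGE_SIZE;           /* tighten a single field */
    return (bus_dma_template_tag(&t, out));
}
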
diff --git a/sys/powerpc/pseries/phyp_vscsi.c b/sys/powerpc/pseries/phyp_vscsi.c
--- a/sys/powerpc/pseries/phyp_vscsi.c
+++ b/sys/powerpc/pseries/phyp_vscsi.c
@@ -36,7 +36,6 @@
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
-#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>