Page MenuHomeFreeBSD

D14971.id41125.diff
No OneTemporary

D14971.id41125.diff

Index: sys/dev/xdma/xdma.h
===================================================================
--- /dev/null
+++ sys/dev/xdma/xdma.h
@@ -0,0 +1,266 @@
+/*-
+ * Copyright (c) 2016-2018 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_XDMA_XDMA_H_
+#define _DEV_XDMA_XDMA_H_
+
+#include <sys/proc.h>
+
+/* Transfer direction for a DMA operation. */
+enum xdma_direction {
+ XDMA_MEM_TO_MEM,
+ XDMA_MEM_TO_DEV,
+ XDMA_DEV_TO_MEM,
+ XDMA_DEV_TO_DEV,
+};
+
+/* High-level operation type carried by a request. */
+enum xdma_operation_type {
+ XDMA_MEMCPY,
+ XDMA_CYCLIC,
+ XDMA_FIFO,
+ XDMA_SG,
+};
+
+/* Kind of payload a request describes. */
+enum xdma_request_type {
+ XR_TYPE_PHYS,
+ XR_TYPE_VIRT,
+ XR_TYPE_MBUF,
+ XR_TYPE_BIO,
+};
+
+/* Channel control commands (see xdma_control()). */
+enum xdma_command {
+ XDMA_CMD_BEGIN,
+ XDMA_CMD_PAUSE,
+ XDMA_CMD_TERMINATE,
+};
+
+/* Completion status reported back to consumers. */
+struct xdma_transfer_status {
+ uint32_t transferred; /* Bytes moved. */
+ int error; /* 0 on success. */
+};
+
+typedef struct xdma_transfer_status xdma_transfer_status_t;
+
+struct xdma_controller {
+ device_t dev; /* DMA consumer device_t. */
+ device_t dma_dev; /* A real DMA device_t. */
+ void *data; /* OFW MD part. */
+
+ /* List of virtual channels allocated. */
+ TAILQ_HEAD(xdma_channel_list, xdma_channel) channels;
+};
+
+typedef struct xdma_controller xdma_controller_t;
+
+/* Per-request busdma bookkeeping. */
+struct xchan_buf {
+ bus_dmamap_t map;
+ uint32_t nsegs;
+ uint32_t nsegs_left;
+ void *cbuf;
+};
+
+/* A single transfer request queued on a channel. */
+struct xdma_request {
+ struct mbuf *m; /* Valid when req_type == XR_TYPE_MBUF. */
+ struct bio *bp; /* Valid when req_type == XR_TYPE_BIO. */
+ enum xdma_operation_type operation;
+ enum xdma_request_type req_type;
+ enum xdma_direction direction;
+ bus_addr_t src_addr;
+ bus_addr_t dst_addr;
+ uint8_t src_width;
+ uint8_t dst_width;
+ bus_size_t block_num;
+ bus_size_t block_len;
+ xdma_transfer_status_t status;
+ void *user; /* Opaque consumer cookie. */
+ TAILQ_ENTRY(xdma_request) xr_next;
+ struct xchan_buf buf;
+};
+
+/* One scatter-gather entry handed to the DMA engine driver. */
+struct xdma_sglist {
+ bus_addr_t src_addr;
+ bus_addr_t dst_addr;
+ size_t len;
+ uint8_t src_width;
+ uint8_t dst_width;
+ enum xdma_direction direction;
+ bool first;
+ bool last;
+};
+
+/* Virtual DMA channel bound to one hardware channel. */
+struct xdma_channel {
+ xdma_controller_t *xdma; /* Owning controller. */
+
+ uint32_t flags;
+#define XCHAN_BUFS_ALLOCATED (1 << 0)
+#define XCHAN_SGLIST_ALLOCATED (1 << 1)
+#define XCHAN_CONFIGURED (1 << 2)
+#define XCHAN_TYPE_CYCLIC (1 << 3)
+#define XCHAN_TYPE_MEMCPY (1 << 4)
+#define XCHAN_TYPE_FIFO (1 << 5)
+#define XCHAN_TYPE_SG (1 << 6)
+
+ uint32_t caps;
+#define XCHAN_CAP_BUSDMA (1 << 0)
+#define XCHAN_CAP_BUSDMA_NOSEG (1 << 1)
+
+ /* A real hardware driver channel. */
+ void *chan;
+
+ /* Interrupt handlers. */
+ TAILQ_HEAD(, xdma_intr_handler) ie_handlers;
+ TAILQ_ENTRY(xdma_channel) xchan_next;
+
+ /* One mutex per queue plus the channel lock (see XCHAN_*/QUEUE_* macros). */
+ struct mtx mtx_lock;
+ struct mtx mtx_qin_lock;
+ struct mtx mtx_qout_lock;
+ struct mtx mtx_bank_lock;
+ struct mtx mtx_proc_lock;
+
+ /* Request queue. */
+ bus_dma_tag_t dma_tag_bufs;
+ struct xdma_request *xr_mem; /* Backing array for the bank. */
+ uint32_t xr_num; /* Number of entries in xr_mem. */
+
+ /* Bus dma tag options. */
+ bus_size_t maxsegsize;
+ bus_size_t maxnsegs;
+ bus_size_t alignment;
+ bus_addr_t boundary;
+ bus_addr_t lowaddr;
+ bus_addr_t highaddr;
+
+ struct xdma_sglist *sg;
+
+ /* Request lifecycle: bank (free) -> queue_in -> processing -> queue_out. */
+ TAILQ_HEAD(, xdma_request) bank;
+ TAILQ_HEAD(, xdma_request) queue_in;
+ TAILQ_HEAD(, xdma_request) queue_out;
+ TAILQ_HEAD(, xdma_request) processing;
+ struct mtx mtx_queue;
+};
+
+typedef struct xdma_channel xdma_channel_t;
+
+/* A consumer-registered completion callback. */
+struct xdma_intr_handler {
+ int (*cb)(void *cb_user, xdma_transfer_status_t *status);
+ void *cb_user;
+ struct mtx ih_lock;
+ TAILQ_ENTRY(xdma_intr_handler) ih_next;
+};
+
+static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
+
+#define XCHAN_LOCK(xchan) mtx_lock(&(xchan)->mtx_lock)
+#define XCHAN_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_lock)
+#define XCHAN_ASSERT_LOCKED(xchan) \
+ mtx_assert(&(xchan)->mtx_lock, MA_OWNED)
+
+#define QUEUE_IN_LOCK(xchan) mtx_lock(&(xchan)->mtx_qin_lock)
+#define QUEUE_IN_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_qin_lock)
+#define QUEUE_IN_ASSERT_LOCKED(xchan) \
+ mtx_assert(&(xchan)->mtx_qin_lock, MA_OWNED)
+
+#define QUEUE_OUT_LOCK(xchan) mtx_lock(&(xchan)->mtx_qout_lock)
+#define QUEUE_OUT_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_qout_lock)
+#define QUEUE_OUT_ASSERT_LOCKED(xchan) \
+ mtx_assert(&(xchan)->mtx_qout_lock, MA_OWNED)
+
+#define QUEUE_BANK_LOCK(xchan) mtx_lock(&(xchan)->mtx_bank_lock)
+#define QUEUE_BANK_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_bank_lock)
+#define QUEUE_BANK_ASSERT_LOCKED(xchan) \
+ mtx_assert(&(xchan)->mtx_bank_lock, MA_OWNED)
+
+#define QUEUE_PROC_LOCK(xchan) mtx_lock(&(xchan)->mtx_proc_lock)
+#define QUEUE_PROC_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_proc_lock)
+#define QUEUE_PROC_ASSERT_LOCKED(xchan) \
+ mtx_assert(&(xchan)->mtx_proc_lock, MA_OWNED)
+
+#define XDMA_SGLIST_MAXLEN 2048
+#define XDMA_MAX_SEG 128
+
+/* xDMA controller ops */
+xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
+int xdma_put(xdma_controller_t *xdma);
+
+/* xDMA channel ops */
+xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
+int xdma_channel_free(xdma_channel_t *);
+int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);
+
+/* SG interface */
+int xdma_prep_sg(xdma_channel_t *, uint32_t,
+ bus_size_t, bus_size_t, bus_size_t, bus_addr_t, bus_addr_t, bus_addr_t);
+void xdma_channel_free_sg(xdma_channel_t *xchan);
+int xdma_queue_submit_sg(xdma_channel_t *xchan);
+void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);
+
+/* Queue operations */
+int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
+ xdma_transfer_status_t *);
+int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
+ uint8_t, uint8_t, enum xdma_direction dir);
+int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
+ xdma_transfer_status_t *status);
+int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
+ uint8_t, uint8_t, enum xdma_direction dir);
+int xdma_dequeue(xdma_channel_t *xchan, void **user,
+ xdma_transfer_status_t *status);
+int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
+ uint8_t, uint8_t, bus_size_t, enum xdma_direction dir, void *);
+int xdma_queue_submit(xdma_channel_t *xchan);
+
+/* Mbuf operations */
+uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
+uint32_t xdma_mbuf_chain_count(struct mbuf *m0);
+
+/* Channel Control */
+int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);
+
+/* Interrupt callback */
+int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *,
+ xdma_transfer_status_t *), void *arg, void **);
+int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
+int xdma_teardown_all_intr(xdma_channel_t *xchan);
+void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);
+
+/* Sglist */
+int xchan_sglist_alloc(xdma_channel_t *xchan);
+void xchan_sglist_free(xdma_channel_t *xchan);
+int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
+ uint32_t nsegs, struct xdma_request *xr);
+
+/* Requests bank */
+void xchan_bank_init(xdma_channel_t *xchan);
+int xchan_bank_free(xdma_channel_t *xchan);
+struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
+int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);
+
+#endif /* !_DEV_XDMA_XDMA_H_ */
Index: sys/dev/xdma/xdma.c
===================================================================
--- /dev/null
+++ sys/dev/xdma/xdma.c
@@ -0,0 +1,405 @@
+/*-
+ * Copyright (c) 2016-2018 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/queue.h>
+#include <sys/kobj.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/sx.h>
+
+#include <machine/bus.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+
+#include <dev/xdma/xdma.h>
+
+#include <xdma_if.h>
+
+/*
+ * Multiple xDMA controllers may work with single DMA device,
+ * so we have global lock for physical channel management.
+ */
+static struct sx xdma_sx;
+
+#define XDMA_LOCK() sx_xlock(&xdma_sx)
+#define XDMA_UNLOCK() sx_xunlock(&xdma_sx)
+#define XDMA_ASSERT_LOCKED() sx_xassert(&xdma_sx, MA_OWNED)
+
+/*
+ * Allocate a virtual xDMA channel on controller 'xdma' and bind it to
+ * a real hardware channel via the driver's CHANNEL_ALLOC method.
+ * Returns NULL if the hardware driver refuses the allocation.
+ */
+xdma_channel_t *
+xdma_channel_alloc(xdma_controller_t *xdma, uint32_t caps)
+{
+ xdma_channel_t *xchan;
+ int ret;
+
+ xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
+ xchan->xdma = xdma;
+ xchan->caps = caps;
+
+ XDMA_LOCK();
+
+ /* Request a real channel from hardware driver. */
+ ret = XDMA_CHANNEL_ALLOC(xdma->dma_dev, xchan);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't request hardware channel.\n", __func__);
+ XDMA_UNLOCK();
+ free(xchan, M_XDMA);
+
+ return (NULL);
+ }
+
+ TAILQ_INIT(&xchan->ie_handlers);
+
+ mtx_init(&xchan->mtx_lock, "xDMA chan", NULL, MTX_DEF);
+ mtx_init(&xchan->mtx_qin_lock, "xDMA qin", NULL, MTX_DEF);
+ mtx_init(&xchan->mtx_qout_lock, "xDMA qout", NULL, MTX_DEF);
+ mtx_init(&xchan->mtx_bank_lock, "xDMA bank", NULL, MTX_DEF);
+ mtx_init(&xchan->mtx_proc_lock, "xDMA proc", NULL, MTX_DEF);
+
+ TAILQ_INIT(&xchan->bank);
+ TAILQ_INIT(&xchan->queue_in);
+ TAILQ_INIT(&xchan->queue_out);
+ TAILQ_INIT(&xchan->processing);
+
+ /* Publish the channel on the controller's list. */
+ TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);
+
+ XDMA_UNLOCK();
+
+ return (xchan);
+}
+
+/*
+ * Release a virtual channel: free the hardware channel, SG state
+ * (if used), all interrupt handlers and per-channel locks, then the
+ * channel itself.  Returns -1 if the driver could not free its channel,
+ * in which case the virtual channel is left intact.
+ */
+int
+xdma_channel_free(xdma_channel_t *xchan)
+{
+ xdma_controller_t *xdma;
+ int err;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ XDMA_LOCK();
+
+ /* Free the real DMA channel. */
+ err = XDMA_CHANNEL_FREE(xdma->dma_dev, xchan);
+ if (err != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't free real hw channel.\n", __func__);
+ XDMA_UNLOCK();
+ return (-1);
+ }
+
+ if (xchan->flags & XCHAN_TYPE_SG)
+ xdma_channel_free_sg(xchan);
+
+ xdma_teardown_all_intr(xchan);
+
+ mtx_destroy(&xchan->mtx_lock);
+ mtx_destroy(&xchan->mtx_qin_lock);
+ mtx_destroy(&xchan->mtx_qout_lock);
+ mtx_destroy(&xchan->mtx_bank_lock);
+ mtx_destroy(&xchan->mtx_proc_lock);
+
+ TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);
+
+ free(xchan, M_XDMA);
+
+ XDMA_UNLOCK();
+
+ return (0);
+}
+
+/*
+ * Register a transfer-completion callback on the channel.  On success
+ * the new handler is stored in *ihandler (if non-NULL) for later
+ * xdma_teardown_intr().  Returns -1 if cb is NULL.
+ *
+ * NOTE(review): ie_handlers is modified here without taking any lock
+ * (ih_lock/mtx_lock are unused) — confirm callers serialize
+ * setup/teardown against xdma_callback().
+ */
+int
+xdma_setup_intr(xdma_channel_t *xchan,
+ int (*cb)(void *, xdma_transfer_status_t *),
+ void *arg, void **ihandler)
+{
+ struct xdma_intr_handler *ih;
+ xdma_controller_t *xdma;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ /* Sanity check. */
+ if (cb == NULL) {
+ device_printf(xdma->dev,
+ "%s: Can't setup interrupt handler.\n",
+ __func__);
+
+ return (-1);
+ }
+
+ ih = malloc(sizeof(struct xdma_intr_handler),
+ M_XDMA, M_WAITOK | M_ZERO);
+ ih->cb = cb;
+ ih->cb_user = arg;
+
+ TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);
+
+ if (ihandler != NULL)
+ *ihandler = ih;
+
+ return (0);
+}
+
+/*
+ * Unregister and free a single interrupt handler previously returned
+ * by xdma_setup_intr().  Returns -1 if ih is NULL.
+ */
+int
+xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih)
+{
+ xdma_controller_t *xdma;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ /* Sanity check. */
+ if (ih == NULL) {
+ device_printf(xdma->dev,
+ "%s: Can't teardown interrupt.\n", __func__);
+ return (-1);
+ }
+
+ TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
+ free(ih, M_XDMA);
+
+ return (0);
+}
+
+/*
+ * Unregister and free every interrupt handler on the channel.
+ * Always returns 0.
+ */
+int
+xdma_teardown_all_intr(xdma_channel_t *xchan)
+{
+ struct xdma_intr_handler *ih_tmp;
+ struct xdma_intr_handler *ih;
+ xdma_controller_t *xdma;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ /* _SAFE variant: entries are freed while walking the list. */
+ TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
+ TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
+ free(ih, M_XDMA);
+ }
+
+ return (0);
+}
+
+/*
+ * Hand a single transfer request directly to the DMA engine driver
+ * (non-SG path).  Returns -1 if the driver rejects the request.
+ */
+int
+xdma_request(xdma_channel_t *xchan, struct xdma_request *req)
+{
+ xdma_controller_t *xdma;
+ int ret;
+
+ xdma = xchan->xdma;
+
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ XCHAN_LOCK(xchan);
+ ret = XDMA_CHANNEL_REQUEST(xdma->dma_dev, xchan, req);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't request a transfer.\n", __func__);
+ XCHAN_UNLOCK(xchan);
+
+ return (-1);
+ }
+ XCHAN_UNLOCK(xchan);
+
+ return (0);
+}
+
+/*
+ * Issue a control command (begin/pause/terminate) to the hardware
+ * channel.  Returns -1 on driver failure.
+ */
+int
+xdma_control(xdma_channel_t *xchan, enum xdma_command cmd)
+{
+ xdma_controller_t *xdma;
+ int ret;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, cmd);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't process command.\n", __func__);
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Called by the DMA engine driver on transfer completion: fan the
+ * status out to every registered handler, then (for SG channels)
+ * kick the queue to submit more pending work.
+ */
+void
+xdma_callback(xdma_channel_t *xchan, xdma_transfer_status_t *status)
+{
+ struct xdma_intr_handler *ih_tmp;
+ struct xdma_intr_handler *ih;
+ xdma_controller_t *xdma;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp)
+ if (ih->cb != NULL)
+ ih->cb(ih->cb_user, status);
+
+ if (xchan->flags & XCHAN_TYPE_SG)
+ xdma_queue_submit(xchan);
+}
+
+#ifdef FDT
+/*
+ * Notify the DMA driver we have machine-dependent data in FDT.
+ * The driver parses 'cells' and stores its result in xdma->data.
+ */
+static int
+xdma_ofw_md_data(xdma_controller_t *xdma, pcell_t *cells, int ncells)
+{
+ uint32_t ret;
+
+ ret = XDMA_OFW_MD_DATA(xdma->dma_dev,
+ cells, ncells, (void **)&xdma->data);
+
+ return (ret);
+}
+
+/*
+ * Allocate an xdma controller handle for the DMA engine that 'dev'
+ * references through its FDT "dmas"/"dma-names" properties; 'prop'
+ * selects the entry in "dma-names".  Returns NULL on any error.
+ */
+xdma_controller_t *
+xdma_ofw_get(device_t dev, const char *prop)
+{
+	phandle_t node, parent;
+	xdma_controller_t *xdma;
+	device_t dma_dev;
+	pcell_t *cells;
+	int ncells;
+	int error;
+	int ndmas;
+	int idx;
+
+	node = ofw_bus_get_node(dev);
+	if (node <= 0) {
+		device_printf(dev,
+		    "%s called on not ofw based device.\n", __func__);
+		/* Without an OFW node we can't look up "dmas". */
+		return (NULL);
+	}
+
+	error = ofw_bus_parse_xref_list_get_length(node,
+	    "dmas", "#dma-cells", &ndmas);
+	if (error) {
+		device_printf(dev,
+		    "%s can't get dmas list.\n", __func__);
+		return (NULL);
+	}
+
+	if (ndmas == 0) {
+		device_printf(dev,
+		    "%s dmas list is empty.\n", __func__);
+		return (NULL);
+	}
+
+	error = ofw_bus_find_string_index(node, "dma-names", prop, &idx);
+	if (error != 0) {
+		device_printf(dev,
+		    "%s can't find string index.\n", __func__);
+		return (NULL);
+	}
+
+	error = ofw_bus_parse_xref_list_alloc(node, "dmas", "#dma-cells",
+	    idx, &parent, &ncells, &cells);
+	if (error != 0) {
+		device_printf(dev,
+		    "%s can't get dma device xref.\n", __func__);
+		return (NULL);
+	}
+
+	dma_dev = OF_device_from_xref(parent);
+	if (dma_dev == NULL) {
+		device_printf(dev,
+		    "%s can't get dma device.\n", __func__);
+		/* 'cells' was allocated by the parse above; don't leak it. */
+		free(cells, M_OFWPROP);
+		return (NULL);
+	}
+
+	xdma = malloc(sizeof(struct xdma_controller),
+	    M_XDMA, M_WAITOK | M_ZERO);
+	xdma->dev = dev;
+	xdma->dma_dev = dma_dev;
+
+	TAILQ_INIT(&xdma->channels);
+
+	/* Hand the controller-specific cells to the DMA driver. */
+	xdma_ofw_md_data(xdma, cells, ncells);
+	free(cells, M_OFWPROP);
+
+	return (xdma);
+}
+#endif
+
+/*
+ * Free xDMA controller object.  Fails (leaving the controller intact)
+ * if any virtual channels are still allocated on it.
+ */
+int
+xdma_put(xdma_controller_t *xdma)
+{
+
+	XDMA_LOCK();
+
+	/* Ensure no channels allocated. */
+	if (!TAILQ_EMPTY(&xdma->channels)) {
+		device_printf(xdma->dev, "%s: Can't free xDMA\n", __func__);
+		/* Drop the global lock: it was leaked on this error path. */
+		XDMA_UNLOCK();
+		return (-1);
+	}
+
+	free(xdma->data, M_DEVBUF);
+	free(xdma, M_XDMA);
+
+	XDMA_UNLOCK();
+
+	return (0);
+}
+
+/*
+ * Initialize the global xDMA lock early at boot (see SYSINIT below).
+ */
+static void
+xdma_init(void)
+{
+
+ sx_init(&xdma_sx, "xDMA");
+}
+
+SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);
Index: sys/dev/xdma/xdma_bank.c
===================================================================
--- /dev/null
+++ sys/dev/xdma/xdma_bank.c
@@ -0,0 +1,99 @@
+/*-
+ * Copyright (c) 2018 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+
+#include <machine/bus.h>
+
+#include <dev/xdma/xdma.h>
+
+/*
+ * Pre-allocate xchan->xr_num request structures in one chunk and put
+ * them all on the channel's free-request "bank" list.
+ */
+void
+xchan_bank_init(xdma_channel_t *xchan)
+{
+ struct xdma_request *xr;
+ xdma_controller_t *xdma;
+ int i;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ xchan->xr_mem = malloc(sizeof(struct xdma_request) * xchan->xr_num,
+ M_XDMA, M_WAITOK | M_ZERO);
+
+ /* NOTE(review): 'i' is int vs. uint32_t xr_num (sign-compare). */
+ for (i = 0; i < xchan->xr_num; i++) {
+ xr = &xchan->xr_mem[i];
+ TAILQ_INSERT_TAIL(&xchan->bank, xr, xr_next);
+ }
+}
+
+/*
+ * Release the backing array allocated by xchan_bank_init().  The bank
+ * list entries point into xr_mem, so the whole bank dies with it.
+ */
+int
+xchan_bank_free(xdma_channel_t *xchan)
+{
+
+ free(xchan->xr_mem, M_XDMA);
+
+ return (0);
+}
+
+/*
+ * Take one free request off the bank.  Returns NULL when the bank is
+ * empty (TAILQ_FOREACH_SAFE leaves 'xr' NULL in that case).
+ */
+struct xdma_request *
+xchan_bank_get(xdma_channel_t *xchan)
+{
+ struct xdma_request *xr;
+ struct xdma_request *xr_tmp;
+
+ QUEUE_BANK_LOCK(xchan);
+ /* FOREACH + break == "pop head, or NULL if empty". */
+ TAILQ_FOREACH_SAFE(xr, &xchan->bank, xr_next, xr_tmp) {
+ TAILQ_REMOVE(&xchan->bank, xr, xr_next);
+ break;
+ }
+ QUEUE_BANK_UNLOCK(xchan);
+
+ return (xr);
+}
+
+/*
+ * Return a request to the free bank once the consumer is done with it.
+ */
+int
+xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr)
+{
+
+ QUEUE_BANK_LOCK(xchan);
+ TAILQ_INSERT_TAIL(&xchan->bank, xr, xr_next);
+ QUEUE_BANK_UNLOCK(xchan);
+
+ return (0);
+}
Index: sys/dev/xdma/xdma_bio.c
===================================================================
--- /dev/null
+++ sys/dev/xdma/xdma_bio.c
@@ -0,0 +1,105 @@
+/*-
+ * Copyright (c) 2017-2018 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/mutex.h>
+#include <sys/malloc.h>
+
+#include <machine/bus.h>
+
+#include <dev/xdma/xdma.h>
+
+/*
+ * Pop one completed bio request from the channel's out-queue.  On
+ * success *bp and *status are filled and the request is recycled to
+ * the bank.  Returns -1 when the out-queue is empty.
+ */
+int
+xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
+ xdma_transfer_status_t *status)
+{
+ struct xdma_request *xr_tmp;
+ struct xdma_request *xr;
+
+ QUEUE_OUT_LOCK(xchan);
+ /* FOREACH + break == "pop head, or NULL if empty". */
+ TAILQ_FOREACH_SAFE(xr, &xchan->queue_out, xr_next, xr_tmp) {
+ TAILQ_REMOVE(&xchan->queue_out, xr, xr_next);
+ break;
+ }
+ QUEUE_OUT_UNLOCK(xchan);
+
+ if (xr == NULL)
+ return (-1);
+
+ *bp = xr->bp;
+
+ status->error = xr->status.error;
+ status->transferred = xr->status.transferred;
+
+ xchan_bank_put(xchan, xr);
+
+ return (0);
+}
+
+/*
+ * Queue a bio for transfer: grab a free request from the bank, fill
+ * it in and append it to the in-queue.  'addr' is the device-side
+ * address (destination for MEM_TO_DEV, source otherwise).  Returns -1
+ * when the bank is exhausted — the caller should retry later.
+ */
+int
+xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp,
+ bus_addr_t addr, uint8_t src_width, uint8_t dst_width,
+ enum xdma_direction dir)
+{
+ struct xdma_request *xr;
+ xdma_controller_t *xdma;
+
+ /* NOTE(review): 'xdma' is set but unused here. */
+ xdma = xchan->xdma;
+
+ xr = xchan_bank_get(xchan);
+ if (xr == NULL)
+ return (-1); /* No space is available yet. */
+
+ xr->direction = dir;
+ xr->bp = *bp;
+ xr->req_type = XR_TYPE_BIO;
+ xr->src_width = src_width;
+ xr->dst_width = dst_width;
+ if (dir == XDMA_MEM_TO_DEV) {
+ xr->dst_addr = addr;
+ xr->src_addr = 0;
+ } else {
+ xr->dst_addr = 0;
+ xr->src_addr = addr;
+ }
+
+ QUEUE_IN_LOCK(xchan);
+ TAILQ_INSERT_TAIL(&xchan->queue_in, xr, xr_next);
+ QUEUE_IN_UNLOCK(xchan);
+
+ return (0);
+}
Index: sys/dev/xdma/xdma_if.m
===================================================================
--- /dev/null
+++ sys/dev/xdma/xdma_if.m
@@ -0,0 +1,115 @@
+#-
+# Copyright (c) 2016-2018 Ruslan Bukin <br@bsdpad.com>
+# All rights reserved.
+#
+# This software was developed by SRI International and the University of
+# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+# ("CTSRD"), as part of the DARPA CRASH research programme.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $FreeBSD$
+#
+
+#include <machine/bus.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+
+#include <dev/xdma/xdma.h>
+
+INTERFACE xdma;
+
+#
+# Request a transfer.
+#
+METHOD int channel_request {
+	device_t dev;
+	struct xdma_channel *xchan;
+	struct xdma_request *req;
+};
+
+#
+# Prepare xDMA channel for a scatter-gather transfer.
+#
+METHOD int channel_prep_sg {
+	device_t dev;
+	struct xdma_channel *xchan;
+};
+
+#
+# Query the DMA engine driver for the number of free entries
+# (descriptors) available.
+#
+METHOD int channel_capacity {
+	device_t dev;
+	struct xdma_channel *xchan;
+	uint32_t *capacity;
+};
+
+#
+# Submit sglist list to DMA engine driver.
+#
+METHOD int channel_submit_sg {
+	device_t dev;
+	struct xdma_channel *xchan;
+	struct xdma_sglist *sg;
+	uint32_t sg_n;
+};
+
+#
+# Notify driver we have machine-dependent data.
+#
+METHOD int ofw_md_data {
+	device_t dev;
+	pcell_t *cells;
+	int ncells;
+	void **data;
+};
+
+#
+# Allocate both virtual and hardware channels.
+#
+METHOD int channel_alloc {
+	device_t dev;
+	struct xdma_channel *xchan;
+};
+
+#
+# Free the real hardware channel.
+#
+METHOD int channel_free {
+	device_t dev;
+	struct xdma_channel *xchan;
+};
+
+#
+# Begin, pause or terminate the channel operation.
+#
+METHOD int channel_control {
+	device_t dev;
+	struct xdma_channel *xchan;
+	int cmd;
+};
Index: sys/dev/xdma/xdma_mbuf.c
===================================================================
--- /dev/null
+++ sys/dev/xdma/xdma_mbuf.c
@@ -0,0 +1,154 @@
+/*-
+ * Copyright (c) 2017-2018 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/mutex.h>
+#include <sys/mbuf.h>
+
+#include <machine/bus.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+
+#include <dev/xdma/xdma.h>
+
+/*
+ * Pop one completed mbuf request from the channel's out-queue.  On
+ * success *mp and *status are filled and the request is recycled to
+ * the bank.  Returns -1 when the out-queue is empty.
+ */
+int
+xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **mp,
+ xdma_transfer_status_t *status)
+{
+ struct xdma_request *xr;
+ struct xdma_request *xr_tmp;
+
+ QUEUE_OUT_LOCK(xchan);
+ /* FOREACH + break == "pop head, or NULL if empty". */
+ TAILQ_FOREACH_SAFE(xr, &xchan->queue_out, xr_next, xr_tmp) {
+ TAILQ_REMOVE(&xchan->queue_out, xr, xr_next);
+ break;
+ }
+ QUEUE_OUT_UNLOCK(xchan);
+
+ if (xr == NULL)
+ return (-1);
+
+ *mp = xr->m;
+ status->error = xr->status.error;
+ status->transferred = xr->status.transferred;
+
+ xchan_bank_put(xchan, xr);
+
+ return (0);
+}
+
+/*
+ * Queue an mbuf (chain) for transfer: grab a free request from the
+ * bank, fill it in and append it to the in-queue.  'addr' is the
+ * device-side address (destination for MEM_TO_DEV, source otherwise).
+ * Returns -1 when the bank is exhausted — the caller should retry.
+ */
+int
+xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **mp,
+ uintptr_t addr, uint8_t src_width, uint8_t dst_width,
+ enum xdma_direction dir)
+{
+ struct xdma_request *xr;
+ xdma_controller_t *xdma;
+
+ /* NOTE(review): 'xdma' is set but unused here. */
+ xdma = xchan->xdma;
+
+ xr = xchan_bank_get(xchan);
+ if (xr == NULL)
+ return (-1); /* No space is available yet. */
+
+ xr->direction = dir;
+ xr->m = *mp;
+ xr->req_type = XR_TYPE_MBUF;
+ if (dir == XDMA_MEM_TO_DEV) {
+ xr->dst_addr = addr;
+ xr->src_addr = 0;
+ } else {
+ xr->src_addr = addr;
+ xr->dst_addr = 0;
+ }
+ xr->src_width = src_width;
+ xr->dst_width = dst_width;
+
+ QUEUE_IN_LOCK(xchan);
+ TAILQ_INSERT_TAIL(&xchan->queue_in, xr, xr_next);
+ QUEUE_IN_UNLOCK(xchan);
+
+ return (0);
+}
+
+/*
+ * Count the mbufs in the chain starting at m0 (0 for a NULL chain).
+ */
+uint32_t
+xdma_mbuf_chain_count(struct mbuf *m0)
+{
+ struct mbuf *m;
+ uint32_t c;
+
+ c = 0;
+
+ for (m = m0; m != NULL; m = m->m_next)
+ c++;
+
+ return (c);
+}
+
+/*
+ * Collapse xr->m into a single mbuf when the channel can't take the
+ * chain as-is: either the engine supports no segmentation at all
+ * (XCHAN_CAP_BUSDMA_NOSEG) or the chain exceeds maxnsegs.  Returns the
+ * resulting chain length; on m_defrag() failure the original (too
+ * long) count is returned and xr->m is left unchanged.
+ */
+uint32_t
+xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr)
+{
+ xdma_controller_t *xdma;
+ struct mbuf *m;
+ uint32_t c;
+
+ xdma = xchan->xdma;
+
+ c = xdma_mbuf_chain_count(xr->m);
+ if (c == 1)
+ return (c); /* Nothing to do. */
+
+ if (xchan->caps & XCHAN_CAP_BUSDMA) {
+ if ((xchan->caps & XCHAN_CAP_BUSDMA_NOSEG) || \
+ (c > xchan->maxnsegs)) {
+ /* M_NOWAIT: called from the transfer path. */
+ if ((m = m_defrag(xr->m, M_NOWAIT)) == NULL) {
+ device_printf(xdma->dma_dev,
+ "%s: Can't defrag mbuf\n",
+ __func__);
+ return (c);
+ }
+ xr->m = m;
+ c = 1;
+ }
+ }
+
+ return (c);
+}
Index: sys/dev/xdma/xdma_queue.c
===================================================================
--- /dev/null
+++ sys/dev/xdma/xdma_queue.c
@@ -0,0 +1,124 @@
+/*-
+ * Copyright (c) 2018 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/mutex.h>
+#include <sys/malloc.h>
+#include <sys/bus_dma.h>
+
+#include <dev/xdma/xdma.h>
+
+/*
+ * Pop the oldest completed request off queue_out, handing back the
+ * caller's cookie and the transfer status.  Returns -1 if the queue
+ * is empty.
+ */
+int
+xdma_dequeue(xdma_channel_t *xchan, void **user,
+    xdma_transfer_status_t *status)
+{
+	struct xdma_request *xr;
+
+	QUEUE_OUT_LOCK(xchan);
+	xr = TAILQ_FIRST(&xchan->queue_out);
+	if (xr != NULL)
+		TAILQ_REMOVE(&xchan->queue_out, xr, xr_next);
+	QUEUE_OUT_UNLOCK(xchan);
+
+	if (xr == NULL)
+		return (-1);
+
+	*user = xr->user;
+	status->error = xr->status.error;
+	status->transferred = xr->status.transferred;
+
+	/* Recycle the request slot. */
+	xchan_bank_put(xchan, xr);
+
+	return (0);
+}
+
+/*
+ * Enqueue a transfer of a single contiguous kernel-virtual block.
+ * Returns 0 on success, -1 if no free request slot is available.
+ */
+int
+xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
+    uint8_t src_width, uint8_t dst_width, bus_size_t len,
+    enum xdma_direction dir, void *user)
+{
+	xdma_controller_t *xdma;
+	struct xdma_request *xr;
+
+	xdma = xchan->xdma;
+	KASSERT(xdma != NULL, ("xdma is NULL"));
+
+	/* Grab a free request slot from the bank. */
+	xr = xchan_bank_get(xchan);
+	if (xr == NULL)
+		return (-1); /* No space is available. */
+
+	xr->req_type = XR_TYPE_VIRT;
+	xr->direction = dir;
+	xr->user = user;
+	xr->m = NULL;
+	xr->bp = NULL;
+	xr->block_num = 1;
+	xr->block_len = len;
+	xr->src_addr = src;
+	xr->dst_addr = dst;
+	xr->src_width = src_width;
+	xr->dst_width = dst_width;
+
+	/* Hand the request to the channel's input queue. */
+	QUEUE_IN_LOCK(xchan);
+	TAILQ_INSERT_TAIL(&xchan->queue_in, xr, xr_next);
+	QUEUE_IN_UNLOCK(xchan);
+
+	return (0);
+}
+
+/*
+ * Push everything queued on the channel to the DMA engine.
+ * Currently only scatter-gather channels do any work here.
+ */
+int
+xdma_queue_submit(xdma_channel_t *xchan)
+{
+	xdma_controller_t *xdma;
+	int ret;
+
+	xdma = xchan->xdma;
+	KASSERT(xdma != NULL, ("xdma is NULL"));
+
+	ret = 0;
+	XCHAN_LOCK(xchan);
+	if (xchan->flags & XCHAN_TYPE_SG)
+		ret = xdma_queue_submit_sg(xchan);
+	XCHAN_UNLOCK(xchan);
+
+	return (ret);
+}
Index: sys/dev/xdma/xdma_sg.c
===================================================================
--- /dev/null
+++ sys/dev/xdma/xdma_sg.c
@@ -0,0 +1,595 @@
+/*-
+ * Copyright (c) 2018 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/mbuf.h>
+#include <sys/bus_dma.h>
+
+#include <machine/bus.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+
+#include <dev/xdma/xdma.h>
+
+#include <xdma_if.h>
+
+/*
+ * Scratch state passed to xdma_dmamap_cb() by the busdma load helpers:
+ * the callback copies the resolved DMA segments into 'seg' and records
+ * the segment count and any load error here.
+ */
+struct seg_load_request {
+ struct bus_dma_segment *seg;
+ uint32_t nsegs;
+ uint32_t error;
+};
+
+/*
+ * Non-busdma buffer setup: give every request slot a contiguous
+ * bounce buffer of maxsegsize bytes.  Returns 0 on success; on
+ * failure all buffers allocated so far are released.
+ */
+static int
+_xchan_bufs_alloc(xdma_channel_t *xchan)
+{
+	xdma_controller_t *xdma;
+	struct xdma_request *xr;
+	int i;
+	int j;
+
+	xdma = xchan->xdma;
+
+	for (i = 0; i < xchan->xr_num; i++) {
+		xr = &xchan->xr_mem[i];
+		xr->buf.cbuf = contigmalloc(xchan->maxsegsize,
+		    M_XDMA, 0, 0, ~0, PAGE_SIZE, 0);
+		if (xr->buf.cbuf == NULL) {
+			device_printf(xdma->dev,
+			    "%s: Can't allocate contiguous kernel"
+			    " physical memory\n", __func__);
+			/* Unwind the buffers allocated so far. */
+			for (j = 0; j < i; j++) {
+				xr = &xchan->xr_mem[j];
+				contigfree(xr->buf.cbuf,
+				    xchan->maxsegsize, M_XDMA);
+				xr->buf.cbuf = NULL;
+			}
+			return (-1);
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Busdma buffer setup: create the channel's DMA tag and one DMA map
+ * per request slot.  Returns 0 on success; on failure everything
+ * created so far (maps, then the tag) is torn down.
+ */
+static int
+_xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
+{
+	xdma_controller_t *xdma;
+	struct xdma_request *xr;
+	int err;
+	int i;
+	int j;
+
+	xdma = xchan->xdma;
+
+	/* Create bus_dma tag */
+	err = bus_dma_tag_create(
+	    bus_get_dma_tag(xdma->dev),	/* Parent tag. */
+	    xchan->alignment,		/* alignment */
+	    xchan->boundary,		/* boundary */
+	    xchan->lowaddr,		/* lowaddr */
+	    xchan->highaddr,		/* highaddr */
+	    NULL, NULL,			/* filter, filterarg */
+	    xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
+	    xchan->maxnsegs,		/* nsegments */
+	    xchan->maxsegsize,		/* maxsegsize */
+	    0,				/* flags */
+	    NULL, NULL,			/* lockfunc, lockarg */
+	    &xchan->dma_tag_bufs);
+	if (err != 0) {
+		device_printf(xdma->dev,
+		    "%s: Can't create bus_dma tag.\n", __func__);
+		return (-1);
+	}
+
+	for (i = 0; i < xchan->xr_num; i++) {
+		xr = &xchan->xr_mem[i];
+		err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
+		    &xr->buf.map);
+		if (err != 0) {
+			device_printf(xdma->dev,
+			    "%s: Can't create buf DMA map.\n", __func__);
+
+			/*
+			 * Cleanup: destroy the maps created so far
+			 * first -- bus_dma_tag_destroy() fails while
+			 * maps still exist, which would leak both the
+			 * maps and the tag.
+			 */
+			for (j = 0; j < i; j++) {
+				xr = &xchan->xr_mem[j];
+				bus_dmamap_destroy(xchan->dma_tag_bufs,
+				    xr->buf.map);
+			}
+			bus_dma_tag_destroy(xchan->dma_tag_bufs);
+
+			return (-1);
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Allocate per-request data buffers for the channel, choosing the
+ * busdma or contiguous-memory strategy based on channel caps.
+ */
+static int
+xchan_bufs_alloc(xdma_channel_t *xchan)
+{
+	xdma_controller_t *xdma;
+	int ret;
+
+	xdma = xchan->xdma;
+	if (xdma == NULL) {
+		/*
+		 * No controller: can't use device_printf() here --
+		 * dereferencing xdma->dev would be a NULL-pointer panic.
+		 */
+		printf("%s: Channel was not allocated properly.\n", __func__);
+		return (-1);
+	}
+
+	if (xchan->caps & XCHAN_CAP_BUSDMA)
+		ret = _xchan_bufs_alloc_busdma(xchan);
+	else
+		ret = _xchan_bufs_alloc(xchan);
+	if (ret != 0) {
+		device_printf(xdma->dev,
+		    "%s: Can't allocate bufs.\n", __func__);
+		return (-1);
+	}
+
+	xchan->flags |= XCHAN_BUFS_ALLOCATED;
+
+	return (0);
+}
+
+/*
+ * Release the per-request data buffers (busdma maps + tag, or the
+ * contiguous bounce buffers).  Returns -1 if nothing was allocated.
+ */
+static int
+xchan_bufs_free(xdma_channel_t *xchan)
+{
+	struct xdma_request *xr;
+	int i;
+
+	if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
+		return (-1);
+
+	if (xchan->caps & XCHAN_CAP_BUSDMA) {
+		/* Destroy every per-request map before the shared tag. */
+		for (i = 0; i < xchan->xr_num; i++) {
+			xr = &xchan->xr_mem[i];
+			bus_dmamap_destroy(xchan->dma_tag_bufs, xr->buf.map);
+		}
+		bus_dma_tag_destroy(xchan->dma_tag_bufs);
+	} else {
+		for (i = 0; i < xchan->xr_num; i++) {
+			xr = &xchan->xr_mem[i];
+			contigfree(xr->buf.cbuf, xchan->maxsegsize, M_XDMA);
+		}
+	}
+
+	xchan->flags &= ~XCHAN_BUFS_ALLOCATED;
+
+	return (0);
+}
+
+/*
+ * Release all SG-mode resources of a channel: the per-request data
+ * buffers, the sglist array and the request bank.
+ */
+void
+xdma_channel_free_sg(xdma_channel_t *xchan)
+{
+
+ xchan_bufs_free(xchan);
+ xchan_sglist_free(xchan);
+ xchan_bank_free(xchan);
+}
+
+/*
+ * Prepare xchan for a scatter-gather transfer.
+ * xr_num - xdma requests queue size,
+ * maxsegsize - maximum allowed scatter-gather list element size in bytes
+ */
+int
+xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
+    bus_size_t maxsegsize, bus_size_t maxnsegs,
+    bus_size_t alignment, bus_addr_t boundary,
+    bus_addr_t lowaddr, bus_addr_t highaddr)
+{
+	xdma_controller_t *xdma;
+	int ret;
+
+	xdma = xchan->xdma;
+	KASSERT(xdma != NULL, ("xdma is NULL"));
+
+	if (xchan->flags & XCHAN_CONFIGURED) {
+		device_printf(xdma->dev,
+		    "%s: Channel is already configured.\n", __func__);
+		return (-1);
+	}
+
+	xchan->xr_num = xr_num;
+	xchan->maxsegsize = maxsegsize;
+	xchan->maxnsegs = maxnsegs;
+	xchan->alignment = alignment;
+	xchan->boundary = boundary;
+	xchan->lowaddr = lowaddr;
+	xchan->highaddr = highaddr;
+
+	if (xchan->maxnsegs > XDMA_MAX_SEG) {
+		device_printf(xdma->dev, "%s: maxnsegs is too big\n",
+		    __func__);
+		return (-1);
+	}
+
+	xchan_bank_init(xchan);
+
+	/* Allocate sglist. */
+	ret = xchan_sglist_alloc(xchan);
+	if (ret != 0) {
+		device_printf(xdma->dev,
+		    "%s: Can't allocate sglist.\n", __func__);
+		/* Cleanup: don't leak the request bank. */
+		xchan_bank_free(xchan);
+		return (-1);
+	}
+
+	/* Allocate bufs. */
+	ret = xchan_bufs_alloc(xchan);
+	if (ret != 0) {
+		device_printf(xdma->dev,
+		    "%s: Can't allocate bufs.\n", __func__);
+
+		/* Cleanup */
+		xchan_sglist_free(xchan);
+		xchan_bank_free(xchan);
+
+		return (-1);
+	}
+
+	xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);
+
+	XCHAN_LOCK(xchan);
+	ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
+	XCHAN_UNLOCK(xchan);
+	if (ret != 0) {
+		device_printf(xdma->dev,
+		    "%s: Can't prepare SG transfer.\n", __func__);
+		/*
+		 * Cleanup: undo the configuration so a later
+		 * xdma_prep_sg() attempt starts from scratch instead of
+		 * leaking bufs/sglist/bank on a half-configured channel.
+		 */
+		xchan->flags &= ~(XCHAN_CONFIGURED | XCHAN_TYPE_SG);
+		xchan_bufs_free(xchan);
+		xchan_sglist_free(xchan);
+		xchan_bank_free(xchan);
+		return (-1);
+	}
+
+	return (0);
+}
+
+/*
+ * Per-segment completion hook called by the DMA engine driver.
+ * Decrements the outstanding-segment count of the oldest in-flight
+ * request; when all of its segments are done, the buffer is synced and
+ * unloaded and the request moves from 'processing' to 'queue_out'
+ * carrying the final status.
+ */
+void
+xchan_seg_done(xdma_channel_t *xchan,
+ struct xdma_transfer_status *st)
+{
+ struct xdma_request *xr;
+ xdma_controller_t *xdma;
+ struct xchan_buf *b;
+
+ xdma = xchan->xdma;
+
+ /*
+ * NOTE(review): the head is read without QUEUE_PROC_LOCK and
+ * nsegs_left is re-read non-atomically after the atomic decrement --
+ * presumably only one completion context runs per channel; confirm
+ * against the engine drivers.
+ */
+ xr = TAILQ_FIRST(&xchan->processing);
+ if (xr == NULL)
+ panic("request not found\n");
+
+ b = &xr->buf;
+
+ atomic_subtract_int(&b->nsegs_left, 1);
+
+ if (b->nsegs_left == 0) {
+ if (xchan->caps & XCHAN_CAP_BUSDMA) {
+ /* Complete the busdma transaction before the data is used. */
+ if (xr->direction == XDMA_MEM_TO_DEV)
+ bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
+ BUS_DMASYNC_POSTWRITE);
+ else
+ bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
+ }
+ xr->status.error = st->error;
+ xr->status.transferred = st->transferred;
+
+ /* Retire the request: processing -> queue_out. */
+ QUEUE_PROC_LOCK(xchan);
+ TAILQ_REMOVE(&xchan->processing, xr, xr_next);
+ QUEUE_PROC_UNLOCK(xchan);
+
+ QUEUE_OUT_LOCK(xchan);
+ TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
+ QUEUE_OUT_UNLOCK(xchan);
+ }
+}
+
+/*
+ * bus_dmamap_load() callback: copy the resolved segments and their
+ * count into the caller's seg_load_request, or record the load error.
+ */
+static void
+xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+	struct seg_load_request *slr;
+	int i;
+
+	slr = arg;
+
+	if (error != 0) {
+		slr->error = error;
+		return;
+	}
+
+	slr->nsegs = nsegs;
+	for (i = 0; i < nsegs; i++) {
+		slr->seg[i].ds_addr = segs[i].ds_addr;
+		slr->seg[i].ds_len = segs[i].ds_len;
+	}
+}
+
+/*
+ * Busdma path: load the data of one request for DMA, filling 'seg'
+ * with the resulting segments.  Returns the segment count, or 0 on
+ * any failure (the caller retries later).
+ */
+static int
+_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
+ struct bus_dma_segment *seg)
+{
+ xdma_controller_t *xdma;
+ struct seg_load_request slr;
+ uint32_t nsegs;
+ void *addr;
+ int error;
+
+ xdma = xchan->xdma;
+
+ error = 0;
+ nsegs = 0;
+
+ switch (xr->req_type) {
+ case XR_TYPE_MBUF:
+ /* Mbuf chains load straight into 'seg'; no callback needed. */
+ error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
+ xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
+ break;
+ case XR_TYPE_BIO:
+ slr.nsegs = 0;
+ slr.error = 0;
+ slr.seg = seg;
+ error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
+ xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
+ if (slr.error != 0) {
+ device_printf(xdma->dma_dev,
+ "%s: bus_dmamap_load failed, err %d\n",
+ __func__, slr.error);
+ return (0);
+ }
+ nsegs = slr.nsegs;
+ break;
+ case XR_TYPE_VIRT:
+ /* The memory side of the transfer is the buffer to map. */
+ switch (xr->direction) {
+ case XDMA_MEM_TO_DEV:
+ addr = (void *)xr->src_addr;
+ break;
+ case XDMA_DEV_TO_MEM:
+ addr = (void *)xr->dst_addr;
+ break;
+ default:
+ device_printf(xdma->dma_dev,
+ "%s: Direction is not supported\n", __func__);
+ return (0);
+ }
+ slr.nsegs = 0;
+ slr.error = 0;
+ slr.seg = seg;
+ error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
+ addr, (xr->block_len * xr->block_num),
+ xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
+ if (slr.error != 0) {
+ device_printf(xdma->dma_dev,
+ "%s: bus_dmamap_load failed, err %d\n",
+ __func__, slr.error);
+ return (0);
+ }
+ nsegs = slr.nsegs;
+ break;
+ default:
+ break;
+ }
+
+ if (error != 0) {
+ if (error == ENOMEM) {
+ /*
+ * Out of memory. Try again later.
+ * TODO: count errors.
+ */
+ } else
+ device_printf(xdma->dma_dev,
+ "%s: bus_dmamap_load failed with err %d\n",
+ __func__, error);
+ return (0);
+ }
+
+ /* Sync the buffer toward the device before the transfer starts. */
+ if (xr->direction == XDMA_MEM_TO_DEV)
+ bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
+ BUS_DMASYNC_PREWRITE);
+ else
+ bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
+ BUS_DMASYNC_PREREAD);
+
+ return (nsegs);
+}
+
+/*
+ * Non-busdma path: describe one request's data as a single segment.
+ * Only XR_TYPE_MBUF is implemented; BIO/VIRT panic.
+ */
+static int
+_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
+ struct bus_dma_segment *seg)
+{
+ xdma_controller_t *xdma;
+ struct mbuf *m;
+ uint32_t nsegs;
+
+ xdma = xchan->xdma;
+
+ m = xr->m;
+
+ nsegs = 1;
+
+ switch (xr->req_type) {
+ case XR_TYPE_MBUF:
+ if (xr->direction == XDMA_MEM_TO_DEV) {
+ /*
+ * Bounce the mbuf data through the contiguous buffer
+ * allocated in _xchan_bufs_alloc().
+ * NOTE(review): ds_addr is set to a kernel VIRTUAL
+ * address here (cbuf / mtod) -- presumably the
+ * non-busdma engines translate it themselves; confirm.
+ */
+ m_copydata(m, 0, m->m_pkthdr.len, xr->buf.cbuf);
+ seg[0].ds_addr = (bus_addr_t)xr->buf.cbuf;
+ seg[0].ds_len = m->m_pkthdr.len;
+ } else {
+ /* Receive: DMA directly into the mbuf data area. */
+ seg[0].ds_addr = mtod(m, bus_addr_t);
+ seg[0].ds_len = m->m_pkthdr.len;
+ }
+ break;
+ case XR_TYPE_BIO:
+ case XR_TYPE_VIRT:
+ default:
+ panic("implement me\n");
+ }
+
+ return (nsegs);
+}
+
+/*
+ * Load a request's data for DMA via the busdma or plain path and
+ * record the segment counters on the request's buffer.  Returns the
+ * number of segments, or 0 if the load should be retried later.
+ */
+static int
+xdma_load_data(xdma_channel_t *xchan,
+    struct xdma_request *xr, struct bus_dma_segment *seg)
+{
+	int nsegs;
+
+	if (xchan->caps & XCHAN_CAP_BUSDMA)
+		nsegs = _xdma_load_data_busdma(xchan, xr, seg);
+	else
+		nsegs = _xdma_load_data(xchan, xr, seg);
+
+	if (nsegs == 0)
+		return (0); /* Try again later. */
+
+	xr->buf.nsegs = nsegs;
+	xr->buf.nsegs_left = nsegs;
+
+	return (nsegs);
+}
+
+/*
+ * Drain queue_in into the processing queue, loading each request's
+ * data and appending the resulting segments to 'sg'.  Stops when the
+ * controller or the sglist runs out of room.  Returns the number of
+ * sglist entries produced, or -1 if the controller capacity could not
+ * be queried.
+ */
+static int
+xdma_process(xdma_channel_t *xchan,
+ struct xdma_sglist *sg)
+{
+ struct bus_dma_segment seg[XDMA_MAX_SEG];
+ struct xdma_request *xr;
+ struct xdma_request *xr_tmp;
+ xdma_controller_t *xdma;
+ uint32_t capacity;
+ uint32_t n;
+ uint32_t c;
+ int nsegs;
+ int ret;
+
+ XCHAN_ASSERT_LOCKED(xchan);
+
+ xdma = xchan->xdma;
+
+ n = 0;
+
+ ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't get DMA controller capacity.\n", __func__);
+ return (-1);
+ }
+
+ TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
+ /* Estimate how many segments this request will need. */
+ switch (xr->req_type) {
+ case XR_TYPE_MBUF:
+ c = xdma_mbuf_defrag(xchan, xr);
+ break;
+ case XR_TYPE_BIO:
+ case XR_TYPE_VIRT:
+ default:
+ c = 1;
+ }
+
+ /*
+ * NOTE(review): '<=' keeps one descriptor in reserve --
+ * presumably intentional (e.g. a ring-full marker); confirm
+ * against the engine drivers.
+ */
+ if (capacity <= (c + n)) {
+ /*
+ * No space yet available for the entire
+ * request in the DMA engine.
+ */
+ break;
+ }
+
+ if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
+ /* Sglist is full. */
+ break;
+ }
+
+ nsegs = xdma_load_data(xchan, xr, seg);
+ if (nsegs == 0)
+ break;
+
+ xdma_sglist_add(&sg[n], seg, nsegs, xr);
+ n += nsegs;
+
+ /* Accepted: move the request to the processing queue. */
+ QUEUE_IN_LOCK(xchan);
+ TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
+ QUEUE_IN_UNLOCK(xchan);
+
+ QUEUE_PROC_LOCK(xchan);
+ TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
+ QUEUE_PROC_UNLOCK(xchan);
+ }
+
+ return (n);
+}
+
+/*
+ * Build an sglist from the channel's input queue and hand it to the
+ * DMA engine driver.  Returns 0 on success (including "nothing to
+ * do"), -1 on error.
+ */
+int
+xdma_queue_submit_sg(xdma_channel_t *xchan)
+{
+	struct xdma_sglist *sg;
+	xdma_controller_t *xdma;
+	int sg_n;
+	int ret;
+
+	xdma = xchan->xdma;
+	KASSERT(xdma != NULL, ("xdma is NULL"));
+
+	XCHAN_ASSERT_LOCKED(xchan);
+
+	sg = xchan->sg;
+
+	if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
+		device_printf(xdma->dev,
+		    "%s: Can't submit a transfer: no bufs\n",
+		    __func__);
+		return (-1);
+	}
+
+	/*
+	 * xdma_process() returns -1 on error; keeping sg_n signed is
+	 * essential -- as uint32_t the error became 0xFFFFFFFF and was
+	 * submitted to the engine as a segment count.
+	 */
+	sg_n = xdma_process(xchan, sg);
+	if (sg_n < 0)
+		return (-1);
+	if (sg_n == 0)
+		return (0); /* Nothing to submit */
+
+	/* Now submit sglist to DMA engine driver. */
+	ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
+	if (ret != 0) {
+		device_printf(xdma->dev,
+		    "%s: Can't submit an sglist.\n", __func__);
+		return (-1);
+	}
+
+	return (0);
+}
Index: sys/dev/xdma/xdma_sglist.c
===================================================================
--- /dev/null
+++ sys/dev/xdma/xdma_sglist.c
@@ -0,0 +1,101 @@
+/*-
+ * Copyright (c) 2017-2018 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/bus_dma.h>
+
+#include <dev/xdma/xdma.h>
+
+/*
+ * Allocate the channel's fixed-size scatter-gather entry array.
+ * Returns -1 if it already exists.
+ */
+int
+xchan_sglist_alloc(xdma_channel_t *xchan)
+{
+	size_t sz;
+
+	if (xchan->flags & XCHAN_SGLIST_ALLOCATED)
+		return (-1);
+
+	sz = sizeof(struct xdma_sglist) * XDMA_SGLIST_MAXLEN;
+	xchan->sg = malloc(sz, M_XDMA, M_WAITOK | M_ZERO);
+	xchan->flags |= XCHAN_SGLIST_ALLOCATED;
+
+	return (0);
+}
+
+/*
+ * Free the channel's scatter-gather array, clearing the pointer so a
+ * stale reference can't be used after the free.
+ */
+void
+xchan_sglist_free(xdma_channel_t *xchan)
+{
+
+	if (xchan->flags & XCHAN_SGLIST_ALLOCATED) {
+		free(xchan->sg, M_XDMA);
+		xchan->sg = NULL;
+	}
+
+	xchan->flags &= ~XCHAN_SGLIST_ALLOCATED;
+}
+
+/*
+ * Append 'nsegs' busdma segments to the sglist, combining each with
+ * the request's fixed-side address and widths.  The first and last
+ * entries are flagged for the engine driver.
+ */
+int
+xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
+    uint32_t nsegs, struct xdma_request *xr)
+{
+	uint32_t i;
+
+	if (nsegs == 0)
+		return (-1);
+
+	for (i = 0; i < nsegs; i++) {
+		sg[i].src_width = xr->src_width;
+		sg[i].dst_width = xr->dst_width;
+		sg[i].direction = xr->direction;
+		sg[i].len = seg[i].ds_len;
+		sg[i].first = 0;
+		sg[i].last = 0;
+
+		/* The memory side varies per segment; the device side is fixed. */
+		if (xr->direction == XDMA_MEM_TO_DEV) {
+			sg[i].src_addr = seg[i].ds_addr;
+			sg[i].dst_addr = xr->dst_addr;
+		} else {
+			sg[i].src_addr = xr->src_addr;
+			sg[i].dst_addr = seg[i].ds_addr;
+		}
+	}
+
+	sg[0].first = 1;
+	sg[nsegs - 1].last = 1;
+
+	return (0);
+}

File Metadata

Mime Type
text/plain
Expires
Wed, Dec 25, 2:35 PM (4 h, 17 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
15596864
Default Alt Text
D14971.id41125.diff (51 KB)

Event Timeline