Index: sys/dev/xdma/xdma.h
===================================================================
--- sys/dev/xdma/xdma.h
+++ sys/dev/xdma/xdma.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2016 Ruslan Bukin
+ * Copyright (c) 2016-2017 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
@@ -30,8 +30,10 @@
* $FreeBSD$
*/
-#ifndef _DEV_EXTRES_XDMA_H_
-#define _DEV_EXTRES_XDMA_H_
+#ifndef _DEV_XDMA_H_
+#define _DEV_XDMA_H_
+
+#include
enum xdma_direction {
XDMA_MEM_TO_MEM,
@@ -46,6 +48,12 @@
XDMA_CYCLIC,
};
+enum xdma_request_type {
+ XR_TYPE_ADDR,
+ XR_TYPE_MBUF,
+ XR_TYPE_BIO,
+};
+
enum xdma_command {
XDMA_CMD_BEGIN,
XDMA_CMD_PAUSE,
@@ -53,10 +61,19 @@
XDMA_CMD_TERMINATE_ALL,
};
+struct xdma_transfer_status {
+ uint32_t transferred;
+ int error;
+};
+
+typedef struct xdma_transfer_status xdma_transfer_status_t;
+
struct xdma_controller {
device_t dev; /* DMA consumer device_t. */
device_t dma_dev; /* A real DMA device_t. */
void *data; /* OFW MD part. */
+ struct proc *xdma_proc;
+ struct mtx proc_mtx;
/* List of virtual channels allocated. */
TAILQ_HEAD(xdma_channel_list, xdma_channel) channels;
@@ -64,6 +81,27 @@
typedef struct xdma_controller xdma_controller_t;
+/* SG type of transfer. */
+struct xdma_request {
+ struct mbuf *m;
+ struct bio *bp;
+ enum xdma_request_type type;
+ enum xdma_direction direction;
+ bus_addr_t src_addr; /* Physical address. */
+ bus_addr_t dst_addr; /* Physical address. */
+ uint8_t src_width;
+ uint8_t dst_width;
+ bus_size_t len;
+ xdma_transfer_status_t status;
+ bool done;
+ void *user;
+};
+
+/*
+ * Cyclic/memcpy type of transfer.
+ * Legacy configuration struct.
+ * TODO: replace with xdma_request.
+ */
struct xdma_channel_config {
enum xdma_direction direction;
uintptr_t src_addr; /* Physical address. */
@@ -76,55 +114,107 @@
typedef struct xdma_channel_config xdma_config_t;
-struct xdma_descriptor {
- bus_addr_t ds_addr;
- bus_size_t ds_len;
+struct xdma_sglist {
+ bus_addr_t src_addr;
+ bus_addr_t dst_addr;
+ size_t len;
+ uint8_t src_width;
+ uint8_t dst_width;
+ enum xdma_direction direction;
+ bool first;
+ bool last;
+};
+
+struct xchan_buf {
+ bus_dmamap_t map;
+ struct xdma_request *xr;
+ uint32_t nsegs;
+ uint32_t nsegs_left;
+ void *cbuf;
};
-typedef struct xdma_descriptor xdma_descriptor_t;
+typedef struct xchan_buf xchan_buf_t;
struct xdma_channel {
xdma_controller_t *xdma;
xdma_config_t conf;
- uint8_t flags;
-#define XCHAN_DESC_ALLOCATED (1 << 0)
-#define XCHAN_CONFIGURED (1 << 1)
-#define XCHAN_TYPE_CYCLIC (1 << 2)
-#define XCHAN_TYPE_MEMCPY (1 << 3)
+ uint32_t flags;
+#define XCHAN_BUFS_ALLOCATED (1 << 0)
+#define XCHAN_SGLIST_ALLOCATED (1 << 1)
+#define XCHAN_CONFIGURED (1 << 2)
+#define XCHAN_TYPE_CYCLIC (1 << 3)
+#define XCHAN_TYPE_MEMCPY (1 << 4)
+#define XCHAN_TYPE_FIFO (1 << 5)
+#define XCHAN_TYPE_SG (1 << 6)
+
+ uint32_t caps;
+#define XCHAN_CAP_BUSDMA (1 << 0)
+#define XCHAN_CAP_BUSDMA_NOSEG (1 << 1)
/* A real hardware driver channel. */
void *chan;
/* Interrupt handlers. */
TAILQ_HEAD(, xdma_intr_handler) ie_handlers;
-
- /* Descriptors. */
- bus_dma_tag_t dma_tag;
- bus_dmamap_t dma_map;
- void *descs;
- xdma_descriptor_t *descs_phys;
- uint8_t map_err;
+ TAILQ_ENTRY(xdma_channel) xchan_next;
struct mtx mtx_lock;
- TAILQ_ENTRY(xdma_channel) xchan_next;
+ /* Request queue. */
+ struct xdma_request *xr;
+ uint32_t xr_num;
+ uint32_t xr_count;
+ uint32_t xr_head;
+ uint32_t xr_processed;
+ uint32_t xr_tail;
+
+ /* Bus dma bufs. */
+ xchan_buf_t *bufs;
+ uint32_t bufs_num;
+ bus_dma_tag_t dma_tag_bufs;
+ uint32_t buf_head;
+ uint32_t buf_tail;
+
+ /* Bus dma tag options. */
+ uint32_t maxsegsize;
+ uint32_t maxnsegs;
+ uint32_t alignment;
+
+ struct xdma_sglist *sg;
};
typedef struct xdma_channel xdma_channel_t;
-/* xDMA controller alloc/free */
+/* xDMA controller ops */
xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
int xdma_put(xdma_controller_t *xdma);
-xdma_channel_t * xdma_channel_alloc(xdma_controller_t *);
+/* xDMA channel ops */
+xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
int xdma_channel_free(xdma_channel_t *);
int xdma_prep_cyclic(xdma_channel_t *, enum xdma_direction,
uintptr_t, uintptr_t, int, int, int, int);
int xdma_prep_memcpy(xdma_channel_t *, uintptr_t, uintptr_t, size_t len);
-int xdma_desc_alloc(xdma_channel_t *, uint32_t, uint32_t);
-int xdma_desc_free(xdma_channel_t *xchan);
+int xdma_prep_sg(xdma_channel_t *xchan, uint32_t, uint32_t, uint32_t);
+
+int xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);
+
+/* xchan queues operations */
+int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m, xdma_transfer_status_t *);
+int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr, enum xdma_direction dir);
+int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp, xdma_transfer_status_t *status);
+int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
+ enum xdma_direction dir, uint8_t, uint8_t);
+int xdma_dequeue(xdma_channel_t *xchan, void **user, xdma_transfer_status_t *status);
+int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
+ bus_size_t, enum xdma_direction dir, void *);
+
+int xdma_queue_submit(xdma_channel_t *xchan);
+
+uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
+uint32_t xdma_mbuf_chain_count(struct mbuf *m0);
/* Channel Control */
int xdma_begin(xdma_channel_t *xchan);
@@ -132,17 +222,37 @@
int xdma_terminate(xdma_channel_t *xchan);
/* Interrupt callback */
-int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *), void *arg, void **);
+int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *, xdma_transfer_status_t *), void *arg, void **);
int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
int xdma_teardown_all_intr(xdma_channel_t *xchan);
-int xdma_callback(struct xdma_channel *xchan);
-void xdma_assert_locked(void);
+int xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);
+
+int xchan_sglist_init(xdma_channel_t *xchan);
+int xchan_sglist_free(xdma_channel_t *xchan);
+int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
+ uint32_t nsegs, struct xdma_request *xr);
struct xdma_intr_handler {
- int (*cb)(void *);
+ int (*cb)(void *cb_user, xdma_transfer_status_t *status);
void *cb_user;
struct mtx ih_lock;
TAILQ_ENTRY(xdma_intr_handler) ih_next;
};
-#endif /* !_DEV_EXTRES_XDMA_H_ */
+static __inline uint32_t
+xchan_next_req(xdma_channel_t *xchan, uint32_t curidx)
+{
+
+ return ((curidx + 1) % xchan->xr_num);
+}
+
+static __inline uint32_t
+xchan_next_buf(xdma_channel_t *xchan, uint32_t curidx)
+{
+
+ return ((curidx + 1) % xchan->bufs_num);
+}
+
+static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
+
+#endif /* !_DEV_XDMA_H_ */
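To illustrate how a consumer driver is expected to use the scatter-gather API declared above, here is a minimal attach-time sketch. It is not part of this patch: my_softc, my_dma_intr, the "tx" OFW property name and the queue-depth/segment-size/alignment values are illustrative assumptions only.

static int
my_dma_intr(void *arg, xdma_transfer_status_t *status)
{
	struct my_softc *sc;

	sc = arg;

	/* Completed requests are reaped later via xdma_dequeue_mbuf(). */
	wakeup(sc);

	return (0);
}

static int
my_dma_attach(struct my_softc *sc)
{

	/* Look up the DMA controller via a hypothetical "tx" property. */
	sc->xdma = xdma_ofw_get(sc->dev, "tx");
	if (sc->xdma == NULL)
		return (ENXIO);

	/* Ask for a virtual channel backed by busdma buffers. */
	sc->xchan = xdma_channel_alloc(sc->xdma, XCHAN_CAP_BUSDMA);
	if (sc->xchan == NULL)
		return (ENXIO);

	/* 32-entry request queue, 2 KB max segment, 16-byte alignment. */
	if (xdma_prep_sg(sc->xchan, 32, 2048, 16) != 0)
		return (ENXIO);

	return (xdma_setup_intr(sc->xchan, my_dma_intr, sc, &sc->ih));
}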
Index: sys/dev/xdma/xdma.c
===================================================================
--- sys/dev/xdma/xdma.c
+++ sys/dev/xdma/xdma.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2016 Ruslan Bukin
+ * Copyright (c) 2016-2017 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
@@ -36,10 +36,12 @@
#include
#include
#include
+#include
#include
#include
#include
#include
+#include
#include
#include
#include
@@ -59,13 +61,12 @@
#include
-MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
-
/*
 * Multiple xDMA controllers may work with a single DMA device,
 * so we have a global lock for physical channel management.
*/
static struct mtx xdma_mtx;
+
#define XDMA_LOCK() mtx_lock(&xdma_mtx)
#define XDMA_UNLOCK() mtx_unlock(&xdma_mtx)
#define XDMA_ASSERT_LOCKED() mtx_assert(&xdma_mtx, MA_OWNED)
@@ -77,11 +78,40 @@
#define XCHAN_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_lock)
#define XCHAN_ASSERT_LOCKED(xchan) mtx_assert(&(xchan)->mtx_lock, MA_OWNED)
+static int xchan_bufs_alloc(xdma_channel_t *xchan);
+static int xchan_bufs_free(xdma_channel_t *xchan);
+
+static void
+xdma_task(void *arg)
+{
+ xdma_controller_t *xdma;
+ xdma_channel_t *xchan_tmp;
+ xdma_channel_t *xchan;
+
+ xdma = arg;
+
+ for (;;) {
+ mtx_lock(&xdma->proc_mtx);
+ msleep(xdma, &xdma->proc_mtx, PRIBIO, "jobqueue", hz);
+ mtx_unlock(&xdma->proc_mtx);
+
+ if (TAILQ_EMPTY(&xdma->channels)) {
+ continue;
+ }
+
+ TAILQ_FOREACH_SAFE(xchan, &xdma->channels, xchan_next, xchan_tmp) {
+ if (xchan->flags & XCHAN_TYPE_SG) {
+ xdma_queue_submit(xchan);
+ }
+ }
+ }
+}
+
/*
* Allocate virtual xDMA channel.
*/
xdma_channel_t *
-xdma_channel_alloc(xdma_controller_t *xdma)
+xdma_channel_alloc(xdma_controller_t *xdma, uint32_t caps)
{
xdma_channel_t *xchan;
int ret;
@@ -93,6 +123,7 @@
return (NULL);
}
xchan->xdma = xdma;
+ xchan->caps = caps;
XDMA_LOCK();
@@ -138,8 +169,13 @@
xdma_teardown_all_intr(xchan);
- /* Deallocate descriptors, if any. */
- xdma_desc_free(xchan);
+ /* Deallocate bufs, if any. */
+ xchan_bufs_free(xchan);
+ xchan_sglist_free(xchan);
+
+ if (xchan->flags & XCHAN_TYPE_SG) {
+ free(xchan->xr, M_XDMA);
+ }
mtx_destroy(&xchan->mtx_lock);
@@ -153,8 +189,9 @@
}
int
-xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *), void *arg,
- void **ihandler)
+xdma_setup_intr(xdma_channel_t *xchan,
+ int (*cb)(void *, xdma_transfer_status_t *),
+ void *arg, void **ihandler)
{
struct xdma_intr_handler *ih;
xdma_controller_t *xdma;
@@ -232,197 +269,604 @@
return (0);
}
-static void
-xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
+static int
+xdma_bufs_alloc_no_busdma(xdma_channel_t *xchan)
{
- xdma_channel_t *xchan;
+ xdma_controller_t *xdma;
+ int nsegments;
int i;
- xchan = (xdma_channel_t *)arg;
- KASSERT(xchan != NULL, ("xchan is NULL"));
+ xdma = xchan->xdma;
- if (err) {
- xchan->map_err = 1;
- return;
+ nsegments = xchan->bufs_num;
+
+ xchan->bufs = malloc(nsegments * sizeof(struct xchan_buf),
+ M_XDMA, (M_WAITOK | M_ZERO));
+ if (xchan->bufs == NULL) {
+ device_printf(xdma->dev,
+ "%s: Can't allocate memory.\n", __func__);
+ return (-1);
}
- for (i = 0; i < nseg; i++) {
- xchan->descs_phys[i].ds_addr = segs[i].ds_addr;
- xchan->descs_phys[i].ds_len = segs[i].ds_len;
+ for (i = 0; i < nsegments; i++) {
+ xchan->bufs[i].cbuf = contigmalloc(xchan->maxsegsize,
+ M_XDMA, 0, 0, ~0, PAGE_SIZE, 0);
}
+
+ return (0);
}
static int
-xdma_desc_alloc_bus_dma(xdma_channel_t *xchan, uint32_t desc_size,
- uint32_t align)
+xdma_bufs_alloc_busdma(xdma_channel_t *xchan)
{
xdma_controller_t *xdma;
- bus_size_t all_desc_sz;
- xdma_config_t *conf;
int nsegments;
int err;
+ int i;
xdma = xchan->xdma;
- conf = &xchan->conf;
- nsegments = conf->block_num;
- all_desc_sz = (nsegments * desc_size);
+ nsegments = xchan->bufs_num;
+
+#if 0
+ printf("%s: nseg %d\n", __func__, nsegments);
+#endif
+
+ xchan->bufs = malloc(nsegments * sizeof(struct xchan_buf),
+ M_XDMA, (M_WAITOK | M_ZERO));
+ if (xchan->bufs == NULL) {
+ device_printf(xdma->dev,
+ "%s: Can't allocate memory.\n", __func__);
+ return (-1);
+ }
err = bus_dma_tag_create(
- bus_get_dma_tag(xdma->dev),
- align, desc_size, /* alignment, boundary */
+ bus_get_dma_tag(xdma->dev), /* Parent tag. */
+ xchan->alignment, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- all_desc_sz, nsegments, /* maxsize, nsegments*/
- desc_size, 0, /* maxsegsize, flags */
+ xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
+ xchan->maxnsegs, /* nsegments */
+ xchan->maxsegsize, /* maxsegsize */
+ 0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
- &xchan->dma_tag);
- if (err) {
+ &xchan->dma_tag_bufs);
+ if (err != 0) {
device_printf(xdma->dev,
"%s: Can't create bus_dma tag.\n", __func__);
return (-1);
}
- err = bus_dmamem_alloc(xchan->dma_tag, (void **)&xchan->descs,
- BUS_DMA_WAITOK | BUS_DMA_COHERENT, &xchan->dma_map);
- if (err) {
+ for (i = 0; i < nsegments; i++) {
+ err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
+ &xchan->bufs[i].map);
+ if (err != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't create buf DMA map.\n", __func__);
+ return (-1);
+ }
+ }
+
+ return (0);
+}
+
+static int
+xchan_bufs_alloc(xdma_channel_t *xchan)
+{
+ xdma_controller_t *xdma;
+ int ret;
+
+ xdma = xchan->xdma;
+ if (xdma == NULL) {
device_printf(xdma->dev,
- "%s: Can't allocate memory for descriptors.\n", __func__);
+ "%s: Channel was not allocated properly.\n", __func__);
return (-1);
}
- xchan->descs_phys = malloc(nsegments * sizeof(xdma_descriptor_t), M_XDMA,
- (M_WAITOK | M_ZERO));
-
- xchan->map_err = 0;
- err = bus_dmamap_load(xchan->dma_tag, xchan->dma_map, xchan->descs,
- all_desc_sz, xdma_dmamap_cb, xchan, BUS_DMA_WAITOK);
- if (err) {
+ if (xchan->caps & XCHAN_CAP_BUSDMA) {
+ ret = xdma_bufs_alloc_busdma(xchan);
+ } else {
+ ret = xdma_bufs_alloc_no_busdma(xchan);
+ }
+ if (ret != 0) {
device_printf(xdma->dev,
- "%s: Can't load DMA map.\n", __func__);
+ "%s: Can't setup busdma.\n",
+ __func__);
return (-1);
}
- if (xchan->map_err != 0) {
+ xchan->flags |= XCHAN_BUFS_ALLOCATED;
+
+ return (0);
+}
+
+static int
+xchan_bufs_free(xdma_channel_t *xchan)
+{
+ xchan_buf_t *b;
+ int i;
+
+ if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
+ /* No bufs allocated. */
+ return (-1);
+ }
+
+ for (i = 0; i < xchan->bufs_num; i++) {
+ b = &xchan->bufs[i];
+ bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
+ }
+
+ bus_dma_tag_destroy(xchan->dma_tag_bufs);
+ free(xchan->bufs, M_XDMA);
+
+ xchan->flags &= ~XCHAN_BUFS_ALLOCATED;
+
+ return (0);
+}
+
+int
+xdma_prep_memcpy(xdma_channel_t *xchan, uintptr_t src_addr,
+ uintptr_t dst_addr, size_t len)
+{
+ xdma_controller_t *xdma;
+ xdma_config_t *conf;
+ int ret;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ conf = &xchan->conf;
+ conf->direction = XDMA_MEM_TO_MEM;
+ conf->src_addr = src_addr;
+ conf->dst_addr = dst_addr;
+ conf->block_len = len;
+ conf->block_num = 1;
+
+ xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_MEMCPY);
+
+ XCHAN_LOCK(xchan);
+
+ ret = XDMA_CHANNEL_PREP_MEMCPY(xdma->dma_dev, xchan);
+ if (ret != 0) {
device_printf(xdma->dev,
- "%s: Can't load DMA map.\n", __func__);
+ "%s: Can't prepare memcpy transfer.\n", __func__);
+ XCHAN_UNLOCK(xchan);
+
return (-1);
}
+ XCHAN_UNLOCK(xchan);
+
return (0);
}
/*
- * This function called by DMA controller driver.
+ * xr_num - size of the xdma request queue,
+ * maxsegsize - maximum allowed scatter-gather list element size, in bytes,
+ * alignment - required data buffer alignment, in bytes.
*/
int
-xdma_desc_alloc(xdma_channel_t *xchan, uint32_t desc_size, uint32_t align)
+xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
+ uint32_t maxsegsize, uint32_t alignment)
{
xdma_controller_t *xdma;
- xdma_config_t *conf;
int ret;
- XCHAN_ASSERT_LOCKED(xchan);
-
xdma = xchan->xdma;
- if (xdma == NULL) {
+
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ if (xchan->flags & XCHAN_CONFIGURED) {
device_printf(xdma->dev,
- "%s: Channel was not allocated properly.\n", __func__);
+ "%s: Channel is already configured.\n", __func__);
return (-1);
}
- if (xchan->flags & XCHAN_DESC_ALLOCATED) {
+ xchan->maxsegsize = maxsegsize;
+ xchan->alignment = alignment;
+ xchan->maxnsegs = 8;
+ xchan->bufs_num = xr_num;
+ xchan->xr_num = xr_num;
+
+ /* Allocate sglist. */
+ ret = xchan_sglist_init(xchan);
+ if (ret != 0) {
device_printf(xdma->dev,
- "%s: Descriptors already allocated.\n", __func__);
+ "%s: Can't allocate sglist.\n", __func__);
return (-1);
}
- if ((xchan->flags & XCHAN_CONFIGURED) == 0) {
+ /* Allocate request queue. */
+ xchan->xr = malloc(sizeof(struct xdma_request) * xr_num,
+ M_XDMA, M_WAITOK | M_ZERO);
+ if (xchan->xr == NULL) {
device_printf(xdma->dev,
- "%s: Channel has no configuration.\n", __func__);
+ "%s: Can't allocate request queue.\n", __func__);
return (-1);
}
- conf = &xchan->conf;
+ xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);
- XCHAN_UNLOCK(xchan);
- ret = xdma_desc_alloc_bus_dma(xchan, desc_size, align);
XCHAN_LOCK(xchan);
+
+ /* Deallocate bufs, if any. */
+ xchan_bufs_free(xchan);
+
+ ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
if (ret != 0) {
device_printf(xdma->dev,
- "%s: Can't allocate memory for descriptors.\n",
- __func__);
+ "%s: Can't prepare SG transfer.\n", __func__);
+ XCHAN_UNLOCK(xchan);
+
return (-1);
}
- xchan->flags |= XCHAN_DESC_ALLOCATED;
+ /* Allocate bufs */
+ ret = xchan_bufs_alloc(xchan);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't allocate bufs.\n", __func__);
+ return (-1);
+ }
- /* We are going to write to descriptors. */
- bus_dmamap_sync(xchan->dma_tag, xchan->dma_map, BUS_DMASYNC_PREWRITE);
+ XCHAN_UNLOCK(xchan);
return (0);
}
int
-xdma_desc_free(xdma_channel_t *xchan)
+xdma_dequeue(xdma_channel_t *xchan, void **user,
+ xdma_transfer_status_t *status)
{
+ struct xdma_request *xr;
- if ((xchan->flags & XCHAN_DESC_ALLOCATED) == 0) {
- /* No descriptors allocated. */
+ if (xchan->xr_tail == xchan->xr_processed) {
return (-1);
}
- bus_dmamap_unload(xchan->dma_tag, xchan->dma_map);
- bus_dmamem_free(xchan->dma_tag, xchan->descs, xchan->dma_map);
- bus_dma_tag_destroy(xchan->dma_tag);
- free(xchan->descs_phys, M_XDMA);
+ xr = &xchan->xr[xchan->xr_tail];
+ if (xr->done == 0) {
+ return (-1);
+ }
- xchan->flags &= ~(XCHAN_DESC_ALLOCATED);
+ *user = xr->user;
+ status->error = xr->status.error;
+ status->transferred = xr->status.transferred;
+ xchan->xr_tail = xchan_next_req(xchan, xchan->xr_tail);
+ atomic_subtract_int(&xchan->xr_count, 1);
return (0);
}
int
-xdma_prep_memcpy(xdma_channel_t *xchan, uintptr_t src_addr,
- uintptr_t dst_addr, size_t len)
+xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
+ bus_size_t len, enum xdma_direction dir, void *user)
{
+ struct xdma_request *xr;
xdma_controller_t *xdma;
- xdma_config_t *conf;
+
+ xdma = xchan->xdma;
+
+ if (xchan->xr_count >= (xchan->xr_num - 1)) {
+ /* No space is available yet. */
+ return (-1);
+ }
+
+ xr = &xchan->xr[xchan->xr_head];
+ xr->user = user;
+ xr->direction = dir;
+ xr->m = NULL;
+ xr->len = len;
+ xr->type = XR_TYPE_ADDR;
+ xr->src_addr = src;
+ xr->dst_addr = dst;
+#if 0
+ if (dir == XDMA_MEM_TO_DEV) {
+ xr->dst_addr = addr;
+ xr->src_addr = 0;
+ } else {
+ xr->src_addr = addr;
+ xr->dst_addr = 0;
+ }
+#endif
+ xr->done = 0;
+ xchan->xr_head = xchan_next_req(xchan, xchan->xr_head);
+ atomic_add_int(&xchan->xr_count, 1);
+
+ return (0);
+}
+
+struct seg_load_request {
+ struct bus_dma_segment *seg;
+ uint32_t nsegs;
+ uint32_t error;
+};
+
+static void
+xdma_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ struct seg_load_request *slr;
+ struct bus_dma_segment *seg;
+ int i;
+
+ slr = arg;
+ seg = slr->seg;
+
+ if (error != 0) {
+ slr->error = error;
+ return;
+ }
+
+ slr->nsegs = nsegs;
+
+ for (i = 0; i < nsegs; i++) {
+ seg[i].ds_addr = segs[i].ds_addr;
+ seg[i].ds_len = segs[i].ds_len;
+ }
+}
+
+static int
+xdma_load_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
+ struct bus_dma_segment *seg, uint32_t i)
+{
+ xdma_controller_t *xdma;
+ struct seg_load_request slr;
+ uint32_t nsegs;
+ void *addr;
+ int error;
+
+ xdma = xchan->xdma;
+
+ error = 0;
+ nsegs = 0;
+
+ switch (xr->type) {
+ case XR_TYPE_MBUF:
+ error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
+ xchan->bufs[i].map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
+ break;
+ case XR_TYPE_BIO:
+ slr.nsegs = 0;
+ slr.error = 0;
+ slr.seg = seg;
+ error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
+ xchan->bufs[i].map, xr->bp, xdma_get1paddr, &slr, BUS_DMA_NOWAIT);
+ if (slr.error != 0) {
+ device_printf(xdma->dma_dev,
+ "%s: bus_dmamap_load failed, err %d\n",
+ __func__, slr.error);
+ return (0);
+ }
+ nsegs = slr.nsegs;
+ break;
+ case XR_TYPE_ADDR:
+ switch (xr->direction) {
+ case XDMA_MEM_TO_DEV:
+ addr = (void *)xr->src_addr;
+ break;
+ case XDMA_DEV_TO_MEM:
+ addr = (void *)xr->dst_addr;
+ break;
+ default:
+ device_printf(xdma->dma_dev,
+ "%s: Direction is not supported\n", __func__);
+ return (0);
+ }
+ slr.nsegs = 0;
+ slr.error = 0;
+ slr.seg = seg;
+ error = bus_dmamap_load(xchan->dma_tag_bufs, xchan->bufs[i].map,
+ addr, xr->len, xdma_get1paddr, &slr, BUS_DMA_NOWAIT);
+ if (slr.error != 0) {
+ device_printf(xdma->dma_dev,
+ "%s: bus_dmamap_load failed, err %d\n",
+ __func__, slr.error);
+ return (0);
+ }
+ nsegs = slr.nsegs;
+ break;
+ default:
+ break;
+ }
+
+ if (error != 0) {
+ if (error == ENOMEM) {
+ /*
+ * Out of memory. Try again later.
+ * TODO: count errors.
+ */
+ } else {
+ device_printf(xdma->dma_dev,
+ "%s: bus_dmamap_load failed with err %d\n",
+ __func__, error);
+ }
+ return (0);
+ }
+
+ if (xr->direction == XDMA_MEM_TO_DEV) {
+ bus_dmamap_sync(xchan->dma_tag_bufs, xchan->bufs[i].map,
+ BUS_DMASYNC_PREWRITE);
+ } else {
+ bus_dmamap_sync(xchan->dma_tag_bufs, xchan->bufs[i].map,
+ BUS_DMASYNC_PREREAD);
+ }
+
+ return (nsegs);
+}
+
+static int
+xdma_load_no_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
+ struct bus_dma_segment *seg, uint32_t i)
+{
+ xdma_controller_t *xdma;
+ struct mbuf *m;
+ uint32_t nsegs;
+
+ xdma = xchan->xdma;
+
+ m = xr->m;
+
+ nsegs = 1;
+
+ switch (xr->type) {
+ case XR_TYPE_MBUF:
+ if (xr->direction == XDMA_MEM_TO_DEV) {
+ m_copydata(m, 0, m->m_pkthdr.len, xchan->bufs[i].cbuf);
+ seg[0].ds_addr = (bus_addr_t)xchan->bufs[i].cbuf;
+ seg[0].ds_len = m->m_pkthdr.len;
+ } else {
+ seg[0].ds_addr = mtod(m, bus_addr_t);
+ seg[0].ds_len = m->m_pkthdr.len;
+ }
+ break;
+ case XR_TYPE_BIO:
+ case XR_TYPE_ADDR:
+ default:
+ panic("implement me\n");
+ }
+
+ return (nsegs);
+}
+
+static int
+xdma_sglist_prepare_one(xdma_channel_t *xchan,
+ struct xdma_request *xr, struct bus_dma_segment *seg)
+{
+ xdma_controller_t *xdma;
+ int error;
+ int nsegs;
+ int i;
+
+ xdma = xchan->xdma;
+
+ error = 0;
+ nsegs = 0;
+
+ i = xchan->buf_head;
+
+ if (xchan->caps & XCHAN_CAP_BUSDMA) {
+ nsegs = xdma_load_busdma(xchan, xr, seg, i);
+ } else {
+ nsegs = xdma_load_no_busdma(xchan, xr, seg, i);
+ }
+ if (nsegs == 0) {
+ /* Try again later. */
+ return (0);
+ }
+
+ xchan->bufs[i].xr = xr;
+ xchan->bufs[i].nsegs = nsegs;
+ xchan->bufs[i].nsegs_left = nsegs;
+
+ xchan->buf_head = xchan_next_buf(xchan, xchan->buf_head);
+
+ return (nsegs);
+}
+
+static int
+xdma_sglist_prepare(xdma_channel_t *xchan,
+ struct xdma_sglist *sg)
+{
+ struct bus_dma_segment seg[128];
+ struct xdma_request *xr;
+ xdma_controller_t *xdma;
+ uint32_t capacity;
+ uint32_t n;
+ uint32_t c;
+ int nsegs;
+ int ret;
+
+ xdma = xchan->xdma;
+
+ n = 0;
+
+ ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't get DMA controller capacity.\n", __func__);
+ return (-1);
+ }
+
+ for (;;) {
+ if (xchan->xr_processed == xchan->xr_head) {
+ /* All the requests processed. */
+ break;
+ }
+ xr = &xchan->xr[xchan->xr_processed];
+
+ switch (xr->type) {
+ case XR_TYPE_MBUF:
+ c = xdma_mbuf_defrag(xchan, xr);
+ break;
+ case XR_TYPE_BIO:
+ case XR_TYPE_ADDR:
+ default:
+ c = 1;
+ }
+
+ if (capacity <= (c + n)) {
+ /*
+ * No space yet available for the entire
+ * request in the DMA engine.
+ */
+ break;
+ }
+
+ nsegs = xdma_sglist_prepare_one(xchan, xr, seg);
+ if (nsegs == 0) {
+ break;
+ }
+
+ xdma_sglist_add(&sg[n], seg, nsegs, xr);
+ n += nsegs;
+
+ xchan->xr_processed = xchan_next_req(xchan, xchan->xr_processed);
+ }
+
+ return (n);
+}
+
+int
+xdma_queue_submit(xdma_channel_t *xchan)
+{
+ struct xdma_sglist *sg;
+ xdma_controller_t *xdma;
+ uint32_t sg_n;
int ret;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
- conf = &xchan->conf;
- conf->direction = XDMA_MEM_TO_MEM;
- conf->src_addr = src_addr;
- conf->dst_addr = dst_addr;
- conf->block_len = len;
- conf->block_num = 1;
+ sg = xchan->sg;
- xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_MEMCPY);
+ if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
+ device_printf(xdma->dev,
+ "%s: Can't submit SG transfer: no bufs\n",
+ __func__);
+ return (-1);
+ }
XCHAN_LOCK(xchan);
- /* Deallocate old descriptors, if any. */
- xdma_desc_free(xchan);
+ sg_n = xdma_sglist_prepare(xchan, sg);
+ if (sg_n == 0) {
+ /* Nothing to submit */
+ XCHAN_UNLOCK(xchan);
+ return (0);
+ }
- ret = XDMA_CHANNEL_PREP_MEMCPY(xdma->dma_dev, xchan);
+ /* Now submit xdma_sglist to DMA engine driver. */
+
+ ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
if (ret != 0) {
device_printf(xdma->dev,
- "%s: Can't prepare memcpy transfer.\n", __func__);
+ "%s: Can't submit SG transfer.\n", __func__);
+
XCHAN_UNLOCK(xchan);
return (-1);
}
- if (xchan->flags & XCHAN_DESC_ALLOCATED) {
- /* Driver created xDMA descriptors. */
- bus_dmamap_sync(xchan->dma_tag, xchan->dma_map,
- BUS_DMASYNC_POSTWRITE);
- }
-
XCHAN_UNLOCK(xchan);
return (0);
@@ -453,8 +897,8 @@
XCHAN_LOCK(xchan);
- /* Deallocate old descriptors, if any. */
- xdma_desc_free(xchan);
+ /* Deallocate bufs, if any. */
+ xchan_bufs_free(xchan);
ret = XDMA_CHANNEL_PREP_CYCLIC(xdma->dma_dev, xchan);
if (ret != 0) {
@@ -465,12 +909,6 @@
return (-1);
}
- if (xchan->flags & XCHAN_DESC_ALLOCATED) {
- /* Driver has created xDMA descriptors. */
- bus_dmamap_sync(xchan->dma_tag, xchan->dma_map,
- BUS_DMASYNC_POSTWRITE);
- }
-
XCHAN_UNLOCK(xchan);
return (0);
@@ -484,6 +922,11 @@
xdma = xchan->xdma;
+ if (xchan->flags & XCHAN_TYPE_SG) {
+ /* Not valid for SG-type channels: transfers start on queue submit. */
+ return (0);
+ }
+
ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, XDMA_CMD_BEGIN);
if (ret != 0) {
device_printf(xdma->dev,
@@ -531,25 +974,60 @@
}
int
-xdma_callback(xdma_channel_t *xchan)
+xchan_seg_done(xdma_channel_t *xchan,
+ struct xdma_transfer_status *st)
{
- struct xdma_intr_handler *ih_tmp;
- struct xdma_intr_handler *ih;
+ struct xdma_request *xr;
+ xdma_controller_t *xdma;
+ xchan_buf_t *b;
- TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
- if (ih->cb != NULL) {
- ih->cb(ih->cb_user);
+ xdma = xchan->xdma;
+
+ b = &xchan->bufs[xchan->buf_tail];
+ xr = b->xr;
+
+ atomic_subtract_int(&b->nsegs_left, 1);
+
+ if (b->nsegs_left == 0) {
+ if (xchan->caps & XCHAN_CAP_BUSDMA) {
+ if (xr->direction == XDMA_MEM_TO_DEV) {
+ bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
+ BUS_DMASYNC_POSTWRITE);
+ } else {
+ bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
+ BUS_DMASYNC_POSTREAD);
+ }
+
+ bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
}
+ xr->status.error = st->error;
+ xr->status.transferred = st->transferred;
+ xr->done = 1;
+
+ xchan->buf_tail = xchan_next_buf(xchan, xchan->buf_tail);
}
return (0);
}
-void
-xdma_assert_locked(void)
+int
+xdma_callback(xdma_channel_t *xchan, xdma_transfer_status_t *status)
{
+ struct xdma_intr_handler *ih_tmp;
+ struct xdma_intr_handler *ih;
+ xdma_controller_t *xdma;
+
+ xdma = xchan->xdma;
- XDMA_ASSERT_LOCKED();
+ TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
+ if (ih->cb != NULL) {
+ ih->cb(ih->cb_user, status);
+ }
+ }
+
+ wakeup(xdma);
+
+ return (0);
}
#ifdef FDT
@@ -561,7 +1039,8 @@
{
uint32_t ret;
- ret = XDMA_OFW_MD_DATA(xdma->dma_dev, cells, ncells, (void **)&xdma->data);
+ ret = XDMA_OFW_MD_DATA(xdma->dma_dev,
+ cells, ncells, (void **)&xdma->data);
return (ret);
}
@@ -637,6 +1116,9 @@
xdma_ofw_md_data(xdma, cells, ncells);
free(cells, M_OFWPROP);
+ mtx_init(&xdma->proc_mtx, "xDMA ofw controller", NULL, MTX_DEF);
+ kproc_create(&xdma_task, xdma, &xdma->xdma_proc, 0, 0, "xdma drainer");
+
return (xdma);
}
#endif
@@ -656,6 +1138,9 @@
return (-1);
}
+ kproc_shutdown(&xdma->xdma_proc, 0);
+ mtx_destroy(&xdma->proc_mtx);
+
free(xdma->data, M_DEVBUF);
free(xdma, M_XDMA);
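A sketch of the consumer-side queue flow implemented above: requests are pushed at the head with xdma_enqueue_mbuf(), submitted with xdma_queue_submit(), and reaped from the tail once the engine driver has reported completion through xdma_callback()/xchan_seg_done(). my_softc, my_start, my_txdone, MY_TX_FIFO_PADDR and the tx_errors counter are hypothetical names, not part of this patch.

static void
my_start(struct my_softc *sc, struct mbuf *m)
{

	/* MY_TX_FIFO_PADDR is an assumed device FIFO physical address. */
	if (xdma_enqueue_mbuf(sc->xchan_tx, &m,
	    MY_TX_FIFO_PADDR, XDMA_MEM_TO_DEV) != 0) {
		/* Queue full; drop for the purposes of this sketch. */
		m_freem(m);
		return;
	}

	xdma_queue_submit(sc->xchan_tx);
}

static void
my_txdone(struct my_softc *sc)
{
	xdma_transfer_status_t st;
	struct mbuf *m;

	/* Drain all requests marked done by the completion path. */
	while (xdma_dequeue_mbuf(sc->xchan_tx, &m, &st) == 0) {
		if (st.error != 0)
			sc->tx_errors++;
		m_freem(m);
	}
}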
Index: sys/dev/xdma/xdma_bio.c
===================================================================
--- /dev/null
+++ sys/dev/xdma/xdma_bio.c
@@ -0,0 +1,120 @@
+/*-
+ * Copyright (c) 2017 Ruslan Bukin
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#ifdef FDT
+#include
+#include
+#include
+#endif
+
+#include
+
+int
+xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
+ xdma_transfer_status_t *status)
+{
+ struct xdma_request *xr;
+
+ if (xchan->xr_tail == xchan->xr_processed) {
+ return (-1);
+ }
+
+ xr = &xchan->xr[xchan->xr_tail];
+ if (xr->done == 0) {
+ return (-1);
+ }
+
+ *bp = xr->bp;
+ status->error = xr->status.error;
+ status->transferred = xr->status.transferred;
+ xchan->xr_tail = xchan_next_req(xchan, xchan->xr_tail);
+ atomic_subtract_int(&xchan->xr_count, 1);
+
+ return (0);
+}
+
+int
+xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp,
+ bus_addr_t addr, enum xdma_direction dir,
+ uint8_t src_width, uint8_t dst_width)
+{
+ struct xdma_request *xr;
+ xdma_controller_t *xdma;
+
+ xdma = xchan->xdma;
+
+ if (xchan->xr_count >= (xchan->xr_num - 1)) {
+ /* No space is available yet. */
+ return (-1);
+ }
+
+ xr = &xchan->xr[xchan->xr_head];
+ xr->direction = dir;
+ xr->bp = *bp;
+ xr->type = XR_TYPE_BIO;
+
+ xr->src_width = src_width;
+ xr->dst_width = dst_width;
+
+ if (dir == XDMA_MEM_TO_DEV) {
+ xr->dst_addr = addr;
+ xr->src_addr = 0;
+ } else {
+ xr->dst_addr = 0;
+ xr->src_addr = addr;
+ }
+ xr->done = 0;
+ xchan->xr_head = xchan_next_req(xchan, xchan->xr_head);
+ atomic_add_int(&xchan->xr_count, 1);
+
+ return (0);
+}
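For the bio queue added here, a block-style consumer would hand each bio straight to the channel from its strategy routine. An illustrative sketch, not part of this patch: my_softc, MY_DATA_FIFO_PADDR and the 4-byte access widths are assumptions.

static void
my_strategy(struct bio *bp)
{
	struct my_softc *sc;
	enum xdma_direction dir;

	sc = bp->bio_disk->d_drv1;

	dir = (bp->bio_cmd == BIO_READ) ? XDMA_DEV_TO_MEM : XDMA_MEM_TO_DEV;

	if (xdma_enqueue_bio(sc->xchan, &bp,
	    MY_DATA_FIFO_PADDR, dir, 4, 4) != 0) {
		/* Request queue is full. */
		biofinish(bp, NULL, ENOMEM);
		return;
	}

	xdma_queue_submit(sc->xchan);
}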
Index: sys/dev/xdma/xdma_fdt_test.c
===================================================================
--- sys/dev/xdma/xdma_fdt_test.c
+++ sys/dev/xdma/xdma_fdt_test.c
@@ -297,7 +297,12 @@
mtx_lock(&sc->mtx);
- xdmatest_test(sc);
+ if (xdmatest_test(sc) != 0) {
+ mtx_unlock(&sc->mtx);
+ device_printf(sc->dev,
+ "%s: Test failed.\n", __func__);
+ break;
+ }
timeout = 100;
Index: sys/dev/xdma/xdma_if.m
===================================================================
--- sys/dev/xdma/xdma_if.m
+++ sys/dev/xdma/xdma_if.m
@@ -59,6 +59,33 @@
};
#
+# Prepare xDMA channel for a scatter-gather transfer.
+#
+METHOD int channel_prep_sg {
+ device_t dev;
+ struct xdma_channel *xchan;
+};
+
+#
+# Check the number of free entries available for requests.
+#
+METHOD int channel_capacity {
+ device_t dev;
+ struct xdma_channel *xchan;
+ uint32_t *capacity;
+};
+
+#
+# Submit scatter-gather list to DMA.
+#
+METHOD int channel_submit_sg {
+ device_t dev;
+ struct xdma_channel *xchan;
+ struct xdma_sglist *sg;
+ uint32_t sg_n;
+};
+
+#
# Notify driver we have machine-dependent data.
#
METHOD int ofw_md_data {
@@ -77,7 +104,7 @@
};
#
-# Free the channel, including descriptors.
+# Free the channel.
#
METHOD int channel_free {
device_t dev;
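On the DMA engine driver side, the three new methods are exported through the usual kobj device method table. A hedged sketch follows, assuming a hypothetical my_dma driver and the xdma interface name already used by the existing methods; only methods visible in this patch are listed.

static device_method_t my_dma_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			my_dma_probe),
	DEVMETHOD(device_attach,		my_dma_attach),

	/* xDMA interface */
	DEVMETHOD(xdma_channel_free,		my_dma_channel_free),
	DEVMETHOD(xdma_channel_prep_sg,		my_dma_channel_prep_sg),
	DEVMETHOD(xdma_channel_capacity,	my_dma_channel_capacity),
	DEVMETHOD(xdma_channel_submit_sg,	my_dma_channel_submit_sg),
	DEVMETHOD(xdma_ofw_md_data,		my_dma_ofw_md_data),

	DEVMETHOD_END
};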
Index: sys/dev/xdma/xdma_mbuf.c
===================================================================
--- /dev/null
+++ sys/dev/xdma/xdma_mbuf.c
@@ -0,0 +1,162 @@
+/*-
+ * Copyright (c) 2017 Ruslan Bukin
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#ifdef FDT
+#include
+#include
+#include
+#endif
+
+#include
+
+int
+xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **mp,
+ xdma_transfer_status_t *status)
+{
+ struct xdma_request *xr;
+
+ if (xchan->xr_tail == xchan->xr_processed) {
+ return (-1);
+ }
+
+ xr = &xchan->xr[xchan->xr_tail];
+ if (xr->done == 0) {
+ return (-1);
+ }
+
+ *mp = xr->m;
+ status->error = xr->status.error;
+ status->transferred = xr->status.transferred;
+ xchan->xr_tail = xchan_next_req(xchan, xchan->xr_tail);
+ atomic_subtract_int(&xchan->xr_count, 1);
+
+ return (0);
+}
+
+int
+xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **mp,
+ uintptr_t addr, enum xdma_direction dir)
+{
+ struct xdma_request *xr;
+ xdma_controller_t *xdma;
+
+ xdma = xchan->xdma;
+
+ if (xchan->xr_count >= (xchan->xr_num - 1)) {
+ /* No space is available yet. */
+ return (-1);
+ }
+
+ xr = &xchan->xr[xchan->xr_head];
+ xr->direction = dir;
+ xr->m = *mp;
+ xr->type = XR_TYPE_MBUF;
+ if (dir == XDMA_MEM_TO_DEV) {
+ xr->dst_addr = addr;
+ xr->src_addr = 0;
+ } else {
+ xr->src_addr = addr;
+ xr->dst_addr = 0;
+ }
+ xr->done = 0;
+ xchan->xr_head = xchan_next_req(xchan, xchan->xr_head);
+ atomic_add_int(&xchan->xr_count, 1);
+
+ return (0);
+}
+
+uint32_t
+xdma_mbuf_chain_count(struct mbuf *m0)
+{
+ struct mbuf *m;
+ uint32_t c;
+
+ c = 0;
+
+ for (m = m0; m != NULL; m = m->m_next) {
+ c++;
+ }
+
+ return (c);
+}
+
+uint32_t
+xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr)
+{
+ xdma_controller_t *xdma;
+ struct mbuf *m;
+ uint32_t c;
+
+ xdma = xchan->xdma;
+
+ c = xdma_mbuf_chain_count(xr->m);
+ if (c == 1) {
+ /* Nothing to do. */
+ return (c);
+ }
+
+ if (xchan->caps & XCHAN_CAP_BUSDMA) {
+ if ((xchan->caps & XCHAN_CAP_BUSDMA_NOSEG) ||
+ (c > xchan->maxnsegs)) {
+ if ((m = m_defrag(xr->m, M_NOWAIT)) == NULL) {
+ device_printf(xdma->dma_dev,
+ "%s: Can't defrag mbuf\n",
+ __func__);
+ return (c);
+ }
+ xr->m = m;
+ c = 1;
+ }
+ }
+
+ return (c);
+}
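The XCHAN_CAP_BUSDMA_NOSEG capability governs when xdma_mbuf_defrag() linearizes a chain: an engine that cannot walk multi-segment lists is allocated with the flag set, so every chain collapses to one segment before the busdma load. A one-function sketch, with my_get_linear_chan being a hypothetical helper:

static xdma_channel_t *
my_get_linear_chan(xdma_controller_t *xdma)
{

	/* Force xdma_mbuf_defrag() to m_defrag() any multi-segment chain. */
	return (xdma_channel_alloc(xdma,
	    XCHAN_CAP_BUSDMA | XCHAN_CAP_BUSDMA_NOSEG));
}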
Index: sys/dev/xdma/xdma_sglist.c
===================================================================
--- /dev/null
+++ sys/dev/xdma/xdma_sglist.c
@@ -0,0 +1,127 @@
+/*-
+ * Copyright (c) 2017 Ruslan Bukin
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#ifdef FDT
+#include
+#include
+#include
+#endif
+
+#include
+
+int
+xchan_sglist_init(xdma_channel_t *xchan)
+{
+ uint32_t sz;
+
+ if (xchan->flags & XCHAN_SGLIST_ALLOCATED) {
+ return (-1);
+ }
+
+ /* TODO: don't hardcode the sglist size. */
+ sz = (sizeof(struct xdma_sglist) * 2048);
+
+ xchan->sg = malloc(sz, M_XDMA, M_WAITOK | M_ZERO);
+ if (xchan->sg == NULL) {
+ return (-1);
+ }
+
+ xchan->flags |= XCHAN_SGLIST_ALLOCATED;
+
+ return (0);
+}
+
+int
+xchan_sglist_free(xdma_channel_t *xchan)
+{
+
+ if (xchan->flags & XCHAN_SGLIST_ALLOCATED) {
+ free(xchan->sg, M_XDMA);
+ }
+
+ xchan->flags &= ~XCHAN_SGLIST_ALLOCATED;
+
+ return (0);
+}
+
+int
+xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
+ uint32_t nsegs, struct xdma_request *xr)
+{
+ int i;
+
+ if (nsegs == 0) {
+ return (-1);
+ }
+
+ for (i = 0; i < nsegs; i++) {
+ sg[i].src_width = xr->src_width;
+ sg[i].dst_width = xr->dst_width;
+
+ if (xr->direction == XDMA_MEM_TO_DEV) {
+ sg[i].src_addr = seg[i].ds_addr;
+ sg[i].dst_addr = xr->dst_addr;
+ } else {
+ sg[i].src_addr = xr->src_addr;
+ sg[i].dst_addr = seg[i].ds_addr;
+ }
+ sg[i].len = seg[i].ds_len;
+ sg[i].direction = xr->direction;
+
+ sg[i].first = 0;
+ sg[i].last = 0;
+ }
+
+ sg[0].first = 1;
+ sg[nsegs - 1].last = 1;
+
+ return (0);
+}
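Finally, to show how an engine driver would consume the list built by xdma_sglist_add(), here is a minimal sketch of a channel_submit_sg implementation. It is an assumption-laden illustration, not part of this patch: my_dma_softc, my_desc_write, my_dma_start and the descriptor format are hypothetical.

static int
my_dma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
    struct xdma_sglist *sg, uint32_t sg_n)
{
	struct my_dma_softc *sc;
	uint32_t i;

	sc = device_get_softc(dev);

	for (i = 0; i < sg_n; i++) {
		/*
		 * Each entry carries resolved physical addresses and access
		 * widths; first/last mark request boundaries, so the engine
		 * can raise a completion interrupt on the final segment.
		 */
		my_desc_write(sc, sg[i].src_addr, sg[i].dst_addr, sg[i].len,
		    sg[i].src_width, sg[i].dst_width, sg[i].last);
	}

	my_dma_start(sc);

	return (0);
}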