Index: head/sys/conf/files
===================================================================
--- head/sys/conf/files
+++ head/sys/conf/files
@@ -3511,8 +3511,14 @@
no-obj no-implicit-rule \
clean "wpi.fw"
dev/xdma/xdma.c optional xdma
-dev/xdma/xdma_if.m optional xdma
+dev/xdma/xdma_bank.c optional xdma
+dev/xdma/xdma_bio.c optional xdma
dev/xdma/xdma_fdt_test.c optional xdma xdma_test fdt
+dev/xdma/xdma_if.m optional xdma
+dev/xdma/xdma_mbuf.c optional xdma
+dev/xdma/xdma_queue.c optional xdma
+dev/xdma/xdma_sg.c optional xdma
+dev/xdma/xdma_sglist.c optional xdma
dev/xe/if_xe.c optional xe
dev/xe/if_xe_pccard.c optional xe pccard
dev/xen/balloon/balloon.c optional xenhvm
Index: head/sys/dev/xdma/xdma.h
===================================================================
--- head/sys/dev/xdma/xdma.h
+++ head/sys/dev/xdma/xdma.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2016 Ruslan Bukin
+ * Copyright (c) 2016-2018 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
@@ -30,9 +30,11 @@
* $FreeBSD$
*/
-#ifndef _DEV_EXTRES_XDMA_H_
-#define _DEV_EXTRES_XDMA_H_
+#ifndef _DEV_XDMA_XDMA_H_
+#define _DEV_XDMA_XDMA_H_
+#include <sys/proc.h>
+
enum xdma_direction {
XDMA_MEM_TO_MEM,
XDMA_MEM_TO_DEV,
@@ -42,17 +44,31 @@
enum xdma_operation_type {
XDMA_MEMCPY,
- XDMA_SG,
XDMA_CYCLIC,
+ XDMA_FIFO,
+ XDMA_SG,
};
+enum xdma_request_type {
+ XR_TYPE_PHYS,
+ XR_TYPE_VIRT,
+ XR_TYPE_MBUF,
+ XR_TYPE_BIO,
+};
+
enum xdma_command {
XDMA_CMD_BEGIN,
XDMA_CMD_PAUSE,
XDMA_CMD_TERMINATE,
- XDMA_CMD_TERMINATE_ALL,
};
+struct xdma_transfer_status {
+ uint32_t transferred;
+ int error;
+};
+
+typedef struct xdma_transfer_status xdma_transfer_status_t;
+
struct xdma_controller {
device_t dev; /* DMA consumer device_t. */
device_t dma_dev; /* A real DMA device_t. */
@@ -64,85 +80,185 @@
typedef struct xdma_controller xdma_controller_t;
-struct xdma_channel_config {
- enum xdma_direction direction;
- uintptr_t src_addr; /* Physical address. */
- uintptr_t dst_addr; /* Physical address. */
- int block_len; /* In bytes. */
- int block_num; /* Count of blocks. */
- int src_width; /* In bytes. */
- int dst_width; /* In bytes. */
+struct xchan_buf {
+ bus_dmamap_t map;
+ uint32_t nsegs;
+ uint32_t nsegs_left;
+ void *cbuf;
};
-typedef struct xdma_channel_config xdma_config_t;
+struct xdma_request {
+ struct mbuf *m;
+ struct bio *bp;
+ enum xdma_operation_type operation;
+ enum xdma_request_type req_type;
+ enum xdma_direction direction;
+ bus_addr_t src_addr;
+ bus_addr_t dst_addr;
+ uint8_t src_width;
+ uint8_t dst_width;
+ bus_size_t block_num;
+ bus_size_t block_len;
+ xdma_transfer_status_t status;
+ void *user;
+ TAILQ_ENTRY(xdma_request) xr_next;
+ struct xchan_buf buf;
+};
-struct xdma_descriptor {
- bus_addr_t ds_addr;
- bus_size_t ds_len;
+struct xdma_sglist {
+ bus_addr_t src_addr;
+ bus_addr_t dst_addr;
+ size_t len;
+ uint8_t src_width;
+ uint8_t dst_width;
+ enum xdma_direction direction;
+ bool first;
+ bool last;
};
-typedef struct xdma_descriptor xdma_descriptor_t;
-
struct xdma_channel {
xdma_controller_t *xdma;
- xdma_config_t conf;
- uint8_t flags;
-#define XCHAN_DESC_ALLOCATED (1 << 0)
-#define XCHAN_CONFIGURED (1 << 1)
-#define XCHAN_TYPE_CYCLIC (1 << 2)
-#define XCHAN_TYPE_MEMCPY (1 << 3)
+ uint32_t flags;
+#define XCHAN_BUFS_ALLOCATED (1 << 0)
+#define XCHAN_SGLIST_ALLOCATED (1 << 1)
+#define XCHAN_CONFIGURED (1 << 2)
+#define XCHAN_TYPE_CYCLIC (1 << 3)
+#define XCHAN_TYPE_MEMCPY (1 << 4)
+#define XCHAN_TYPE_FIFO (1 << 5)
+#define XCHAN_TYPE_SG (1 << 6)
+ uint32_t caps;
+#define XCHAN_CAP_BUSDMA (1 << 0)
+#define XCHAN_CAP_BUSDMA_NOSEG (1 << 1)
+
/* A real hardware driver channel. */
void *chan;
/* Interrupt handlers. */
TAILQ_HEAD(, xdma_intr_handler) ie_handlers;
+ TAILQ_ENTRY(xdma_channel) xchan_next;
- /* Descriptors. */
- bus_dma_tag_t dma_tag;
- bus_dmamap_t dma_map;
- void *descs;
- xdma_descriptor_t *descs_phys;
- uint8_t map_err;
+ struct sx sx_lock;
+ struct sx sx_qin_lock;
+ struct sx sx_qout_lock;
+ struct sx sx_bank_lock;
+ struct sx sx_proc_lock;
- struct mtx mtx_lock;
+ /* Request queue. */
+ bus_dma_tag_t dma_tag_bufs;
+ struct xdma_request *xr_mem;
+ uint32_t xr_num;
- TAILQ_ENTRY(xdma_channel) xchan_next;
+ /* Bus dma tag options. */
+ bus_size_t maxsegsize;
+ bus_size_t maxnsegs;
+ bus_size_t alignment;
+ bus_addr_t boundary;
+ bus_addr_t lowaddr;
+ bus_addr_t highaddr;
+
+ struct xdma_sglist *sg;
+
+ TAILQ_HEAD(, xdma_request) bank;
+ TAILQ_HEAD(, xdma_request) queue_in;
+ TAILQ_HEAD(, xdma_request) queue_out;
+ TAILQ_HEAD(, xdma_request) processing;
};
typedef struct xdma_channel xdma_channel_t;
-/* xDMA controller alloc/free */
+struct xdma_intr_handler {
+ int (*cb)(void *cb_user, xdma_transfer_status_t *status);
+ void *cb_user;
+ TAILQ_ENTRY(xdma_intr_handler) ih_next;
+};
+
+static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
+
+#define XCHAN_LOCK(xchan) sx_xlock(&(xchan)->sx_lock)
+#define XCHAN_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_lock)
+#define XCHAN_ASSERT_LOCKED(xchan) \
+ sx_assert(&(xchan)->sx_lock, SX_XLOCKED)
+
+#define QUEUE_IN_LOCK(xchan) sx_xlock(&(xchan)->sx_qin_lock)
+#define QUEUE_IN_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_qin_lock)
+#define QUEUE_IN_ASSERT_LOCKED(xchan) \
+ sx_assert(&(xchan)->sx_qin_lock, SX_XLOCKED)
+
+#define QUEUE_OUT_LOCK(xchan) sx_xlock(&(xchan)->sx_qout_lock)
+#define QUEUE_OUT_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_qout_lock)
+#define QUEUE_OUT_ASSERT_LOCKED(xchan) \
+ sx_assert(&(xchan)->sx_qout_lock, SX_XLOCKED)
+
+#define QUEUE_BANK_LOCK(xchan) sx_xlock(&(xchan)->sx_bank_lock)
+#define QUEUE_BANK_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_bank_lock)
+#define QUEUE_BANK_ASSERT_LOCKED(xchan) \
+ sx_assert(&(xchan)->sx_bank_lock, SX_XLOCKED)
+
+#define QUEUE_PROC_LOCK(xchan) sx_xlock(&(xchan)->sx_proc_lock)
+#define QUEUE_PROC_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_proc_lock)
+#define QUEUE_PROC_ASSERT_LOCKED(xchan) \
+ sx_assert(&(xchan)->sx_proc_lock, SX_XLOCKED)
+
+#define XDMA_SGLIST_MAXLEN 2048
+#define XDMA_MAX_SEG 128
+
+/* xDMA controller ops */
xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
int xdma_put(xdma_controller_t *xdma);
-xdma_channel_t * xdma_channel_alloc(xdma_controller_t *);
+/* xDMA channel ops */
+xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
int xdma_channel_free(xdma_channel_t *);
+int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);
-int xdma_prep_cyclic(xdma_channel_t *, enum xdma_direction,
- uintptr_t, uintptr_t, int, int, int, int);
-int xdma_prep_memcpy(xdma_channel_t *, uintptr_t, uintptr_t, size_t len);
-int xdma_desc_alloc(xdma_channel_t *, uint32_t, uint32_t);
-int xdma_desc_free(xdma_channel_t *xchan);
+/* SG interface */
+int xdma_prep_sg(xdma_channel_t *, uint32_t,
+ bus_size_t, bus_size_t, bus_size_t, bus_addr_t, bus_addr_t, bus_addr_t);
+void xdma_channel_free_sg(xdma_channel_t *xchan);
+int xdma_queue_submit_sg(xdma_channel_t *xchan);
+void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);
+/* Queue operations */
+int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
+ xdma_transfer_status_t *);
+int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
+ uint8_t, uint8_t, enum xdma_direction dir);
+int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
+ xdma_transfer_status_t *status);
+int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
+ uint8_t, uint8_t, enum xdma_direction dir);
+int xdma_dequeue(xdma_channel_t *xchan, void **user,
+ xdma_transfer_status_t *status);
+int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
+ uint8_t, uint8_t, bus_size_t, enum xdma_direction dir, void *);
+int xdma_queue_submit(xdma_channel_t *xchan);
+
+/* Mbuf operations */
+uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
+uint32_t xdma_mbuf_chain_count(struct mbuf *m0);
+
/* Channel Control */
-int xdma_begin(xdma_channel_t *xchan);
-int xdma_pause(xdma_channel_t *xchan);
-int xdma_terminate(xdma_channel_t *xchan);
+int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);
/* Interrupt callback */
-int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *), void *arg, void **);
+int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *,
+ xdma_transfer_status_t *), void *arg, void **);
int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
int xdma_teardown_all_intr(xdma_channel_t *xchan);
-int xdma_callback(struct xdma_channel *xchan);
-void xdma_assert_locked(void);
+void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);
-struct xdma_intr_handler {
- int (*cb)(void *);
- void *cb_user;
- struct mtx ih_lock;
- TAILQ_ENTRY(xdma_intr_handler) ih_next;
-};
+/* Sglist */
+int xchan_sglist_alloc(xdma_channel_t *xchan);
+void xchan_sglist_free(xdma_channel_t *xchan);
+int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
+ uint32_t nsegs, struct xdma_request *xr);
-#endif /* !_DEV_EXTRES_XDMA_H_ */
+/* Requests bank */
+void xchan_bank_init(xdma_channel_t *xchan);
+int xchan_bank_free(xdma_channel_t *xchan);
+struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
+int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);
+
+#endif /* !_DEV_XDMA_XDMA_H_ */
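
For orientation, here is a minimal consumer sketch written against the declarations above. It is not part of this diff: the `my_softc` layout, the "tx" dmas property name, `MY_FIFO_PADDR`, and the width/queue-size numbers are illustrative assumptions.

```c
/* Hypothetical NIC-style TX path over the new SG API. */
static int
my_tx_done(void *arg, xdma_transfer_status_t *st)
{
	struct my_softc *sc;
	xdma_transfer_status_t status;
	struct mbuf *m;

	sc = arg;

	/* Reap completed requests from queue_out and free their mbufs. */
	while (xdma_dequeue_mbuf(sc->xchan_tx, &m, &status) == 0)
		m_freem(m);

	return (0);
}

static int
my_dma_attach(struct my_softc *sc)
{

	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
	if (sc->xdma_tx == NULL)
		return (ENXIO);

	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, XCHAN_CAP_BUSDMA);
	if (sc->xchan_tx == NULL)
		return (ENXIO);

	/* 512 requests; up to 8 segments of at most MCLBYTES each. */
	if (xdma_prep_sg(sc->xchan_tx, 512, MCLBYTES, 8, 16, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR) != 0)
		return (ENXIO);

	return (xdma_setup_intr(sc->xchan_tx, my_tx_done, sc, &sc->ih));
}

static void
my_start(struct my_softc *sc, struct mbuf *m)
{

	/* Fails when the request bank is exhausted; retry later. */
	if (xdma_enqueue_mbuf(sc->xchan_tx, &m, MY_FIFO_PADDR,
	    4, 4, XDMA_MEM_TO_DEV) != 0)
		return;
	(void)xdma_queue_submit(sc->xchan_tx);
}
```
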
Index: head/sys/dev/xdma/xdma.c
===================================================================
--- head/sys/dev/xdma/xdma.c
+++ head/sys/dev/xdma/xdma.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2016 Ruslan Bukin
+ * Copyright (c) 2016-2018 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
@@ -39,7 +39,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -58,40 +57,28 @@
#include
-MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
-
/*
* Multiple xDMA controllers may work with a single DMA device,
* so we have a global lock for physical channel management.
*/
-static struct mtx xdma_mtx;
-#define XDMA_LOCK() mtx_lock(&xdma_mtx)
-#define XDMA_UNLOCK() mtx_unlock(&xdma_mtx)
-#define XDMA_ASSERT_LOCKED() mtx_assert(&xdma_mtx, MA_OWNED)
+static struct sx xdma_sx;
-/*
- * Per channel locks.
- */
-#define XCHAN_LOCK(xchan) mtx_lock(&(xchan)->mtx_lock)
-#define XCHAN_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_lock)
-#define XCHAN_ASSERT_LOCKED(xchan) mtx_assert(&(xchan)->mtx_lock, MA_OWNED)
+#define XDMA_LOCK() sx_xlock(&xdma_sx)
+#define XDMA_UNLOCK() sx_xunlock(&xdma_sx)
+#define XDMA_ASSERT_LOCKED() sx_assert(&xdma_sx, SX_XLOCKED)
/*
* Allocate virtual xDMA channel.
*/
xdma_channel_t *
-xdma_channel_alloc(xdma_controller_t *xdma)
+xdma_channel_alloc(xdma_controller_t *xdma, uint32_t caps)
{
xdma_channel_t *xchan;
int ret;
xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
- if (xchan == NULL) {
- device_printf(xdma->dev,
- "%s: Can't allocate memory for channel.\n", __func__);
- return (NULL);
- }
xchan->xdma = xdma;
+ xchan->caps = caps;
XDMA_LOCK();
@@ -107,8 +94,18 @@
}
TAILQ_INIT(&xchan->ie_handlers);
- mtx_init(&xchan->mtx_lock, "xDMA", NULL, MTX_DEF);
+ sx_init(&xchan->sx_lock, "xDMA chan");
+ sx_init(&xchan->sx_qin_lock, "xDMA qin");
+ sx_init(&xchan->sx_qout_lock, "xDMA qout");
+ sx_init(&xchan->sx_bank_lock, "xDMA bank");
+ sx_init(&xchan->sx_proc_lock, "xDMA proc");
+
+ TAILQ_INIT(&xchan->bank);
+ TAILQ_INIT(&xchan->queue_in);
+ TAILQ_INIT(&xchan->queue_out);
+ TAILQ_INIT(&xchan->processing);
+
TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);
XDMA_UNLOCK();
@@ -123,6 +120,7 @@
int err;
xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
XDMA_LOCK();
@@ -135,13 +133,17 @@
return (-1);
}
+ if (xchan->flags & XCHAN_TYPE_SG)
+ xdma_channel_free_sg(xchan);
+
xdma_teardown_all_intr(xchan);
- /* Deallocate descriptors, if any. */
- xdma_desc_free(xchan);
+ sx_destroy(&xchan->sx_lock);
+ sx_destroy(&xchan->sx_qin_lock);
+ sx_destroy(&xchan->sx_qout_lock);
+ sx_destroy(&xchan->sx_bank_lock);
+ sx_destroy(&xchan->sx_proc_lock);
- mtx_destroy(&xchan->mtx_lock);
-
TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);
free(xchan, M_XDMA);
@@ -152,8 +154,9 @@
}
int
-xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *), void *arg,
- void **ihandler)
+xdma_setup_intr(xdma_channel_t *xchan,
+ int (*cb)(void *, xdma_transfer_status_t *),
+ void *arg, void **ihandler)
{
struct xdma_intr_handler *ih;
xdma_controller_t *xdma;
@@ -172,22 +175,15 @@
ih = malloc(sizeof(struct xdma_intr_handler),
M_XDMA, M_WAITOK | M_ZERO);
- if (ih == NULL) {
- device_printf(xdma->dev,
- "%s: Can't allocate memory for interrupt handler.\n",
- __func__);
-
- return (-1);
- }
-
ih->cb = cb;
ih->cb_user = arg;
+ XCHAN_LOCK(xchan);
TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);
+ XCHAN_UNLOCK(xchan);
- if (ihandler != NULL) {
+ if (ihandler != NULL)
*ihandler = ih;
- }
return (0);
}
@@ -231,326 +227,67 @@
return (0);
}
-static void
-xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
-{
- xdma_channel_t *xchan;
- int i;
-
- xchan = (xdma_channel_t *)arg;
- KASSERT(xchan != NULL, ("xchan is NULL"));
-
- if (err) {
- xchan->map_err = 1;
- return;
- }
-
- for (i = 0; i < nseg; i++) {
- xchan->descs_phys[i].ds_addr = segs[i].ds_addr;
- xchan->descs_phys[i].ds_len = segs[i].ds_len;
- }
-}
-
-static int
-xdma_desc_alloc_bus_dma(xdma_channel_t *xchan, uint32_t desc_size,
- uint32_t align)
-{
- xdma_controller_t *xdma;
- bus_size_t all_desc_sz;
- xdma_config_t *conf;
- int nsegments;
- int err;
-
- xdma = xchan->xdma;
- conf = &xchan->conf;
-
- nsegments = conf->block_num;
- all_desc_sz = (nsegments * desc_size);
-
- err = bus_dma_tag_create(
- bus_get_dma_tag(xdma->dev),
- align, desc_size, /* alignment, boundary */
- BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- all_desc_sz, nsegments, /* maxsize, nsegments*/
- desc_size, 0, /* maxsegsize, flags */
- NULL, NULL, /* lockfunc, lockarg */
- &xchan->dma_tag);
- if (err) {
- device_printf(xdma->dev,
- "%s: Can't create bus_dma tag.\n", __func__);
- return (-1);
- }
-
- err = bus_dmamem_alloc(xchan->dma_tag, (void **)&xchan->descs,
- BUS_DMA_WAITOK | BUS_DMA_COHERENT, &xchan->dma_map);
- if (err) {
- device_printf(xdma->dev,
- "%s: Can't allocate memory for descriptors.\n", __func__);
- return (-1);
- }
-
- xchan->descs_phys = malloc(nsegments * sizeof(xdma_descriptor_t), M_XDMA,
- (M_WAITOK | M_ZERO));
-
- xchan->map_err = 0;
- err = bus_dmamap_load(xchan->dma_tag, xchan->dma_map, xchan->descs,
- all_desc_sz, xdma_dmamap_cb, xchan, BUS_DMA_WAITOK);
- if (err) {
- device_printf(xdma->dev,
- "%s: Can't load DMA map.\n", __func__);
- return (-1);
- }
-
- if (xchan->map_err != 0) {
- device_printf(xdma->dev,
- "%s: Can't load DMA map.\n", __func__);
- return (-1);
- }
-
- return (0);
-}
-
-/*
- * This function called by DMA controller driver.
- */
int
-xdma_desc_alloc(xdma_channel_t *xchan, uint32_t desc_size, uint32_t align)
+xdma_request(xdma_channel_t *xchan, struct xdma_request *req)
{
xdma_controller_t *xdma;
- xdma_config_t *conf;
int ret;
- XCHAN_ASSERT_LOCKED(xchan);
-
xdma = xchan->xdma;
- if (xdma == NULL) {
- device_printf(xdma->dev,
- "%s: Channel was not allocated properly.\n", __func__);
- return (-1);
- }
- if (xchan->flags & XCHAN_DESC_ALLOCATED) {
- device_printf(xdma->dev,
- "%s: Descriptors already allocated.\n", __func__);
- return (-1);
- }
-
- if ((xchan->flags & XCHAN_CONFIGURED) == 0) {
- device_printf(xdma->dev,
- "%s: Channel has no configuration.\n", __func__);
- return (-1);
- }
-
- conf = &xchan->conf;
-
- XCHAN_UNLOCK(xchan);
- ret = xdma_desc_alloc_bus_dma(xchan, desc_size, align);
- XCHAN_LOCK(xchan);
- if (ret != 0) {
- device_printf(xdma->dev,
- "%s: Can't allocate memory for descriptors.\n",
- __func__);
- return (-1);
- }
-
- xchan->flags |= XCHAN_DESC_ALLOCATED;
-
- /* We are going to write to descriptors. */
- bus_dmamap_sync(xchan->dma_tag, xchan->dma_map, BUS_DMASYNC_PREWRITE);
-
- return (0);
-}
-
-int
-xdma_desc_free(xdma_channel_t *xchan)
-{
-
- if ((xchan->flags & XCHAN_DESC_ALLOCATED) == 0) {
- /* No descriptors allocated. */
- return (-1);
- }
-
- bus_dmamap_unload(xchan->dma_tag, xchan->dma_map);
- bus_dmamem_free(xchan->dma_tag, xchan->descs, xchan->dma_map);
- bus_dma_tag_destroy(xchan->dma_tag);
- free(xchan->descs_phys, M_XDMA);
-
- xchan->flags &= ~(XCHAN_DESC_ALLOCATED);
-
- return (0);
-}
-
-int
-xdma_prep_memcpy(xdma_channel_t *xchan, uintptr_t src_addr,
- uintptr_t dst_addr, size_t len)
-{
- xdma_controller_t *xdma;
- xdma_config_t *conf;
- int ret;
-
- xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
- conf = &xchan->conf;
- conf->direction = XDMA_MEM_TO_MEM;
- conf->src_addr = src_addr;
- conf->dst_addr = dst_addr;
- conf->block_len = len;
- conf->block_num = 1;
-
- xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_MEMCPY);
-
XCHAN_LOCK(xchan);
-
- /* Deallocate old descriptors, if any. */
- xdma_desc_free(xchan);
-
- ret = XDMA_CHANNEL_PREP_MEMCPY(xdma->dma_dev, xchan);
+ ret = XDMA_CHANNEL_REQUEST(xdma->dma_dev, xchan, req);
if (ret != 0) {
device_printf(xdma->dev,
- "%s: Can't prepare memcpy transfer.\n", __func__);
+ "%s: Can't request a transfer.\n", __func__);
XCHAN_UNLOCK(xchan);
return (-1);
}
-
- if (xchan->flags & XCHAN_DESC_ALLOCATED) {
- /* Driver created xDMA descriptors. */
- bus_dmamap_sync(xchan->dma_tag, xchan->dma_map,
- BUS_DMASYNC_POSTWRITE);
- }
-
XCHAN_UNLOCK(xchan);
return (0);
}
int
-xdma_prep_cyclic(xdma_channel_t *xchan, enum xdma_direction dir,
- uintptr_t src_addr, uintptr_t dst_addr, int block_len,
- int block_num, int src_width, int dst_width)
+xdma_control(xdma_channel_t *xchan, enum xdma_command cmd)
{
xdma_controller_t *xdma;
- xdma_config_t *conf;
int ret;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
- conf = &xchan->conf;
- conf->direction = dir;
- conf->src_addr = src_addr;
- conf->dst_addr = dst_addr;
- conf->block_len = block_len;
- conf->block_num = block_num;
- conf->src_width = src_width;
- conf->dst_width = dst_width;
-
- xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_CYCLIC);
-
- XCHAN_LOCK(xchan);
-
- /* Deallocate old descriptors, if any. */
- xdma_desc_free(xchan);
-
- ret = XDMA_CHANNEL_PREP_CYCLIC(xdma->dma_dev, xchan);
+ ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, cmd);
if (ret != 0) {
device_printf(xdma->dev,
- "%s: Can't prepare cyclic transfer.\n", __func__);
- XCHAN_UNLOCK(xchan);
-
+ "%s: Can't process command.\n", __func__);
return (-1);
}
- if (xchan->flags & XCHAN_DESC_ALLOCATED) {
- /* Driver has created xDMA descriptors. */
- bus_dmamap_sync(xchan->dma_tag, xchan->dma_map,
- BUS_DMASYNC_POSTWRITE);
- }
-
- XCHAN_UNLOCK(xchan);
-
return (0);
}
-int
-xdma_begin(xdma_channel_t *xchan)
+void
+xdma_callback(xdma_channel_t *xchan, xdma_transfer_status_t *status)
{
+ struct xdma_intr_handler *ih_tmp;
+ struct xdma_intr_handler *ih;
xdma_controller_t *xdma;
- int ret;
xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
- ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, XDMA_CMD_BEGIN);
- if (ret != 0) {
- device_printf(xdma->dev,
- "%s: Can't begin the channel operation.\n", __func__);
- return (-1);
- }
+ TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp)
+ if (ih->cb != NULL)
+ ih->cb(ih->cb_user, status);
- return (0);
+ if (xchan->flags & XCHAN_TYPE_SG)
+ xdma_queue_submit(xchan);
}
-int
-xdma_terminate(xdma_channel_t *xchan)
-{
- xdma_controller_t *xdma;
- int ret;
-
- xdma = xchan->xdma;
-
- ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, XDMA_CMD_TERMINATE);
- if (ret != 0) {
- device_printf(xdma->dev,
- "%s: Can't terminate the channel operation.\n", __func__);
- return (-1);
- }
-
- return (0);
-}
-
-int
-xdma_pause(xdma_channel_t *xchan)
-{
- xdma_controller_t *xdma;
- int ret;
-
- xdma = xchan->xdma;
-
- ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, XDMA_CMD_PAUSE);
- if (ret != 0) {
- device_printf(xdma->dev,
- "%s: Can't pause the channel operation.\n", __func__);
- return (-1);
- }
-
- return (ret);
-}
-
-int
-xdma_callback(xdma_channel_t *xchan)
-{
- struct xdma_intr_handler *ih_tmp;
- struct xdma_intr_handler *ih;
-
- TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
- if (ih->cb != NULL) {
- ih->cb(ih->cb_user);
- }
- }
-
- return (0);
-}
-
-void
-xdma_assert_locked(void)
-{
-
- XDMA_ASSERT_LOCKED();
-}
-
#ifdef FDT
/*
* Notify the DMA driver we have machine-dependent data in FDT.
@@ -560,7 +297,8 @@
{
uint32_t ret;
- ret = XDMA_OFW_MD_DATA(xdma->dma_dev, cells, ncells, (void **)&xdma->data);
+ ret = XDMA_OFW_MD_DATA(xdma->dma_dev,
+ cells, ncells, (void **)&xdma->data);
return (ret);
}
@@ -581,10 +319,9 @@
int idx;
node = ofw_bus_get_node(dev);
- if (node <= 0) {
+ if (node <= 0)
device_printf(dev,
"%s called on not ofw based device.\n", __func__);
- }
error = ofw_bus_parse_xref_list_get_length(node,
"dmas", "#dma-cells", &ndmas);
@@ -622,12 +359,8 @@
return (NULL);
}
- xdma = malloc(sizeof(struct xdma_controller), M_XDMA, M_WAITOK | M_ZERO);
- if (xdma == NULL) {
- device_printf(dev,
- "%s can't allocate memory for xdma.\n", __func__);
- return (NULL);
- }
+ xdma = malloc(sizeof(struct xdma_controller),
+ M_XDMA, M_WAITOK | M_ZERO);
xdma->dev = dev;
xdma->dma_dev = dma_dev;
@@ -667,7 +400,7 @@
xdma_init(void)
{
- mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
+ sx_init(&xdma_sx, "xDMA");
}
SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);
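
The old xdma_begin()/xdma_pause()/xdma_terminate() entry points collapse into a single xdma_control(); consumers migrate mechanically (the jz4780_aic hunks below do exactly this):

```c
/* Before this change:             After: */
xdma_begin(xchan);        /* => */ xdma_control(xchan, XDMA_CMD_BEGIN);
xdma_pause(xchan);        /* => */ xdma_control(xchan, XDMA_CMD_PAUSE);
xdma_terminate(xchan);    /* => */ xdma_control(xchan, XDMA_CMD_TERMINATE);
```
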
Index: head/sys/dev/xdma/xdma_bank.c
===================================================================
--- head/sys/dev/xdma/xdma_bank.c
+++ head/sys/dev/xdma/xdma_bank.c
@@ -0,0 +1,99 @@
+/*-
+ * Copyright (c) 2018 Ruslan Bukin
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sx.h>
+
+#include <machine/bus.h>
+
+#include <dev/xdma/xdma.h>
+
+void
+xchan_bank_init(xdma_channel_t *xchan)
+{
+ struct xdma_request *xr;
+ xdma_controller_t *xdma;
+ int i;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ xchan->xr_mem = malloc(sizeof(struct xdma_request) * xchan->xr_num,
+ M_XDMA, M_WAITOK | M_ZERO);
+
+ for (i = 0; i < xchan->xr_num; i++) {
+ xr = &xchan->xr_mem[i];
+ TAILQ_INSERT_TAIL(&xchan->bank, xr, xr_next);
+ }
+}
+
+int
+xchan_bank_free(xdma_channel_t *xchan)
+{
+
+ free(xchan->xr_mem, M_XDMA);
+
+ return (0);
+}
+
+struct xdma_request *
+xchan_bank_get(xdma_channel_t *xchan)
+{
+ struct xdma_request *xr;
+ struct xdma_request *xr_tmp;
+
+ QUEUE_BANK_LOCK(xchan);
+ TAILQ_FOREACH_SAFE(xr, &xchan->bank, xr_next, xr_tmp) {
+ TAILQ_REMOVE(&xchan->bank, xr, xr_next);
+ break;
+ }
+ QUEUE_BANK_UNLOCK(xchan);
+
+ return (xr);
+}
+
+int
+xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr)
+{
+
+ QUEUE_BANK_LOCK(xchan);
+ TAILQ_INSERT_TAIL(&xchan->bank, xr, xr_next);
+ QUEUE_BANK_UNLOCK(xchan);
+
+ return (0);
+}
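
The bank is a fixed free list of xr_num preallocated requests, so the transfer hot path never hits malloc(9). A sketch of what the enqueue helpers in xdma_mbuf.c/xdma_bio.c do with it (the wrapper function itself is illustrative, not in this diff):

```c
static int
my_submit_one(xdma_channel_t *xchan, struct mbuf *m)
{
	struct xdma_request *xr;

	xr = xchan_bank_get(xchan);
	if (xr == NULL)
		return (-1);	/* All xr_num requests are in flight. */

	xr->m = m;
	xr->req_type = XR_TYPE_MBUF;
	xr->direction = XDMA_MEM_TO_DEV;

	QUEUE_IN_LOCK(xchan);
	TAILQ_INSERT_TAIL(&xchan->queue_in, xr, xr_next);
	QUEUE_IN_UNLOCK(xchan);

	/* The request returns to the bank via xdma_dequeue_mbuf(). */
	return (0);
}
```
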
Index: head/sys/dev/xdma/xdma_bio.c
===================================================================
--- head/sys/dev/xdma/xdma_bio.c
+++ head/sys/dev/xdma/xdma_bio.c
@@ -0,0 +1,105 @@
+/*-
+ * Copyright (c) 2017-2018 Ruslan Bukin
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sx.h>
+
+#include <machine/bus.h>
+
+#include <dev/xdma/xdma.h>
+
+int
+xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
+ xdma_transfer_status_t *status)
+{
+ struct xdma_request *xr_tmp;
+ struct xdma_request *xr;
+
+ QUEUE_OUT_LOCK(xchan);
+ TAILQ_FOREACH_SAFE(xr, &xchan->queue_out, xr_next, xr_tmp) {
+ TAILQ_REMOVE(&xchan->queue_out, xr, xr_next);
+ break;
+ }
+ QUEUE_OUT_UNLOCK(xchan);
+
+ if (xr == NULL)
+ return (-1);
+
+ *bp = xr->bp;
+
+ status->error = xr->status.error;
+ status->transferred = xr->status.transferred;
+
+ xchan_bank_put(xchan, xr);
+
+ return (0);
+}
+
+int
+xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp,
+ bus_addr_t addr, uint8_t src_width, uint8_t dst_width,
+ enum xdma_direction dir)
+{
+ struct xdma_request *xr;
+ xdma_controller_t *xdma;
+
+ xdma = xchan->xdma;
+
+ xr = xchan_bank_get(xchan);
+ if (xr == NULL)
+ return (-1); /* No space is available yet. */
+
+ xr->direction = dir;
+ xr->bp = *bp;
+ xr->req_type = XR_TYPE_BIO;
+ xr->src_width = src_width;
+ xr->dst_width = dst_width;
+ if (dir == XDMA_MEM_TO_DEV) {
+ xr->dst_addr = addr;
+ xr->src_addr = 0;
+ } else {
+ xr->dst_addr = 0;
+ xr->src_addr = addr;
+ }
+
+ QUEUE_IN_LOCK(xchan);
+ TAILQ_INSERT_TAIL(&xchan->queue_in, xr, xr_next);
+ QUEUE_IN_UNLOCK(xchan);
+
+ return (0);
+}
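
A block-driver round trip over this pair might look like the sketch below. The softc, disk glue, and MY_DATA_FIFO_PADDR are assumptions; only the xdma_* and bio calls are real.

```c
static void
my_strategy(struct bio *bp)
{
	struct my_softc *sc;

	sc = bp->bio_disk->d_drv1;

	if (xdma_enqueue_bio(sc->xchan, &bp, MY_DATA_FIFO_PADDR, 4, 4,
	    (bp->bio_cmd == BIO_WRITE) ?
	    XDMA_MEM_TO_DEV : XDMA_DEV_TO_MEM) != 0) {
		biofinish(bp, NULL, ENOMEM);	/* Bank exhausted. */
		return;
	}
	(void)xdma_queue_submit(sc->xchan);
}

static int
my_dma_intr(void *arg, xdma_transfer_status_t *st)
{
	struct my_softc *sc;
	xdma_transfer_status_t status;
	struct bio *bp;

	sc = arg;

	while (xdma_dequeue_bio(sc->xchan, &bp, &status) == 0) {
		bp->bio_resid = bp->bio_bcount - status.transferred;
		biodone(bp);
	}

	return (0);
}
```
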
Index: head/sys/dev/xdma/xdma_fdt_test.c
===================================================================
--- head/sys/dev/xdma/xdma_fdt_test.c
+++ head/sys/dev/xdma/xdma_fdt_test.c
@@ -82,6 +82,7 @@
struct mtx mtx;
int done;
struct proc *newp;
+ struct xdma_request req;
};
static int xdmatest_probe(device_t dev);
@@ -232,8 +233,16 @@
sc->dst[i] = 0;
}
- /* Configure channel for memcpy transfer. */
- err = xdma_prep_memcpy(sc->xchan, sc->src_phys, sc->dst_phys, sc->len);
+ sc->req.operation = XDMA_MEMCPY;
+ sc->req.req_type = XR_TYPE_PHYS;
+ sc->req.direction = XDMA_MEM_TO_MEM;
+ sc->req.src_addr = sc->src_phys;
+ sc->req.dst_addr = sc->dst_phys;
+ sc->req.src_width = 4;
+ sc->req.dst_width = 4;
+ sc->req.block_len = sc->len;
+ sc->req.block_num = 1;
+
+ err = xdma_request(sc->xchan, &sc->req);
if (err != 0) {
device_printf(sc->dev, "Can't configure virtual channel.\n");
return (-1);
@@ -297,7 +306,12 @@
mtx_lock(&sc->mtx);
- xdmatest_test(sc);
+ if (xdmatest_test(sc) != 0) {
+ mtx_unlock(&sc->mtx);
+ device_printf(sc->dev,
+ "%s: Test failed.\n", __func__);
+ break;
+ }
timeout = 100;
Index: head/sys/dev/xdma/xdma_if.m
===================================================================
--- head/sys/dev/xdma/xdma_if.m
+++ head/sys/dev/xdma/xdma_if.m
@@ -1,5 +1,5 @@
#-
-# Copyright (c) 2016 Ruslan Bukin
+# Copyright (c) 2016-2018 Ruslan Bukin
# All rights reserved.
#
# This software was developed by SRI International and the University of
@@ -43,22 +43,43 @@
INTERFACE xdma;
#
-# Prepare a channel for cyclic transfer.
+# Request a transfer.
#
-METHOD int channel_prep_cyclic {
+METHOD int channel_request {
device_t dev;
struct xdma_channel *xchan;
+ struct xdma_request *req;
};
#
-# Prepare a channel for memcpy transfer.
+# Prepare xDMA channel for a scatter-gather transfer.
#
-METHOD int channel_prep_memcpy {
+METHOD int channel_prep_sg {
device_t dev;
struct xdma_channel *xchan;
};
#
+# Query the DMA engine driver for the number of free
+# entries (descriptors) available.
+#
+METHOD int channel_capacity {
+ device_t dev;
+ struct xdma_channel *xchan;
+ uint32_t *capacity;
+};
+
+#
+# Submit an sglist to the DMA engine driver.
+#
+METHOD int channel_submit_sg {
+ device_t dev;
+ struct xdma_channel *xchan;
+ struct xdma_sglist *sg;
+ uint32_t sg_n;
+};
+
+#
# Notify the driver we have machine-dependent data.
#
METHOD int ofw_md_data {
@@ -77,7 +98,7 @@
};
#
-# Free the channel, including descriptors.
+# Free the real hardware channel.
#
METHOD int channel_free {
device_t dev;
struct xdma_channel *xchan;
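
On the provider side, a DMA engine driver exports these methods through its device_method_t table, and kobj resolves the XDMA_CHANNEL_*() calls in xdma.c/xdma_sg.c to them. A sketch, with all my_pdma_* implementations assumed:

```c
static int
my_pdma_channel_capacity(device_t dev, xdma_channel_t *xchan,
    uint32_t *capacity)
{
	struct my_chan *chan;

	chan = (struct my_chan *)xchan->chan;

	/* Free hardware descriptors left in this channel's ring. */
	*capacity = chan->desc_count - chan->descs_used;

	return (0);
}

static device_method_t my_pdma_methods[] = {
	/* xDMA Interface */
	DEVMETHOD(xdma_channel_alloc,		my_pdma_channel_alloc),
	DEVMETHOD(xdma_channel_free,		my_pdma_channel_free),
	DEVMETHOD(xdma_channel_request,		my_pdma_channel_request),
	DEVMETHOD(xdma_channel_control,		my_pdma_channel_control),
	DEVMETHOD(xdma_channel_prep_sg,		my_pdma_channel_prep_sg),
	DEVMETHOD(xdma_channel_capacity,	my_pdma_channel_capacity),
	DEVMETHOD(xdma_channel_submit_sg,	my_pdma_channel_submit_sg),
#ifdef FDT
	DEVMETHOD(xdma_ofw_md_data,		my_pdma_ofw_md_data),
#endif
	DEVMETHOD_END
};
```
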
Index: head/sys/dev/xdma/xdma_mbuf.c
===================================================================
--- head/sys/dev/xdma/xdma_mbuf.c
+++ head/sys/dev/xdma/xdma_mbuf.c
@@ -0,0 +1,154 @@
+/*-
+ * Copyright (c) 2017-2018 Ruslan Bukin
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/queue.h>
+#include <sys/sx.h>
+
+#include <machine/bus.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+
+#include <dev/xdma/xdma.h>
+
+int
+xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **mp,
+ xdma_transfer_status_t *status)
+{
+ struct xdma_request *xr;
+ struct xdma_request *xr_tmp;
+
+ QUEUE_OUT_LOCK(xchan);
+ TAILQ_FOREACH_SAFE(xr, &xchan->queue_out, xr_next, xr_tmp) {
+ TAILQ_REMOVE(&xchan->queue_out, xr, xr_next);
+ break;
+ }
+ QUEUE_OUT_UNLOCK(xchan);
+
+ if (xr == NULL)
+ return (-1);
+
+ *mp = xr->m;
+ status->error = xr->status.error;
+ status->transferred = xr->status.transferred;
+
+ xchan_bank_put(xchan, xr);
+
+ return (0);
+}
+
+int
+xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **mp,
+ uintptr_t addr, uint8_t src_width, uint8_t dst_width,
+ enum xdma_direction dir)
+{
+ struct xdma_request *xr;
+ xdma_controller_t *xdma;
+
+ xdma = xchan->xdma;
+
+ xr = xchan_bank_get(xchan);
+ if (xr == NULL)
+ return (-1); /* No space is available yet. */
+
+ xr->direction = dir;
+ xr->m = *mp;
+ xr->req_type = XR_TYPE_MBUF;
+ if (dir == XDMA_MEM_TO_DEV) {
+ xr->dst_addr = addr;
+ xr->src_addr = 0;
+ } else {
+ xr->src_addr = addr;
+ xr->dst_addr = 0;
+ }
+ xr->src_width = src_width;
+ xr->dst_width = dst_width;
+
+ QUEUE_IN_LOCK(xchan);
+ TAILQ_INSERT_TAIL(&xchan->queue_in, xr, xr_next);
+ QUEUE_IN_UNLOCK(xchan);
+
+ return (0);
+}
+
+uint32_t
+xdma_mbuf_chain_count(struct mbuf *m0)
+{
+ struct mbuf *m;
+ uint32_t c;
+
+ c = 0;
+
+ for (m = m0; m != NULL; m = m->m_next)
+ c++;
+
+ return (c);
+}
+
+uint32_t
+xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr)
+{
+ xdma_controller_t *xdma;
+ struct mbuf *m;
+ uint32_t c;
+
+ xdma = xchan->xdma;
+
+ c = xdma_mbuf_chain_count(xr->m);
+ if (c == 1)
+ return (c); /* Nothing to do. */
+
+ if (xchan->caps & XCHAN_CAP_BUSDMA) {
+ if ((xchan->caps & XCHAN_CAP_BUSDMA_NOSEG) || \
+ (c > xchan->maxnsegs)) {
+ if ((m = m_defrag(xr->m, M_NOWAIT)) == NULL) {
+ device_printf(xdma->dma_dev,
+ "%s: Can't defrag mbuf\n",
+ __func__);
+ return (c);
+ }
+ xr->m = m;
+ c = 1;
+ }
+ }
+
+ return (c);
+}
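
So a chain is collapsed only when busdma is in use and the engine either cannot take multi-segment transfers at all (XCHAN_CAP_BUSDMA_NOSEG) or the chain is longer than maxnsegs. With assumed numbers:

```c
/* Assumed engine limit: 4 segments per transfer. */
xchan = xdma_channel_alloc(xdma, XCHAN_CAP_BUSDMA);
error = xdma_prep_sg(xchan, 64, MCLBYTES, 4 /* maxnsegs */, 8, 0,
    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);

/*
 * A 6-mbuf chain now exceeds maxnsegs, so xdma_process() has
 * xdma_mbuf_defrag() call m_defrag() to collapse it into a single
 * cluster before the busdma load; with XCHAN_CAP_BUSDMA_NOSEG set,
 * any multi-mbuf chain is collapsed.
 */
```
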
Index: head/sys/dev/xdma/xdma_queue.c
===================================================================
--- head/sys/dev/xdma/xdma_queue.c
+++ head/sys/dev/xdma/xdma_queue.c
@@ -0,0 +1,124 @@
+/*-
+ * Copyright (c) 2018 Ruslan Bukin
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sx.h>
+#include <sys/systm.h>
+
+#include <dev/xdma/xdma.h>
+
+int
+xdma_dequeue(xdma_channel_t *xchan, void **user,
+ xdma_transfer_status_t *status)
+{
+ struct xdma_request *xr_tmp;
+ struct xdma_request *xr;
+
+ QUEUE_OUT_LOCK(xchan);
+ TAILQ_FOREACH_SAFE(xr, &xchan->queue_out, xr_next, xr_tmp) {
+ TAILQ_REMOVE(&xchan->queue_out, xr, xr_next);
+ break;
+ }
+ QUEUE_OUT_UNLOCK(xchan);
+
+ if (xr == NULL)
+ return (-1);
+
+ *user = xr->user;
+ status->error = xr->status.error;
+ status->transferred = xr->status.transferred;
+
+ xchan_bank_put(xchan, xr);
+
+ return (0);
+}
+
+int
+xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
+ uint8_t src_width, uint8_t dst_width, bus_size_t len,
+ enum xdma_direction dir, void *user)
+{
+ struct xdma_request *xr;
+ xdma_controller_t *xdma;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ xr = xchan_bank_get(xchan);
+ if (xr == NULL)
+ return (-1); /* No space is available. */
+
+ xr->user = user;
+ xr->direction = dir;
+ xr->m = NULL;
+ xr->bp = NULL;
+ xr->block_num = 1;
+ xr->block_len = len;
+ xr->req_type = XR_TYPE_VIRT;
+ xr->src_addr = src;
+ xr->dst_addr = dst;
+ xr->src_width = src_width;
+ xr->dst_width = dst_width;
+
+ QUEUE_IN_LOCK(xchan);
+ TAILQ_INSERT_TAIL(&xchan->queue_in, xr, xr_next);
+ QUEUE_IN_UNLOCK(xchan);
+
+ return (0);
+}
+
+int
+xdma_queue_submit(xdma_channel_t *xchan)
+{
+ xdma_controller_t *xdma;
+ int ret;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ ret = 0;
+
+ XCHAN_LOCK(xchan);
+
+ if (xchan->flags & XCHAN_TYPE_SG)
+ ret = xdma_queue_submit_sg(xchan);
+
+ XCHAN_UNLOCK(xchan);
+
+ return (ret);
+}
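
The generic queue carries an opaque cookie through the round trip, which suits fire-and-forget copies into a device FIFO. A sketch; my_push()/my_done()/my_complete() and MY_FIFO_PADDR are assumptions:

```c
static int
my_push(xdma_channel_t *xchan, void *buf, bus_size_t len, void *ctx)
{

	if (xdma_enqueue(xchan, (uintptr_t)buf, MY_FIFO_PADDR, 4, 4,
	    len, XDMA_MEM_TO_DEV, ctx) != 0)
		return (ENOMEM);	/* Request bank exhausted. */

	return (xdma_queue_submit(xchan));
}

static int
my_done(void *arg, xdma_transfer_status_t *st)
{
	xdma_channel_t *xchan;
	xdma_transfer_status_t status;
	void *user;

	xchan = arg;

	/* The cookie passed to xdma_enqueue() comes back here. */
	while (xdma_dequeue(xchan, &user, &status) == 0)
		my_complete(user, status.error);

	return (0);
}
```
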
Index: head/sys/dev/xdma/xdma_sg.c
===================================================================
--- head/sys/dev/xdma/xdma_sg.c
+++ head/sys/dev/xdma/xdma_sg.c
@@ -0,0 +1,595 @@
+/*-
+ * Copyright (c) 2018 Ruslan Bukin
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/queue.h>
+#include <sys/sx.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+
+#include <dev/xdma/xdma.h>
+
+#include <xdma_if.h>
+
+struct seg_load_request {
+ struct bus_dma_segment *seg;
+ uint32_t nsegs;
+ uint32_t error;
+};
+
+static int
+_xchan_bufs_alloc(xdma_channel_t *xchan)
+{
+ xdma_controller_t *xdma;
+ struct xdma_request *xr;
+ int i;
+
+ xdma = xchan->xdma;
+
+ for (i = 0; i < xchan->xr_num; i++) {
+ xr = &xchan->xr_mem[i];
+ xr->buf.cbuf = contigmalloc(xchan->maxsegsize,
+ M_XDMA, 0, 0, ~0, PAGE_SIZE, 0);
+ if (xr->buf.cbuf == NULL) {
+ device_printf(xdma->dev,
+ "%s: Can't allocate contiguous kernel"
+ " physical memory\n", __func__);
+ return (-1);
+ }
+ }
+
+ return (0);
+}
+
+static int
+_xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
+{
+ xdma_controller_t *xdma;
+ struct xdma_request *xr;
+ int err;
+ int i;
+
+ xdma = xchan->xdma;
+
+ /* Create bus_dma tag */
+ err = bus_dma_tag_create(
+ bus_get_dma_tag(xdma->dev), /* Parent tag. */
+ xchan->alignment, /* alignment */
+ xchan->boundary, /* boundary */
+ xchan->lowaddr, /* lowaddr */
+ xchan->highaddr, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
+ xchan->maxnsegs, /* nsegments */
+ xchan->maxsegsize, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &xchan->dma_tag_bufs);
+ if (err != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't create bus_dma tag.\n", __func__);
+ return (-1);
+ }
+
+ for (i = 0; i < xchan->xr_num; i++) {
+ xr = &xchan->xr_mem[i];
+ err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
+ &xr->buf.map);
+ if (err != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't create buf DMA map.\n", __func__);
+
+ /* Cleanup. */
+ bus_dma_tag_destroy(xchan->dma_tag_bufs);
+
+ return (-1);
+ }
+ }
+
+ return (0);
+}
+
+static int
+xchan_bufs_alloc(xdma_channel_t *xchan)
+{
+ xdma_controller_t *xdma;
+ int ret;
+
+ xdma = xchan->xdma;
+
+ if (xdma == NULL) {
+ /* No xdma->dev to print through here. */
+ printf("%s: Channel was not allocated properly.\n", __func__);
+ return (-1);
+ }
+
+ if (xchan->caps & XCHAN_CAP_BUSDMA)
+ ret = _xchan_bufs_alloc_busdma(xchan);
+ else
+ ret = _xchan_bufs_alloc(xchan);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't allocate bufs.\n", __func__);
+ return (-1);
+ }
+
+ xchan->flags |= XCHAN_BUFS_ALLOCATED;
+
+ return (0);
+}
+
+static int
+xchan_bufs_free(xdma_channel_t *xchan)
+{
+ struct xdma_request *xr;
+ struct xchan_buf *b;
+ int i;
+
+ if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
+ return (-1);
+
+ if (xchan->caps & XCHAN_CAP_BUSDMA) {
+ for (i = 0; i < xchan->xr_num; i++) {
+ xr = &xchan->xr_mem[i];
+ b = &xr->buf;
+ bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
+ }
+ bus_dma_tag_destroy(xchan->dma_tag_bufs);
+ } else {
+ for (i = 0; i < xchan->xr_num; i++) {
+ xr = &xchan->xr_mem[i];
+ contigfree(xr->buf.cbuf, xchan->maxsegsize, M_XDMA);
+ }
+ }
+
+ xchan->flags &= ~XCHAN_BUFS_ALLOCATED;
+
+ return (0);
+}
+
+void
+xdma_channel_free_sg(xdma_channel_t *xchan)
+{
+
+ xchan_bufs_free(xchan);
+ xchan_sglist_free(xchan);
+ xchan_bank_free(xchan);
+}
+
+/*
+ * Prepare xchan for a scatter-gather transfer.
+ * xr_num - size of the xdma request queue,
+ * maxsegsize - maximum allowed size in bytes of a scatter-gather list element.
+ */
+int
+xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
+ bus_size_t maxsegsize, bus_size_t maxnsegs,
+ bus_size_t alignment, bus_addr_t boundary,
+ bus_addr_t lowaddr, bus_addr_t highaddr)
+{
+ xdma_controller_t *xdma;
+ int ret;
+
+ xdma = xchan->xdma;
+
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ if (xchan->flags & XCHAN_CONFIGURED) {
+ device_printf(xdma->dev,
+ "%s: Channel is already configured.\n", __func__);
+ return (-1);
+ }
+
+ xchan->xr_num = xr_num;
+ xchan->maxsegsize = maxsegsize;
+ xchan->maxnsegs = maxnsegs;
+ xchan->alignment = alignment;
+ xchan->boundary = boundary;
+ xchan->lowaddr = lowaddr;
+ xchan->highaddr = highaddr;
+
+ if (xchan->maxnsegs > XDMA_MAX_SEG) {
+ device_printf(xdma->dev, "%s: maxnsegs is too big\n",
+ __func__);
+ return (-1);
+ }
+
+ xchan_bank_init(xchan);
+
+ /* Allocate sglist. */
+ ret = xchan_sglist_alloc(xchan);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't allocate sglist.\n", __func__);
+ return (-1);
+ }
+
+ /* Allocate bufs. */
+ ret = xchan_bufs_alloc(xchan);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't allocate bufs.\n", __func__);
+
+ /* Cleanup */
+ xchan_sglist_free(xchan);
+ xchan_bank_free(xchan);
+
+ return (-1);
+ }
+
+ xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);
+
+ XCHAN_LOCK(xchan);
+ ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't prepare SG transfer.\n", __func__);
+ XCHAN_UNLOCK(xchan);
+
+ return (-1);
+ }
+ XCHAN_UNLOCK(xchan);
+
+ return (0);
+}
+
+void
+xchan_seg_done(xdma_channel_t *xchan,
+ struct xdma_transfer_status *st)
+{
+ struct xdma_request *xr;
+ xdma_controller_t *xdma;
+ struct xchan_buf *b;
+
+ xdma = xchan->xdma;
+
+ xr = TAILQ_FIRST(&xchan->processing);
+ if (xr == NULL)
+ panic("request not found\n");
+
+ b = &xr->buf;
+
+ atomic_subtract_int(&b->nsegs_left, 1);
+
+ if (b->nsegs_left == 0) {
+ if (xchan->caps & XCHAN_CAP_BUSDMA) {
+ if (xr->direction == XDMA_MEM_TO_DEV)
+ bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
+ BUS_DMASYNC_POSTWRITE);
+ else
+ bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
+ }
+ xr->status.error = st->error;
+ xr->status.transferred = st->transferred;
+
+ QUEUE_PROC_LOCK(xchan);
+ TAILQ_REMOVE(&xchan->processing, xr, xr_next);
+ QUEUE_PROC_UNLOCK(xchan);
+
+ QUEUE_OUT_LOCK(xchan);
+ TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
+ QUEUE_OUT_UNLOCK(xchan);
+ }
+}
+
+static void
+xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ struct seg_load_request *slr;
+ struct bus_dma_segment *seg;
+ int i;
+
+ slr = arg;
+ seg = slr->seg;
+
+ if (error != 0) {
+ slr->error = error;
+ return;
+ }
+
+ slr->nsegs = nsegs;
+
+ for (i = 0; i < nsegs; i++) {
+ seg[i].ds_addr = segs[i].ds_addr;
+ seg[i].ds_len = segs[i].ds_len;
+ }
+}
+
+static int
+_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
+ struct bus_dma_segment *seg)
+{
+ xdma_controller_t *xdma;
+ struct seg_load_request slr;
+ uint32_t nsegs;
+ void *addr;
+ int error;
+
+ xdma = xchan->xdma;
+
+ error = 0;
+ nsegs = 0;
+
+ switch (xr->req_type) {
+ case XR_TYPE_MBUF:
+ error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
+ xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
+ break;
+ case XR_TYPE_BIO:
+ slr.nsegs = 0;
+ slr.error = 0;
+ slr.seg = seg;
+ error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
+ xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
+ if (slr.error != 0) {
+ device_printf(xdma->dma_dev,
+ "%s: bus_dmamap_load failed, err %d\n",
+ __func__, slr.error);
+ return (0);
+ }
+ nsegs = slr.nsegs;
+ break;
+ case XR_TYPE_VIRT:
+ switch (xr->direction) {
+ case XDMA_MEM_TO_DEV:
+ addr = (void *)xr->src_addr;
+ break;
+ case XDMA_DEV_TO_MEM:
+ addr = (void *)xr->dst_addr;
+ break;
+ default:
+ device_printf(xdma->dma_dev,
+ "%s: Direction is not supported\n", __func__);
+ return (0);
+ }
+ slr.nsegs = 0;
+ slr.error = 0;
+ slr.seg = seg;
+ error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
+ addr, (xr->block_len * xr->block_num),
+ xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
+ if (slr.error != 0) {
+ device_printf(xdma->dma_dev,
+ "%s: bus_dmamap_load failed, err %d\n",
+ __func__, slr.error);
+ return (0);
+ }
+ nsegs = slr.nsegs;
+ break;
+ default:
+ break;
+ }
+
+ if (error != 0) {
+ if (error == ENOMEM) {
+ /*
+ * Out of memory. Try again later.
+ * TODO: count errors.
+ */
+ } else
+ device_printf(xdma->dma_dev,
+ "%s: bus_dmamap_load failed with err %d\n",
+ __func__, error);
+ return (0);
+ }
+
+ if (xr->direction == XDMA_MEM_TO_DEV)
+ bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
+ BUS_DMASYNC_PREWRITE);
+ else
+ bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
+ BUS_DMASYNC_PREREAD);
+
+ return (nsegs);
+}
+
+static int
+_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
+ struct bus_dma_segment *seg)
+{
+ xdma_controller_t *xdma;
+ struct mbuf *m;
+ uint32_t nsegs;
+
+ xdma = xchan->xdma;
+
+ m = xr->m;
+
+ nsegs = 1;
+
+ switch (xr->req_type) {
+ case XR_TYPE_MBUF:
+ if (xr->direction == XDMA_MEM_TO_DEV) {
+ m_copydata(m, 0, m->m_pkthdr.len, xr->buf.cbuf);
+ seg[0].ds_addr = (bus_addr_t)xr->buf.cbuf;
+ seg[0].ds_len = m->m_pkthdr.len;
+ } else {
+ seg[0].ds_addr = mtod(m, bus_addr_t);
+ seg[0].ds_len = m->m_pkthdr.len;
+ }
+ break;
+ case XR_TYPE_BIO:
+ case XR_TYPE_VIRT:
+ default:
+ panic("implement me\n");
+ }
+
+ return (nsegs);
+}
+
+static int
+xdma_load_data(xdma_channel_t *xchan,
+ struct xdma_request *xr, struct bus_dma_segment *seg)
+{
+ xdma_controller_t *xdma;
+ int error;
+ int nsegs;
+
+ xdma = xchan->xdma;
+
+ error = 0;
+ nsegs = 0;
+
+ if (xchan->caps & XCHAN_CAP_BUSDMA)
+ nsegs = _xdma_load_data_busdma(xchan, xr, seg);
+ else
+ nsegs = _xdma_load_data(xchan, xr, seg);
+ if (nsegs == 0)
+ return (0); /* Try again later. */
+
+ xr->buf.nsegs = nsegs;
+ xr->buf.nsegs_left = nsegs;
+
+ return (nsegs);
+}
+
+static int
+xdma_process(xdma_channel_t *xchan,
+ struct xdma_sglist *sg)
+{
+ struct bus_dma_segment seg[XDMA_MAX_SEG];
+ struct xdma_request *xr;
+ struct xdma_request *xr_tmp;
+ xdma_controller_t *xdma;
+ uint32_t capacity;
+ uint32_t n;
+ uint32_t c;
+ int nsegs;
+ int ret;
+
+ XCHAN_ASSERT_LOCKED(xchan);
+
+ xdma = xchan->xdma;
+
+ n = 0;
+
+ ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't get DMA controller capacity.\n", __func__);
+ return (-1);
+ }
+
+ TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
+ switch (xr->req_type) {
+ case XR_TYPE_MBUF:
+ c = xdma_mbuf_defrag(xchan, xr);
+ break;
+ case XR_TYPE_BIO:
+ case XR_TYPE_VIRT:
+ default:
+ c = 1;
+ }
+
+ if (capacity <= (c + n)) {
+ /*
+ * No space yet available for the entire
+ * request in the DMA engine.
+ */
+ break;
+ }
+
+ if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
+ /* Sglist is full. */
+ break;
+ }
+
+ nsegs = xdma_load_data(xchan, xr, seg);
+ if (nsegs == 0)
+ break;
+
+ xdma_sglist_add(&sg[n], seg, nsegs, xr);
+ n += nsegs;
+
+ QUEUE_IN_LOCK(xchan);
+ TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
+ QUEUE_IN_UNLOCK(xchan);
+
+ QUEUE_PROC_LOCK(xchan);
+ TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
+ QUEUE_PROC_UNLOCK(xchan);
+ }
+
+ return (n);
+}
+
+int
+xdma_queue_submit_sg(xdma_channel_t *xchan)
+{
+ struct xdma_sglist *sg;
+ xdma_controller_t *xdma;
+ uint32_t sg_n;
+ int ret;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ XCHAN_ASSERT_LOCKED(xchan);
+
+ sg = xchan->sg;
+
+ if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
+ device_printf(xdma->dev,
+ "%s: Can't submit a transfer: no bufs\n",
+ __func__);
+ return (-1);
+ }
+
+ sg_n = xdma_process(xchan, sg);
+ if (sg_n == 0)
+ return (0); /* Nothing to submit */
+
+ /* Now submit sglist to DMA engine driver. */
+ ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't submit an sglist.\n", __func__);
+ return (-1);
+ }
+
+ return (0);
+}
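
The engine-side contract implied here: report one xchan_seg_done() per retired segment, then call xdma_callback() so consumers can drain queue_out and the framework can refill from queue_in. A sketch, with the my_desc_* ring helpers and the my_chan layout assumed:

```c
static void
my_pdma_intr(void *arg)
{
	struct my_chan *chan;
	xdma_transfer_status_t st;

	chan = arg;

	/* One xchan_seg_done() per completed hardware descriptor. */
	while (my_desc_done(chan)) {
		st.error = 0;
		st.transferred = my_desc_len(chan);
		xchan_seg_done(chan->xchan, &st);
	}

	/*
	 * Let consumers reap queue_out; for SG channels the callback
	 * also re-submits anything pending on queue_in.
	 */
	st.error = 0;
	st.transferred = 0;
	xdma_callback(chan->xchan, &st);
}
```
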
Index: head/sys/dev/xdma/xdma_sglist.c
===================================================================
--- head/sys/dev/xdma/xdma_sglist.c
+++ head/sys/dev/xdma/xdma_sglist.c
@@ -0,0 +1,100 @@
+/*-
+ * Copyright (c) 2017-2018 Ruslan Bukin
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sx.h>
+
+#include <dev/xdma/xdma.h>
+
+int
+xchan_sglist_alloc(xdma_channel_t *xchan)
+{
+ uint32_t sz;
+
+ if (xchan->flags & XCHAN_SGLIST_ALLOCATED)
+ return (-1);
+
+ sz = (sizeof(struct xdma_sglist) * XDMA_SGLIST_MAXLEN);
+ xchan->sg = malloc(sz, M_XDMA, M_WAITOK | M_ZERO);
+ xchan->flags |= XCHAN_SGLIST_ALLOCATED;
+
+ return (0);
+}
+
+void
+xchan_sglist_free(xdma_channel_t *xchan)
+{
+
+ if (xchan->flags & XCHAN_SGLIST_ALLOCATED)
+ free(xchan->sg, M_XDMA);
+
+ xchan->flags &= ~XCHAN_SGLIST_ALLOCATED;
+}
+
+int
+xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
+ uint32_t nsegs, struct xdma_request *xr)
+{
+ int i;
+
+ if (nsegs == 0)
+ return (-1);
+
+ for (i = 0; i < nsegs; i++) {
+ sg[i].src_width = xr->src_width;
+ sg[i].dst_width = xr->dst_width;
+
+ if (xr->direction == XDMA_MEM_TO_DEV) {
+ sg[i].src_addr = seg[i].ds_addr;
+ sg[i].dst_addr = xr->dst_addr;
+ } else {
+ sg[i].src_addr = xr->src_addr;
+ sg[i].dst_addr = seg[i].ds_addr;
+ }
+ sg[i].len = seg[i].ds_len;
+ sg[i].direction = xr->direction;
+
+ sg[i].first = 0;
+ sg[i].last = 0;
+ }
+
+ sg[0].first = 1;
+ sg[nsegs - 1].last = 1;
+
+ return (0);
+}
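
An engine driver walks this array in its channel_submit_sg method; first/last bracket each request, so `last` is the natural place to arm a completion interrupt. A sketch (my_write_desc() is assumed):

```c
static int
my_pdma_channel_submit_sg(device_t dev, xdma_channel_t *xchan,
    struct xdma_sglist *sg, uint32_t sg_n)
{
	uint32_t i;

	for (i = 0; i < sg_n; i++)
		/* Interrupt on the last segment of each request. */
		my_write_desc(xchan->chan, sg[i].src_addr,
		    sg[i].dst_addr, sg[i].len, sg[i].last);

	return (0);
}
```
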
Index: head/sys/mips/ingenic/jz4780_aic.c
===================================================================
--- head/sys/mips/ingenic/jz4780_aic.c
+++ head/sys/mips/ingenic/jz4780_aic.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2016 Ruslan Bukin
+ * Copyright (c) 2016-2018 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
@@ -79,9 +79,12 @@
clk_t clk_i2s;
struct aic_rate *sr;
void *ih;
+ int internal_codec;
+
+ /* xDMA */
struct xdma_channel *xchan;
xdma_controller_t *xdma_tx;
- int internal_codec;
+ struct xdma_request req;
};
/* Channel registers */
@@ -288,25 +291,25 @@
}
static int
-aic_intr(void *arg)
+aic_intr(void *arg, xdma_transfer_status_t *status)
{
struct sc_pcminfo *scp;
+ struct xdma_request *req;
xdma_channel_t *xchan;
struct sc_chinfo *ch;
struct aic_softc *sc;
- xdma_config_t *conf;
int bufsize;
scp = arg;
sc = scp->sc;
ch = &scp->chan[0];
+ req = &sc->req;
xchan = sc->xchan;
- conf = &xchan->conf;
bufsize = sndbuf_getsize(ch->buffer);
- sc->pos += conf->block_len;
+ sc->pos += req->block_len;
if (sc->pos >= bufsize)
sc->pos -= bufsize;
@@ -331,20 +334,23 @@
KASSERT(fmt & AFMT_16BIT, ("16-bit audio supported only."));
- err = xdma_prep_cyclic(sc->xchan,
- XDMA_MEM_TO_DEV, /* direction */
- sc->buf_base_phys, /* src addr */
- sc->aic_fifo_paddr, /* dst addr */
- sndbuf_getblksz(ch->buffer), /* block len */
- sndbuf_getblkcnt(ch->buffer), /* block num */
- 2, /* src port width */
- 2); /* dst port width */
+ sc->req.operation = XDMA_CYCLIC;
+ sc->req.req_type = XR_TYPE_PHYS;
+ sc->req.direction = XDMA_MEM_TO_DEV;
+ sc->req.src_addr = sc->buf_base_phys;
+ sc->req.dst_addr = sc->aic_fifo_paddr;
+ sc->req.src_width = 2;
+ sc->req.dst_width = 2;
+ sc->req.block_len = sndbuf_getblksz(ch->buffer);
+ sc->req.block_num = sndbuf_getblkcnt(ch->buffer);
+
+ err = xdma_request(sc->xchan, &sc->req);
if (err != 0) {
device_printf(sc->dev, "Can't configure virtual channel\n");
return (-1);
}
- xdma_begin(sc->xchan);
+ xdma_control(sc->xchan, XDMA_CMD_BEGIN);
return (0);
}
@@ -385,7 +391,7 @@
reg &= ~(AICCR_TDMS | AICCR_ERPL);
WRITE4(sc, AICCR, reg);
- xdma_terminate(sc->xchan);
+ xdma_control(sc->xchan, XDMA_CMD_TERMINATE);
return (0);
}
@@ -686,7 +692,7 @@
}
/* Alloc xDMA virtual channel. */
- sc->xchan = xdma_channel_alloc(sc->xdma_tx);
+ sc->xchan = xdma_channel_alloc(sc->xdma_tx, 0);
if (sc->xchan == NULL) {
device_printf(dev, "Can't alloc virtual DMA channel.\n");
return (ENXIO);
Index: head/sys/mips/ingenic/jz4780_pdma.h
===================================================================
--- head/sys/mips/ingenic/jz4780_pdma.h
+++ head/sys/mips/ingenic/jz4780_pdma.h
@@ -37,6 +37,7 @@
#define PDMA_DRT(n) (0x0C + 0x20 * n) /* Channel n Request Source */
#define DRT_AUTO (1 << 3) /* Auto-request. */
#define PDMA_DCS(n) (0x10 + 0x20 * n) /* Channel n Control/Status */
+#define DCS_NDES (1 << 31) /* Non-descriptor mode. */
#define DCS_DES8 (1 << 30) /* Descriptor 8 Word. */
#define DCS_AR (1 << 4) /* Address Error. */
#define DCS_TT (1 << 3) /* Transfer Terminate. */
@@ -45,21 +46,19 @@
#define PDMA_DCM(n) (0x14 + 0x20 * n) /* Channel n Command */
#define DCM_SAI (1 << 23) /* Source Address Increment. */
#define DCM_DAI (1 << 22) /* Destination Address Increment. */
-
#define DCM_SP_S 14 /* Source port width. */
#define DCM_SP_M (0x3 << DCM_SP_S)
#define DCM_SP_1 (0x1 << DCM_SP_S) /* 1 byte */
#define DCM_SP_2 (0x2 << DCM_SP_S) /* 2 bytes */
#define DCM_SP_4 (0x0 << DCM_SP_S) /* 4 bytes */
-
#define DCM_DP_S 12 /* Destination port width. */
#define DCM_DP_M (0x3 << DCM_DP_S)
#define DCM_DP_1 (0x1 << DCM_DP_S) /* 1 byte */
#define DCM_DP_2 (0x2 << DCM_DP_S) /* 2 bytes */
#define DCM_DP_4 (0x0 << DCM_DP_S) /* 4 bytes */
-
#define DCM_TSZ_S 8 /* Transfer Data Size of a data unit. */
#define DCM_TSZ_M (0x7 << DCM_TSZ_S)
+#define DCM_TSZ_A (0x7 << DCM_TSZ_S) /* Autonomy (size picked by controller). */
#define DCM_TSZ_1 (0x1 << DCM_TSZ_S)
#define DCM_TSZ_2 (0x2 << DCM_TSZ_S)
#define DCM_TSZ_4 (0x0 << DCM_TSZ_S)
@@ -105,3 +104,6 @@
uint32_t drt; /* DMA Request Type */
uint32_t reserved[2];
};
+
+#define CHAN_DESC_COUNT 4096
+#define CHAN_DESC_SIZE (sizeof(struct pdma_hwdesc) * CHAN_DESC_COUNT)
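
For scale, and assuming the 8-word (DES8) descriptor layout programmed by chan_start() below: sizeof(struct pdma_hwdesc) is 8 * 4 = 32 bytes, so each channel ring is 4096 * 32 = 131072 bytes (128 KiB), and all 32 channels together consume 4 MiB. A compile-time check of that size assumption could look like:

/* Illustrative sanity check; not part of the patch. */
CTASSERT(sizeof(struct pdma_hwdesc) == 32);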
Index: head/sys/mips/ingenic/jz4780_pdma.c
===================================================================
--- head/sys/mips/ingenic/jz4780_pdma.c
+++ head/sys/mips/ingenic/jz4780_pdma.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2016 Ruslan Bukin
+ * Copyright (c) 2016-2018 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
@@ -61,6 +61,17 @@
#include "xdma_if.h"
+#define PDMA_DEBUG
+#undef PDMA_DEBUG
+
+#ifdef PDMA_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+#define PDMA_DESC_RING_ALIGN 2048
+
struct pdma_softc {
device_t dev;
struct resource *res[2];
@@ -76,13 +87,22 @@
};
struct pdma_channel {
- xdma_channel_t *xchan;
struct pdma_fdt_data data;
int cur_desc;
int used;
int index;
int flags;
#define CHAN_DESCR_RELINK (1 << 0)
+
+ /* Descriptors */
+ bus_dma_tag_t desc_tag;
+ bus_dmamap_t desc_map;
+ struct pdma_hwdesc *desc_ring;
+ bus_addr_t desc_ring_paddr;
+
+ /* xDMA */
+ xdma_channel_t *xchan;
+ struct xdma_request *req;
};
#define PDMA_NCHANNELS 32
@@ -102,10 +122,11 @@
static void
pdma_intr(void *arg)
{
+ struct xdma_request *req;
+ xdma_transfer_status_t status;
struct pdma_channel *chan;
struct pdma_softc *sc;
xdma_channel_t *xchan;
- xdma_config_t *conf;
int pending;
int i;
@@ -120,7 +141,7 @@
if (pending & (1 << i)) {
chan = &pdma_channels[i];
xchan = chan->xchan;
- conf = &xchan->conf;
+ req = chan->req;
/* TODO: check for AR, HLT error bits here. */
@@ -130,11 +151,12 @@
if (chan->flags & CHAN_DESCR_RELINK) {
/* Enable again */
chan->cur_desc = (chan->cur_desc + 1) % \
- conf->block_num;
+ req->block_num;
chan_start(sc, chan);
}
- xdma_callback(chan->xchan);
+				status.error = 0;
+				status.transferred = 0;
+ xdma_callback(chan->xchan, &status);
}
}
}
@@ -217,7 +239,9 @@
/* 8-word (32-byte) descriptor. */
WRITE4(sc, PDMA_DCS(chan->index), DCS_DES8);
- WRITE4(sc, PDMA_DDA(chan->index), xchan->descs_phys[chan->cur_desc].ds_addr);
+	WRITE4(sc, PDMA_DDA(chan->index),
+	    chan->desc_ring_paddr + sizeof(struct pdma_hwdesc) * chan->cur_desc);
+
WRITE4(sc, PDMA_DDS, (1 << chan->index));
/* Channel transfer enable. */
@@ -249,7 +273,65 @@
return (0);
}
+/* busdma load callback: record the single segment's physical address. */
+static void
+pdma_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+
+ if (error != 0)
+ return;
+ *(bus_addr_t *)arg = segs[0].ds_addr;
+}
+
static int
+pdma_channel_setup_descriptors(device_t dev, struct pdma_channel *chan)
+{
+ struct pdma_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+
+	/*
+	 * Set up the channel's descriptor ring, descriptors, and DMA maps.
+	 */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* Parent tag. */
+ PDMA_DESC_RING_ALIGN, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ CHAN_DESC_SIZE, 1, /* maxsize, nsegments */
+ CHAN_DESC_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &chan->desc_tag);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "could not create TX ring DMA tag.\n");
+ return (-1);
+ }
+
+ error = bus_dmamem_alloc(chan->desc_tag, (void**)&chan->desc_ring,
+ BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
+ &chan->desc_map);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "could not allocate TX descriptor ring.\n");
+ return (-1);
+ }
+
+ error = bus_dmamap_load(chan->desc_tag, chan->desc_map,
+	    chan->desc_ring, CHAN_DESC_SIZE, pdma_get1paddr,
+ &chan->desc_ring_paddr, 0);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "could not load TX descriptor ring map.\n");
+ return (-1);
+ }
+
+ return (0);
+}
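
No teardown path for these per-channel rings is added in this patch; a hedged sketch of the matching cleanup (hypothetical helper using the standard busdma(9) unload/free/destroy sequence) that pdma_channel_free() could call:

static void
pdma_channel_free_descriptors(struct pdma_channel *chan)
{

	bus_dmamap_unload(chan->desc_tag, chan->desc_map);
	bus_dmamem_free(chan->desc_tag, chan->desc_ring, chan->desc_map);
	bus_dma_tag_destroy(chan->desc_tag);
}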
+
+static int
pdma_channel_alloc(device_t dev, struct xdma_channel *xchan)
{
struct pdma_channel *chan;
@@ -258,8 +340,6 @@
sc = device_get_softc(dev);
- xdma_assert_locked();
-
for (i = 0; i < PDMA_NCHANNELS; i++) {
chan = &pdma_channels[i];
if (chan->used == 0) {
@@ -268,6 +348,8 @@
chan->used = 1;
chan->index = i;
+ pdma_channel_setup_descriptors(dev, chan);
+
return (0);
}
}
@@ -283,8 +365,6 @@
sc = device_get_softc(dev);
- xdma_assert_locked();
-
chan = (struct pdma_channel *)xchan->chan;
chan->used = 0;
@@ -292,50 +372,13 @@
}
static int
-pdma_channel_prep_memcpy(device_t dev, struct xdma_channel *xchan)
+access_width(struct xdma_request *req, uint32_t *dcm, uint32_t *max_width)
{
- struct pdma_channel *chan;
- struct pdma_hwdesc *desc;
- struct pdma_softc *sc;
- xdma_config_t *conf;
- int ret;
- sc = device_get_softc(dev);
-
- chan = (struct pdma_channel *)xchan->chan;
- /* Ensure we are not in operation */
- chan_stop(sc, chan);
-
- ret = xdma_desc_alloc(xchan, sizeof(struct pdma_hwdesc), 8);
- if (ret != 0) {
- device_printf(sc->dev,
- "%s: Can't allocate descriptors.\n", __func__);
- return (-1);
- }
-
- conf = &xchan->conf;
- desc = (struct pdma_hwdesc *)xchan->descs;
- desc[0].dsa = conf->src_addr;
- desc[0].dta = conf->dst_addr;
- desc[0].drt = DRT_AUTO;
- desc[0].dcm = DCM_SAI | DCM_DAI;
-
- /* 4 byte copy for now. */
- desc[0].dtc = (conf->block_len / 4);
- desc[0].dcm |= DCM_SP_4 | DCM_DP_4 | DCM_TSZ_4;
- desc[0].dcm |= DCM_TIE;
-
- return (0);
-}
-
-static int
-access_width(xdma_config_t *conf, uint32_t *dcm, uint32_t *max_width)
-{
-
*dcm = 0;
- *max_width = max(conf->src_width, conf->dst_width);
+ *max_width = max(req->src_width, req->dst_width);
- switch (conf->src_width) {
+ switch (req->src_width) {
case 1:
*dcm |= DCM_SP_1;
break;
@@ -349,7 +392,7 @@
return (-1);
}
- switch (conf->dst_width) {
+ switch (req->dst_width) {
case 1:
*dcm |= DCM_DP_1;
break;
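
A worked instance of the width mapping, for reference (values follow from the DCM_SP_*/DCM_DP_* encodings in jz4780_pdma.h):

/*
 * Illustrative: for req->src_width == 2 and req->dst_width == 4,
 * access_width() yields *dcm == (DCM_SP_2 | DCM_DP_4), i.e.
 * (0x2 << 14) | (0x0 << 12), and *max_width == 4. Note that
 * DCM_SP_4/DCM_DP_4 encode as 0x0, so a 4-byte port contributes no
 * bits; the caller then programs desc->dtc = block_len / 4.
 */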
@@ -381,69 +424,68 @@
}
static int
-pdma_channel_prep_cyclic(device_t dev, struct xdma_channel *xchan)
+pdma_channel_request(device_t dev, struct xdma_channel *xchan,
+    struct xdma_request *req)
{
struct pdma_fdt_data *data;
struct pdma_channel *chan;
struct pdma_hwdesc *desc;
xdma_controller_t *xdma;
struct pdma_softc *sc;
- xdma_config_t *conf;
int max_width;
uint32_t reg;
uint32_t dcm;
- int ret;
int i;
sc = device_get_softc(dev);
- conf = &xchan->conf;
+ dprintf("%s: block_len %d block_num %d\n",
+ __func__, req->block_len, req->block_num);
+
xdma = xchan->xdma;
data = (struct pdma_fdt_data *)xdma->data;
- ret = xdma_desc_alloc(xchan, sizeof(struct pdma_hwdesc), 8);
- if (ret != 0) {
- device_printf(sc->dev,
- "%s: Can't allocate descriptors.\n", __func__);
- return (-1);
- }
-
chan = (struct pdma_channel *)xchan->chan;
/* Ensure we are not in operation */
chan_stop(sc, chan);
- chan->flags = CHAN_DESCR_RELINK;
+	if (req->operation == XDMA_CYCLIC)
+		chan->flags = CHAN_DESCR_RELINK;
+	else
+		chan->flags = 0;
chan->cur_desc = 0;
+ chan->req = req;
+
+	if (req->block_num > CHAN_DESC_COUNT)
+		return (-1);
+
+	for (i = 0; i < req->block_num; i++) {
+ desc = &chan->desc_ring[i];
- desc = (struct pdma_hwdesc *)xchan->descs;
-
- for (i = 0; i < conf->block_num; i++) {
- if (conf->direction == XDMA_MEM_TO_DEV) {
- desc[i].dsa = conf->src_addr + (i * conf->block_len);
- desc[i].dta = conf->dst_addr;
- desc[i].drt = data->tx;
- desc[i].dcm = DCM_SAI;
- } else if (conf->direction == XDMA_DEV_TO_MEM) {
- desc[i].dsa = conf->src_addr;
- desc[i].dta = conf->dst_addr + (i * conf->block_len);
- desc[i].drt = data->rx;
- desc[i].dcm = DCM_DAI;
- } else if (conf->direction == XDMA_MEM_TO_MEM) {
- desc[i].dsa = conf->src_addr + (i * conf->block_len);
- desc[i].dta = conf->dst_addr + (i * conf->block_len);
- desc[i].drt = DRT_AUTO;
- desc[i].dcm = DCM_SAI | DCM_DAI;
+ if (req->direction == XDMA_MEM_TO_DEV) {
+ desc->dsa = req->src_addr + (i * req->block_len);
+ desc->dta = req->dst_addr;
+ desc->drt = data->tx;
+ desc->dcm = DCM_SAI;
+ } else if (req->direction == XDMA_DEV_TO_MEM) {
+ desc->dsa = req->src_addr;
+ desc->dta = req->dst_addr + (i * req->block_len);
+ desc->drt = data->rx;
+ desc->dcm = DCM_DAI;
+ } else if (req->direction == XDMA_MEM_TO_MEM) {
+ desc->dsa = req->src_addr + (i * req->block_len);
+ desc->dta = req->dst_addr + (i * req->block_len);
+ desc->drt = DRT_AUTO;
+ desc->dcm = DCM_SAI | DCM_DAI;
}
- if (access_width(conf, &dcm, &max_width) != 0) {
+ if (access_width(req, &dcm, &max_width) != 0) {
device_printf(dev,
"%s: can't configure access width\n", __func__);
return (-1);
}
- desc[i].dcm |= dcm | DCM_TIE;
- desc[i].dtc = (conf->block_len / max_width);
+ desc->dcm |= dcm | DCM_TIE;
+ desc->dtc = (req->block_len / max_width);
/*
+	 * TODO: bus_dmamap_sync() pre-read/pre-write here.
+ */
+
+ /*
* PDMA does not provide interrupt after processing each descriptor,
* but after processing all the chain only.
* As a workaround we do unlink descriptors here, so our chain will
@@ -451,10 +493,10 @@
* on each interrupt again.
*/
if ((chan->flags & CHAN_DESCR_RELINK) == 0) {
- if (i != (conf->block_num - 1)) {
- desc[i].dcm |= DCM_LINK;
+ if (i != (req->block_num - 1)) {
+ desc->dcm |= DCM_LINK;
reg = ((i + 1) * sizeof(struct pdma_hwdesc));
- desc[i].dtc |= (reg >> 4) << 24;
+ desc->dtc |= (reg >> 4) << 24;
}
}
}
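
The descriptor-offset arithmetic in the unlinked case above, worked through for the first descriptor (field placement taken from this code, which stores the offset in 16-byte units in DTC bits 31:24):

/*
 * Illustrative: for i == 0, the next descriptor sits at byte offset
 *   (0 + 1) * sizeof(struct pdma_hwdesc) == 32,
 * so the code stores (32 >> 4) << 24 == 2 << 24 into desc->dtc.
 */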
@@ -522,8 +564,7 @@
/* xDMA Interface */
DEVMETHOD(xdma_channel_alloc, pdma_channel_alloc),
DEVMETHOD(xdma_channel_free, pdma_channel_free),
- DEVMETHOD(xdma_channel_prep_cyclic, pdma_channel_prep_cyclic),
- DEVMETHOD(xdma_channel_prep_memcpy, pdma_channel_prep_memcpy),
+ DEVMETHOD(xdma_channel_request, pdma_channel_request),
DEVMETHOD(xdma_channel_control, pdma_channel_control),
#ifdef FDT
DEVMETHOD(xdma_ofw_md_data, pdma_ofw_md_data),
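
With prep_cyclic/prep_memcpy collapsed into a single entry point, the xdma core presumably reaches the driver through the kobj wrapper generated from xdma_if.m; a hedged sketch of that core-side dispatch (wrapper name inferred from the DEVMETHOD above):

/* Sketch only: hand a consumer's request to the real DMA driver. */
ret = XDMA_CHANNEL_REQUEST(xdma->dma_dev, xchan, req);
if (ret != 0) {
	device_printf(xdma->dev, "%s: request failed.\n", __func__);
	return (-1);
}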