Index: stable/12/sys/dev/xdma/xdma.c
===================================================================
--- stable/12/sys/dev/xdma/xdma.c (revision 348622)
+++ stable/12/sys/dev/xdma/xdma.c (revision 348623)
@@ -1,406 +1,497 @@
/*-
- * Copyright (c) 2016-2018 Ruslan Bukin
- * All rights reserved.
+ * SPDX-License-Identifier: BSD-2-Clause
*
+ * Copyright (c) 2016-2019 Ruslan Bukin
+ *
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
-#include <sys/sx.h>
#include
#ifdef FDT
#include
#include
#include
#endif
#include
#include
/*
* Multiple xDMA controllers may work with single DMA device,
* so we have global lock for physical channel management.
*/
-static struct sx xdma_sx;
+static struct mtx xdma_mtx;
-#define XDMA_LOCK() sx_xlock(&xdma_sx)
-#define XDMA_UNLOCK() sx_xunlock(&xdma_sx)
-#define XDMA_ASSERT_LOCKED() sx_xassert(&xdma_sx, MA_OWNED)
+#define XDMA_LOCK() mtx_lock(&xdma_mtx)
+#define XDMA_UNLOCK() mtx_unlock(&xdma_mtx)
+#define XDMA_ASSERT_LOCKED() mtx_assert(&xdma_mtx, MA_OWNED)
+#define FDT_REG_CELLS 4
+
/*
* Allocate virtual xDMA channel.
*/
xdma_channel_t *
xdma_channel_alloc(xdma_controller_t *xdma, uint32_t caps)
{
xdma_channel_t *xchan;
int ret;
xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
xchan->xdma = xdma;
xchan->caps = caps;
XDMA_LOCK();
/* Request a real channel from hardware driver. */
ret = XDMA_CHANNEL_ALLOC(xdma->dma_dev, xchan);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't request hardware channel.\n", __func__);
XDMA_UNLOCK();
free(xchan, M_XDMA);
return (NULL);
}
TAILQ_INIT(&xchan->ie_handlers);
- sx_init(&xchan->sx_lock, "xDMA chan");
- sx_init(&xchan->sx_qin_lock, "xDMA qin");
- sx_init(&xchan->sx_qout_lock, "xDMA qout");
- sx_init(&xchan->sx_bank_lock, "xDMA bank");
- sx_init(&xchan->sx_proc_lock, "xDMA proc");
+ mtx_init(&xchan->mtx_lock, "xDMA chan", NULL, MTX_DEF);
+ mtx_init(&xchan->mtx_qin_lock, "xDMA qin", NULL, MTX_DEF);
+ mtx_init(&xchan->mtx_qout_lock, "xDMA qout", NULL, MTX_DEF);
+ mtx_init(&xchan->mtx_bank_lock, "xDMA bank", NULL, MTX_DEF);
+ mtx_init(&xchan->mtx_proc_lock, "xDMA proc", NULL, MTX_DEF);
TAILQ_INIT(&xchan->bank);
TAILQ_INIT(&xchan->queue_in);
TAILQ_INIT(&xchan->queue_out);
TAILQ_INIT(&xchan->processing);
TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);
XDMA_UNLOCK();
return (xchan);
}
int
xdma_channel_free(xdma_channel_t *xchan)
{
xdma_controller_t *xdma;
int err;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
XDMA_LOCK();
/* Free the real DMA channel. */
err = XDMA_CHANNEL_FREE(xdma->dma_dev, xchan);
if (err != 0) {
device_printf(xdma->dev,
"%s: Can't free real hw channel.\n", __func__);
XDMA_UNLOCK();
return (-1);
}
if (xchan->flags & XCHAN_TYPE_SG)
xdma_channel_free_sg(xchan);
xdma_teardown_all_intr(xchan);
- sx_destroy(&xchan->sx_lock);
- sx_destroy(&xchan->sx_qin_lock);
- sx_destroy(&xchan->sx_qout_lock);
- sx_destroy(&xchan->sx_bank_lock);
- sx_destroy(&xchan->sx_proc_lock);
+ mtx_destroy(&xchan->mtx_lock);
+ mtx_destroy(&xchan->mtx_qin_lock);
+ mtx_destroy(&xchan->mtx_qout_lock);
+ mtx_destroy(&xchan->mtx_bank_lock);
+ mtx_destroy(&xchan->mtx_proc_lock);
TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);
free(xchan, M_XDMA);
XDMA_UNLOCK();
return (0);
}
int
xdma_setup_intr(xdma_channel_t *xchan,
int (*cb)(void *, xdma_transfer_status_t *),
void *arg, void **ihandler)
{
struct xdma_intr_handler *ih;
xdma_controller_t *xdma;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
/* Sanity check. */
if (cb == NULL) {
device_printf(xdma->dev,
"%s: Can't setup interrupt handler.\n",
__func__);
return (-1);
}
ih = malloc(sizeof(struct xdma_intr_handler),
M_XDMA, M_WAITOK | M_ZERO);
ih->cb = cb;
ih->cb_user = arg;
XCHAN_LOCK(xchan);
TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);
XCHAN_UNLOCK(xchan);
if (ihandler != NULL)
*ihandler = ih;
return (0);
}
int
xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih)
{
xdma_controller_t *xdma;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
/* Sanity check. */
if (ih == NULL) {
device_printf(xdma->dev,
"%s: Can't teardown interrupt.\n", __func__);
return (-1);
}
TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
free(ih, M_XDMA);
return (0);
}
int
xdma_teardown_all_intr(xdma_channel_t *xchan)
{
struct xdma_intr_handler *ih_tmp;
struct xdma_intr_handler *ih;
xdma_controller_t *xdma;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
free(ih, M_XDMA);
}
return (0);
}
int
xdma_request(xdma_channel_t *xchan, struct xdma_request *req)
{
xdma_controller_t *xdma;
int ret;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
XCHAN_LOCK(xchan);
ret = XDMA_CHANNEL_REQUEST(xdma->dma_dev, xchan, req);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't request a transfer.\n", __func__);
XCHAN_UNLOCK(xchan);
return (-1);
}
XCHAN_UNLOCK(xchan);
return (0);
}
int
xdma_control(xdma_channel_t *xchan, enum xdma_command cmd)
{
xdma_controller_t *xdma;
int ret;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, cmd);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't process command.\n", __func__);
return (-1);
}
return (0);
}
void
xdma_callback(xdma_channel_t *xchan, xdma_transfer_status_t *status)
{
struct xdma_intr_handler *ih_tmp;
struct xdma_intr_handler *ih;
xdma_controller_t *xdma;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp)
if (ih->cb != NULL)
ih->cb(ih->cb_user, status);
if (xchan->flags & XCHAN_TYPE_SG)
xdma_queue_submit(xchan);
}
#ifdef FDT
/*
* Notify the DMA driver we have machine-dependent data in FDT.
*/
static int
xdma_ofw_md_data(xdma_controller_t *xdma, pcell_t *cells, int ncells)
{
uint32_t ret;
ret = XDMA_OFW_MD_DATA(xdma->dma_dev,
cells, ncells, (void **)&xdma->data);
return (ret);
}
+static int
+xdma_handle_mem_node(vmem_t *vmem, phandle_t memory)
+{
+ pcell_t reg[FDT_REG_CELLS * FDT_MEM_REGIONS];
+ pcell_t *regp;
+ int addr_cells, size_cells;
+ int i, reg_len, ret, tuple_size, tuples;
+ vmem_addr_t mem_start;
+ vmem_size_t mem_size;
+
+ if ((ret = fdt_addrsize_cells(OF_parent(memory), &addr_cells,
+ &size_cells)) != 0)
+ return (ret);
+
+ if (addr_cells > 2)
+ return (ERANGE);
+
+ tuple_size = sizeof(pcell_t) * (addr_cells + size_cells);
+ reg_len = OF_getproplen(memory, "reg");
+ if (reg_len <= 0 || reg_len > sizeof(reg))
+ return (ERANGE);
+
+ if (OF_getprop(memory, "reg", reg, reg_len) <= 0)
+ return (ENXIO);
+
+ tuples = reg_len / tuple_size;
+ regp = (pcell_t *)&reg;
+ for (i = 0; i < tuples; i++) {
+ ret = fdt_data_to_res(regp, addr_cells, size_cells,
+ &mem_start, &mem_size);
+ if (ret != 0)
+ return (ret);
+
+ vmem_add(vmem, mem_start, mem_size, 0);
+ regp += addr_cells + size_cells;
+ }
+
+ return (0);
+}
+
+vmem_t *
+xdma_get_memory(device_t dev)
+{
+ phandle_t mem_node, node;
+ pcell_t mem_handle;
+ vmem_t *vmem;
+
+ node = ofw_bus_get_node(dev);
+ if (node <= 0) {
+ device_printf(dev,
+ "%s called on not ofw based device.\n", __func__);
+ return (NULL);
+ }
+
+ if (!OF_hasprop(node, "memory-region"))
+ return (NULL);
+
+ if (OF_getencprop(node, "memory-region", (void *)&mem_handle,
+ sizeof(mem_handle)) <= 0)
+ return (NULL);
+
+ vmem = vmem_create("xDMA vmem", 0, 0, PAGE_SIZE,
+ PAGE_SIZE, M_BESTFIT | M_WAITOK);
+ if (vmem == NULL)
+ return (NULL);
+
+ mem_node = OF_node_from_xref(mem_handle);
+ if (xdma_handle_mem_node(vmem, mem_node) != 0) {
+ vmem_destroy(vmem);
+ return (NULL);
+ }
+
+ return (vmem);
+}
+
+void
+xdma_put_memory(vmem_t *vmem)
+{
+
+ vmem_destroy(vmem);
+}
+
+void
+xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem)
+{
+
+ xchan->vmem = vmem;
+}
+
/*
* Allocate xdma controller.
*/
xdma_controller_t *
xdma_ofw_get(device_t dev, const char *prop)
{
phandle_t node, parent;
xdma_controller_t *xdma;
device_t dma_dev;
pcell_t *cells;
int ncells;
int error;
int ndmas;
int idx;
node = ofw_bus_get_node(dev);
if (node <= 0)
device_printf(dev,
"%s called on not ofw based device.\n", __func__);
error = ofw_bus_parse_xref_list_get_length(node,
"dmas", "#dma-cells", &ndmas);
if (error) {
device_printf(dev,
"%s can't get dmas list.\n", __func__);
return (NULL);
}
if (ndmas == 0) {
device_printf(dev,
"%s dmas list is empty.\n", __func__);
return (NULL);
}
error = ofw_bus_find_string_index(node, "dma-names", prop, &idx);
if (error != 0) {
device_printf(dev,
"%s can't find string index.\n", __func__);
return (NULL);
}
error = ofw_bus_parse_xref_list_alloc(node, "dmas", "#dma-cells",
idx, &parent, &ncells, &cells);
if (error != 0) {
device_printf(dev,
"%s can't get dma device xref.\n", __func__);
return (NULL);
}
dma_dev = OF_device_from_xref(parent);
if (dma_dev == NULL) {
device_printf(dev,
"%s can't get dma device.\n", __func__);
return (NULL);
}
xdma = malloc(sizeof(struct xdma_controller),
M_XDMA, M_WAITOK | M_ZERO);
xdma->dev = dev;
xdma->dma_dev = dma_dev;
TAILQ_INIT(&xdma->channels);
xdma_ofw_md_data(xdma, cells, ncells);
free(cells, M_OFWPROP);
return (xdma);
}
#endif
/*
* Free xDMA controller object.
*/
int
xdma_put(xdma_controller_t *xdma)
{
XDMA_LOCK();
/* Ensure no channels allocated. */
if (!TAILQ_EMPTY(&xdma->channels)) {
device_printf(xdma->dev, "%s: Can't free xDMA\n", __func__);
return (-1);
}
free(xdma->data, M_DEVBUF);
free(xdma, M_XDMA);
XDMA_UNLOCK();
return (0);
}
static void
xdma_init(void)
{
- sx_init(&xdma_sx, "xDMA");
+ mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
}
SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);
Index: stable/12/sys/dev/xdma/xdma.h
===================================================================
--- stable/12/sys/dev/xdma/xdma.h (revision 348622)
+++ stable/12/sys/dev/xdma/xdma.h (revision 348623)
@@ -1,264 +1,274 @@
/*-
- * Copyright (c) 2016-2018 Ruslan Bukin
- * All rights reserved.
+ * SPDX-License-Identifier: BSD-2-Clause
*
+ * Copyright (c) 2016-2019 Ruslan Bukin
+ *
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _DEV_XDMA_XDMA_H_
#define _DEV_XDMA_XDMA_H_
#include
+#include <sys/vmem.h>
enum xdma_direction {
XDMA_MEM_TO_MEM,
XDMA_MEM_TO_DEV,
XDMA_DEV_TO_MEM,
XDMA_DEV_TO_DEV,
};
enum xdma_operation_type {
XDMA_MEMCPY,
XDMA_CYCLIC,
XDMA_FIFO,
XDMA_SG,
};
enum xdma_request_type {
XR_TYPE_PHYS,
XR_TYPE_VIRT,
XR_TYPE_MBUF,
XR_TYPE_BIO,
};
enum xdma_command {
XDMA_CMD_BEGIN,
XDMA_CMD_PAUSE,
XDMA_CMD_TERMINATE,
};
struct xdma_transfer_status {
uint32_t transferred;
int error;
};
typedef struct xdma_transfer_status xdma_transfer_status_t;
struct xdma_controller {
device_t dev; /* DMA consumer device_t. */
device_t dma_dev; /* A real DMA device_t. */
void *data; /* OFW MD part. */
+ vmem_t *vmem; /* Bounce memory. */
/* List of virtual channels allocated. */
TAILQ_HEAD(xdma_channel_list, xdma_channel) channels;
};
typedef struct xdma_controller xdma_controller_t;
struct xchan_buf {
bus_dmamap_t map;
uint32_t nsegs;
uint32_t nsegs_left;
+ vm_offset_t vaddr;
+ vm_offset_t paddr;
+ vm_size_t size;
};
struct xdma_request {
struct mbuf *m;
struct bio *bp;
enum xdma_operation_type operation;
enum xdma_request_type req_type;
enum xdma_direction direction;
bus_addr_t src_addr;
bus_addr_t dst_addr;
uint8_t src_width;
uint8_t dst_width;
bus_size_t block_num;
bus_size_t block_len;
xdma_transfer_status_t status;
void *user;
TAILQ_ENTRY(xdma_request) xr_next;
struct xchan_buf buf;
};
struct xdma_sglist {
bus_addr_t src_addr;
bus_addr_t dst_addr;
size_t len;
uint8_t src_width;
uint8_t dst_width;
enum xdma_direction direction;
bool first;
bool last;
};
struct xdma_channel {
xdma_controller_t *xdma;
+ vmem_t *vmem;
uint32_t flags;
#define XCHAN_BUFS_ALLOCATED (1 << 0)
#define XCHAN_SGLIST_ALLOCATED (1 << 1)
#define XCHAN_CONFIGURED (1 << 2)
#define XCHAN_TYPE_CYCLIC (1 << 3)
#define XCHAN_TYPE_MEMCPY (1 << 4)
#define XCHAN_TYPE_FIFO (1 << 5)
#define XCHAN_TYPE_SG (1 << 6)
uint32_t caps;
#define XCHAN_CAP_BUSDMA (1 << 0)
#define XCHAN_CAP_NOSEG (1 << 1)
#define XCHAN_CAP_NOBUFS (1 << 2)
/* A real hardware driver channel. */
void *chan;
/* Interrupt handlers. */
TAILQ_HEAD(, xdma_intr_handler) ie_handlers;
TAILQ_ENTRY(xdma_channel) xchan_next;
- struct sx sx_lock;
- struct sx sx_qin_lock;
- struct sx sx_qout_lock;
- struct sx sx_bank_lock;
- struct sx sx_proc_lock;
+ struct mtx mtx_lock;
+ struct mtx mtx_qin_lock;
+ struct mtx mtx_qout_lock;
+ struct mtx mtx_bank_lock;
+ struct mtx mtx_proc_lock;
/* Request queue. */
bus_dma_tag_t dma_tag_bufs;
struct xdma_request *xr_mem;
uint32_t xr_num;
/* Bus dma tag options. */
bus_size_t maxsegsize;
bus_size_t maxnsegs;
bus_size_t alignment;
bus_addr_t boundary;
bus_addr_t lowaddr;
bus_addr_t highaddr;
struct xdma_sglist *sg;
TAILQ_HEAD(, xdma_request) bank;
TAILQ_HEAD(, xdma_request) queue_in;
TAILQ_HEAD(, xdma_request) queue_out;
TAILQ_HEAD(, xdma_request) processing;
};
typedef struct xdma_channel xdma_channel_t;
struct xdma_intr_handler {
int (*cb)(void *cb_user, xdma_transfer_status_t *status);
void *cb_user;
TAILQ_ENTRY(xdma_intr_handler) ih_next;
};
static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
-#define XCHAN_LOCK(xchan) sx_xlock(&(xchan)->sx_lock)
-#define XCHAN_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_lock)
+#define XCHAN_LOCK(xchan) mtx_lock(&(xchan)->mtx_lock)
+#define XCHAN_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_lock)
#define XCHAN_ASSERT_LOCKED(xchan) \
- sx_assert(&(xchan)->sx_lock, SX_XLOCKED)
+ mtx_assert(&(xchan)->mtx_lock, MA_OWNED)
-#define QUEUE_IN_LOCK(xchan) sx_xlock(&(xchan)->sx_qin_lock)
-#define QUEUE_IN_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_qin_lock)
+#define QUEUE_IN_LOCK(xchan) mtx_lock(&(xchan)->mtx_qin_lock)
+#define QUEUE_IN_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_qin_lock)
#define QUEUE_IN_ASSERT_LOCKED(xchan) \
- sx_assert(&(xchan)->sx_qin_lock, SX_XLOCKED)
+ mtx_assert(&(xchan)->mtx_qin_lock, MA_OWNED)
-#define QUEUE_OUT_LOCK(xchan) sx_xlock(&(xchan)->sx_qout_lock)
-#define QUEUE_OUT_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_qout_lock)
+#define QUEUE_OUT_LOCK(xchan) mtx_lock(&(xchan)->mtx_qout_lock)
+#define QUEUE_OUT_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_qout_lock)
#define QUEUE_OUT_ASSERT_LOCKED(xchan) \
- sx_assert(&(xchan)->sx_qout_lock, SX_XLOCKED)
+ mtx_assert(&(xchan)->mtx_qout_lock, MA_OWNED)
-#define QUEUE_BANK_LOCK(xchan) sx_xlock(&(xchan)->sx_bank_lock)
-#define QUEUE_BANK_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_bank_lock)
+#define QUEUE_BANK_LOCK(xchan) mtx_lock(&(xchan)->mtx_bank_lock)
+#define QUEUE_BANK_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_bank_lock)
#define QUEUE_BANK_ASSERT_LOCKED(xchan) \
- sx_assert(&(xchan)->sx_bank_lock, SX_XLOCKED)
+ mtx_assert(&(xchan)->mtx_bank_lock, MA_OWNED)
-#define QUEUE_PROC_LOCK(xchan) sx_xlock(&(xchan)->sx_proc_lock)
-#define QUEUE_PROC_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_proc_lock)
+#define QUEUE_PROC_LOCK(xchan) mtx_lock(&(xchan)->mtx_proc_lock)
+#define QUEUE_PROC_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_proc_lock)
#define QUEUE_PROC_ASSERT_LOCKED(xchan) \
- sx_assert(&(xchan)->sx_proc_lock, SX_XLOCKED)
+ mtx_assert(&(xchan)->mtx_proc_lock, MA_OWNED)
#define XDMA_SGLIST_MAXLEN 2048
#define XDMA_MAX_SEG 128
/* xDMA controller ops */
xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
int xdma_put(xdma_controller_t *xdma);
+vmem_t * xdma_get_memory(device_t dev);
+void xdma_put_memory(vmem_t *vmem);
/* xDMA channel ops */
xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
int xdma_channel_free(xdma_channel_t *);
int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);
+void xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem);
/* SG interface */
int xdma_prep_sg(xdma_channel_t *, uint32_t,
bus_size_t, bus_size_t, bus_size_t, bus_addr_t, bus_addr_t, bus_addr_t);
void xdma_channel_free_sg(xdma_channel_t *xchan);
int xdma_queue_submit_sg(xdma_channel_t *xchan);
void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);
/* Queue operations */
int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
xdma_transfer_status_t *);
int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
xdma_transfer_status_t *status);
int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue(xdma_channel_t *xchan, void **user,
xdma_transfer_status_t *status);
int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
uint8_t, uint8_t, bus_size_t, enum xdma_direction dir, void *);
int xdma_queue_submit(xdma_channel_t *xchan);
/* Mbuf operations */
uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
uint32_t xdma_mbuf_chain_count(struct mbuf *m0);
/* Channel Control */
int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);
/* Interrupt callback */
int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *,
xdma_transfer_status_t *), void *arg, void **);
int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
int xdma_teardown_all_intr(xdma_channel_t *xchan);
void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);
/* Sglist */
int xchan_sglist_alloc(xdma_channel_t *xchan);
void xchan_sglist_free(xdma_channel_t *xchan);
int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
uint32_t nsegs, struct xdma_request *xr);
/* Requests bank */
void xchan_bank_init(xdma_channel_t *xchan);
int xchan_bank_free(xdma_channel_t *xchan);
struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);
#endif /* !_DEV_XDMA_XDMA_H_ */
Index: stable/12/sys/dev/xdma/xdma_bank.c
===================================================================
--- stable/12/sys/dev/xdma/xdma_bank.c (revision 348622)
+++ stable/12/sys/dev/xdma/xdma_bank.c (revision 348623)
@@ -1,99 +1,98 @@
/*-
- * Copyright (c) 2018 Ruslan Bukin
+ * Copyright (c) 2018-2019 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include
#include
#include
#include
#include
-#include <sys/sx.h>
#include
#include
void
xchan_bank_init(xdma_channel_t *xchan)
{
struct xdma_request *xr;
xdma_controller_t *xdma;
int i;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
xchan->xr_mem = malloc(sizeof(struct xdma_request) * xchan->xr_num,
M_XDMA, M_WAITOK | M_ZERO);
for (i = 0; i < xchan->xr_num; i++) {
xr = &xchan->xr_mem[i];
TAILQ_INSERT_TAIL(&xchan->bank, xr, xr_next);
}
}
int
xchan_bank_free(xdma_channel_t *xchan)
{
free(xchan->xr_mem, M_XDMA);
return (0);
}
struct xdma_request *
xchan_bank_get(xdma_channel_t *xchan)
{
struct xdma_request *xr;
struct xdma_request *xr_tmp;
QUEUE_BANK_LOCK(xchan);
TAILQ_FOREACH_SAFE(xr, &xchan->bank, xr_next, xr_tmp) {
TAILQ_REMOVE(&xchan->bank, xr, xr_next);
break;
}
QUEUE_BANK_UNLOCK(xchan);
return (xr);
}
int
xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr)
{
QUEUE_BANK_LOCK(xchan);
TAILQ_INSERT_TAIL(&xchan->bank, xr, xr_next);
QUEUE_BANK_UNLOCK(xchan);
return (0);
}
Index: stable/12/sys/dev/xdma/xdma_bio.c
===================================================================
--- stable/12/sys/dev/xdma/xdma_bio.c (revision 348622)
+++ stable/12/sys/dev/xdma/xdma_bio.c (revision 348623)
@@ -1,105 +1,104 @@
/*-
- * Copyright (c) 2017-2018 Ruslan Bukin
+ * Copyright (c) 2017-2019 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include
#include
#include
#include
#include
-#include <sys/sx.h>
#include
#include
int
xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
xdma_transfer_status_t *status)
{
struct xdma_request *xr_tmp;
struct xdma_request *xr;
QUEUE_OUT_LOCK(xchan);
TAILQ_FOREACH_SAFE(xr, &xchan->queue_out, xr_next, xr_tmp) {
TAILQ_REMOVE(&xchan->queue_out, xr, xr_next);
break;
}
QUEUE_OUT_UNLOCK(xchan);
if (xr == NULL)
return (-1);
*bp = xr->bp;
status->error = xr->status.error;
status->transferred = xr->status.transferred;
xchan_bank_put(xchan, xr);
return (0);
}
int
xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp,
bus_addr_t addr, uint8_t src_width, uint8_t dst_width,
enum xdma_direction dir)
{
struct xdma_request *xr;
xdma_controller_t *xdma;
xdma = xchan->xdma;
xr = xchan_bank_get(xchan);
if (xr == NULL)
return (-1); /* No space is available yet. */
xr->direction = dir;
xr->bp = *bp;
xr->req_type = XR_TYPE_BIO;
xr->src_width = src_width;
xr->dst_width = dst_width;
if (dir == XDMA_MEM_TO_DEV) {
xr->dst_addr = addr;
xr->src_addr = 0;
} else {
xr->dst_addr = 0;
xr->src_addr = addr;
}
QUEUE_IN_LOCK(xchan);
TAILQ_INSERT_TAIL(&xchan->queue_in, xr, xr_next);
QUEUE_IN_UNLOCK(xchan);
return (0);
}
Index: stable/12/sys/dev/xdma/xdma_mbuf.c
===================================================================
--- stable/12/sys/dev/xdma/xdma_mbuf.c (revision 348622)
+++ stable/12/sys/dev/xdma/xdma_mbuf.c (revision 348623)
@@ -1,150 +1,149 @@
/*-
- * Copyright (c) 2017-2018 Ruslan Bukin
+ * Copyright (c) 2017-2019 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include
#include
#include
#include
-#include <sys/sx.h>
#include
#include
#ifdef FDT
#include
#include
#include
#endif
#include
int
xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **mp,
xdma_transfer_status_t *status)
{
struct xdma_request *xr;
struct xdma_request *xr_tmp;
QUEUE_OUT_LOCK(xchan);
TAILQ_FOREACH_SAFE(xr, &xchan->queue_out, xr_next, xr_tmp) {
TAILQ_REMOVE(&xchan->queue_out, xr, xr_next);
break;
}
QUEUE_OUT_UNLOCK(xchan);
if (xr == NULL)
return (-1);
*mp = xr->m;
status->error = xr->status.error;
status->transferred = xr->status.transferred;
xchan_bank_put(xchan, xr);
return (0);
}
int
xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **mp,
uintptr_t addr, uint8_t src_width, uint8_t dst_width,
enum xdma_direction dir)
{
struct xdma_request *xr;
xdma_controller_t *xdma;
xdma = xchan->xdma;
xr = xchan_bank_get(xchan);
if (xr == NULL)
return (-1); /* No space is available yet. */
xr->direction = dir;
xr->m = *mp;
xr->req_type = XR_TYPE_MBUF;
if (dir == XDMA_MEM_TO_DEV) {
xr->dst_addr = addr;
xr->src_addr = 0;
} else {
xr->src_addr = addr;
xr->dst_addr = 0;
}
xr->src_width = src_width;
xr->dst_width = dst_width;
QUEUE_IN_LOCK(xchan);
TAILQ_INSERT_TAIL(&xchan->queue_in, xr, xr_next);
QUEUE_IN_UNLOCK(xchan);
return (0);
}
uint32_t
xdma_mbuf_chain_count(struct mbuf *m0)
{
struct mbuf *m;
uint32_t c;
c = 0;
for (m = m0; m != NULL; m = m->m_next)
c++;
return (c);
}
uint32_t
xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr)
{
xdma_controller_t *xdma;
struct mbuf *m;
uint32_t c;
xdma = xchan->xdma;
c = xdma_mbuf_chain_count(xr->m);
if (c == 1)
return (c); /* Nothing to do. */
if ((m = m_defrag(xr->m, M_NOWAIT)) == NULL) {
device_printf(xdma->dma_dev,
"%s: Can't defrag mbuf\n",
__func__);
return (c);
}
xr->m = m;
c = 1;
return (c);
}
Index: stable/12/sys/dev/xdma/xdma_queue.c
===================================================================
--- stable/12/sys/dev/xdma/xdma_queue.c (revision 348622)
+++ stable/12/sys/dev/xdma/xdma_queue.c (revision 348623)
@@ -1,125 +1,124 @@
/*-
- * Copyright (c) 2018 Ruslan Bukin
+ * Copyright (c) 2018-2019 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include
#include
#include
#include
#include
-#include
#include
#include
int
xdma_dequeue(xdma_channel_t *xchan, void **user,
xdma_transfer_status_t *status)
{
struct xdma_request *xr_tmp;
struct xdma_request *xr;
QUEUE_OUT_LOCK(xchan);
TAILQ_FOREACH_SAFE(xr, &xchan->queue_out, xr_next, xr_tmp) {
TAILQ_REMOVE(&xchan->queue_out, xr, xr_next);
break;
}
QUEUE_OUT_UNLOCK(xchan);
if (xr == NULL)
return (-1);
*user = xr->user;
status->error = xr->status.error;
status->transferred = xr->status.transferred;
xchan_bank_put(xchan, xr);
return (0);
}
int
xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
uint8_t src_width, uint8_t dst_width, bus_size_t len,
enum xdma_direction dir, void *user)
{
struct xdma_request *xr;
xdma_controller_t *xdma;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
xr = xchan_bank_get(xchan);
if (xr == NULL)
return (-1); /* No space is available. */
xr->user = user;
xr->direction = dir;
xr->m = NULL;
xr->bp = NULL;
xr->block_num = 1;
xr->block_len = len;
xr->req_type = XR_TYPE_VIRT;
xr->src_addr = src;
xr->dst_addr = dst;
xr->src_width = src_width;
xr->dst_width = dst_width;
QUEUE_IN_LOCK(xchan);
TAILQ_INSERT_TAIL(&xchan->queue_in, xr, xr_next);
QUEUE_IN_UNLOCK(xchan);
return (0);
}
int
xdma_queue_submit(xdma_channel_t *xchan)
{
xdma_controller_t *xdma;
int ret;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
ret = 0;
XCHAN_LOCK(xchan);
if (xchan->flags & XCHAN_TYPE_SG)
ret = xdma_queue_submit_sg(xchan);
XCHAN_UNLOCK(xchan);
return (ret);
}
Index: stable/12/sys/dev/xdma/xdma_sg.c
===================================================================
--- stable/12/sys/dev/xdma/xdma_sg.c (revision 348622)
+++ stable/12/sys/dev/xdma/xdma_sg.c (revision 348623)
@@ -1,586 +1,646 @@
/*-
- * Copyright (c) 2018 Ruslan Bukin
- * All rights reserved.
+ * SPDX-License-Identifier: BSD-2-Clause
*
+ * Copyright (c) 2018-2019 Ruslan Bukin
+ *
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include
#include
#include
#include
#include
#include
-#include
+#include
#include
+#include
+#include
+#include
+
#ifdef FDT
#include
#include
#include
#endif
#include
#include
struct seg_load_request {
struct bus_dma_segment *seg;
uint32_t nsegs;
uint32_t error;
};
+static void
+xchan_bufs_free_reserved(xdma_channel_t *xchan)
+{
+ struct xdma_request *xr;
+ vm_size_t size;
+ int i;
+
+ for (i = 0; i < xchan->xr_num; i++) {
+ xr = &xchan->xr_mem[i];
+ size = xr->buf.size;
+ if (xr->buf.vaddr) {
+ pmap_kremove_device(xr->buf.vaddr, size);
+ kva_free(xr->buf.vaddr, size);
+ xr->buf.vaddr = 0;
+ }
+ if (xr->buf.paddr) {
+ vmem_free(xchan->vmem, xr->buf.paddr, size);
+ xr->buf.paddr = 0;
+ }
+ xr->buf.size = 0;
+ }
+}
+
static int
-_xchan_bufs_alloc(xdma_channel_t *xchan)
+xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
{
xdma_controller_t *xdma;
struct xdma_request *xr;
+ vmem_addr_t addr;
+ vm_size_t size;
int i;
xdma = xchan->xdma;
+ if (xchan->vmem == NULL)
+ return (ENOBUFS);
+
for (i = 0; i < xchan->xr_num; i++) {
xr = &xchan->xr_mem[i];
- /* TODO: bounce buffer */
+ size = round_page(xchan->maxsegsize);
+ if (vmem_alloc(xchan->vmem, size,
+ M_BESTFIT | M_NOWAIT, &addr)) {
+ device_printf(xdma->dev,
+ "%s: Can't allocate memory\n", __func__);
+ xchan_bufs_free_reserved(xchan);
+ return (ENOMEM);
+ }
+
+ xr->buf.size = size;
+ xr->buf.paddr = addr;
+ xr->buf.vaddr = kva_alloc(size);
+ if (xr->buf.vaddr == 0) {
+ device_printf(xdma->dev,
+ "%s: Can't allocate KVA\n", __func__);
+ xchan_bufs_free_reserved(xchan);
+ return (ENOMEM);
+ }
+ pmap_kenter_device(xr->buf.vaddr, size, addr);
}
return (0);
}
static int
-_xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
+xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
{
xdma_controller_t *xdma;
struct xdma_request *xr;
int err;
int i;
xdma = xchan->xdma;
/* Create bus_dma tag */
err = bus_dma_tag_create(
bus_get_dma_tag(xdma->dev), /* Parent tag. */
xchan->alignment, /* alignment */
xchan->boundary, /* boundary */
xchan->lowaddr, /* lowaddr */
xchan->highaddr, /* highaddr */
NULL, NULL, /* filter, filterarg */
xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
xchan->maxnsegs, /* nsegments */
xchan->maxsegsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&xchan->dma_tag_bufs);
if (err != 0) {
device_printf(xdma->dev,
"%s: Can't create bus_dma tag.\n", __func__);
return (-1);
}
for (i = 0; i < xchan->xr_num; i++) {
xr = &xchan->xr_mem[i];
err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
&xr->buf.map);
if (err != 0) {
device_printf(xdma->dev,
"%s: Can't create buf DMA map.\n", __func__);
/* Cleanup. */
bus_dma_tag_destroy(xchan->dma_tag_bufs);
return (-1);
}
}
return (0);
}
static int
xchan_bufs_alloc(xdma_channel_t *xchan)
{
xdma_controller_t *xdma;
int ret;
xdma = xchan->xdma;
if (xdma == NULL) {
device_printf(xdma->dev,
"%s: Channel was not allocated properly.\n", __func__);
return (-1);
}
if (xchan->caps & XCHAN_CAP_BUSDMA)
- ret = _xchan_bufs_alloc_busdma(xchan);
- else
- ret = _xchan_bufs_alloc(xchan);
+ ret = xchan_bufs_alloc_busdma(xchan);
+ else {
+ ret = xchan_bufs_alloc_reserved(xchan);
+ }
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't allocate bufs.\n", __func__);
return (-1);
}
xchan->flags |= XCHAN_BUFS_ALLOCATED;
return (0);
}
static int
xchan_bufs_free(xdma_channel_t *xchan)
{
struct xdma_request *xr;
struct xchan_buf *b;
int i;
if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
return (-1);
if (xchan->caps & XCHAN_CAP_BUSDMA) {
for (i = 0; i < xchan->xr_num; i++) {
xr = &xchan->xr_mem[i];
b = &xr->buf;
bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
}
bus_dma_tag_destroy(xchan->dma_tag_bufs);
- } else {
- for (i = 0; i < xchan->xr_num; i++) {
- xr = &xchan->xr_mem[i];
- /* TODO: bounce buffer */
- }
- }
+ } else
+ xchan_bufs_free_reserved(xchan);
xchan->flags &= ~XCHAN_BUFS_ALLOCATED;
return (0);
}
void
xdma_channel_free_sg(xdma_channel_t *xchan)
{
xchan_bufs_free(xchan);
xchan_sglist_free(xchan);
xchan_bank_free(xchan);
}
/*
* Prepare xchan for a scatter-gather transfer.
* xr_num - xdma requests queue size,
* maxsegsize - maximum allowed scatter-gather list element size in bytes
*/
int
xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
bus_size_t maxsegsize, bus_size_t maxnsegs,
bus_size_t alignment, bus_addr_t boundary,
bus_addr_t lowaddr, bus_addr_t highaddr)
{
xdma_controller_t *xdma;
int ret;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
if (xchan->flags & XCHAN_CONFIGURED) {
device_printf(xdma->dev,
"%s: Channel is already configured.\n", __func__);
return (-1);
}
xchan->xr_num = xr_num;
xchan->maxsegsize = maxsegsize;
xchan->maxnsegs = maxnsegs;
xchan->alignment = alignment;
xchan->boundary = boundary;
xchan->lowaddr = lowaddr;
xchan->highaddr = highaddr;
if (xchan->maxnsegs > XDMA_MAX_SEG) {
device_printf(xdma->dev, "%s: maxnsegs is too big\n",
__func__);
return (-1);
}
xchan_bank_init(xchan);
/* Allocate sglist. */
ret = xchan_sglist_alloc(xchan);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't allocate sglist.\n", __func__);
return (-1);
}
/* Allocate buffers if required. */
if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0) {
ret = xchan_bufs_alloc(xchan);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't allocate bufs.\n", __func__);
/* Cleanup */
xchan_sglist_free(xchan);
xchan_bank_free(xchan);
return (-1);
}
}
xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);
XCHAN_LOCK(xchan);
ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't prepare SG transfer.\n", __func__);
XCHAN_UNLOCK(xchan);
return (-1);
}
XCHAN_UNLOCK(xchan);
return (0);
}
void
xchan_seg_done(xdma_channel_t *xchan,
struct xdma_transfer_status *st)
{
struct xdma_request *xr;
xdma_controller_t *xdma;
struct xchan_buf *b;
xdma = xchan->xdma;
xr = TAILQ_FIRST(&xchan->processing);
if (xr == NULL)
panic("request not found\n");
b = &xr->buf;
atomic_subtract_int(&b->nsegs_left, 1);
if (b->nsegs_left == 0) {
if (xchan->caps & XCHAN_CAP_BUSDMA) {
if (xr->direction == XDMA_MEM_TO_DEV)
bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
BUS_DMASYNC_POSTWRITE);
else
bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
+ } else {
+ if (xr->req_type == XR_TYPE_MBUF &&
+ xr->direction == XDMA_DEV_TO_MEM)
+ m_copyback(xr->m, 0, st->transferred,
+ (void *)xr->buf.vaddr);
}
xr->status.error = st->error;
xr->status.transferred = st->transferred;
QUEUE_PROC_LOCK(xchan);
TAILQ_REMOVE(&xchan->processing, xr, xr_next);
QUEUE_PROC_UNLOCK(xchan);
QUEUE_OUT_LOCK(xchan);
TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
QUEUE_OUT_UNLOCK(xchan);
}
}
static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
struct seg_load_request *slr;
struct bus_dma_segment *seg;
int i;
slr = arg;
seg = slr->seg;
if (error != 0) {
slr->error = error;
return;
}
slr->nsegs = nsegs;
for (i = 0; i < nsegs; i++) {
seg[i].ds_addr = segs[i].ds_addr;
seg[i].ds_len = segs[i].ds_len;
}
}
static int
_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
struct bus_dma_segment *seg)
{
xdma_controller_t *xdma;
struct seg_load_request slr;
uint32_t nsegs;
void *addr;
int error;
xdma = xchan->xdma;
error = 0;
nsegs = 0;
switch (xr->req_type) {
case XR_TYPE_MBUF:
error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
break;
case XR_TYPE_BIO:
slr.nsegs = 0;
slr.error = 0;
slr.seg = seg;
error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
if (slr.error != 0) {
device_printf(xdma->dma_dev,
"%s: bus_dmamap_load failed, err %d\n",
__func__, slr.error);
return (0);
}
nsegs = slr.nsegs;
break;
case XR_TYPE_VIRT:
switch (xr->direction) {
case XDMA_MEM_TO_DEV:
addr = (void *)xr->src_addr;
break;
case XDMA_DEV_TO_MEM:
addr = (void *)xr->dst_addr;
break;
default:
device_printf(xdma->dma_dev,
"%s: Direction is not supported\n", __func__);
return (0);
}
slr.nsegs = 0;
slr.error = 0;
slr.seg = seg;
error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
addr, (xr->block_len * xr->block_num),
xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
if (slr.error != 0) {
device_printf(xdma->dma_dev,
"%s: bus_dmamap_load failed, err %d\n",
__func__, slr.error);
return (0);
}
nsegs = slr.nsegs;
break;
default:
break;
}
if (error != 0) {
if (error == ENOMEM) {
/*
* Out of memory. Try again later.
* TODO: count errors.
*/
} else
device_printf(xdma->dma_dev,
"%s: bus_dmamap_load failed with err %d\n",
__func__, error);
return (0);
}
if (xr->direction == XDMA_MEM_TO_DEV)
bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
BUS_DMASYNC_PREWRITE);
else
bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
BUS_DMASYNC_PREREAD);
return (nsegs);
}
static int
_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
struct bus_dma_segment *seg)
{
xdma_controller_t *xdma;
struct mbuf *m;
uint32_t nsegs;
xdma = xchan->xdma;
m = xr->m;
nsegs = 1;
switch (xr->req_type) {
case XR_TYPE_MBUF:
- seg[0].ds_addr = mtod(m, bus_addr_t);
+ if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0) {
+ if (xr->direction == XDMA_MEM_TO_DEV)
+ m_copydata(m, 0, m->m_pkthdr.len,
+ (void *)xr->buf.vaddr);
+ seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
+ } else
+ seg[0].ds_addr = mtod(m, bus_addr_t);
seg[0].ds_len = m->m_pkthdr.len;
break;
case XR_TYPE_BIO:
case XR_TYPE_VIRT:
default:
panic("implement me\n");
}
return (nsegs);
}
static int
xdma_load_data(xdma_channel_t *xchan,
struct xdma_request *xr, struct bus_dma_segment *seg)
{
xdma_controller_t *xdma;
int error;
int nsegs;
xdma = xchan->xdma;
error = 0;
nsegs = 0;
if (xchan->caps & XCHAN_CAP_BUSDMA)
nsegs = _xdma_load_data_busdma(xchan, xr, seg);
else
nsegs = _xdma_load_data(xchan, xr, seg);
if (nsegs == 0)
return (0); /* Try again later. */
xr->buf.nsegs = nsegs;
xr->buf.nsegs_left = nsegs;
return (nsegs);
}
static int
xdma_process(xdma_channel_t *xchan,
struct xdma_sglist *sg)
{
struct bus_dma_segment seg[XDMA_MAX_SEG];
struct xdma_request *xr;
struct xdma_request *xr_tmp;
xdma_controller_t *xdma;
uint32_t capacity;
uint32_t n;
uint32_t c;
int nsegs;
int ret;
XCHAN_ASSERT_LOCKED(xchan);
xdma = xchan->xdma;
n = 0;
+ c = 0;
ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't get DMA controller capacity.\n", __func__);
return (-1);
}
TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
switch (xr->req_type) {
case XR_TYPE_MBUF:
if ((xchan->caps & XCHAN_CAP_NOSEG) ||
(c > xchan->maxnsegs))
c = xdma_mbuf_defrag(xchan, xr);
break;
case XR_TYPE_BIO:
case XR_TYPE_VIRT:
default:
c = 1;
}
if (capacity <= (c + n)) {
/*
* No space yet available for the entire
* request in the DMA engine.
*/
break;
}
if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
/* Sglist is full. */
break;
}
nsegs = xdma_load_data(xchan, xr, seg);
if (nsegs == 0)
break;
xdma_sglist_add(&sg[n], seg, nsegs, xr);
n += nsegs;
QUEUE_IN_LOCK(xchan);
TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
QUEUE_IN_UNLOCK(xchan);
QUEUE_PROC_LOCK(xchan);
TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
QUEUE_PROC_UNLOCK(xchan);
}
return (n);
}
int
xdma_queue_submit_sg(xdma_channel_t *xchan)
{
struct xdma_sglist *sg;
xdma_controller_t *xdma;
uint32_t sg_n;
int ret;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
XCHAN_ASSERT_LOCKED(xchan);
sg = xchan->sg;
if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0 &&
(xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
device_printf(xdma->dev,
"%s: Can't submit a transfer: no bufs\n",
__func__);
return (-1);
}
sg_n = xdma_process(xchan, sg);
if (sg_n == 0)
return (0); /* Nothing to submit */
/* Now submit sglist to DMA engine driver. */
ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't submit an sglist.\n", __func__);
return (-1);
}
return (0);
}