Index: head/sys/dev/altera/softdma/softdma.c
===================================================================
--- head/sys/dev/altera/softdma/softdma.c (revision 349726)
+++ head/sys/dev/altera/softdma/softdma.c (revision 349727)
@@ -1,888 +1,887 @@
/*-
* Copyright (c) 2017-2018 Ruslan Bukin <br@bsdpad.com>
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* This is a driver for the SoftDMA device built using the Altera FIFO component. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <machine/bus.h>
#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif
#include <dev/xdma/xdma.h>
#include <dev/altera/softdma/a_api.h>
#include "xdma_if.h"
#define SOFTDMA_DEBUG
#undef SOFTDMA_DEBUG
#ifdef SOFTDMA_DEBUG
#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif
#define AVALON_FIFO_TX_BASIC_OPTS_DEPTH 16
#define SOFTDMA_NCHANNELS 1
#define CONTROL_GEN_SOP (1 << 0)
#define CONTROL_GEN_EOP (1 << 1)
#define CONTROL_OWN (1 << 31)
#define SOFTDMA_RX_EVENTS \
(A_ONCHIP_FIFO_MEM_CORE_INTR_FULL | \
A_ONCHIP_FIFO_MEM_CORE_INTR_OVERFLOW | \
A_ONCHIP_FIFO_MEM_CORE_INTR_UNDERFLOW)
#define SOFTDMA_TX_EVENTS \
(A_ONCHIP_FIFO_MEM_CORE_INTR_EMPTY | \
A_ONCHIP_FIFO_MEM_CORE_INTR_OVERFLOW | \
A_ONCHIP_FIFO_MEM_CORE_INTR_UNDERFLOW)
struct softdma_channel {
struct softdma_softc *sc;
struct mtx mtx;
xdma_channel_t *xchan;
struct proc *p;
int used;
int index;
int run;
uint32_t idx_tail;
uint32_t idx_head;
struct softdma_desc *descs;
uint32_t descs_num;
uint32_t descs_used_count;
};
struct softdma_desc {
uint64_t src_addr;
uint64_t dst_addr;
uint32_t len;
uint32_t access_width;
uint32_t count;
uint16_t src_incr;
uint16_t dst_incr;
uint32_t direction;
struct softdma_desc *next;
uint32_t transfered;
uint32_t status;
uint32_t reserved;
uint32_t control;
};
struct softdma_softc {
device_t dev;
struct resource *res[3];
bus_space_tag_t bst;
bus_space_handle_t bsh;
bus_space_tag_t bst_c;
bus_space_handle_t bsh_c;
void *ih;
struct softdma_channel channels[SOFTDMA_NCHANNELS];
};
static struct resource_spec softdma_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE }, /* fifo */
{ SYS_RES_MEMORY, 1, RF_ACTIVE }, /* core */
{ SYS_RES_IRQ, 0, RF_ACTIVE },
{ -1, 0 }
};
static int softdma_probe(device_t dev);
static int softdma_attach(device_t dev);
static int softdma_detach(device_t dev);
static inline uint32_t
softdma_next_desc(struct softdma_channel *chan, uint32_t curidx)
{
return ((curidx + 1) % chan->descs_num);
}
static void
softdma_mem_write(struct softdma_softc *sc, uint32_t reg, uint32_t val)
{
bus_write_4(sc->res[0], reg, htole32(val));
}
static uint32_t
softdma_mem_read(struct softdma_softc *sc, uint32_t reg)
{
uint32_t val;
val = bus_read_4(sc->res[0], reg);
return (le32toh(val));
}
static void
softdma_memc_write(struct softdma_softc *sc, uint32_t reg, uint32_t val)
{
bus_write_4(sc->res[1], reg, htole32(val));
}
static uint32_t
softdma_memc_read(struct softdma_softc *sc, uint32_t reg)
{
uint32_t val;
val = bus_read_4(sc->res[1], reg);
return (le32toh(val));
}
static uint32_t
softdma_fill_level(struct softdma_softc *sc)
{
uint32_t val;
val = softdma_memc_read(sc,
A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_FILL_LEVEL);
return (val);
}
static uint32_t
fifo_fill_level_wait(struct softdma_softc *sc)
{
uint32_t val;
do
val = softdma_fill_level(sc);
while (val == AVALON_FIFO_TX_BASIC_OPTS_DEPTH);
return (val);
}
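/*
 * The helper above spins until the TX FIFO fill level drops below its
 * depth of AVALON_FIFO_TX_BASIC_OPTS_DEPTH (16) words, i.e. until there
 * is room for at least one more 32-bit store.
 */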
static void
softdma_intr(void *arg)
{
struct softdma_channel *chan;
struct softdma_softc *sc;
int reg;
int err;
sc = arg;
chan = &sc->channels[0];
reg = softdma_memc_read(sc, A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT);
if (reg & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW |
A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
/* Errors */
err = (((reg & A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) >> \
A_ONCHIP_FIFO_MEM_CORE_ERROR_SHIFT) & 0xff);
}
if (reg != 0) {
softdma_memc_write(sc,
A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, reg);
chan->run = 1;
wakeup(chan);
}
}
static int
softdma_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "altr,softdma"))
return (ENXIO);
device_set_desc(dev, "SoftDMA");
return (BUS_PROBE_DEFAULT);
}
static int
softdma_attach(device_t dev)
{
struct softdma_softc *sc;
phandle_t xref, node;
int err;
sc = device_get_softc(dev);
sc->dev = dev;
if (bus_alloc_resources(dev, softdma_spec, sc->res)) {
device_printf(dev,
"could not allocate resources for device\n");
return (ENXIO);
}
/* FIFO memory interface */
sc->bst = rman_get_bustag(sc->res[0]);
sc->bsh = rman_get_bushandle(sc->res[0]);
/* FIFO control memory interface */
sc->bst_c = rman_get_bustag(sc->res[1]);
sc->bsh_c = rman_get_bushandle(sc->res[1]);
/* Setup interrupt handler */
err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
NULL, softdma_intr, sc, &sc->ih);
if (err) {
device_printf(dev, "Unable to alloc interrupt resource.\n");
return (ENXIO);
}
node = ofw_bus_get_node(dev);
xref = OF_xref_from_node(node);
OF_device_register_xref(xref, dev);
return (0);
}
static int
softdma_detach(device_t dev)
{
struct softdma_softc *sc;
sc = device_get_softc(dev);
return (0);
}
static int
softdma_process_tx(struct softdma_channel *chan, struct softdma_desc *desc)
{
struct softdma_softc *sc;
uint64_t addr;
uint64_t buf;
uint32_t word;
uint32_t missing;
uint32_t reg;
int got_bits;
int len;
sc = chan->sc;
fifo_fill_level_wait(sc);
/* Set start of packet. */
if (desc->control & CONTROL_GEN_SOP)
softdma_mem_write(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA,
A_ONCHIP_FIFO_MEM_CORE_SOP);
got_bits = 0;
buf = 0;
addr = desc->src_addr;
len = desc->len;
if (addr & 1) {
buf = (buf << 8) | *(uint8_t *)addr;
got_bits += 8;
addr += 1;
len -= 1;
}
if (len >= 2 && addr & 2) {
buf = (buf << 16) | *(uint16_t *)addr;
got_bits += 16;
addr += 2;
len -= 2;
}
while (len >= 4) {
buf = (buf << 32) | (uint64_t)*(uint32_t *)addr;
addr += 4;
len -= 4;
word = (uint32_t)((buf >> got_bits) & 0xffffffff);
fifo_fill_level_wait(sc);
if (len == 0 && got_bits == 0 &&
(desc->control & CONTROL_GEN_EOP) != 0)
softdma_mem_write(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA,
A_ONCHIP_FIFO_MEM_CORE_EOP);
bus_write_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA, word);
}
if (len & 2) {
buf = (buf << 16) | *(uint16_t *)addr;
got_bits += 16;
addr += 2;
len -= 2;
}
if (len & 1) {
buf = (buf << 8) | *(uint8_t *)addr;
got_bits += 8;
addr += 1;
len -= 1;
}
if (got_bits >= 32) {
got_bits -= 32;
word = (uint32_t)((buf >> got_bits) & 0xffffffff);
fifo_fill_level_wait(sc);
if (len == 0 && got_bits == 0 &&
(desc->control & CONTROL_GEN_EOP) != 0)
softdma_mem_write(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA,
A_ONCHIP_FIFO_MEM_CORE_EOP);
bus_write_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA, word);
}
if (got_bits) {
missing = 32 - got_bits;
got_bits /= 8;
fifo_fill_level_wait(sc);
reg = A_ONCHIP_FIFO_MEM_CORE_EOP |
((4 - got_bits) << A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT);
softdma_mem_write(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA, reg);
word = (uint32_t)((buf << missing) & 0xffffffff);
bus_write_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA, word);
}
return (desc->len);
}
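/*
 * A worked example of the packing above (an illustration, not from the
 * original source): a 5-byte packet starting at a 4-byte-aligned address.
 * The while loop emits one full 32-bit FIFO word; the trailing byte
 * leaves got_bits = 8, so the final word is flushed left-justified
 * (buf << 24) with the metadata "empty" field set to 3, telling the
 * receiver that only one of the last four bytes is valid.  Unaligned
 * heads work the same way: the leading byte/halfword loads seed
 * got_bits, and each store takes the oldest 32 bits via
 * (buf >> got_bits), leaving got_bits of not-yet-written data in the
 * 64-bit accumulator for the next store.
 */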
static int
softdma_process_rx(struct softdma_channel *chan, struct softdma_desc *desc)
{
uint32_t src_offs, dst_offs;
struct softdma_softc *sc;
uint32_t fill_level;
uint32_t empty;
uint32_t meta;
uint32_t data;
int sop_rcvd;
int timeout;
size_t len;
int error;
sc = chan->sc;
empty = 0;
src_offs = dst_offs = 0;
error = 0;
fill_level = softdma_fill_level(sc);
if (fill_level == 0) {
/* Nothing to receive. */
return (0);
}
len = desc->len;
sop_rcvd = 0;
while (fill_level) {
empty = 0;
data = bus_read_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA);
meta = softdma_mem_read(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA);
if (meta & A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) {
error = 1;
break;
}
if ((meta & A_ONCHIP_FIFO_MEM_CORE_CHANNEL_MASK) != 0) {
error = 1;
break;
}
if (meta & A_ONCHIP_FIFO_MEM_CORE_SOP) {
sop_rcvd = 1;
}
if (meta & A_ONCHIP_FIFO_MEM_CORE_EOP) {
empty = (meta & A_ONCHIP_FIFO_MEM_CORE_EMPTY_MASK) >>
A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT;
}
if (sop_rcvd == 0) {
error = 1;
break;
}
if (empty == 0) {
*(uint32_t *)(desc->dst_addr + dst_offs) = data;
dst_offs += 4;
} else if (empty == 1) {
*(uint16_t *)(desc->dst_addr + dst_offs) =
((data >> 16) & 0xffff);
dst_offs += 2;
*(uint8_t *)(desc->dst_addr + dst_offs) =
((data >> 8) & 0xff);
dst_offs += 1;
} else {
panic("empty %d\n", empty);
}
if (meta & A_ONCHIP_FIFO_MEM_CORE_EOP)
break;
fill_level = softdma_fill_level(sc);
timeout = 100;
while (fill_level == 0 && timeout--)
fill_level = softdma_fill_level(sc);
if (fill_level == 0) {
/* No EOP received. Broken packet. */
error = 1;
break;
}
}
if (error) {
return (-1);
}
return (dst_offs);
}
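/*
 * A note on the "empty" decode above: empty == 0 means all four bytes of
 * the received word are valid, while empty == 1 means three are, which
 * is why the word is stored as a halfword (data >> 16) plus a byte
 * (data >> 8).  The panic() covers the other empty values, which this
 * receive path does not currently handle.
 */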
static uint32_t
softdma_process_descriptors(struct softdma_channel *chan,
xdma_transfer_status_t *status)
{
struct xdma_channel *xchan;
struct softdma_desc *desc;
struct softdma_softc *sc;
xdma_transfer_status_t st;
int ret;
sc = chan->sc;
xchan = chan->xchan;
desc = &chan->descs[chan->idx_tail];
while (desc != NULL) {
if ((desc->control & CONTROL_OWN) == 0) {
break;
}
if (desc->direction == XDMA_MEM_TO_DEV) {
ret = softdma_process_tx(chan, desc);
} else {
ret = softdma_process_rx(chan, desc);
if (ret == 0) {
/* No new data available. */
break;
}
}
/* Descriptor processed. */
desc->control = 0;
if (ret >= 0) {
st.error = 0;
st.transferred = ret;
} else {
st.error = ret;
st.transferred = 0;
}
xchan_seg_done(xchan, &st);
atomic_subtract_int(&chan->descs_used_count, 1);
if (ret >= 0) {
status->transferred += ret;
} else {
status->error = 1;
break;
}
chan->idx_tail = softdma_next_desc(chan, chan->idx_tail);
/* Process next descriptor, if any. */
desc = desc->next;
}
return (0);
}
static void
softdma_worker(void *arg)
{
xdma_transfer_status_t status;
struct softdma_channel *chan;
struct softdma_softc *sc;
chan = arg;
sc = chan->sc;
while (1) {
mtx_lock(&chan->mtx);
do {
mtx_sleep(chan, &chan->mtx, 0, "softdma_wait", hz / 2);
} while (chan->run == 0);
status.error = 0;
status.transferred = 0;
softdma_process_descriptors(chan, &status);
/* Finish operation */
chan->run = 0;
xdma_callback(chan->xchan, &status);
mtx_unlock(&chan->mtx);
}
}
static int
softdma_proc_create(struct softdma_channel *chan)
{
struct softdma_softc *sc;
sc = chan->sc;
if (chan->p != NULL) {
/* Already created */
return (0);
}
mtx_init(&chan->mtx, "SoftDMA", NULL, MTX_DEF);
if (kproc_create(softdma_worker, (void *)chan, &chan->p, 0, 0,
"softdma_worker") != 0) {
device_printf(sc->dev,
"%s: Failed to create worker thread.\n", __func__);
return (-1);
}
return (0);
}
static int
softdma_channel_alloc(device_t dev, struct xdma_channel *xchan)
{
struct softdma_channel *chan;
struct softdma_softc *sc;
int i;
sc = device_get_softc(dev);
for (i = 0; i < SOFTDMA_NCHANNELS; i++) {
chan = &sc->channels[i];
if (chan->used == 0) {
chan->xchan = xchan;
xchan->chan = (void *)chan;
- xchan->caps |= XCHAN_CAP_NOBUFS;
xchan->caps |= XCHAN_CAP_NOSEG;
chan->index = i;
chan->idx_head = 0;
chan->idx_tail = 0;
chan->descs_used_count = 0;
chan->descs_num = 1024;
chan->sc = sc;
if (softdma_proc_create(chan) != 0) {
return (-1);
}
chan->used = 1;
return (0);
}
}
return (-1);
}
static int
softdma_channel_free(device_t dev, struct xdma_channel *xchan)
{
struct softdma_channel *chan;
struct softdma_softc *sc;
sc = device_get_softc(dev);
chan = (struct softdma_channel *)xchan->chan;
if (chan->descs != NULL) {
free(chan->descs, M_DEVBUF);
}
chan->used = 0;
return (0);
}
static int
softdma_desc_alloc(struct xdma_channel *xchan)
{
struct softdma_channel *chan;
uint32_t nsegments;
chan = (struct softdma_channel *)xchan->chan;
nsegments = chan->descs_num;
chan->descs = malloc(nsegments * sizeof(struct softdma_desc),
M_DEVBUF, (M_WAITOK | M_ZERO));
return (0);
}
static int
softdma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
{
struct softdma_channel *chan;
struct softdma_desc *desc;
struct softdma_softc *sc;
int ret;
int i;
sc = device_get_softc(dev);
chan = (struct softdma_channel *)xchan->chan;
ret = softdma_desc_alloc(xchan);
if (ret != 0) {
device_printf(sc->dev,
"%s: Can't allocate descriptors.\n", __func__);
return (-1);
}
for (i = 0; i < chan->descs_num; i++) {
desc = &chan->descs[i];
if (i == (chan->descs_num - 1)) {
desc->next = &chan->descs[0];
} else {
desc->next = &chan->descs[i+1];
}
}
return (0);
}
static int
softdma_channel_capacity(device_t dev, xdma_channel_t *xchan,
uint32_t *capacity)
{
struct softdma_channel *chan;
uint32_t c;
chan = (struct softdma_channel *)xchan->chan;
/* At least one descriptor must be left empty. */
c = (chan->descs_num - chan->descs_used_count - 1);
*capacity = c;
return (0);
}
static int
softdma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
struct xdma_sglist *sg, uint32_t sg_n)
{
struct softdma_channel *chan;
struct softdma_desc *desc;
struct softdma_softc *sc;
uint32_t enqueued;
uint32_t saved_dir;
uint32_t tmp;
uint32_t len;
int i;
sc = device_get_softc(dev);
chan = (struct softdma_channel *)xchan->chan;
enqueued = 0;
for (i = 0; i < sg_n; i++) {
len = (uint32_t)sg[i].len;
desc = &chan->descs[chan->idx_head];
desc->src_addr = sg[i].src_addr;
desc->dst_addr = sg[i].dst_addr;
if (sg[i].direction == XDMA_MEM_TO_DEV) {
desc->src_incr = 1;
desc->dst_incr = 0;
} else {
desc->src_incr = 0;
desc->dst_incr = 1;
}
desc->direction = sg[i].direction;
saved_dir = sg[i].direction;
desc->len = len;
desc->transfered = 0;
desc->status = 0;
desc->reserved = 0;
desc->control = 0;
if (sg[i].first == 1)
desc->control |= CONTROL_GEN_SOP;
if (sg[i].last == 1)
desc->control |= CONTROL_GEN_EOP;
tmp = chan->idx_head;
chan->idx_head = softdma_next_desc(chan, chan->idx_head);
atomic_add_int(&chan->descs_used_count, 1);
desc->control |= CONTROL_OWN;
enqueued += 1;
}
if (enqueued == 0)
return (0);
if (saved_dir == XDMA_MEM_TO_DEV) {
chan->run = 1;
wakeup(chan);
} else
softdma_memc_write(sc,
A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE,
SOFTDMA_RX_EVENTS);
return (0);
}
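/*
 * Note the split above: MEM_TO_DEV submissions wake the worker thread
 * immediately, while DEV_TO_MEM submissions only enable the RX FIFO
 * events; softdma_intr() then wakes the worker once data arrives.
 */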
static int
softdma_channel_request(device_t dev, struct xdma_channel *xchan,
struct xdma_request *req)
{
struct softdma_channel *chan;
struct softdma_desc *desc;
struct softdma_softc *sc;
int ret;
sc = device_get_softc(dev);
chan = (struct softdma_channel *)xchan->chan;
ret = softdma_desc_alloc(xchan);
if (ret != 0) {
device_printf(sc->dev,
"%s: Can't allocate descriptors.\n", __func__);
return (-1);
}
desc = &chan->descs[0];
desc->src_addr = req->src_addr;
desc->dst_addr = req->dst_addr;
desc->len = req->block_len;
desc->src_incr = 1;
desc->dst_incr = 1;
desc->next = NULL;
return (0);
}
static int
softdma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
{
struct softdma_channel *chan;
struct softdma_softc *sc;
sc = device_get_softc(dev);
chan = (struct softdma_channel *)xchan->chan;
switch (cmd) {
case XDMA_CMD_BEGIN:
case XDMA_CMD_TERMINATE:
case XDMA_CMD_PAUSE:
/* TODO: implement me */
return (-1);
}
return (0);
}
#ifdef FDT
static int
softdma_ofw_md_data(device_t dev, pcell_t *cells,
int ncells, void **ptr)
{
return (0);
}
#endif
static device_method_t softdma_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, softdma_probe),
DEVMETHOD(device_attach, softdma_attach),
DEVMETHOD(device_detach, softdma_detach),
/* xDMA Interface */
DEVMETHOD(xdma_channel_alloc, softdma_channel_alloc),
DEVMETHOD(xdma_channel_free, softdma_channel_free),
DEVMETHOD(xdma_channel_request, softdma_channel_request),
DEVMETHOD(xdma_channel_control, softdma_channel_control),
/* xDMA SG Interface */
DEVMETHOD(xdma_channel_prep_sg, softdma_channel_prep_sg),
DEVMETHOD(xdma_channel_submit_sg, softdma_channel_submit_sg),
DEVMETHOD(xdma_channel_capacity, softdma_channel_capacity),
#ifdef FDT
DEVMETHOD(xdma_ofw_md_data, softdma_ofw_md_data),
#endif
DEVMETHOD_END
};
static driver_t softdma_driver = {
"softdma",
softdma_methods,
sizeof(struct softdma_softc),
};
static devclass_t softdma_devclass;
EARLY_DRIVER_MODULE(softdma, simplebus, softdma_driver, softdma_devclass, 0, 0,
BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
Index: head/sys/dev/xdma/xdma.h
===================================================================
--- head/sys/dev/xdma/xdma.h (revision 349726)
+++ head/sys/dev/xdma/xdma.h (revision 349727)
@@ -1,274 +1,274 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2016-2019 Ruslan Bukin <br@bsdpad.com>
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _DEV_XDMA_XDMA_H_
#define _DEV_XDMA_XDMA_H_
#include <sys/malloc.h>
#include <sys/vmem.h>
enum xdma_direction {
XDMA_MEM_TO_MEM,
XDMA_MEM_TO_DEV,
XDMA_DEV_TO_MEM,
XDMA_DEV_TO_DEV,
};
enum xdma_operation_type {
XDMA_MEMCPY,
XDMA_CYCLIC,
XDMA_FIFO,
XDMA_SG,
};
enum xdma_request_type {
XR_TYPE_PHYS,
XR_TYPE_VIRT,
XR_TYPE_MBUF,
XR_TYPE_BIO,
};
enum xdma_command {
XDMA_CMD_BEGIN,
XDMA_CMD_PAUSE,
XDMA_CMD_TERMINATE,
};
struct xdma_transfer_status {
uint32_t transferred;
int error;
};
typedef struct xdma_transfer_status xdma_transfer_status_t;
struct xdma_controller {
device_t dev; /* DMA consumer device_t. */
device_t dma_dev; /* A real DMA device_t. */
void *data; /* OFW MD part. */
vmem_t *vmem; /* Bounce memory. */
/* List of virtual channels allocated. */
TAILQ_HEAD(xdma_channel_list, xdma_channel) channels;
};
typedef struct xdma_controller xdma_controller_t;
struct xchan_buf {
bus_dmamap_t map;
uint32_t nsegs;
uint32_t nsegs_left;
vm_offset_t vaddr;
vm_offset_t paddr;
vm_size_t size;
};
struct xdma_request {
struct mbuf *m;
struct bio *bp;
enum xdma_operation_type operation;
enum xdma_request_type req_type;
enum xdma_direction direction;
bus_addr_t src_addr;
bus_addr_t dst_addr;
uint8_t src_width;
uint8_t dst_width;
bus_size_t block_num;
bus_size_t block_len;
xdma_transfer_status_t status;
void *user;
TAILQ_ENTRY(xdma_request) xr_next;
struct xchan_buf buf;
};
struct xdma_sglist {
bus_addr_t src_addr;
bus_addr_t dst_addr;
size_t len;
uint8_t src_width;
uint8_t dst_width;
enum xdma_direction direction;
bool first;
bool last;
};
struct xdma_channel {
xdma_controller_t *xdma;
vmem_t *vmem;
uint32_t flags;
#define XCHAN_BUFS_ALLOCATED (1 << 0)
#define XCHAN_SGLIST_ALLOCATED (1 << 1)
#define XCHAN_CONFIGURED (1 << 2)
#define XCHAN_TYPE_CYCLIC (1 << 3)
#define XCHAN_TYPE_MEMCPY (1 << 4)
#define XCHAN_TYPE_FIFO (1 << 5)
#define XCHAN_TYPE_SG (1 << 6)
uint32_t caps;
#define XCHAN_CAP_BUSDMA (1 << 0)
#define XCHAN_CAP_NOSEG (1 << 1)
-#define XCHAN_CAP_NOBUFS (1 << 2)
+#define XCHAN_CAP_BOUNCE (1 << 2)
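/*
 * With this rename the capability states the buffering strategy
 * directly: XCHAN_CAP_BUSDMA maps request data with bus_dma(9), and
 * XCHAN_CAP_BOUNCE stages it through reserved buffers carved out of the
 * controller's vmem (see xchan_bufs_alloc_reserved() in xdma_sg.c).  A
 * channel with neither capability, such as softdma's, needs no buffer
 * allocation at all.
 */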
/* A real hardware driver channel. */
void *chan;
/* Interrupt handlers. */
TAILQ_HEAD(, xdma_intr_handler) ie_handlers;
TAILQ_ENTRY(xdma_channel) xchan_next;
struct mtx mtx_lock;
struct mtx mtx_qin_lock;
struct mtx mtx_qout_lock;
struct mtx mtx_bank_lock;
struct mtx mtx_proc_lock;
/* Request queue. */
bus_dma_tag_t dma_tag_bufs;
struct xdma_request *xr_mem;
uint32_t xr_num;
/* Bus dma tag options. */
bus_size_t maxsegsize;
bus_size_t maxnsegs;
bus_size_t alignment;
bus_addr_t boundary;
bus_addr_t lowaddr;
bus_addr_t highaddr;
struct xdma_sglist *sg;
TAILQ_HEAD(, xdma_request) bank;
TAILQ_HEAD(, xdma_request) queue_in;
TAILQ_HEAD(, xdma_request) queue_out;
TAILQ_HEAD(, xdma_request) processing;
};
typedef struct xdma_channel xdma_channel_t;
struct xdma_intr_handler {
int (*cb)(void *cb_user, xdma_transfer_status_t *status);
void *cb_user;
TAILQ_ENTRY(xdma_intr_handler) ih_next;
};
static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
#define XCHAN_LOCK(xchan) mtx_lock(&(xchan)->mtx_lock)
#define XCHAN_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_lock)
#define XCHAN_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_lock, MA_OWNED)
#define QUEUE_IN_LOCK(xchan) mtx_lock(&(xchan)->mtx_qin_lock)
#define QUEUE_IN_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_qin_lock)
#define QUEUE_IN_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_qin_lock, MA_OWNED)
#define QUEUE_OUT_LOCK(xchan) mtx_lock(&(xchan)->mtx_qout_lock)
#define QUEUE_OUT_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_qout_lock)
#define QUEUE_OUT_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_qout_lock, MA_OWNED)
#define QUEUE_BANK_LOCK(xchan) mtx_lock(&(xchan)->mtx_bank_lock)
#define QUEUE_BANK_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_bank_lock)
#define QUEUE_BANK_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_bank_lock, MA_OWNED)
#define QUEUE_PROC_LOCK(xchan) mtx_lock(&(xchan)->mtx_proc_lock)
#define QUEUE_PROC_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_proc_lock)
#define QUEUE_PROC_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_proc_lock, MA_OWNED)
#define XDMA_SGLIST_MAXLEN 2048
#define XDMA_MAX_SEG 128
/* xDMA controller ops */
xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
int xdma_put(xdma_controller_t *xdma);
vmem_t * xdma_get_memory(device_t dev);
void xdma_put_memory(vmem_t *vmem);
/* xDMA channel ops */
xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
int xdma_channel_free(xdma_channel_t *);
int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);
void xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem);
/* SG interface */
int xdma_prep_sg(xdma_channel_t *, uint32_t,
bus_size_t, bus_size_t, bus_size_t, bus_addr_t, bus_addr_t, bus_addr_t);
void xdma_channel_free_sg(xdma_channel_t *xchan);
int xdma_queue_submit_sg(xdma_channel_t *xchan);
void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);
/* Queue operations */
int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
xdma_transfer_status_t *);
int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
xdma_transfer_status_t *status);
int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue(xdma_channel_t *xchan, void **user,
xdma_transfer_status_t *status);
int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
uint8_t, uint8_t, bus_size_t, enum xdma_direction dir, void *);
int xdma_queue_submit(xdma_channel_t *xchan);
/* Mbuf operations */
uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
uint32_t xdma_mbuf_chain_count(struct mbuf *m0);
/* Channel Control */
int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);
/* Interrupt callback */
int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *,
xdma_transfer_status_t *), void *arg, void **);
int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
int xdma_teardown_all_intr(xdma_channel_t *xchan);
void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);
/* Sglist */
int xchan_sglist_alloc(xdma_channel_t *xchan);
void xchan_sglist_free(xdma_channel_t *xchan);
int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
uint32_t nsegs, struct xdma_request *xr);
/* Requests bank */
void xchan_bank_init(xdma_channel_t *xchan);
int xchan_bank_free(xdma_channel_t *xchan);
struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);
#endif /* !_DEV_XDMA_XDMA_H_ */
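/*
 * A minimal consumer-side sketch of the SG API above (an illustration,
 * not part of this commit).  The "tx" OFW property name, the sizes and
 * the function names are assumptions; error handling is omitted.
 */
static int
my_done(void *arg, xdma_transfer_status_t *status)
{
	xdma_channel_t *xchan = arg;
	xdma_transfer_status_t st;
	struct mbuf *m;

	/* Reap completed requests from the channel's out queue. */
	while (xdma_dequeue_mbuf(xchan, &m, &st) == 0)
		m_freem(m);

	return (0);
}

static xdma_channel_t *
my_chan_setup(device_t dev, struct mbuf *m)
{
	xdma_controller_t *xdma;
	xdma_channel_t *xchan;
	void *ih;

	/* Look up the DMA controller by OFW property. */
	xdma = xdma_ofw_get(dev, "tx");

	/* Ask for bounce buffering; the controller must provide vmem. */
	xchan = xdma_channel_alloc(xdma, XCHAN_CAP_BOUNCE);
	xdma_setup_intr(xchan, my_done, xchan, &ih);

	/* 32 requests, 2048-byte segments, 8 segments max, 16-byte align. */
	xdma_prep_sg(xchan, 32, 2048, 8, 16, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);

	/* Enqueue one mbuf chain for TX and kick the queue. */
	xdma_enqueue_mbuf(xchan, &m, 0, 4, 4, XDMA_MEM_TO_DEV);
	xdma_queue_submit(xchan);

	return (xchan);
}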
Index: head/sys/dev/xdma/xdma_sg.c
===================================================================
--- head/sys/dev/xdma/xdma_sg.c (revision 349726)
+++ head/sys/dev/xdma/xdma_sg.c (revision 349727)
@@ -1,650 +1,650 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2018-2019 Ruslan Bukin <br@bsdpad.com>
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <machine/bus.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif
#include <dev/xdma/xdma.h>
#include <xdma_if.h>
struct seg_load_request {
struct bus_dma_segment *seg;
uint32_t nsegs;
uint32_t error;
};
static void
xchan_bufs_free_reserved(xdma_channel_t *xchan)
{
struct xdma_request *xr;
vm_size_t size;
int i;
for (i = 0; i < xchan->xr_num; i++) {
xr = &xchan->xr_mem[i];
size = xr->buf.size;
if (xr->buf.vaddr) {
pmap_kremove_device(xr->buf.vaddr, size);
kva_free(xr->buf.vaddr, size);
xr->buf.vaddr = 0;
}
if (xr->buf.paddr) {
vmem_free(xchan->vmem, xr->buf.paddr, size);
xr->buf.paddr = 0;
}
xr->buf.size = 0;
}
}
static int
xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
{
xdma_controller_t *xdma;
struct xdma_request *xr;
vmem_addr_t addr;
vm_size_t size;
int i;
xdma = xchan->xdma;
if (xchan->vmem == NULL)
return (ENOBUFS);
for (i = 0; i < xchan->xr_num; i++) {
xr = &xchan->xr_mem[i];
size = round_page(xchan->maxsegsize);
if (vmem_alloc(xchan->vmem, size,
M_BESTFIT | M_NOWAIT, &addr)) {
device_printf(xdma->dev,
"%s: Can't allocate memory\n", __func__);
xchan_bufs_free_reserved(xchan);
return (ENOMEM);
}
xr->buf.size = size;
xr->buf.paddr = addr;
xr->buf.vaddr = kva_alloc(size);
if (xr->buf.vaddr == 0) {
device_printf(xdma->dev,
"%s: Can't allocate KVA\n", __func__);
xchan_bufs_free_reserved(xchan);
return (ENOMEM);
}
pmap_kenter_device(xr->buf.vaddr, size, addr);
}
return (0);
}
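/*
 * Each reserved buffer is a page-rounded chunk of the controller's
 * bounce-memory arena: vmem_alloc() provides the physical address handed
 * to the engine (buf.paddr), and pmap_kenter_device() maps it at
 * buf.vaddr so the framework can copy mbuf data through it.
 */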
static int
xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
{
xdma_controller_t *xdma;
struct xdma_request *xr;
int err;
int i;
xdma = xchan->xdma;
/* Create bus_dma tag */
err = bus_dma_tag_create(
bus_get_dma_tag(xdma->dev), /* Parent tag. */
xchan->alignment, /* alignment */
xchan->boundary, /* boundary */
xchan->lowaddr, /* lowaddr */
xchan->highaddr, /* highaddr */
NULL, NULL, /* filter, filterarg */
xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
xchan->maxnsegs, /* nsegments */
xchan->maxsegsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&xchan->dma_tag_bufs);
if (err != 0) {
device_printf(xdma->dev,
"%s: Can't create bus_dma tag.\n", __func__);
return (-1);
}
for (i = 0; i < xchan->xr_num; i++) {
xr = &xchan->xr_mem[i];
err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
&xr->buf.map);
if (err != 0) {
device_printf(xdma->dev,
"%s: Can't create buf DMA map.\n", __func__);
/* Cleanup. */
bus_dma_tag_destroy(xchan->dma_tag_bufs);
return (-1);
}
}
return (0);
}
static int
xchan_bufs_alloc(xdma_channel_t *xchan)
{
xdma_controller_t *xdma;
int ret;
xdma = xchan->xdma;
if (xdma == NULL) {
printf("%s: Channel was not allocated properly.\n", __func__);
return (-1);
}
if (xchan->caps & XCHAN_CAP_BUSDMA)
ret = xchan_bufs_alloc_busdma(xchan);
else {
ret = xchan_bufs_alloc_reserved(xchan);
}
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't allocate bufs.\n", __func__);
return (-1);
}
xchan->flags |= XCHAN_BUFS_ALLOCATED;
return (0);
}
static int
xchan_bufs_free(xdma_channel_t *xchan)
{
struct xdma_request *xr;
struct xchan_buf *b;
int i;
if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
return (-1);
if (xchan->caps & XCHAN_CAP_BUSDMA) {
for (i = 0; i < xchan->xr_num; i++) {
xr = &xchan->xr_mem[i];
b = &xr->buf;
bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
}
bus_dma_tag_destroy(xchan->dma_tag_bufs);
} else
xchan_bufs_free_reserved(xchan);
xchan->flags &= ~XCHAN_BUFS_ALLOCATED;
return (0);
}
void
xdma_channel_free_sg(xdma_channel_t *xchan)
{
xchan_bufs_free(xchan);
xchan_sglist_free(xchan);
xchan_bank_free(xchan);
}
/*
* Prepare xchan for a scatter-gather transfer.
* xr_num - size of the xdma request queue,
* maxsegsize - maximum allowed scatter-gather element size in bytes.
*/
int
xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
bus_size_t maxsegsize, bus_size_t maxnsegs,
bus_size_t alignment, bus_addr_t boundary,
bus_addr_t lowaddr, bus_addr_t highaddr)
{
xdma_controller_t *xdma;
int ret;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
if (xchan->flags & XCHAN_CONFIGURED) {
device_printf(xdma->dev,
"%s: Channel is already configured.\n", __func__);
return (-1);
}
xchan->xr_num = xr_num;
xchan->maxsegsize = maxsegsize;
xchan->maxnsegs = maxnsegs;
xchan->alignment = alignment;
xchan->boundary = boundary;
xchan->lowaddr = lowaddr;
xchan->highaddr = highaddr;
if (xchan->maxnsegs > XDMA_MAX_SEG) {
device_printf(xdma->dev, "%s: maxnsegs is too big\n",
__func__);
return (-1);
}
xchan_bank_init(xchan);
/* Allocate sglist. */
ret = xchan_sglist_alloc(xchan);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't allocate sglist.\n", __func__);
return (-1);
}
/* Allocate buffers if required. */
- if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0) {
+ if (xchan->caps & (XCHAN_CAP_BUSDMA | XCHAN_CAP_BOUNCE)) {
ret = xchan_bufs_alloc(xchan);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't allocate bufs.\n", __func__);
/* Cleanup */
xchan_sglist_free(xchan);
xchan_bank_free(xchan);
return (-1);
}
}
xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);
XCHAN_LOCK(xchan);
ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't prepare SG transfer.\n", __func__);
XCHAN_UNLOCK(xchan);
return (-1);
}
XCHAN_UNLOCK(xchan);
return (0);
}
void
xchan_seg_done(xdma_channel_t *xchan,
struct xdma_transfer_status *st)
{
struct xdma_request *xr;
xdma_controller_t *xdma;
struct xchan_buf *b;
xdma = xchan->xdma;
xr = TAILQ_FIRST(&xchan->processing);
if (xr == NULL)
panic("request not found\n");
b = &xr->buf;
atomic_subtract_int(&b->nsegs_left, 1);
if (b->nsegs_left == 0) {
if (xchan->caps & XCHAN_CAP_BUSDMA) {
if (xr->direction == XDMA_MEM_TO_DEV)
bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
BUS_DMASYNC_POSTWRITE);
else
bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
- } else {
- if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0 &&
- xr->req_type == XR_TYPE_MBUF &&
+ } else if (xchan->caps & XCHAN_CAP_BOUNCE) {
+ if (xr->req_type == XR_TYPE_MBUF &&
xr->direction == XDMA_DEV_TO_MEM)
m_copyback(xr->m, 0, st->transferred,
(void *)xr->buf.vaddr);
}
xr->status.error = st->error;
xr->status.transferred = st->transferred;
QUEUE_PROC_LOCK(xchan);
TAILQ_REMOVE(&xchan->processing, xr, xr_next);
QUEUE_PROC_UNLOCK(xchan);
QUEUE_OUT_LOCK(xchan);
TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
QUEUE_OUT_UNLOCK(xchan);
}
}
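/*
 * The bounce path mirrors the load side: data received into the reserved
 * buffer (buf.vaddr) is copied back into the consumer's mbuf with
 * m_copyback() once all segments of the request have completed, the
 * inverse of the m_copydata() done in _xdma_load_data() below.
 */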
static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
struct seg_load_request *slr;
struct bus_dma_segment *seg;
int i;
slr = arg;
seg = slr->seg;
if (error != 0) {
slr->error = error;
return;
}
slr->nsegs = nsegs;
for (i = 0; i < nsegs; i++) {
seg[i].ds_addr = segs[i].ds_addr;
seg[i].ds_len = segs[i].ds_len;
}
}
static int
_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
struct bus_dma_segment *seg)
{
xdma_controller_t *xdma;
struct seg_load_request slr;
uint32_t nsegs;
void *addr;
int error;
xdma = xchan->xdma;
error = 0;
nsegs = 0;
switch (xr->req_type) {
case XR_TYPE_MBUF:
error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
break;
case XR_TYPE_BIO:
slr.nsegs = 0;
slr.error = 0;
slr.seg = seg;
error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
if (slr.error != 0) {
device_printf(xdma->dma_dev,
"%s: bus_dmamap_load failed, err %d\n",
__func__, slr.error);
return (0);
}
nsegs = slr.nsegs;
break;
case XR_TYPE_VIRT:
switch (xr->direction) {
case XDMA_MEM_TO_DEV:
addr = (void *)xr->src_addr;
break;
case XDMA_DEV_TO_MEM:
addr = (void *)xr->dst_addr;
break;
default:
device_printf(xdma->dma_dev,
"%s: Direction is not supported\n", __func__);
return (0);
}
slr.nsegs = 0;
slr.error = 0;
slr.seg = seg;
error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
addr, (xr->block_len * xr->block_num),
xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
if (slr.error != 0) {
device_printf(xdma->dma_dev,
"%s: bus_dmamap_load failed, err %d\n",
__func__, slr.error);
return (0);
}
nsegs = slr.nsegs;
break;
default:
break;
}
if (error != 0) {
if (error == ENOMEM) {
/*
* Out of memory. Try again later.
* TODO: count errors.
*/
} else
device_printf(xdma->dma_dev,
"%s: bus_dmamap_load failed with err %d\n",
__func__, error);
return (0);
}
if (xr->direction == XDMA_MEM_TO_DEV)
bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
BUS_DMASYNC_PREWRITE);
else
bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
BUS_DMASYNC_PREREAD);
return (nsegs);
}
static int
_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
struct bus_dma_segment *seg)
{
xdma_controller_t *xdma;
struct mbuf *m;
uint32_t nsegs;
xdma = xchan->xdma;
m = xr->m;
nsegs = 1;
switch (xr->req_type) {
case XR_TYPE_MBUF:
- if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0) {
+ if (xchan->caps & XCHAN_CAP_BUSDMA)
+ seg[0].ds_addr = mtod(m, bus_addr_t);
+ else if (xchan->caps & XCHAN_CAP_BOUNCE) {
if (xr->direction == XDMA_MEM_TO_DEV)
m_copydata(m, 0, m->m_pkthdr.len,
(void *)xr->buf.vaddr);
seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
- } else
- seg[0].ds_addr = mtod(m, bus_addr_t);
+ }
seg[0].ds_len = m->m_pkthdr.len;
break;
case XR_TYPE_BIO:
case XR_TYPE_VIRT:
default:
panic("implement me\n");
}
return (nsegs);
}
static int
xdma_load_data(xdma_channel_t *xchan,
struct xdma_request *xr, struct bus_dma_segment *seg)
{
xdma_controller_t *xdma;
int error;
int nsegs;
xdma = xchan->xdma;
error = 0;
nsegs = 0;
if (xchan->caps & XCHAN_CAP_BUSDMA)
nsegs = _xdma_load_data_busdma(xchan, xr, seg);
else
nsegs = _xdma_load_data(xchan, xr, seg);
if (nsegs == 0)
return (0); /* Try again later. */
xr->buf.nsegs = nsegs;
xr->buf.nsegs_left = nsegs;
return (nsegs);
}
static int
xdma_process(xdma_channel_t *xchan,
struct xdma_sglist *sg)
{
struct bus_dma_segment seg[XDMA_MAX_SEG];
struct xdma_request *xr;
struct xdma_request *xr_tmp;
xdma_controller_t *xdma;
uint32_t capacity;
uint32_t n;
uint32_t c;
int nsegs;
int ret;
XCHAN_ASSERT_LOCKED(xchan);
xdma = xchan->xdma;
n = 0;
c = 0;
ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't get DMA controller capacity.\n", __func__);
return (-1);
}
TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
switch (xr->req_type) {
case XR_TYPE_MBUF:
if ((xchan->caps & XCHAN_CAP_NOSEG) ||
(c > xchan->maxnsegs))
c = xdma_mbuf_defrag(xchan, xr);
break;
case XR_TYPE_BIO:
case XR_TYPE_VIRT:
default:
c = 1;
}
if (capacity <= (c + n)) {
/*
* No space yet available for the entire
* request in the DMA engine.
*/
break;
}
if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
/* Sglist is full. */
break;
}
nsegs = xdma_load_data(xchan, xr, seg);
if (nsegs == 0)
break;
xdma_sglist_add(&sg[n], seg, nsegs, xr);
n += nsegs;
QUEUE_IN_LOCK(xchan);
TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
QUEUE_IN_UNLOCK(xchan);
QUEUE_PROC_LOCK(xchan);
TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
QUEUE_PROC_UNLOCK(xchan);
}
return (n);
}
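/*
 * xdma_process() batches as many queued requests as both the engine
 * capacity (XDMA_CHANNEL_CAPACITY) and the sglist bound
 * (XDMA_SGLIST_MAXLEN) allow; anything it could not load simply stays on
 * queue_in for the next submit.
 */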
int
xdma_queue_submit_sg(xdma_channel_t *xchan)
{
struct xdma_sglist *sg;
xdma_controller_t *xdma;
uint32_t sg_n;
int ret;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
XCHAN_ASSERT_LOCKED(xchan);
sg = xchan->sg;
- if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0 &&
+ if ((xchan->caps & (XCHAN_CAP_BOUNCE | XCHAN_CAP_BUSDMA)) &&
(xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
device_printf(xdma->dev,
"%s: Can't submit a transfer: no bufs\n",
__func__);
return (-1);
}
sg_n = xdma_process(xchan, sg);
if (sg_n == 0)
return (0); /* Nothing to submit */
/* Now submit sglist to DMA engine driver. */
ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't submit an sglist.\n", __func__);
return (-1);
}
return (0);
}
Index: head/sys/dev/xilinx/axidma.c
===================================================================
--- head/sys/dev/xilinx/axidma.c (revision 349726)
+++ head/sys/dev/xilinx/axidma.c (revision 349727)
@@ -1,648 +1,649 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory (Department of Computer Science and
* Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
* DARPA SSITH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Xilinx AXI DMA controller driver. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif
#include <dev/xdma/xdma.h>
#include <dev/xilinx/axidma.h>
#include "xdma_if.h"
#define AXIDMA_DEBUG
#undef AXIDMA_DEBUG
#ifdef AXIDMA_DEBUG
#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif
#define AXIDMA_NCHANNELS 2
#define AXIDMA_DESCS_NUM 512
#define AXIDMA_TX_CHAN 0
#define AXIDMA_RX_CHAN 1
extern struct bus_space memmap_bus;
struct axidma_fdt_data {
int id;
};
struct axidma_channel {
struct axidma_softc *sc;
xdma_channel_t *xchan;
bool used;
int idx_head;
int idx_tail;
struct axidma_desc **descs;
vm_paddr_t *descs_phys;
uint32_t descs_num;
vm_size_t mem_size;
vm_offset_t mem_paddr;
vm_offset_t mem_vaddr;
uint32_t descs_used_count;
};
struct axidma_softc {
device_t dev;
struct resource *res[3];
bus_space_tag_t bst;
bus_space_handle_t bsh;
void *ih[2];
struct axidma_desc desc;
struct axidma_channel channels[AXIDMA_NCHANNELS];
};
static struct resource_spec axidma_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE },
{ SYS_RES_IRQ, 1, RF_ACTIVE },
{ -1, 0 }
};
#define HWTYPE_NONE 0
#define HWTYPE_STD 1
static struct ofw_compat_data compat_data[] = {
{ "xlnx,eth-dma", HWTYPE_STD },
{ NULL, HWTYPE_NONE },
};
static int axidma_probe(device_t dev);
static int axidma_attach(device_t dev);
static int axidma_detach(device_t dev);
static inline uint32_t
axidma_next_desc(struct axidma_channel *chan, uint32_t curidx)
{
return ((curidx + 1) % chan->descs_num);
}
static void
axidma_intr(struct axidma_softc *sc,
struct axidma_channel *chan)
{
xdma_transfer_status_t status;
xdma_transfer_status_t st;
struct axidma_fdt_data *data;
xdma_controller_t *xdma;
struct axidma_desc *desc;
struct xdma_channel *xchan;
uint32_t tot_copied;
int pending;
int errors;
xchan = chan->xchan;
xdma = xchan->xdma;
data = xdma->data;
pending = READ4(sc, AXI_DMASR(data->id));
WRITE4(sc, AXI_DMASR(data->id), pending);
errors = (pending & (DMASR_DMAINTERR | DMASR_DMASLVERR
| DMASR_DMADECOREERR | DMASR_SGINTERR
| DMASR_SGSLVERR | DMASR_SGDECERR));
dprintf("%s: AXI_DMASR %x\n", __func__,
READ4(sc, AXI_DMASR(data->id)));
dprintf("%s: AXI_CURDESC %x\n", __func__,
READ4(sc, AXI_CURDESC(data->id)));
dprintf("%s: AXI_TAILDESC %x\n", __func__,
READ4(sc, AXI_TAILDESC(data->id)));
tot_copied = 0;
while (chan->idx_tail != chan->idx_head) {
desc = chan->descs[chan->idx_tail];
if ((desc->status & BD_STATUS_CMPLT) == 0)
break;
st.error = errors;
st.transferred = desc->status & BD_CONTROL_LEN_M;
tot_copied += st.transferred;
xchan_seg_done(xchan, &st);
chan->idx_tail = axidma_next_desc(chan, chan->idx_tail);
atomic_subtract_int(&chan->descs_used_count, 1);
}
/* Finish operation */
status.error = errors;
status.transferred = tot_copied;
xdma_callback(chan->xchan, &status);
}
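/*
 * Completion reaping above is ring-based: descriptors between idx_tail
 * and idx_head are checked for BD_STATUS_CMPLT, each completed one is
 * reported via xchan_seg_done(), and the aggregate byte count is handed
 * to the consumer through xdma_callback().
 */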
static void
axidma_intr_rx(void *arg)
{
struct axidma_softc *sc;
struct axidma_channel *chan;
dprintf("%s\n", __func__);
sc = arg;
chan = &sc->channels[AXIDMA_RX_CHAN];
axidma_intr(sc, chan);
}
static void
axidma_intr_tx(void *arg)
{
struct axidma_softc *sc;
struct axidma_channel *chan;
dprintf("%s\n", __func__);
sc = arg;
chan = &sc->channels[AXIDMA_TX_CHAN];
axidma_intr(sc, chan);
}
static int
axidma_reset(struct axidma_softc *sc, int chan_id)
{
int timeout;
WRITE4(sc, AXI_DMACR(chan_id), DMACR_RESET);
timeout = 100;
do {
if ((READ4(sc, AXI_DMACR(chan_id)) & DMACR_RESET) == 0)
break;
} while (timeout--);
dprintf("timeout %d\n", timeout);
if (timeout < 0)
return (-1);
dprintf("%s: read control after reset: %x\n",
__func__, READ4(sc, AXI_DMACR(chan_id)));
return (0);
}
static int
axidma_probe(device_t dev)
{
int hwtype;
if (!ofw_bus_status_okay(dev))
return (ENXIO);
hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
if (hwtype == HWTYPE_NONE)
return (ENXIO);
device_set_desc(dev, "Xilinx AXI DMA");
return (BUS_PROBE_DEFAULT);
}
static int
axidma_attach(device_t dev)
{
struct axidma_softc *sc;
phandle_t xref, node;
int err;
sc = device_get_softc(dev);
sc->dev = dev;
if (bus_alloc_resources(dev, axidma_spec, sc->res)) {
device_printf(dev, "could not allocate resources.\n");
return (ENXIO);
}
/* CSR memory interface */
sc->bst = rman_get_bustag(sc->res[0]);
sc->bsh = rman_get_bushandle(sc->res[0]);
/* Setup interrupt handler */
err = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
NULL, axidma_intr_tx, sc, &sc->ih[0]);
if (err) {
device_printf(dev, "Unable to alloc interrupt resource.\n");
return (ENXIO);
}
/* Setup interrupt handler */
err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
NULL, axidma_intr_rx, sc, &sc->ih[1]);
if (err) {
device_printf(dev, "Unable to alloc interrupt resource.\n");
return (ENXIO);
}
node = ofw_bus_get_node(dev);
xref = OF_xref_from_node(node);
OF_device_register_xref(xref, dev);
return (0);
}
static int
axidma_detach(device_t dev)
{
struct axidma_softc *sc;
sc = device_get_softc(dev);
bus_teardown_intr(dev, sc->res[1], sc->ih[0]);
bus_teardown_intr(dev, sc->res[2], sc->ih[1]);
bus_release_resources(dev, axidma_spec, sc->res);
return (0);
}
static int
axidma_desc_free(struct axidma_softc *sc, struct axidma_channel *chan)
{
struct xdma_channel *xchan;
int nsegments;
nsegments = chan->descs_num;
xchan = chan->xchan;
free(chan->descs, M_DEVBUF);
free(chan->descs_phys, M_DEVBUF);
pmap_kremove_device(chan->mem_vaddr, chan->mem_size);
kva_free(chan->mem_vaddr, chan->mem_size);
vmem_free(xchan->vmem, chan->mem_paddr, chan->mem_size);
return (0);
}
static int
axidma_desc_alloc(struct axidma_softc *sc, struct xdma_channel *xchan,
uint32_t desc_size)
{
struct axidma_channel *chan;
int nsegments;
int i;
chan = (struct axidma_channel *)xchan->chan;
nsegments = chan->descs_num;
chan->descs = malloc(nsegments * sizeof(struct axidma_desc *),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (chan->descs == NULL) {
device_printf(sc->dev,
"%s: Can't allocate memory.\n", __func__);
return (-1);
}
chan->descs_phys = malloc(nsegments * sizeof(bus_dma_segment_t),
M_DEVBUF, M_NOWAIT | M_ZERO);
chan->mem_size = desc_size * nsegments;
if (vmem_alloc(xchan->vmem, chan->mem_size, M_FIRSTFIT | M_NOWAIT,
&chan->mem_paddr)) {
device_printf(sc->dev, "Failed to allocate memory.\n");
return (-1);
}
chan->mem_vaddr = kva_alloc(chan->mem_size);
pmap_kenter_device(chan->mem_vaddr, chan->mem_size, chan->mem_paddr);
device_printf(sc->dev, "Allocated chunk %lx %d\n",
chan->mem_paddr, chan->mem_size);
for (i = 0; i < nsegments; i++) {
chan->descs[i] = (struct axidma_desc *)
((uint64_t)chan->mem_vaddr + desc_size * i);
chan->descs_phys[i] = chan->mem_paddr + desc_size * i;
}
return (0);
}
static int
axidma_channel_alloc(device_t dev, struct xdma_channel *xchan)
{
xdma_controller_t *xdma;
struct axidma_fdt_data *data;
struct axidma_channel *chan;
struct axidma_softc *sc;
sc = device_get_softc(dev);
if (xchan->caps & XCHAN_CAP_BUSDMA) {
device_printf(sc->dev,
"Error: busdma operation is not implemented.");
return (-1);
}
xdma = xchan->xdma;
data = xdma->data;
chan = &sc->channels[data->id];
if (chan->used == false) {
if (axidma_reset(sc, data->id) != 0)
return (-1);
chan->xchan = xchan;
+ xchan->caps |= XCHAN_CAP_BOUNCE;
xchan->chan = (void *)chan;
chan->sc = sc;
chan->used = true;
chan->idx_head = 0;
chan->idx_tail = 0;
chan->descs_used_count = 0;
chan->descs_num = AXIDMA_DESCS_NUM;
return (0);
}
return (-1);
}
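/*
 * Since busdma is rejected above, the driver forces XCHAN_CAP_BOUNCE on
 * every channel it accepts: all payload data is staged through the
 * reserved buffers the framework now allocates in xchan_bufs_alloc(),
 * and the descriptor ring itself also lives in vmem-backed memory (see
 * axidma_desc_alloc()).
 */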
static int
axidma_channel_free(device_t dev, struct xdma_channel *xchan)
{
struct axidma_channel *chan;
struct axidma_softc *sc;
sc = device_get_softc(dev);
chan = (struct axidma_channel *)xchan->chan;
axidma_desc_free(sc, chan);
chan->used = false;
return (0);
}
static int
axidma_channel_capacity(device_t dev, xdma_channel_t *xchan,
uint32_t *capacity)
{
struct axidma_channel *chan;
uint32_t c;
chan = (struct axidma_channel *)xchan->chan;
/* At least one descriptor must be left empty. */
c = (chan->descs_num - chan->descs_used_count - 1);
*capacity = c;
return (0);
}
static int
axidma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
struct xdma_sglist *sg, uint32_t sg_n)
{
xdma_controller_t *xdma;
struct axidma_fdt_data *data;
struct axidma_channel *chan;
struct axidma_desc *desc;
struct axidma_softc *sc;
uint32_t src_addr;
uint32_t dst_addr;
uint32_t addr;
uint32_t len;
uint32_t tmp;
int i;
int tail;
dprintf("%s: sg_n %d\n", __func__, sg_n);
sc = device_get_softc(dev);
chan = (struct axidma_channel *)xchan->chan;
xdma = xchan->xdma;
data = xdma->data;
if (sg_n == 0)
return (0);
tail = chan->idx_head;
tmp = 0;
for (i = 0; i < sg_n; i++) {
src_addr = (uint32_t)sg[i].src_addr;
dst_addr = (uint32_t)sg[i].dst_addr;
len = (uint32_t)sg[i].len;
dprintf("%s(%d): src %x dst %x len %d\n", __func__,
data->id, src_addr, dst_addr, len);
desc = chan->descs[chan->idx_head];
if (sg[i].direction == XDMA_MEM_TO_DEV)
desc->phys = src_addr;
else
desc->phys = dst_addr;
desc->status = 0;
desc->control = len;
if (sg[i].first == 1)
desc->control |= BD_CONTROL_TXSOF;
if (sg[i].last == 1)
desc->control |= BD_CONTROL_TXEOF;
tmp = chan->idx_head;
atomic_add_int(&chan->descs_used_count, 1);
chan->idx_head = axidma_next_desc(chan, chan->idx_head);
}
dprintf("%s(%d): _curdesc %x\n", __func__, data->id,
READ8(sc, AXI_CURDESC(data->id)));
dprintf("%s(%d): _curdesc %x\n", __func__, data->id,
READ8(sc, AXI_CURDESC(data->id)));
dprintf("%s(%d): status %x\n", __func__, data->id,
READ4(sc, AXI_DMASR(data->id)));
addr = chan->descs_phys[tmp];
WRITE8(sc, AXI_TAILDESC(data->id), addr);
return (0);
}
static int
axidma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
{
xdma_controller_t *xdma;
struct axidma_fdt_data *data;
struct axidma_channel *chan;
struct axidma_desc *desc;
struct axidma_softc *sc;
uint32_t addr;
uint32_t reg;
int ret;
int i;
sc = device_get_softc(dev);
chan = (struct axidma_channel *)xchan->chan;
xdma = xchan->xdma;
data = xdma->data;
dprintf("%s(%d)\n", __func__, data->id);
ret = axidma_desc_alloc(sc, xchan, sizeof(struct axidma_desc));
if (ret != 0) {
device_printf(sc->dev,
"%s: Can't allocate descriptors.\n", __func__);
return (-1);
}
for (i = 0; i < chan->descs_num; i++) {
desc = chan->descs[i];
bzero(desc, sizeof(struct axidma_desc));
if (i == (chan->descs_num - 1))
desc->next = chan->descs_phys[0];
else
desc->next = chan->descs_phys[i + 1];
desc->status = 0;
desc->control = 0;
dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
data->id, i, (uint64_t)desc, le32toh(desc->next));
}
addr = chan->descs_phys[0];
WRITE8(sc, AXI_CURDESC(data->id), addr);
reg = READ4(sc, AXI_DMACR(data->id));
reg |= DMACR_IOC_IRQEN | DMACR_DLY_IRQEN | DMACR_ERR_IRQEN;
WRITE4(sc, AXI_DMACR(data->id), reg);
reg |= DMACR_RS;
WRITE4(sc, AXI_DMACR(data->id), reg);
return (0);
}
static int
axidma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
{
struct axidma_channel *chan;
struct axidma_softc *sc;
sc = device_get_softc(dev);
chan = (struct axidma_channel *)xchan->chan;
switch (cmd) {
case XDMA_CMD_BEGIN:
case XDMA_CMD_TERMINATE:
case XDMA_CMD_PAUSE:
/* TODO: implement me */
return (-1);
}
return (0);
}
#ifdef FDT
static int
axidma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
{
struct axidma_fdt_data *data;
if (ncells != 1)
return (-1);
data = malloc(sizeof(struct axidma_fdt_data),
M_DEVBUF, (M_WAITOK | M_ZERO));
data->id = cells[0];
*ptr = data;
return (0);
}
#endif
static device_method_t axidma_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, axidma_probe),
DEVMETHOD(device_attach, axidma_attach),
DEVMETHOD(device_detach, axidma_detach),
/* xDMA Interface */
DEVMETHOD(xdma_channel_alloc, axidma_channel_alloc),
DEVMETHOD(xdma_channel_free, axidma_channel_free),
DEVMETHOD(xdma_channel_control, axidma_channel_control),
/* xDMA SG Interface */
DEVMETHOD(xdma_channel_capacity, axidma_channel_capacity),
DEVMETHOD(xdma_channel_prep_sg, axidma_channel_prep_sg),
DEVMETHOD(xdma_channel_submit_sg, axidma_channel_submit_sg),
#ifdef FDT
DEVMETHOD(xdma_ofw_md_data, axidma_ofw_md_data),
#endif
DEVMETHOD_END
};
static driver_t axidma_driver = {
"axidma",
axidma_methods,
sizeof(struct axidma_softc),
};
static devclass_t axidma_devclass;
EARLY_DRIVER_MODULE(axidma, simplebus, axidma_driver, axidma_devclass, 0, 0,
BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);