Page Menu · Home · FreeBSD

D53932.diff
Assigned: No One · Visibility: Temporary

D53932.diff

diff --git a/sys/conf/files.riscv b/sys/conf/files.riscv
--- a/sys/conf/files.riscv
+++ b/sys/conf/files.riscv
@@ -23,7 +23,8 @@
dev/vmm/vmm_mem.c optional vmm
dev/vmm/vmm_stat.c optional vmm
dev/xilinx/axi_quad_spi.c optional xilinx_spi
-dev/xilinx/axidma.c optional axidma xdma
+dev/xilinx/axidma.c optional axidma
+dev/xilinx/axidma_if.m optional axidma
dev/xilinx/if_xae.c optional xae
dev/xilinx/xlnx_pcib.c optional pci fdt xlnx_pcib
kern/msi_if.m standard
diff --git a/sys/dev/xilinx/axidma.h b/sys/dev/xilinx/axidma.h
--- a/sys/dev/xilinx/axidma.h
+++ b/sys/dev/xilinx/axidma.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
+ * Copyright (c) 2019-2025 Ruslan Bukin <br@bsdpad.com>
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory (Department of Computer Science and
@@ -58,11 +58,6 @@
#define AXI_TAILDESC_MSB(n) (0x14 + 0x30 * (n)) /* Tail Descriptor Pointer. Upper 32 bits of address. */
#define AXI_SG_CTL 0x2C /* Scatter/Gather User and Cache */
-#define AXIDMA_NCHANNELS 2
-#define AXIDMA_DESCS_NUM 512
-#define AXIDMA_TX_CHAN 0
-#define AXIDMA_RX_CHAN 1
-
struct axidma_desc {
uint32_t next;
uint32_t reserved1;
@@ -87,8 +82,4 @@
uint32_t reserved[3];
};
-struct axidma_fdt_data {
- int id;
-};
-
#endif /* !_DEV_XILINX_AXIDMA_H_ */
diff --git a/sys/dev/xilinx/axidma.c b/sys/dev/xilinx/axidma.c
--- a/sys/dev/xilinx/axidma.c
+++ b/sys/dev/xilinx/axidma.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
+ * Copyright (c) 2019-2025 Ruslan Bukin <br@bsdpad.com>
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory (Department of Computer Science and
@@ -30,13 +30,11 @@
* SUCH DAMAGE.
*/
-/* Xilinx AXI DMA controller driver. */
+/*
+ * Xilinx AXI Ethernet DMA controller driver.
+ */
-#include <sys/cdefs.h>
-#include "opt_platform.h"
-#include <sys/param.h>
#include <sys/systm.h>
-#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
@@ -44,67 +42,32 @@
#include <machine/bus.h>
-#include <vm/vm.h>
-#include <vm/vm_extern.h>
-#include <vm/vm_page.h>
-
-#ifdef FDT
-#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
-#endif
-#include <dev/xdma/xdma.h>
#include <dev/xilinx/axidma.h>
-#include "xdma_if.h"
+#include "axidma_if.h"
-#define READ4(_sc, _reg) \
+#define AXIDMA_RD4(_sc, _reg) \
bus_space_read_4(_sc->bst, _sc->bsh, _reg)
-#define WRITE4(_sc, _reg, _val) \
+#define AXIDMA_WR4(_sc, _reg, _val) \
bus_space_write_4(_sc->bst, _sc->bsh, _reg, _val)
-#define READ8(_sc, _reg) \
+#define AXIDMA_RD8(_sc, _reg) \
bus_space_read_8(_sc->bst, _sc->bsh, _reg)
-#define WRITE8(_sc, _reg, _val) \
+#define AXIDMA_WR8(_sc, _reg, _val) \
bus_space_write_8(_sc->bst, _sc->bsh, _reg, _val)
-#define AXIDMA_DEBUG
-#undef AXIDMA_DEBUG
-
-#ifdef AXIDMA_DEBUG
-#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
-#else
-#define dprintf(fmt, ...)
-#endif
-
-extern struct bus_space memmap_bus;
-
-struct axidma_channel {
- struct axidma_softc *sc;
- xdma_channel_t *xchan;
- bool used;
- int idx_head;
- int idx_tail;
-
- struct axidma_desc **descs;
- vm_paddr_t *descs_phys;
- uint32_t descs_num;
+#define dprintf(fmt, ...)
- vm_size_t mem_size;
- vm_offset_t mem_paddr;
- vm_offset_t mem_vaddr;
-
- uint32_t descs_used_count;
-};
+#define AXIDMA_MAX_CHANNELS 2
struct axidma_softc {
device_t dev;
- struct resource *res[3];
+ struct resource *res[1 + AXIDMA_MAX_CHANNELS];
bus_space_tag_t bst;
bus_space_handle_t bsh;
- void *ih[2];
- struct axidma_desc desc;
- struct axidma_channel channels[AXIDMA_NCHANNELS];
+ void *ih[AXIDMA_MAX_CHANNELS];
};
static struct resource_spec axidma_spec[] = {
@@ -114,144 +77,19 @@
{ -1, 0 }
};
-#define HWTYPE_NONE 0
-#define HWTYPE_STD 1
-
static struct ofw_compat_data compat_data[] = {
- { "xlnx,eth-dma", HWTYPE_STD },
- { NULL, HWTYPE_NONE },
+ { "xlnx,eth-dma", 1 },
+ { NULL, 0 },
};
-static int axidma_probe(device_t dev);
-static int axidma_attach(device_t dev);
-static int axidma_detach(device_t dev);
-
-static inline uint32_t
-axidma_next_desc(struct axidma_channel *chan, uint32_t curidx)
-{
-
- return ((curidx + 1) % chan->descs_num);
-}
-
-static void
-axidma_intr(struct axidma_softc *sc,
- struct axidma_channel *chan)
-{
- xdma_transfer_status_t status;
- xdma_transfer_status_t st;
- struct axidma_fdt_data *data;
- xdma_controller_t *xdma;
- struct axidma_desc *desc;
- struct xdma_channel *xchan;
- uint32_t tot_copied;
- int pending;
- int errors;
-
- xchan = chan->xchan;
- xdma = xchan->xdma;
- data = xdma->data;
-
- pending = READ4(sc, AXI_DMASR(data->id));
- WRITE4(sc, AXI_DMASR(data->id), pending);
-
- errors = (pending & (DMASR_DMAINTERR | DMASR_DMASLVERR
- | DMASR_DMADECOREERR | DMASR_SGINTERR
- | DMASR_SGSLVERR | DMASR_SGDECERR));
-
- dprintf("%s: AXI_DMASR %x\n", __func__,
- READ4(sc, AXI_DMASR(data->id)));
- dprintf("%s: AXI_CURDESC %x\n", __func__,
- READ4(sc, AXI_CURDESC(data->id)));
- dprintf("%s: AXI_TAILDESC %x\n", __func__,
- READ4(sc, AXI_TAILDESC(data->id)));
-
- tot_copied = 0;
-
- while (chan->idx_tail != chan->idx_head) {
- desc = chan->descs[chan->idx_tail];
- cpu_dcache_wbinv_range((vm_offset_t)desc,
- sizeof(struct axidma_desc));
-
- if ((desc->status & BD_STATUS_CMPLT) == 0)
- break;
-
- st.error = errors;
- st.transferred = desc->status & BD_CONTROL_LEN_M;
- tot_copied += st.transferred;
- xchan_seg_done(xchan, &st);
-
- chan->idx_tail = axidma_next_desc(chan, chan->idx_tail);
- atomic_subtract_int(&chan->descs_used_count, 1);
- }
-
- /* Finish operation */
- status.error = errors;
- status.transferred = tot_copied;
- xdma_callback(chan->xchan, &status);
-}
-
-static void
-axidma_intr_rx(void *arg)
-{
- struct axidma_softc *sc;
- struct axidma_channel *chan;
-
- dprintf("%s\n", __func__);
-
- sc = arg;
- chan = &sc->channels[AXIDMA_RX_CHAN];
-
- axidma_intr(sc, chan);
-}
-
-static void
-axidma_intr_tx(void *arg)
-{
- struct axidma_softc *sc;
- struct axidma_channel *chan;
-
- dprintf("%s\n", __func__);
-
- sc = arg;
- chan = &sc->channels[AXIDMA_TX_CHAN];
-
- axidma_intr(sc, chan);
-}
-
-static int
-axidma_reset(struct axidma_softc *sc, int chan_id)
-{
- int timeout;
-
- WRITE4(sc, AXI_DMACR(chan_id), DMACR_RESET);
-
- timeout = 100;
- do {
- if ((READ4(sc, AXI_DMACR(chan_id)) & DMACR_RESET) == 0)
- break;
- } while (timeout--);
-
- dprintf("timeout %d\n", timeout);
-
- if (timeout == 0)
- return (-1);
-
- dprintf("%s: read control after reset: %x\n",
- __func__, READ4(sc, AXI_DMACR(chan_id)));
-
- return (0);
-}
-
static int
axidma_probe(device_t dev)
{
- int hwtype;
if (!ofw_bus_status_okay(dev))
return (ENXIO);
- hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
- if (hwtype == HWTYPE_NONE)
+ if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
return (ENXIO);
device_set_desc(dev, "Xilinx AXI DMA");
@@ -264,7 +102,6 @@
{
struct axidma_softc *sc;
phandle_t xref, node;
- int err;
sc = device_get_softc(dev);
sc->dev = dev;
@@ -278,22 +115,6 @@
sc->bst = rman_get_bustag(sc->res[0]);
sc->bsh = rman_get_bushandle(sc->res[0]);
- /* Setup interrupt handler */
- err = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
- NULL, axidma_intr_tx, sc, &sc->ih[0]);
- if (err) {
- device_printf(dev, "Unable to alloc interrupt resource.\n");
- return (ENXIO);
- }
-
- /* Setup interrupt handler */
- err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
- NULL, axidma_intr_rx, sc, &sc->ih[1]);
- if (err) {
- device_printf(dev, "Unable to alloc interrupt resource.\n");
- return (ENXIO);
- }
-
node = ofw_bus_get_node(dev);
xref = OF_xref_from_node(node);
OF_device_register_xref(xref, dev);
@@ -302,331 +123,72 @@
}
static int
-axidma_detach(device_t dev)
+axidma_reset(device_t dev, int chan_id)
{
struct axidma_softc *sc;
+ int timeout;
sc = device_get_softc(dev);
- bus_teardown_intr(dev, sc->res[1], sc->ih[0]);
- bus_teardown_intr(dev, sc->res[2], sc->ih[1]);
- bus_release_resources(dev, axidma_spec, sc->res);
-
- return (0);
-}
-
-static int
-axidma_desc_free(struct axidma_softc *sc, struct axidma_channel *chan)
-{
- struct xdma_channel *xchan;
-
- xchan = chan->xchan;
-
- free(chan->descs, M_DEVBUF);
- free(chan->descs_phys, M_DEVBUF);
-
- pmap_kremove_device(chan->mem_vaddr, chan->mem_size);
- kva_free(chan->mem_vaddr, chan->mem_size);
- vmem_free(xchan->vmem, chan->mem_paddr, chan->mem_size);
-
- return (0);
-}
-
-static int
-axidma_desc_alloc(struct axidma_softc *sc, struct xdma_channel *xchan,
- uint32_t desc_size)
-{
- struct axidma_channel *chan;
- int nsegments;
- int i;
-
- chan = (struct axidma_channel *)xchan->chan;
- nsegments = chan->descs_num;
-
- chan->descs = malloc(nsegments * sizeof(struct axidma_desc *),
- M_DEVBUF, M_NOWAIT | M_ZERO);
- if (chan->descs == NULL) {
- device_printf(sc->dev,
- "%s: Can't allocate memory.\n", __func__);
- return (-1);
- }
-
- chan->descs_phys = malloc(nsegments * sizeof(bus_dma_segment_t),
- M_DEVBUF, M_NOWAIT | M_ZERO);
- chan->mem_size = desc_size * nsegments;
- if (vmem_alloc(xchan->vmem, chan->mem_size, M_FIRSTFIT | M_NOWAIT,
- &chan->mem_paddr)) {
- device_printf(sc->dev, "Failed to allocate memory.\n");
- return (-1);
- }
- chan->mem_vaddr = kva_alloc(chan->mem_size);
- pmap_kenter(chan->mem_vaddr, chan->mem_size, chan->mem_paddr,
- VM_MEMATTR_DEFAULT);
-
- device_printf(sc->dev, "Allocated chunk %lx %lu\n",
- chan->mem_paddr, chan->mem_size);
-
- for (i = 0; i < nsegments; i++) {
- chan->descs[i] = (struct axidma_desc *)
- ((uint64_t)chan->mem_vaddr + desc_size * i);
- chan->descs_phys[i] = chan->mem_paddr + desc_size * i;
- }
+ AXIDMA_WR4(sc, AXI_DMACR(chan_id), DMACR_RESET);
- return (0);
-}
+ timeout = 100;
-static int
-axidma_channel_alloc(device_t dev, struct xdma_channel *xchan)
-{
- xdma_controller_t *xdma;
- struct axidma_fdt_data *data;
- struct axidma_channel *chan;
- struct axidma_softc *sc;
+ do {
+ if ((AXIDMA_RD4(sc, AXI_DMACR(chan_id)) & DMACR_RESET) == 0)
+ break;
+ } while (timeout--);
- sc = device_get_softc(dev);
+ dprintf("timeout %d\n", timeout);
- if (xchan->caps & XCHAN_CAP_BUSDMA) {
- device_printf(sc->dev,
- "Error: busdma operation is not implemented.");
+ if (timeout == 0)
return (-1);
- }
- xdma = xchan->xdma;
- data = xdma->data;
-
- chan = &sc->channels[data->id];
- if (chan->used == false) {
- if (axidma_reset(sc, data->id) != 0)
- return (-1);
- chan->xchan = xchan;
- xchan->caps |= XCHAN_CAP_BOUNCE;
- xchan->chan = (void *)chan;
- chan->sc = sc;
- chan->used = true;
- chan->idx_head = 0;
- chan->idx_tail = 0;
- chan->descs_used_count = 0;
- chan->descs_num = AXIDMA_DESCS_NUM;
-
- return (0);
- }
-
- return (-1);
-}
-
-static int
-axidma_channel_free(device_t dev, struct xdma_channel *xchan)
-{
- struct axidma_channel *chan;
- struct axidma_softc *sc;
-
- sc = device_get_softc(dev);
-
- chan = (struct axidma_channel *)xchan->chan;
-
- axidma_desc_free(sc, chan);
-
- chan->used = false;
-
- return (0);
-}
-
-static int
-axidma_channel_capacity(device_t dev, xdma_channel_t *xchan,
- uint32_t *capacity)
-{
- struct axidma_channel *chan;
- uint32_t c;
-
- chan = (struct axidma_channel *)xchan->chan;
-
- /* At least one descriptor must be left empty. */
- c = (chan->descs_num - chan->descs_used_count - 1);
-
- *capacity = c;
+ dprintf("%s: read control after reset: %x\n",
+ __func__, AXIDMA_RD4(sc, AXI_DMACR(chan_id)));
return (0);
}
-static int
-axidma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
- struct xdma_sglist *sg, uint32_t sg_n)
+static struct resource *
+axidma_memres(device_t dev)
{
- xdma_controller_t *xdma;
- struct axidma_fdt_data *data;
- struct axidma_channel *chan;
- struct axidma_desc *desc;
struct axidma_softc *sc;
- uint32_t src_addr;
- uint32_t dst_addr;
- uint32_t addr;
- uint32_t len;
- uint32_t tmp;
- int i;
-
- dprintf("%s: sg_n %d\n", __func__, sg_n);
sc = device_get_softc(dev);
- chan = (struct axidma_channel *)xchan->chan;
- xdma = xchan->xdma;
- data = xdma->data;
-
- if (sg_n == 0)
- return (0);
-
- tmp = 0;
-
- for (i = 0; i < sg_n; i++) {
- src_addr = (uint32_t)sg[i].src_addr;
- dst_addr = (uint32_t)sg[i].dst_addr;
- len = (uint32_t)sg[i].len;
-
- dprintf("%s(%d): src %x dst %x len %d\n", __func__,
- data->id, src_addr, dst_addr, len);
-
- desc = chan->descs[chan->idx_head];
- if (sg[i].direction == XDMA_MEM_TO_DEV)
- desc->phys = src_addr;
- else
- desc->phys = dst_addr;
- desc->status = 0;
- desc->control = len;
- if (sg[i].first == 1)
- desc->control |= BD_CONTROL_TXSOF;
- if (sg[i].last == 1)
- desc->control |= BD_CONTROL_TXEOF;
-
- cpu_dcache_wbinv_range((vm_offset_t)desc,
- sizeof(struct axidma_desc));
-
- tmp = chan->idx_head;
-
- atomic_add_int(&chan->descs_used_count, 1);
- chan->idx_head = axidma_next_desc(chan, chan->idx_head);
- }
-
- dprintf("%s(%d): _curdesc %x\n", __func__, data->id,
- READ8(sc, AXI_CURDESC(data->id)));
- dprintf("%s(%d): _curdesc %x\n", __func__, data->id,
- READ8(sc, AXI_CURDESC(data->id)));
- dprintf("%s(%d): status %x\n", __func__, data->id,
- READ4(sc, AXI_DMASR(data->id)));
-
- addr = chan->descs_phys[tmp];
- WRITE8(sc, AXI_TAILDESC(data->id), addr);
-
- return (0);
+ return (sc->res[0]);
}
static int
-axidma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
+axidma_setup_cb(device_t dev, int chan_id, void (*cb)(void *), void *arg)
{
- xdma_controller_t *xdma;
- struct axidma_fdt_data *data;
- struct axidma_channel *chan;
- struct axidma_desc *desc;
struct axidma_softc *sc;
- uint32_t addr;
- uint32_t reg;
- int ret;
- int i;
+ int error;
sc = device_get_softc(dev);
- chan = (struct axidma_channel *)xchan->chan;
- xdma = xchan->xdma;
- data = xdma->data;
-
- dprintf("%s(%d)\n", __func__, data->id);
-
- ret = axidma_desc_alloc(sc, xchan, sizeof(struct axidma_desc));
- if (ret != 0) {
- device_printf(sc->dev,
- "%s: Can't allocate descriptors.\n", __func__);
- return (-1);
- }
-
- for (i = 0; i < chan->descs_num; i++) {
- desc = chan->descs[i];
- bzero(desc, sizeof(struct axidma_desc));
-
- if (i == (chan->descs_num - 1))
- desc->next = chan->descs_phys[0];
- else
- desc->next = chan->descs_phys[i + 1];
- desc->status = 0;
- desc->control = 0;
-
- dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
- data->id, i, (uint64_t)desc, le32toh(desc->next));
- }
-
- addr = chan->descs_phys[0];
- WRITE8(sc, AXI_CURDESC(data->id), addr);
-
- reg = READ4(sc, AXI_DMACR(data->id));
- reg |= DMACR_IOC_IRQEN | DMACR_DLY_IRQEN | DMACR_ERR_IRQEN;
- WRITE4(sc, AXI_DMACR(data->id), reg);
- reg |= DMACR_RS;
- WRITE4(sc, AXI_DMACR(data->id), reg);
-
- return (0);
-}
-
-static int
-axidma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
-{
-
- switch (cmd) {
- case XDMA_CMD_BEGIN:
- case XDMA_CMD_TERMINATE:
- case XDMA_CMD_PAUSE:
- /* TODO: implement me */
- return (-1);
- }
-
- return (0);
-}
+ if (sc->ih[chan_id] != NULL)
+ return (EEXIST);
-#ifdef FDT
-static int
-axidma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
-{
- struct axidma_fdt_data *data;
-
- if (ncells != 1)
- return (-1);
-
- data = malloc(sizeof(struct axidma_fdt_data),
- M_DEVBUF, (M_WAITOK | M_ZERO));
- data->id = cells[0];
-
- *ptr = data;
+ error = bus_setup_intr(dev, sc->res[chan_id + 1],
+ INTR_TYPE_MISC | INTR_MPSAFE, NULL, cb, arg,
+ &sc->ih[chan_id]);
+ if (error)
+ device_printf(dev, "Unable to alloc interrupt resource.\n");
- return (0);
+ return (error);
}
-#endif
static device_method_t axidma_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, axidma_probe),
DEVMETHOD(device_attach, axidma_attach),
- DEVMETHOD(device_detach, axidma_detach),
-
- /* xDMA Interface */
- DEVMETHOD(xdma_channel_alloc, axidma_channel_alloc),
- DEVMETHOD(xdma_channel_free, axidma_channel_free),
- DEVMETHOD(xdma_channel_control, axidma_channel_control),
-
- /* xDMA SG Interface */
- DEVMETHOD(xdma_channel_capacity, axidma_channel_capacity),
- DEVMETHOD(xdma_channel_prep_sg, axidma_channel_prep_sg),
- DEVMETHOD(xdma_channel_submit_sg, axidma_channel_submit_sg),
-#ifdef FDT
- DEVMETHOD(xdma_ofw_md_data, axidma_ofw_md_data),
-#endif
+ /* Axidma interface */
+ DEVMETHOD(axidma_reset, axidma_reset),
+ DEVMETHOD(axidma_memres, axidma_memres),
+ DEVMETHOD(axidma_setup_cb, axidma_setup_cb),
DEVMETHOD_END
};
diff --git a/sys/dev/xilinx/axidma_if.m b/sys/dev/xilinx/axidma_if.m
new file mode 100644
--- /dev/null
+++ b/sys/dev/xilinx/axidma_if.m
@@ -0,0 +1,53 @@
+#-
+# Copyright (c) 2025 Ruslan Bukin <br@bsdpad.com>
+#
+# This software was developed by SRI International and the University of
+# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+# ("CTSRD"), as part of the DARPA CRASH research programme.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+#
+
+#include <machine/bus.h>
+
+INTERFACE axidma;
+
+HEADER {
+ typedef void (*axidma_cb_t)(void *arg);
+};
+
+METHOD int reset {
+ device_t dev;
+ int chan_id;
+};
+
+METHOD struct resource * memres {
+ device_t dev;
+};
+
+METHOD int setup_cb {
+ device_t dev;
+ int chan_id;
+ axidma_cb_t cb;
+ void *arg;
+};
diff --git a/sys/dev/xilinx/if_xae.c b/sys/dev/xilinx/if_xae.c
--- a/sys/dev/xilinx/if_xae.c
+++ b/sys/dev/xilinx/if_xae.c
@@ -61,53 +61,38 @@
#include <dev/mii/tiphy.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
-#include <dev/xilinx/if_xaereg.h>
-#include <dev/xilinx/if_xaevar.h>
#include <dev/xilinx/axidma.h>
+#include <dev/xilinx/if_xaereg.h>
+#include <dev/xilinx/if_xaevar.h>
#include "miibus_if.h"
+#include "axidma_if.h"
-#define READ4(_sc, _reg) \
- bus_read_4((_sc)->res[0], _reg)
-#define WRITE4(_sc, _reg, _val) \
- bus_write_4((_sc)->res[0], _reg, _val)
+#define XAE_RD4(_sc, _reg) bus_read_4((_sc)->res[0], _reg)
+#define XAE_RD8(_sc, _reg) bus_read_8((_sc)->res[0], _reg)
+#define XAE_WR4(_sc, _reg, _val) bus_write_4((_sc)->res[0], _reg, _val)
+#define XAE_WR8(_sc, _reg, _val) bus_write_8((_sc)->res[0], _reg, _val)
-#define READ8(_sc, _reg) \
- bus_read_8((_sc)->res[0], _reg)
-#define WRITE8(_sc, _reg, _val) \
- bus_write_8((_sc)->res[0], _reg, _val)
+#define AXIDMA_RD4(_sc, _reg) bus_read_4((_sc)->dma_res, _reg)
+#define AXIDMA_RD8(_sc, _reg) bus_read_8((_sc)->dma_res, _reg)
+#define AXIDMA_WR4(_sc, _reg, _val) bus_write_4((_sc)->dma_res, _reg, _val)
+#define AXIDMA_WR8(_sc, _reg, _val) bus_write_8((_sc)->dma_res, _reg, _val)
#define XAE_LOCK(sc) mtx_lock(&(sc)->mtx)
#define XAE_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
#define XAE_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
#define XAE_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED)
-#define XAE_DEBUG
-#undef XAE_DEBUG
+#define dprintf(fmt, ...)
-#ifdef XAE_DEBUG
-#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
-#else
-#define dprintf(fmt, ...)
-#endif
-
-#define RX_QUEUE_SIZE 64
-#define TX_QUEUE_SIZE 64
-#define NUM_RX_MBUF 16
-#define BUFRING_SIZE 8192
#define MDIO_CLK_DIV_DEFAULT 29
-#define BUF_NPAGES 512
-
-#define PHY1_RD(sc, _r) \
- xae_miibus_read_reg(sc->dev, 1, _r)
-#define PHY1_WR(sc, _r, _v) \
- xae_miibus_write_reg(sc->dev, 1, _r, _v)
-#define PHY_RD(sc, _r) \
- xae_miibus_read_reg(sc->dev, sc->phy_addr, _r)
+#define PHY1_RD(sc, _r) xae_miibus_read_reg(sc->dev, 1, _r)
+#define PHY1_WR(sc, _r, _v) xae_miibus_write_reg(sc->dev, 1, _r, _v)
+#define PHY_RD(sc, _r) xae_miibus_read_reg(sc->dev, sc->phy_addr, _r)
#define PHY_WR(sc, _r, _v) \
- xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v)
+ xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v)
/* Use this macro to access regs > 0x1f */
#define WRITE_TI_EREG(sc, reg, data) { \
@@ -122,220 +107,384 @@
#define DP83867_SGMIICTL1 0xD3 /* not documented register */
#define SGMIICTL1_SGMII_6W (1 << 14) /* no idea what it is */
+#define AXI_DESC_RING_ALIGN 64
+
+/*
+ * Driver data and defines.
+ */
+
static struct resource_spec xae_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE },
{ -1, 0 }
};
-static void xae_stop_locked(struct xae_softc *sc);
-static void xae_setup_rxfilter(struct xae_softc *sc);
+static inline uint32_t
+next_rxidx(struct xae_softc *sc, uint32_t curidx)
+{
+
+ return ((curidx == RX_DESC_COUNT - 1) ? 0 : curidx + 1);
+}
-static int
-xae_rx_enqueue(struct xae_softc *sc, uint32_t n)
+static inline uint32_t
+next_txidx(struct xae_softc *sc, uint32_t curidx)
{
- struct mbuf *m;
- int i;
- for (i = 0; i < n; i++) {
- m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
- if (m == NULL) {
- device_printf(sc->dev,
- "%s: Can't alloc rx mbuf\n", __func__);
- return (-1);
- }
+ return ((curidx == TX_DESC_COUNT - 1) ? 0 : curidx + 1);
+}
- m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
- xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
- }
+static void
+xae_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
- return (0);
+ if (error != 0)
+ return;
+ *(bus_addr_t *)arg = segs[0].ds_addr;
}
-static int
-xae_get_phyaddr(phandle_t node, int *phy_addr)
+inline static uint32_t
+xae_setup_txdesc(struct xae_softc *sc, int idx, bus_addr_t paddr,
+ uint32_t len)
{
- phandle_t phy_node;
- pcell_t phy_handle, phy_reg;
+ struct axidma_desc *desc;
+ uint32_t nidx;
+ uint32_t flags;
- if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
- sizeof(phy_handle)) <= 0)
- return (ENXIO);
+ nidx = next_txidx(sc, idx);
- phy_node = OF_node_from_xref(phy_handle);
+ desc = &sc->txdesc_ring[idx];
- if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
- sizeof(phy_reg)) <= 0)
- return (ENXIO);
+ /* Addr/len 0 means we're clearing the descriptor after xmit done. */
+ if (paddr == 0 || len == 0) {
+ flags = 0;
+ --sc->txcount;
+ } else {
+ flags = BD_CONTROL_TXSOF | BD_CONTROL_TXEOF;
+ ++sc->txcount;
+ }
- *phy_addr = phy_reg;
+ desc->next = sc->txdesc_ring_paddr + sizeof(struct axidma_desc) * nidx;
+ desc->phys = paddr;
+ desc->status = 0;
+ desc->control = len | flags;
- return (0);
+ return (nidx);
}
static int
-xae_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
+xae_setup_txbuf(struct xae_softc *sc, int idx, struct mbuf **mp)
{
- xdma_transfer_status_t st;
- struct xae_softc *sc;
- if_t ifp;
+ struct bus_dma_segment seg;
struct mbuf *m;
- int err;
-
- sc = arg;
-
- XAE_LOCK(sc);
+ int error;
+ int nsegs;
- ifp = sc->ifp;
+ dprintf("%s\n", __func__);
- for (;;) {
- err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
- if (err != 0) {
- break;
- }
+ if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
+ return (ENOMEM);
- if (st.error != 0) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- }
+ *mp = m;
- m_freem(m);
- }
+ error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
+ m, &seg, &nsegs, 0);
+ if (error != 0)
+ return (ENOMEM);
- if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
+ bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
+ BUS_DMASYNC_PREWRITE);
- XAE_UNLOCK(sc);
+ sc->txbuf_map[idx].mbuf = m;
+ xae_setup_txdesc(sc, idx, seg.ds_addr, seg.ds_len);
return (0);
}
-static int
-xae_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
+static void
+xae_txstart_locked(struct xae_softc *sc)
{
- xdma_transfer_status_t st;
- struct xae_softc *sc;
- if_t ifp;
struct mbuf *m;
- int err;
- uint32_t cnt_processed;
-
- sc = arg;
+ int enqueued;
+ uint32_t addr;
+ int tmp;
+ if_t ifp;
dprintf("%s\n", __func__);
- XAE_LOCK(sc);
+ XAE_ASSERT_LOCKED(sc);
+
+ if (!sc->link_is_up)
+ return;
ifp = sc->ifp;
- cnt_processed = 0;
+ if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE)
+ return;
+
+ enqueued = 0;
+
for (;;) {
- err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
- if (err != 0) {
+ if (sc->txcount == (TX_DESC_COUNT - 1)) {
+ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
break;
}
- cnt_processed++;
-
- if (st.error != 0) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
- m_freem(m);
- continue;
+ m = if_dequeue(ifp);
+ if (m == NULL)
+ break;
+ if (xae_setup_txbuf(sc, sc->tx_idx_head, &m) != 0) {
+ if_sendq_prepend(ifp, m);
+ break;
}
-
- m->m_pkthdr.len = m->m_len = st.transferred;
- m->m_pkthdr.rcvif = ifp;
- XAE_UNLOCK(sc);
- if_input(ifp, m);
- XAE_LOCK(sc);
+ BPF_MTAP(ifp, m);
+ tmp = sc->tx_idx_head;
+ sc->tx_idx_head = next_txidx(sc, sc->tx_idx_head);
+ ++enqueued;
}
- xae_rx_enqueue(sc, cnt_processed);
+ if (enqueued != 0) {
+ bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map,
+ BUS_DMASYNC_PREWRITE);
- XAE_UNLOCK(sc);
-
- return (0);
+ addr = sc->txdesc_ring_paddr + tmp * sizeof(struct axidma_desc);
+ dprintf("%s: new tail desc %x\n", __func__, addr);
+ AXIDMA_WR8(sc, AXI_TAILDESC(AXIDMA_TX_CHAN), addr);
+ }
}
static void
-xae_qflush(if_t ifp)
+xae_txfinish_locked(struct xae_softc *sc)
{
+ struct axidma_desc *desc;
+ struct xae_bufmap *bmap;
+ boolean_t retired_buffer;
+
+ XAE_ASSERT_LOCKED(sc);
+
+ bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_PREREAD);
+ bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_POSTREAD);
+ retired_buffer = false;
+ while (sc->tx_idx_tail != sc->tx_idx_head) {
+ desc = &sc->txdesc_ring[sc->tx_idx_tail];
+ if ((desc->status & BD_STATUS_CMPLT) == 0)
+ break;
+ retired_buffer = true;
+ bmap = &sc->txbuf_map[sc->tx_idx_tail];
+ bus_dmamap_sync(sc->txbuf_tag, bmap->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->txbuf_tag, bmap->map);
+ m_freem(bmap->mbuf);
+ bmap->mbuf = NULL;
+ xae_setup_txdesc(sc, sc->tx_idx_tail, 0, 0);
+ sc->tx_idx_tail = next_txidx(sc, sc->tx_idx_tail);
+ }
+
+ /*
+ * If we retired any buffers, there will be open tx slots available in
+ * the descriptor ring, go try to start some new output.
+ */
+ if (retired_buffer) {
+ if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
+ xae_txstart_locked(sc);
+ }
}
-static int
-xae_transmit_locked(if_t ifp)
+inline static uint32_t
+xae_setup_rxdesc(struct xae_softc *sc, int idx, bus_addr_t paddr)
{
- struct xae_softc *sc;
- struct mbuf *m;
- struct buf_ring *br;
- int error;
- int enq;
+ struct axidma_desc *desc;
+ uint32_t nidx;
- dprintf("%s\n", __func__);
+ /*
+ * The hardware requires 32-bit physical addresses. We set up the dma
+ * tag to indicate that, so the cast to uint32_t should never lose
+ * significant bits.
+ */
+ nidx = next_rxidx(sc, idx);
- sc = if_getsoftc(ifp);
- br = sc->br;
+ desc = &sc->rxdesc_ring[idx];
+ desc->next = sc->rxdesc_ring_paddr + sizeof(struct axidma_desc) * nidx;
+ desc->phys = paddr;
+ desc->status = 0;
+ desc->control = MCLBYTES | BD_CONTROL_TXSOF | BD_CONTROL_TXEOF;
- enq = 0;
+ return (nidx);
+}
- while ((m = drbr_peek(ifp, br)) != NULL) {
- error = xdma_enqueue_mbuf(sc->xchan_tx,
- &m, 0, 4, 4, XDMA_MEM_TO_DEV);
- if (error != 0) {
- /* No space in request queue available yet. */
- drbr_putback(ifp, br, m);
- break;
- }
+static struct mbuf *
+xae_alloc_mbufcl(struct xae_softc *sc)
+{
+ struct mbuf *m;
- drbr_advance(ifp, br);
+ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ if (m != NULL)
+ m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
+
+ return (m);
+}
- enq++;
+static int
+xae_setup_rxbuf(struct xae_softc *sc, int idx, struct mbuf * m)
+{
+ int error, nsegs;
+ struct bus_dma_segment seg;
+
+ error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
+ m, &seg, &nsegs, 0);
+ if (error != 0)
+ return (error);
- /* If anyone is interested give them a copy. */
- ETHER_BPF_MTAP(ifp, m);
- }
+ bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
+ BUS_DMASYNC_PREREAD);
- if (enq > 0)
- xdma_queue_submit(sc->xchan_tx);
+ sc->rxbuf_map[idx].mbuf = m;
+ xae_setup_rxdesc(sc, idx, seg.ds_addr);
return (0);
}
-static int
-xae_transmit(if_t ifp, struct mbuf *m)
+static void
+xae_rxfinish_onebuf(struct xae_softc *sc, int len)
{
- struct xae_softc *sc;
+ struct mbuf *m, *newmbuf;
+ struct xae_bufmap *bmap;
int error;
dprintf("%s\n", __func__);
- sc = if_getsoftc(ifp);
+ /*
+ * First try to get a new mbuf to plug into this slot in the rx ring.
+ * If that fails, drop the current packet and recycle the current
+ * mbuf, which is still mapped and loaded.
+ */
+ if ((newmbuf = xae_alloc_mbufcl(sc)) == NULL) {
+ if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
+ xae_setup_rxdesc(sc, sc->rx_idx,
+ sc->rxdesc_ring[sc->rx_idx].phys);
+ return;
+ }
+
+ XAE_UNLOCK(sc);
+
+ bmap = &sc->rxbuf_map[sc->rx_idx];
+ bus_dmamap_sync(sc->rxbuf_tag, bmap->map, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->rxbuf_tag, bmap->map);
+ m = bmap->mbuf;
+ bmap->mbuf = NULL;
+ m->m_len = len;
+ m->m_pkthdr.len = len;
+ m->m_pkthdr.rcvif = sc->ifp;
+
+ if_input(sc->ifp, m);
XAE_LOCK(sc);
- error = drbr_enqueue(ifp, sc->br, m);
- if (error) {
- XAE_UNLOCK(sc);
- return (error);
+ if ((error = xae_setup_rxbuf(sc, sc->rx_idx, newmbuf)) != 0) {
+ device_printf(sc->dev, "xae_setup_rxbuf error %d\n", error);
+ /* XXX Now what? We've got a hole in the rx ring. */
}
+}
- if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
- IFF_DRV_RUNNING) {
- XAE_UNLOCK(sc);
- return (0);
+static void
+xae_rxfinish_locked(struct xae_softc *sc)
+{
+ boolean_t desc_completed;
+ struct axidma_desc *desc;
+ uint32_t addr;
+ int len;
+ int tmp;
+
+ dprintf("%s\n", __func__);
+
+ XAE_ASSERT_LOCKED(sc);
+
+ bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map, BUS_DMASYNC_PREREAD);
+ bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map, BUS_DMASYNC_POSTREAD);
+ desc_completed = false;
+ for (;;) {
+ desc = &sc->rxdesc_ring[sc->rx_idx];
+ if ((desc->status & BD_STATUS_CMPLT) == 0)
+ break;
+ desc_completed = true;
+ len = desc->status & BD_CONTROL_LEN_M;
+ xae_rxfinish_onebuf(sc, len);
+ tmp = sc->rx_idx;
+ sc->rx_idx = next_rxidx(sc, sc->rx_idx);
}
- if (!sc->link_is_up) {
- XAE_UNLOCK(sc);
- return (0);
+ if (desc_completed) {
+ bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map,
+ BUS_DMASYNC_PREWRITE);
+
+ addr = sc->rxdesc_ring_paddr + tmp * sizeof(struct axidma_desc);
+ dprintf("%s: new tail desc %x\n", __func__, addr);
+ AXIDMA_WR8(sc, AXI_TAILDESC(AXIDMA_RX_CHAN), addr);
}
+}
- error = xae_transmit_locked(ifp);
+static void
+xae_intr_rx(void *arg)
+{
+ struct xae_softc *sc;
+ uint32_t pending;
+
+ sc = arg;
+ XAE_LOCK(sc);
+ pending = AXIDMA_RD4(sc, AXI_DMASR(AXIDMA_RX_CHAN));
+ dprintf("%s: pending %x\n", __func__, pending);
+ AXIDMA_WR4(sc, AXI_DMASR(AXIDMA_RX_CHAN), pending);
+ xae_rxfinish_locked(sc);
XAE_UNLOCK(sc);
+}
- return (error);
+static void
+xae_intr_tx(void *arg)
+{
+ struct xae_softc *sc;
+ uint32_t pending;
+
+ sc = arg;
+
+ XAE_LOCK(sc);
+ pending = AXIDMA_RD4(sc, AXI_DMASR(AXIDMA_TX_CHAN));
+ dprintf("%s: pending %x\n", __func__, pending);
+ AXIDMA_WR4(sc, AXI_DMASR(AXIDMA_TX_CHAN), pending);
+ xae_txfinish_locked(sc);
+ XAE_UNLOCK(sc);
+}
+
+
+static u_int
+xae_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
+{
+ struct xae_softc *sc = arg;
+ uint32_t reg;
+ uint8_t *ma;
+
+ if (cnt >= XAE_MULTICAST_TABLE_SIZE)
+ return (1);
+
+ ma = LLADDR(sdl);
+
+ reg = XAE_RD4(sc, XAE_FFC) & 0xffffff00;
+ reg |= cnt;
+ XAE_WR4(sc, XAE_FFC, reg);
+
+ reg = (ma[0]);
+ reg |= (ma[1] << 8);
+ reg |= (ma[2] << 16);
+ reg |= (ma[3] << 24);
+ XAE_WR4(sc, XAE_FFV(0), reg);
+
+ reg = ma[4];
+ reg |= ma[5] << 8;
+ XAE_WR4(sc, XAE_FFV(1), reg);
+
+ return (1);
}
static void
-xae_stop_locked(struct xae_softc *sc)
+xae_setup_rxfilter(struct xae_softc *sc)
{
if_t ifp;
uint32_t reg;
@@ -343,19 +492,82 @@
XAE_ASSERT_LOCKED(sc);
ifp = sc->ifp;
- if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
+
+ /*
+ * Set the multicast (group) filter hash.
+ */
+ if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
+ reg = XAE_RD4(sc, XAE_FFC);
+ reg |= FFC_PM;
+ XAE_WR4(sc, XAE_FFC, reg);
+ } else {
+ reg = XAE_RD4(sc, XAE_FFC);
+ reg &= ~FFC_PM;
+ XAE_WR4(sc, XAE_FFC, reg);
+
+ if_foreach_llmaddr(ifp, xae_write_maddr, sc);
+ }
+
+ /*
+ * Set the primary address.
+ */
+ reg = sc->macaddr[0];
+ reg |= (sc->macaddr[1] << 8);
+ reg |= (sc->macaddr[2] << 16);
+ reg |= (sc->macaddr[3] << 24);
+ XAE_WR4(sc, XAE_UAW0, reg);
+
+ reg = sc->macaddr[4];
+ reg |= (sc->macaddr[5] << 8);
+ XAE_WR4(sc, XAE_UAW1, reg);
+}
+
+static int
+xae_get_phyaddr(phandle_t node, int *phy_addr)
+{
+ phandle_t phy_node;
+ pcell_t phy_handle, phy_reg;
+
+ if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
+ sizeof(phy_handle)) <= 0)
+ return (ENXIO);
+
+ phy_node = OF_node_from_xref(phy_handle);
+
+ if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
+ sizeof(phy_reg)) <= 0)
+ return (ENXIO);
+
+ *phy_addr = phy_reg;
+
+ return (0);
+}
+
+static void
+xae_qflush(if_t ifp)
+{
+}
+
+static void
+xae_stop_locked(struct xae_softc *sc)
+{
+ uint32_t reg;
+
+ XAE_ASSERT_LOCKED(sc);
+
+ if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
callout_stop(&sc->xae_callout);
/* Stop the transmitter */
- reg = READ4(sc, XAE_TC);
+ reg = XAE_RD4(sc, XAE_TC);
reg &= ~TC_TX;
- WRITE4(sc, XAE_TC, reg);
+ XAE_WR4(sc, XAE_TC, reg);
/* Stop the receiver. */
- reg = READ4(sc, XAE_RCW1);
+ reg = XAE_RD4(sc, XAE_RCW1);
reg &= ~RCW1_RX;
- WRITE4(sc, XAE_RCW1, reg);
+ XAE_WR4(sc, XAE_RCW1, reg);
}
static uint64_t
@@ -365,9 +577,9 @@
uint64_t delta;
KASSERT(counter_id < XAE_MAX_COUNTERS,
- ("counter %d is out of range", counter_id));
+ ("counter %d is out of range", counter_id));
- new = READ8(sc, XAE_STATCNT(counter_id));
+ new = XAE_RD8(sc, XAE_STATCNT(counter_id));
old = sc->counters[counter_id];
if (new >= old)
@@ -429,7 +641,7 @@
link_was_up = sc->link_is_up;
mii_tick(sc->mii_softc);
if (sc->link_is_up && !link_was_up)
- xae_transmit_locked(sc->ifp);
+ xae_txstart_locked(sc);
/* Schedule another check one second from now. */
callout_reset(&sc->xae_callout, hz, xae_tick, sc);
@@ -451,10 +663,10 @@
xae_setup_rxfilter(sc);
/* Enable the transmitter */
- WRITE4(sc, XAE_TC, TC_TX);
+ XAE_WR4(sc, XAE_TC, TC_TX);
/* Enable the receiver. */
- WRITE4(sc, XAE_RCW1, RCW1_RX);
+ XAE_WR4(sc, XAE_RCW1, RCW1_RX);
/*
* Call mii_mediachg() which will call back into xae_miibus_statchg()
@@ -514,74 +726,6 @@
return (error);
}
-static u_int
-xae_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
-{
- struct xae_softc *sc = arg;
- uint32_t reg;
- uint8_t *ma;
-
- if (cnt >= XAE_MULTICAST_TABLE_SIZE)
- return (1);
-
- ma = LLADDR(sdl);
-
- reg = READ4(sc, XAE_FFC) & 0xffffff00;
- reg |= cnt;
- WRITE4(sc, XAE_FFC, reg);
-
- reg = (ma[0]);
- reg |= (ma[1] << 8);
- reg |= (ma[2] << 16);
- reg |= (ma[3] << 24);
- WRITE4(sc, XAE_FFV(0), reg);
-
- reg = ma[4];
- reg |= ma[5] << 8;
- WRITE4(sc, XAE_FFV(1), reg);
-
- return (1);
-}
-
-static void
-xae_setup_rxfilter(struct xae_softc *sc)
-{
- if_t ifp;
- uint32_t reg;
-
- XAE_ASSERT_LOCKED(sc);
-
- ifp = sc->ifp;
-
- /*
- * Set the multicast (group) filter hash.
- */
- if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
- reg = READ4(sc, XAE_FFC);
- reg |= FFC_PM;
- WRITE4(sc, XAE_FFC, reg);
- } else {
- reg = READ4(sc, XAE_FFC);
- reg &= ~FFC_PM;
- WRITE4(sc, XAE_FFC, reg);
-
- if_foreach_llmaddr(ifp, xae_write_maddr, sc);
- }
-
- /*
- * Set the primary address.
- */
- reg = sc->macaddr[0];
- reg |= (sc->macaddr[1] << 8);
- reg |= (sc->macaddr[2] << 16);
- reg |= (sc->macaddr[3] << 24);
- WRITE4(sc, XAE_UAW0, reg);
-
- reg = sc->macaddr[4];
- reg |= (sc->macaddr[5] << 8);
- WRITE4(sc, XAE_UAW1, reg);
-}
-
static int
xae_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
@@ -663,8 +807,7 @@
if (len != ETHER_ADDR_LEN)
return (EINVAL);
- OF_getprop(node, "local-mac-address", hwaddr,
- ETHER_ADDR_LEN);
+ OF_getprop(node, "local-mac-address", hwaddr, ETHER_ADDR_LEN);
return (0);
}
@@ -678,7 +821,7 @@
timeout = 200;
do {
- reg = READ4(sc, XAE_MDIO_CTRL);
+ reg = XAE_RD4(sc, XAE_MDIO_CTRL);
if (reg & MDIO_CTRL_READY)
break;
DELAY(1);
@@ -708,12 +851,12 @@
mii |= (reg << MDIO_TX_REGAD_S);
mii |= (phy << MDIO_TX_PHYAD_S);
- WRITE4(sc, XAE_MDIO_CTRL, mii);
+ XAE_WR4(sc, XAE_MDIO_CTRL, mii);
if (mdio_wait(sc))
return (0);
- rv = READ4(sc, XAE_MDIO_READ);
+ rv = XAE_RD4(sc, XAE_MDIO_READ);
return (rv);
}
@@ -733,8 +876,8 @@
mii |= (reg << MDIO_TX_REGAD_S);
mii |= (phy << MDIO_TX_PHYAD_S);
- WRITE4(sc, XAE_MDIO_WRITE, val);
- WRITE4(sc, XAE_MDIO_CTRL, mii);
+ XAE_WR4(sc, XAE_MDIO_WRITE, val);
+ XAE_WR4(sc, XAE_MDIO_CTRL, mii);
if (mdio_wait(sc))
return (1);
@@ -772,27 +915,8 @@
}
static int
-get_xdma_std(struct xae_softc *sc)
-{
-
- sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
- if (sc->xdma_tx == NULL)
- return (ENXIO);
-
- sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
- if (sc->xdma_rx == NULL) {
- xdma_put(sc->xdma_tx);
- return (ENXIO);
- }
-
- return (0);
-}
-
-static int
-get_xdma_axistream(struct xae_softc *sc)
+get_axistream(struct xae_softc *sc)
{
- struct axidma_fdt_data *data;
- device_t dma_dev;
phandle_t node;
pcell_t prop;
size_t len;
@@ -804,128 +928,232 @@
"%s: Couldn't get axistream-connected prop.\n", __func__);
return (ENXIO);
}
- dma_dev = OF_device_from_xref(prop);
- if (dma_dev == NULL) {
+ sc->dma_dev = OF_device_from_xref(prop);
+ if (sc->dma_dev == NULL) {
device_printf(sc->dev, "Could not get DMA device by xref.\n");
return (ENXIO);
}
-
- sc->xdma_tx = xdma_get(sc->dev, dma_dev);
- if (sc->xdma_tx == NULL) {
- device_printf(sc->dev, "Could not find DMA controller.\n");
- return (ENXIO);
- }
- data = malloc(sizeof(struct axidma_fdt_data),
- M_DEVBUF, (M_WAITOK | M_ZERO));
- data->id = AXIDMA_TX_CHAN;
- sc->xdma_tx->data = data;
-
- sc->xdma_rx = xdma_get(sc->dev, dma_dev);
- if (sc->xdma_rx == NULL) {
- device_printf(sc->dev, "Could not find DMA controller.\n");
- return (ENXIO);
- }
- data = malloc(sizeof(struct axidma_fdt_data),
- M_DEVBUF, (M_WAITOK | M_ZERO));
- data->id = AXIDMA_RX_CHAN;
- sc->xdma_rx->data = data;
+ sc->dma_res = AXIDMA_MEMRES(sc->dma_dev);
return (0);
}
+static void
+xae_txstart(if_t ifp)
+{
+ struct xae_softc *sc;
+
+ sc = if_getsoftc(ifp);
+
+ dprintf("%s\n", __func__);
+
+ XAE_LOCK(sc);
+ xae_txstart_locked(sc);
+ XAE_UNLOCK(sc);
+}
+
static int
-setup_xdma(struct xae_softc *sc)
+xae_setup_dma(struct xae_softc *sc)
{
- device_t dev;
- vmem_t *vmem;
- vm_paddr_t phys;
- vm_page_t m;
+ struct axidma_desc *desc;
+ uint32_t addr;
+ uint32_t reg;
+ struct mbuf *m;
int error;
+ int idx;
- dev = sc->dev;
+ sc->rxbuf_align = PAGE_SIZE;
+ sc->txbuf_align = PAGE_SIZE;
- /* Get xDMA controller */
- error = get_xdma_std(sc);
+ /*
+ * Set up TX descriptor ring, descriptors, and dma maps.
+ */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* Parent tag. */
+ AXI_DESC_RING_ALIGN, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ TX_DESC_SIZE, 1, /* maxsize, nsegments */
+ TX_DESC_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->txdesc_tag);
+ if (error != 0) {
+ device_printf(sc->dev, "could not create TX ring DMA tag.\n");
+ goto out;
+ }
- if (error) {
+ error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
+ BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->txdesc_map);
+ if (error != 0) {
device_printf(sc->dev,
- "Fallback to axistream-connected property\n");
- error = get_xdma_axistream(sc);
+ "could not allocate TX descriptor ring.\n");
+ goto out;
}
- if (error) {
- device_printf(dev, "Could not find xDMA controllers.\n");
- return (ENXIO);
+ error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map, sc->txdesc_ring,
+ TX_DESC_SIZE, xae_get1paddr, &sc->txdesc_ring_paddr, 0);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "could not load TX descriptor ring map.\n");
+ goto out;
}
- /* Alloc xDMA TX virtual channel. */
- sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, 0);
- if (sc->xchan_tx == NULL) {
- device_printf(dev, "Can't alloc virtual DMA TX channel.\n");
- return (ENXIO);
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* Parent tag. */
+ sc->txbuf_align, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MCLBYTES, 1, /* maxsize, nsegments */
+ MCLBYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->txbuf_tag);
+ if (error != 0) {
+		device_printf(sc->dev,
+		    "could not create TX buffer DMA tag.\n");
+ goto out;
}
- /* Setup interrupt handler. */
- error = xdma_setup_intr(sc->xchan_tx, 0,
- xae_xdma_tx_intr, sc, &sc->ih_tx);
- if (error) {
- device_printf(sc->dev,
- "Can't setup xDMA TX interrupt handler.\n");
- return (ENXIO);
+ for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
+ desc = &sc->txdesc_ring[idx];
+ bzero(desc, sizeof(struct axidma_desc));
}
- /* Alloc xDMA RX virtual channel. */
- sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, 0);
- if (sc->xchan_rx == NULL) {
- device_printf(dev, "Can't alloc virtual DMA RX channel.\n");
- return (ENXIO);
+ for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
+ error = bus_dmamap_create(sc->txbuf_tag, 0,
+ &sc->txbuf_map[idx].map);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "could not create TX buffer DMA map.\n");
+ goto out;
+ }
+ xae_setup_txdesc(sc, idx, 0, 0);
}
- /* Setup interrupt handler. */
- error = xdma_setup_intr(sc->xchan_rx, XDMA_INTR_NET,
- xae_xdma_rx_intr, sc, &sc->ih_rx);
- if (error) {
+ /*
+ * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
+ */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* Parent tag. */
+ AXI_DESC_RING_ALIGN, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ RX_DESC_SIZE, 1, /* maxsize, nsegments */
+ RX_DESC_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->rxdesc_tag);
+ if (error != 0) {
+ device_printf(sc->dev, "could not create RX ring DMA tag.\n");
+ goto out;
+ }
+
+ error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
+ BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rxdesc_map);
+ if (error != 0) {
device_printf(sc->dev,
- "Can't setup xDMA RX interrupt handler.\n");
- return (ENXIO);
+ "could not allocate RX descriptor ring.\n");
+ goto out;
+ }
+
+ error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map, sc->rxdesc_ring,
+ RX_DESC_SIZE, xae_get1paddr, &sc->rxdesc_ring_paddr, 0);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "could not load RX descriptor ring map.\n");
+ goto out;
+ }
+
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* Parent tag. */
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MCLBYTES, 1, /* maxsize, nsegments */
+ MCLBYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->rxbuf_tag);
+ if (error != 0) {
+ device_printf(sc->dev, "could not create RX buf DMA tag.\n");
+ goto out;
+ }
+
+ for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
+ desc = &sc->rxdesc_ring[idx];
+ bzero(desc, sizeof(struct axidma_desc));
+ }
+
+ for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
+ error = bus_dmamap_create(sc->rxbuf_tag, 0,
+ &sc->rxbuf_map[idx].map);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "could not create RX buffer DMA map.\n");
+ goto out;
+ }
+ if ((m = xae_alloc_mbufcl(sc)) == NULL) {
+ device_printf(sc->dev, "Could not alloc mbuf\n");
+ error = ENOMEM;
+ goto out;
+ }
+ if ((error = xae_setup_rxbuf(sc, idx, m)) != 0) {
+ device_printf(sc->dev,
+ "could not create new RX buffer.\n");
+ goto out;
+ }
}
- /* Setup bounce buffer */
- vmem = xdma_get_memory(dev);
- if (!vmem) {
- m = vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO,
- BUF_NPAGES, 0, BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0,
- VM_MEMATTR_DEFAULT);
- phys = VM_PAGE_TO_PHYS(m);
- vmem = vmem_create("xdma vmem", 0, 0, PAGE_SIZE, PAGE_SIZE,
- M_BESTFIT | M_WAITOK);
- vmem_add(vmem, phys, BUF_NPAGES * PAGE_SIZE, 0);
+ if (AXIDMA_RESET(sc->dma_dev, AXIDMA_TX_CHAN) != 0) {
+ device_printf(sc->dev, "Could not reset TX channel.\n");
+ goto out;
+ }
+ if (AXIDMA_RESET(sc->dma_dev, AXIDMA_RX_CHAN) != 0) {
+		device_printf(sc->dev, "Could not reset RX channel.\n");
+ goto out;
+ }
+ if (AXIDMA_SETUP_CB(sc->dma_dev, AXIDMA_TX_CHAN, xae_intr_tx, sc)) {
+ device_printf(sc->dev, "Could not setup TX intr callback.\n");
+ goto out;
+ }
+ if (AXIDMA_SETUP_CB(sc->dma_dev, AXIDMA_RX_CHAN, xae_intr_rx, sc)) {
+ device_printf(sc->dev, "Could not setup RX intr callback.\n");
+ goto out;
}
- xchan_set_memory(sc->xchan_tx, vmem);
- xchan_set_memory(sc->xchan_rx, vmem);
-
- xdma_prep_sg(sc->xchan_tx,
- TX_QUEUE_SIZE, /* xchan requests queue size */
- MCLBYTES, /* maxsegsize */
- 8, /* maxnsegs */
- 16, /* alignment */
- 0, /* boundary */
- BUS_SPACE_MAXADDR_32BIT,
- BUS_SPACE_MAXADDR);
-
- xdma_prep_sg(sc->xchan_rx,
- RX_QUEUE_SIZE, /* xchan requests queue size */
- MCLBYTES, /* maxsegsize */
- 1, /* maxnsegs */
- 16, /* alignment */
- 0, /* boundary */
- BUS_SPACE_MAXADDR_32BIT,
- BUS_SPACE_MAXADDR);
+ dprintf("%s: tx desc base %lx\n", __func__, sc->txdesc_ring_paddr);
+ AXIDMA_WR8(sc, AXI_CURDESC(AXIDMA_TX_CHAN), sc->txdesc_ring_paddr);
+ reg = AXIDMA_RD4(sc, AXI_DMACR(AXIDMA_TX_CHAN));
+ reg |= DMACR_IOC_IRQEN | DMACR_DLY_IRQEN | DMACR_ERR_IRQEN;
+ AXIDMA_WR4(sc, AXI_DMACR(AXIDMA_TX_CHAN), reg);
+ reg |= DMACR_RS;
+ AXIDMA_WR4(sc, AXI_DMACR(AXIDMA_TX_CHAN), reg);
+
+ AXIDMA_WR8(sc, AXI_CURDESC(AXIDMA_RX_CHAN), sc->rxdesc_ring_paddr);
+ reg = AXIDMA_RD4(sc, AXI_DMACR(AXIDMA_RX_CHAN));
+ reg |= DMACR_IOC_IRQEN | DMACR_DLY_IRQEN | DMACR_ERR_IRQEN;
+ AXIDMA_WR4(sc, AXI_DMACR(AXIDMA_RX_CHAN), reg);
+ reg |= DMACR_RS;
+ AXIDMA_WR4(sc, AXI_DMACR(AXIDMA_RX_CHAN), reg);
+
+ addr = sc->rxdesc_ring_paddr +
+ (RX_DESC_COUNT - 1) * sizeof(struct axidma_desc);
+ dprintf("%s: new RX tail desc %x\n", __func__, addr);
+ AXIDMA_WR8(sc, AXI_TAILDESC(AXIDMA_RX_CHAN), addr);
return (0);
+
+out:
+ /* TODO: release resources. */
+ return (-1);
}
+
static int
xae_probe(device_t dev)
{
@@ -952,56 +1180,58 @@
sc = device_get_softc(dev);
sc->dev = dev;
+ sc->rx_idx = 0;
+ sc->tx_idx_head = sc->tx_idx_tail = 0;
+ sc->txcount = 0;
+
+ mtx_init(&sc->mtx, device_get_nameunit(sc->dev), MTX_NETWORK_LOCK,
+ MTX_DEF);
+
node = ofw_bus_get_node(dev);
- if (setup_xdma(sc) != 0) {
- device_printf(dev, "Could not setup xDMA.\n");
+	/* Get our MAC address. */
+ if (xae_get_hwaddr(sc, sc->macaddr)) {
+ device_printf(sc->dev, "can't get mac\n");
return (ENXIO);
}
- mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
- MTX_NETWORK_LOCK, MTX_DEF);
-
- sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
- M_NOWAIT, &sc->mtx);
- if (sc->br == NULL)
- return (ENOMEM);
+ /* DMA */
+ error = get_axistream(sc);
+ if (error != 0)
+ return (error);
if (bus_alloc_resources(dev, xae_spec, sc->res)) {
device_printf(dev, "could not allocate resources\n");
return (ENXIO);
}
+ /* Setup interrupt handler. */
+ error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
+ NULL, xae_intr, sc, &sc->intr_cookie);
+ if (error != 0) {
+ device_printf(dev, "could not setup interrupt handler.\n");
+ return (ENXIO);
+ }
+
/* Memory interface */
sc->bst = rman_get_bustag(sc->res[0]);
sc->bsh = rman_get_bushandle(sc->res[0]);
- device_printf(sc->dev, "Identification: %x\n",
- READ4(sc, XAE_IDENT));
+ device_printf(sc->dev, "Identification: %x\n", XAE_RD4(sc, XAE_IDENT));
- /* Get MAC addr */
- if (xae_get_hwaddr(sc, sc->macaddr)) {
- device_printf(sc->dev, "can't get mac\n");
- return (ENXIO);
- }
+ error = xae_setup_dma(sc);
+ if (error != 0)
+ return (error);
/* Enable MII clock */
reg = (MDIO_CLK_DIV_DEFAULT << MDIO_SETUP_CLK_DIV_S);
reg |= MDIO_SETUP_ENABLE;
- WRITE4(sc, XAE_MDIO_SETUP, reg);
+ XAE_WR4(sc, XAE_MDIO_SETUP, reg);
if (mdio_wait(sc))
return (ENXIO);
callout_init_mtx(&sc->xae_callout, &sc->mtx, 0);
- /* Setup interrupt handler. */
- error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
- NULL, xae_intr, sc, &sc->intr_cookie);
- if (error != 0) {
- device_printf(dev, "could not setup interrupt handler.\n");
- return (ENXIO);
- }
-
/* Set up the ethernet interface. */
sc->ifp = ifp = if_alloc(IFT_ETHER);
if_setsoftc(ifp, sc);
@@ -1009,9 +1239,9 @@
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setcapabilities(ifp, IFCAP_VLAN_MTU);
if_setcapenable(ifp, if_getcapabilities(ifp));
- if_settransmitfn(ifp, xae_transmit);
if_setqflushfn(ifp, xae_qflush);
if_setioctlfn(ifp, xae_ioctl);
+ if_setstartfn(ifp, xae_txstart);
if_setinitfn(ifp, xae_init);
if_setsendqlen(ifp, TX_DESC_COUNT - 1);
if_setsendqready(ifp);
@@ -1023,11 +1253,11 @@
error = mii_attach(dev, &sc->miibus, ifp, xae_media_change,
xae_media_status, BMSR_DEFCAPMASK, sc->phy_addr,
MII_OFFSET_ANY, 0);
-
if (error != 0) {
device_printf(dev, "PHY attach failed\n");
return (ENXIO);
}
+
sc->mii_softc = device_get_softc(sc->miibus);
/* Apply vcu118 workaround. */
@@ -1038,9 +1268,6 @@
ether_ifattach(ifp, sc->macaddr);
sc->is_attached = true;
- xae_rx_enqueue(sc, NUM_RX_MBUF);
- xdma_queue_submit(sc->xchan_rx);
-
return (0);
}
@@ -1077,11 +1304,6 @@
bus_release_resources(dev, xae_spec, sc->res);
- xdma_channel_free(sc->xchan_tx);
- xdma_channel_free(sc->xchan_rx);
- xdma_put(sc->xdma_tx);
- xdma_put(sc->xdma_rx);
-
return (0);
}
@@ -1129,7 +1351,7 @@
return;
}
- WRITE4(sc, XAE_SPEED, reg);
+ XAE_WR4(sc, XAE_SPEED, reg);
}
static device_method_t xae_methods[] = {
@@ -1153,5 +1375,6 @@
DRIVER_MODULE(xae, simplebus, xae_driver, 0, 0);
DRIVER_MODULE(miibus, xae, miibus_driver, 0, 0);
+MODULE_DEPEND(xae, axidma, 1, 1, 1);
MODULE_DEPEND(xae, ether, 1, 1, 1);
MODULE_DEPEND(xae, miibus, 1, 1, 1);
diff --git a/sys/dev/xilinx/if_xaevar.h b/sys/dev/xilinx/if_xaevar.h
--- a/sys/dev/xilinx/if_xaevar.h
+++ b/sys/dev/xilinx/if_xaevar.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
+ * Copyright (c) 2019-2025 Ruslan Bukin <br@bsdpad.com>
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory (Department of Computer Science and
@@ -33,13 +33,21 @@
#ifndef _DEV_XILINX_IF_XAEVAR_H_
#define _DEV_XILINX_IF_XAEVAR_H_
-#include <dev/xdma/xdma.h>
+struct xae_bufmap {
+ struct mbuf *mbuf;
+ bus_dmamap_t map;
+};
/*
* Driver data and defines.
*/
-#define RX_DESC_COUNT 1024
-#define TX_DESC_COUNT 1024
+#define RX_DESC_COUNT 64
+#define RX_DESC_SIZE (sizeof(struct axidma_desc) * RX_DESC_COUNT)
+#define TX_DESC_COUNT 64
+#define TX_DESC_SIZE (sizeof(struct axidma_desc) * TX_DESC_COUNT)
+
+#define AXIDMA_TX_CHAN 0
+#define AXIDMA_RX_CHAN 1
struct xae_softc {
struct resource *res[2];
@@ -59,20 +67,33 @@
boolean_t is_detaching;
int phy_addr;
- /* xDMA TX */
- xdma_controller_t *xdma_tx;
- xdma_channel_t *xchan_tx;
- void *ih_tx;
+ /* Counters */
+ uint64_t counters[XAE_MAX_COUNTERS];
- /* xDMA RX */
- xdma_controller_t *xdma_rx;
- xdma_channel_t *xchan_rx;
- void *ih_rx;
+ /* Axistream-connected. */
+ device_t dma_dev;
+ struct resource *dma_res;
- struct buf_ring *br;
+ int rxbuf_align;
+ int txbuf_align;
- /* Counters */
- uint64_t counters[XAE_MAX_COUNTERS];
+ bus_dma_tag_t rxdesc_tag;
+ bus_dmamap_t rxdesc_map;
+ struct axidma_desc *rxdesc_ring;
+ bus_addr_t rxdesc_ring_paddr;
+ bus_dma_tag_t rxbuf_tag;
+ struct xae_bufmap rxbuf_map[RX_DESC_COUNT];
+ uint32_t rx_idx;
+
+ bus_dma_tag_t txdesc_tag;
+ bus_dmamap_t txdesc_map;
+ struct axidma_desc *txdesc_ring;
+ bus_addr_t txdesc_ring_paddr;
+ bus_dma_tag_t txbuf_tag;
+ struct xae_bufmap txbuf_map[TX_DESC_COUNT];
+ uint32_t tx_idx_head;
+ uint32_t tx_idx_tail;
+ int txcount;
};
#endif /* _DEV_XILINX_IF_XAEVAR_H_ */

File Metadata

Mime Type
text/plain
Expires
Fri, Mar 20, 12:02 AM (10 h, 14 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
29986535
Default Alt Text
D53932.diff (53 KB)

Event Timeline