Page MenuHomeFreeBSD

D25409.id73511.diff
No OneTemporary

D25409.id73511.diff

Index: sys/dev/dmaengine/ptdma/ptdma.h
===================================================================
--- /dev/null
+++ sys/dev/dmaengine/ptdma/ptdma.h
@@ -0,0 +1,276 @@
+/*-
+ * Copyright (c) 2020 Advanced Micro Devices Inc.,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Contact Information :
+ * Rajesh Kumar <rajesh1.kumar@amd.com>
+ * Arpan Palit <Arpan.Palit@amd.com>
+ */
+
+#ifndef _PTDMA_H_
+#define _PTDMA_H_
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/types.h>
+#include <sys/endian.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/sx.h>
+#include <sys/taskqueue.h>
+#include <sys/stdint.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+extern unsigned int g_ptdma_dbg_lvl;
+
+/*
+ * Verbosity-gated log macro: prints via device_printf(9) when the
+ * requested level (v) is at or below the hw.pt.debug_level sysctl.
+ * NOTE: both macros silently rely on a variable named `pt`
+ * (struct ptdma_softc *) being in scope at every call site.
+ */
+#define ptdma_info(v, ...) do { \
+ if ((v) <= g_ptdma_dbg_lvl) { \
+ device_printf(pt->dev, __VA_ARGS__); \
+ } \
+} while (0)
+
+/* Unconditional error log; same hidden dependence on `pt`. */
+#define ptdma_error(...) do { \
+ device_printf(pt->dev, __VA_ARGS__); \
+} while (0)
+
+/* PCI identity of the AMD pass-through DMA function. */
+#define PTDMA_VENDOR_ID 0x1022
+#define PTDMA_DEVICE_ID 0x1498
+
+#define PTDMA_MAX_CHANNELS 32
+#define PTDMA_MSIX_CNT 1	/* vectors requested (driver uses one) */
+#define PTDMA_MAX_NAME_LEN 64
+#define PTDMA_ENGINE_PASSTHRU 5	/* dword0.engine value for passthru ops */
+
+/*
+ * Version word layout: major in the upper bits, release in the low
+ * PTDMA_VSIZE bits.  Arguments are parenthesized so expressions such as
+ * PTDMA_VERSION(a + 1, r) expand correctly (the original macro did not
+ * parenthesize v or r).
+ */
+#define PTDMA_VSIZE 16
+#define PTDMA_VMASK ((unsigned int)((1 << PTDMA_VSIZE) - 1))
+#define PTDMA_VERSION(v, r) ((unsigned int)(((v) << PTDMA_VSIZE) \
+ | ((r) & PTDMA_VMASK)))
+
+/* Worker-thread flags (struct ptdma_softc::thr_flags). */
+#define PTDMA_DETACH 0x0001	/* ask the worker thread to exit */
+#define PTDMA_EXIT 0x0002	/* worker thread acknowledged exit */
+
+/* Opaque engine handle and completion callback exposed to clients. */
+typedef void *bus_dmaengine_t;
+typedef void (*bus_dmaengine_callback_t)(void *arg, int error);
+
+/*
+ * HW descriptor for PTDMA commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory type
+ * word 6: reserved 32 bits
+ * word 7: reserved 32 bits
+ */
+struct ptdma_cmd_desc {
+ struct {
+ unsigned int hoc : 1; /* presumably halt-on-completion — confirm vs HW spec */
+ unsigned int ioc : 1; /* presumably interrupt-on-completion — confirm */
+ unsigned int unused0 : 1;
+ unsigned int som : 1; /* start of message (set with eom per command) */
+ unsigned int eom : 1; /* end of message */
+ unsigned int function : 15;
+ unsigned int engine : 4; /* PTDMA_ENGINE_PASSTHRU for copies */
+ unsigned int unused1 : 8;
+ } dword0;
+ struct {
+ uint32_t length; /* transfer length in bytes */
+ } dword1;
+ struct {
+ uint32_t src_low;
+ } dword2;
+ struct {
+ unsigned int src_high : 16; /* bits 32..47 of source bus address */
+ unsigned int src_mem : 2; /* memory type; driver always writes 0 */
+ unsigned int lsb_context : 8;
+ unsigned int unused0 : 5;
+ unsigned int fixed : 1;
+ } dword3;
+ struct {
+ uint32_t dest_low;
+ } dword4;
+ struct {
+ unsigned int dest_high : 16; /* bits 32..47 of destination bus address */
+ unsigned int dest_mem : 2;
+ unsigned int unused0 : 13;
+ unsigned int fixed : 1;
+ } dword5;
+ struct {
+ unsigned int unused0 : 32;
+ } dword6;
+ struct {
+ unsigned int unused0 : 32;
+ } dword7;
+};
+
+/* Life-cycle states of a single hardware command. */
+enum cmd_status {
+ CMD_IDLE = 0,
+ CMD_BUSY, /* queued on the active work queue */
+ CMD_PROCESS, /* currently being executed */
+ CMD_ERROR,
+ CMD_BACKLOG, /* parked until earlier commands complete */
+};
+
+/*
+ * One page-sized (or smaller) chunk of a work request; maps 1:1 onto a
+ * struct ptdma_cmd_desc when executed.
+ */
+struct ptdma_work_cmd {
+ TAILQ_ENTRY(ptdma_work_cmd) entry;
+
+ struct ptdma_softc *pt;
+ struct ptdma_work_request *work_req;
+
+ bus_addr_t src;
+ bus_addr_t dest;
+ bus_size_t len;
+
+ enum cmd_status status;
+ int cmd_no; /* monotonic id taken from pt->cmd_count */
+ bool completion; /* set by the interrupt task; waited on via msleep_spin */
+};
+
+/* Life-cycle states of a client copy request. */
+enum req_status {
+ REQ_IDLE = 0,
+ REQ_BUSY, /* queued on pt->req_work_q */
+ REQ_PROCESS, /* being split into commands / executed */
+ REQ_ERROR,
+};
+
+/*
+ * A single ptdma_copy() request, split into one or more ptdma_work_cmd
+ * chunks.  The callback is invoked once all chunks have completed (or on
+ * the first error).
+ */
+struct ptdma_work_request {
+ TAILQ_ENTRY(ptdma_work_request) entry;
+
+ struct ptdma_softc *pt;
+
+ //XXX - do we need separate lock for both queues?
+ struct mtx req_cmd_lock;
+ TAILQ_HEAD(, ptdma_work_cmd) cmd_work_q; /* commands ready to run */
+ TAILQ_HEAD(, ptdma_work_cmd) cmd_back_q; /* backlog, promoted as work drains */
+
+ bus_addr_t src;
+ bus_addr_t dest;
+ bus_size_t len;
+ uint16_t ncmds; /* number of chunks the request was split into */
+
+ enum req_status status;
+
+ bus_dmaengine_callback_t callback_fn;
+ void *callback_arg;
+ int req_no; /* monotonic id taken from pt->req_count */
+};
+
+/* Structure to hold PT device data (per-PCI-ID variant description). */
+struct pt_dev_vdata {
+ const unsigned int bar[2]; /* BAR numbers to map (see ptdma_map_pci_bar) */
+ const unsigned int version; /* PTDMA_VERSION() encoded hardware version */
+};
+
+/* Per-vector interrupt bookkeeping. */
+struct pt_irq_info {
+ int irq_rid;
+ struct resource *irq_res;
+ void *irq_tag;
+};
+
+/*
+ * Per-device (per-channel) softc.  One instance is created for each PCI
+ * function probed; all instances are linked on the global pt_channel list.
+ */
+struct ptdma_softc {
+ /* dmaengine.c context. Do not move! Must go first! */
+ void *dmaengine_store;
+
+ TAILQ_ENTRY(ptdma_softc) entry;
+ bus_dmaengine_t dmaengine;
+
+/*
+ * Recover the softc from the address of its embedded `dmaengine` member
+ * (container_of-style pointer arithmetic; _dmaeng must be that address).
+ */
+#define to_ptdma_softc(_dmaeng) \
+({ \
+ bus_dmaengine_t *_p = (_dmaeng); \
+ (struct ptdma_softc *)((char *)_p - \
+ offsetof(struct ptdma_softc, dmaengine)); \
+})
+ device_t dev;
+ uint8_t chan_idx; /* global channel number assigned at attach */
+ char name[PTDMA_MAX_NAME_LEN];
+ struct pt_dev_vdata *dev_vdata;
+ /*
+ * Counters below are updated with atomic(9) ops but are also read
+ * under pt_channel_lock in places — NOTE(review): mixed discipline,
+ * confirm intended synchronization.
+ */
+ uint32_t refcnt;
+ uint32_t cmd_count;
+ uint32_t req_count;
+ uint32_t intr_count;
+ uint32_t comp_count;
+
+ /* PCI map details (index matches dev_vdata->bar[]) */
+ bus_space_tag_t pci_bus_tag[2];
+ bus_space_handle_t pci_bus_handle[2];
+ struct resource *pci_resource[2];
+ int pci_resource_id[2];
+ vm_paddr_t paddr[2];
+ vm_size_t bar_size[2];
+ caddr_t vaddr[2];
+
+ /* IRQ details */
+ struct pt_irq_info irq_info[PTDMA_MSIX_CNT];
+ uint8_t msix_vector_count;
+ uint32_t irq_allocated;
+ struct taskqueue *intr_workqueue;
+ struct task intr_task;
+
+ /* QUEUE Buffer details (DMA descriptor ring) */
+ size_t ringsz;
+ uint32_t ringlen;
+ bus_dma_tag_t hw_desc_tag;
+ bus_dmamap_t hw_desc_map;
+ struct ptdma_cmd_desc *hw_desc_vaddr;
+ bus_addr_t hw_desc_paddr;
+ uint32_t qidx; /* next free slot in the descriptor ring */
+
+ //XXX - should we need separate lock for each queue?
+ struct mtx pt_req_lock;
+ TAILQ_HEAD(, ptdma_work_request) req_free_q;
+ TAILQ_HEAD(, ptdma_work_request) req_work_q;
+
+ struct mtx pt_cmd_lock;
+ struct mtx cmd_comp_lock; /* spin lock shared with the interrupt task */
+ TAILQ_HEAD(, ptdma_work_cmd) cmd_free_q;
+ uint32_t cmd_error; /* first HW error code seen (sticky) */
+ struct ptdma_work_cmd *curr_cmd; /* command in flight, if any */
+
+ struct mtx thr_lock;
+ struct proc *procp; /* worker kproc servicing req_work_q */
+ uint8_t thr_flags; /* PTDMA_DETACH / PTDMA_EXIT */
+
+ void *src_vaddr;
+ bus_addr_t src_paddr;
+};
+
+/* APIs exposed to other drivers */
+extern int ptdma_get_chan_idx(ssize_t); /* reserve a free channel; -ENODEV if none */
+extern void ptdma_put_chan_idx(int); /* release a channel reservation */
+extern bus_dmaengine_t ptdma_get_dmaengine(uint32_t chan_idx, int flags);
+extern void ptdma_put_dmaengine(bus_dmaengine_t dmaengine);
+extern void ptdma_acquire(bus_dmaengine_t dmaengine);
+extern void ptdma_release(bus_dmaengine_t dmaengine);
+/* Synchronous bus-address copy; callback runs when the transfer finishes. */
+extern int ptdma_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
+ bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
+ void *callback_arg, uint32_t flags);
Index: sys/dev/dmaengine/ptdma/ptdma.c
===================================================================
--- /dev/null
+++ sys/dev/dmaengine/ptdma/ptdma.c
@@ -0,0 +1,1054 @@
+/*-
+ * Copyright (c) 2020 Advanced Micro Devices Inc.,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Contact Information :
+ * Rajesh Kumar <rajesh1.kumar@amd.com>
+ * Arpan Palit <Arpan.Palit@amd.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/ioccom.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/sbuf.h>
+#include <sys/sysctl.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+#include <sys/time.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include "ptdma.h"
+#include "ptdma_hw.h"
+
+/* Globals */
+MALLOC_DEFINE(M_PTDMA, "ptdma", "ptdma driver memory allocations");
+
+/* Store all the device instances in a list (guarded by pt_channel_lock) */
+TAILQ_HEAD(pt_channel, ptdma_softc) pt_channel;
+
+unsigned int n_ptdma_dev = 0; /* number of attached devices / next chan_idx */
+unsigned int g_ptdma_dbg_lvl = 0; /* hw.pt.debug_level */
+
+struct mtx pt_channel_lock;
+
+SYSCTL_NODE(_hw, OID_AUTO, pt, CTLFLAG_RD, 0, "pt node");
+SYSCTL_INT(_hw_pt, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ptdma_dbg_lvl,
+ 0, "Set log level (0-3) for ptdma(4). Higher is more verbose.");
+
+/* Function prototypes */
+static int ptdma_probe(device_t);
+static int ptdma_attach(device_t);
+static int ptdma_detach(device_t);
+int ptdma_get_chan_idx(ssize_t);
+void ptdma_put_chan_idx(int);
+bus_dmaengine_t ptdma_get_dmaengine(uint32_t, int);
+void ptdma_put_dmaengine(bus_dmaengine_t);
+int ptdma_copy(bus_dmaengine_t, bus_addr_t, bus_addr_t,
+ bus_size_t, bus_dmaengine_callback_t, void *, uint32_t);
+
+/* Forward declaration (file-local, not extern) */
+static void ptdma_process_request(struct ptdma_softc *pt);
+
+/* Per-PCI-ID variant data: BARs to map and hardware version. */
+static const struct pt_dev_vdata dev_vdata[] = {
+ { .bar = {2, 5},
+ .version = PTDMA_VERSION(5, 0),
+ },
+};
+
+static const struct pci_device_table ptdma_devs[] = {
+ { PCI_DEV(PTDMA_VENDOR_ID, PTDMA_DEVICE_ID),
+ .driver_data = (uintptr_t)&dev_vdata[0],
+ PCI_DESCR("Pass-through Direct Memory Access Driver") },
+};
+
+/* Low 32 bits of a bus address (descriptor words 2/4). */
+static inline uint32_t ptdma_low_address(unsigned long addr)
+{
+	return ((uint32_t)((uint64_t)addr & 0xffffffffULL));
+}
+
+/* Bits 32..47 of a bus address (descriptor words 3/5). */
+static inline uint32_t ptdma_high_address(unsigned long addr)
+{
+	return ((uint32_t)(((uint64_t)addr >> 32) & 0xffffULL));
+}
+
+/* API's exposed to client applications/drivers */
+/*
+ * Look up the channel with the given index and return a handle to its
+ * embedded dmaengine pointer.
+ *
+ * Fix: when no channel matched, the loop left `pt` NULL and the old code
+ * returned `&pt->dmaengine`, i.e. a bogus pointer computed from NULL.
+ * Return NULL instead so callers can detect the miss.
+ */
+bus_dmaengine_t ptdma_get_dmaengine(uint32_t chan_idx, int flags)
+{
+	struct ptdma_softc *pt, *pt_tmp;
+
+	mtx_lock(&pt_channel_lock);
+
+	TAILQ_FOREACH_SAFE(pt, &pt_channel, entry, pt_tmp) {
+		if (chan_idx == pt->chan_idx)
+			break;
+	}
+
+	mtx_unlock(&pt_channel_lock);
+
+	if (pt == NULL)
+		return (NULL);
+
+	return (&pt->dmaengine);
+}
+
+/*
+ * Release a dmaengine handle: drain the per-channel statistics counters
+ * and drop the single reference if held.
+ *
+ * Fix: the old code converted the handle first and then compared the
+ * result to NULL — which can never be true, since to_ptdma_softc() only
+ * subtracts a fixed offset; worse, the "not found" log macro itself
+ * dereferences pt->dev.  Guard the handle before converting.
+ */
+void ptdma_put_dmaengine(bus_dmaengine_t dmaengine)
+{
+	struct ptdma_softc *pt;
+
+	if (dmaengine == NULL)
+		return;
+
+	pt = to_ptdma_softc(dmaengine);
+
+	mtx_lock(&pt_channel_lock);
+	ptdma_info(0, "%s: req_cnt %d cmd_cnt %d intr_cnt %d comp_cnt %d\n",
+	    __func__, pt->req_count, pt->cmd_count, pt->intr_count,
+	    pt->comp_count);
+	/* Drain all four counters in lock-step until req_count hits zero. */
+	while (pt->req_count != 0) {
+		atomic_subtract_int(&pt->req_count, 1);
+		atomic_subtract_int(&pt->cmd_count, 1);
+		atomic_subtract_int(&pt->intr_count, 1);
+		atomic_subtract_int(&pt->comp_count, 1);
+	}
+	ptdma_info(2, "%s: Requesting to put chan_idx %d ref_cnt %d\n",
+	    __func__, pt->chan_idx, pt->refcnt);
+	if (pt->refcnt == 1)
+		atomic_subtract_int(&pt->refcnt, 1);
+
+	mtx_unlock(&pt_channel_lock);
+
+	ptdma_info(2, "%s: Freed ref_cnt %d for ptdma channel\n",
+	    __func__, pt->refcnt);
+}
+
+/*
+ * Change the free chan_idx logic based time-sharing policy
+ * of hardware for getting better performance with less number of thread
+ */
+/*
+ * Reserve the first channel with refcnt == 0, zero its statistics
+ * counters, and return its index.  Returns -ENODEV when every channel is
+ * busy.  tcnt is only used for logging.
+ */
+int ptdma_get_chan_idx(ssize_t tcnt)
+{
+ struct ptdma_softc *pt, *pt_tmp;
+
+ mtx_lock(&pt_channel_lock);
+
+ TAILQ_FOREACH_SAFE(pt, &pt_channel, entry, pt_tmp) {
+ if (pt->refcnt == 0) {
+ ptdma_info(0, "%s: Got free chan_idx %d thread_count "
+ "%ld\n", __func__, pt->chan_idx, tcnt);
+ atomic_add_int(&pt->refcnt, 1);
+ atomic_set_int(&pt->req_count, 0);
+ atomic_set_int(&pt->cmd_count, 0);
+ atomic_set_int(&pt->intr_count, 0);
+ atomic_set_int(&pt->comp_count, 0);
+ mtx_unlock(&pt_channel_lock);
+ return pt->chan_idx;
+ }
+ }
+
+ mtx_unlock(&pt_channel_lock);
+
+ return (-ENODEV);
+}
+
+/*
+ * Release a channel reservation taken with ptdma_get_chan_idx().
+ *
+ * Fix: when chan_idx matched no channel, the loop left `pt` NULL and the
+ * old code dereferenced it in the log call below.  Bail out instead.
+ */
+void ptdma_put_chan_idx(int chan_idx)
+{
+	struct ptdma_softc *pt, *pt_tmp;
+
+	mtx_lock(&pt_channel_lock);
+
+	TAILQ_FOREACH_SAFE(pt, &pt_channel, entry, pt_tmp) {
+		if (chan_idx == pt->chan_idx)
+			break;
+	}
+
+	if (pt == NULL) {
+		mtx_unlock(&pt_channel_lock);
+		return;
+	}
+
+	ptdma_info(2, "%s: Requesting to put chan_idx %d ref_cnt %d\n",
+	    __func__, pt->chan_idx, pt->refcnt);
+	if (pt->refcnt == 1)
+		atomic_subtract_int(&pt->refcnt, 1);
+
+	ptdma_info(2, "%s: Freed ref_cnt %d for ptdma channel\n",
+	    __func__, pt->refcnt);
+	mtx_unlock(&pt_channel_lock);
+}
+
+/*
+ * Split `req` into per-command chunks (PAGE_SIZE each, last one takes the
+ * remainder).  The first 101 commands go straight onto the work queue;
+ * the rest are parked on the backlog queue and promoted as earlier
+ * commands complete.
+ *
+ * Fixes: malloc(9) was called with only M_ZERO — neither M_WAITOK nor
+ * M_NOWAIT, which is invalid; and on allocation failure the commands
+ * already queued were leaked (ptdma_put_work_request only re-inits the
+ * queue heads).  Use M_NOWAIT (pt_cmd_lock is held) and unwind on error.
+ */
+static int
+ptdma_get_work_cmds(struct ptdma_work_request *req)
+{
+	struct ptdma_softc *pt = req->pt;
+	struct ptdma_work_cmd *cmd, *cmd_tmp;
+	uint16_t offset = 0, total_len = 0;
+	int i, cmdlen = PAGE_SIZE;
+
+	mtx_lock(&pt->pt_cmd_lock);
+
+	ptdma_info(2, "%s: ncmds %d\n", __func__, req->ncmds);
+
+	for (i = 0 ; i < req->ncmds ; i++) {
+
+		cmd = malloc(sizeof(*cmd), M_PTDMA, M_NOWAIT | M_ZERO);
+		if (cmd == NULL)
+			goto fail;
+
+		cmd->pt = pt;
+		cmd->status = CMD_IDLE;
+		cmd->src = req->src + offset;
+		cmd->dest = req->dest + offset;
+
+		/* Last chunk absorbs whatever is left of the request. */
+		if (i == (req->ncmds - 1))
+			cmd->len = req->len - total_len;
+		else {
+			cmd->len = cmdlen;
+			total_len += cmdlen;
+		}
+
+		offset += cmd->len;
+
+		if (i <= 100) {
+			cmd->status = CMD_BUSY;
+			TAILQ_INSERT_TAIL(&req->cmd_work_q, cmd, entry);
+		} else {
+			cmd->status = CMD_BACKLOG;
+			TAILQ_INSERT_TAIL(&req->cmd_back_q, cmd, entry);
+		}
+
+		ptdma_info(1, "%s: src 0x%zx dest 0x%zx len 0x%x offset 0x%x "
+		    "status 0x%x\n", __func__, (unsigned long)cmd->src,
+		    (unsigned long)cmd->dest, (unsigned int)cmd->len,
+		    (unsigned int)offset, (unsigned int)cmd->status);
+
+		atomic_add_int(&pt->cmd_count, 1);
+		cmd->cmd_no = pt->cmd_count;
+	}
+
+	mtx_unlock(&pt->pt_cmd_lock);
+
+	return (0);
+
+fail:
+	/* Unwind: free every command queued before the failed allocation. */
+	TAILQ_FOREACH_SAFE(cmd, &req->cmd_work_q, entry, cmd_tmp) {
+		TAILQ_REMOVE(&req->cmd_work_q, cmd, entry);
+		free(cmd, M_PTDMA);
+	}
+	TAILQ_FOREACH_SAFE(cmd, &req->cmd_back_q, entry, cmd_tmp) {
+		TAILQ_REMOVE(&req->cmd_back_q, cmd, entry);
+		free(cmd, M_PTDMA);
+	}
+	mtx_unlock(&pt->pt_cmd_lock);
+	return (-ENOMEM);
+}
+
+/*
+ * Allocate and initialize a work request for a src -> dest copy of `len`
+ * bytes.  ncmds is currently fixed at 1 (per-chunk split is disabled).
+ *
+ * Fix: the old code called malloc(M_WAITOK) while holding pt_req_lock —
+ * a potential sleep with a mutex held.  Allocate first, then take the
+ * lock only to assign the request number.  M_WAITOK never returns NULL,
+ * so the dead NULL check is gone.
+ */
+static struct ptdma_work_request *
+ptdma_get_work_request(struct ptdma_softc *pt, bus_addr_t src,
+    bus_addr_t dest, bus_size_t len)
+{
+	struct ptdma_work_request *req;
+
+	req = malloc(sizeof(*req), M_PTDMA, M_WAITOK | M_ZERO);
+
+	req->pt = pt;
+	req->status = REQ_IDLE;
+	mtx_init(&req->req_cmd_lock, "work request cmd lock", NULL, MTX_SPIN);
+
+	TAILQ_INIT(&req->cmd_work_q);
+	TAILQ_INIT(&req->cmd_back_q);
+
+	req->src = src;
+	req->dest = dest;
+	req->len = len;
+	req->ncmds = 1;
+
+	ptdma_info(2, "%s: src 0x%zx dest 0x%zx len 0x%x ncmds %d\n",
+	    __func__, (unsigned long)req->src, (unsigned long)req->dest,
+	    (unsigned int)req->len, req->ncmds);
+
+	mtx_lock(&pt->pt_req_lock);
+	atomic_add_int(&pt->req_count, 1);
+	req->req_no = pt->req_count;
+	mtx_unlock(&pt->pt_req_lock);
+
+	return (req);
+}
+
+/*
+ * Tear down a work request.  If the request had been queued (status is
+ * not REQ_IDLE — see ptdma_copy, which sets REQ_BUSY just before
+ * inserting), unlink it from the softc work queue first, then free it.
+ * The field resets and TAILQ_INITs before free() are cosmetic.
+ */
+static void ptdma_put_work_request(struct ptdma_work_request *req)
+{
+ struct ptdma_softc *pt = req->pt;
+
+ mtx_lock(&pt->pt_req_lock);
+
+ if (req->status != REQ_IDLE)
+ TAILQ_REMOVE(&pt->req_work_q, req, entry);
+
+ req->src = 0;
+ req->dest = 0;
+ req->len = 0;
+ req->status = REQ_IDLE;
+ req->req_no = 0;
+
+ TAILQ_INIT(&req->cmd_work_q);
+ TAILQ_INIT(&req->cmd_back_q);
+ free(req, M_PTDMA);
+
+ mtx_unlock(&pt->pt_req_lock);
+}
+
+/*
+ * Client entry point: queue a copy of `len` bytes from bus address `src`
+ * to `dest` on the given engine.  The request is split into commands,
+ * queued, and the worker thread is woken; callback_fn(callback_arg, err)
+ * runs when the transfer finishes.  Returns 0 on successful queueing.
+ */
+int
+ptdma_copy(bus_dmaengine_t dmaengine, bus_addr_t dest, bus_addr_t src,
+ bus_size_t len, bus_dmaengine_callback_t callback_fn,
+ void *callback_arg, uint32_t flags)
+{
+ struct ptdma_softc *pt = to_ptdma_softc(dmaengine);
+ struct ptdma_work_request *req;
+ int ret = 0;
+
+ ptdma_info(2, "%s: src 0x%zx dest 0x%zx len 0x%x\n", __func__,
+ (unsigned long)src, (unsigned long)dest, (unsigned int)len);
+
+ req = ptdma_get_work_request(pt, src, dest, len);
+ if (req == NULL) {
+ ptdma_error("%s: fail work_req\n", __func__);
+ return (-ENOMEM);
+ }
+
+ ret = ptdma_get_work_cmds(req);
+ if (ret < 0) {
+ ptdma_error("%s: fail work_cmd %d\n", __func__, ret);
+ goto copy_err;
+ }
+
+ ptdma_info(1, "%s: get_work_cmds pass\n", __func__);
+
+ mtx_lock(&pt->pt_req_lock);
+
+ req->callback_fn = callback_fn;
+ req->callback_arg = callback_arg;
+
+ req->status = REQ_BUSY;
+ TAILQ_INSERT_TAIL(&pt->req_work_q, req, entry);
+ mtx_unlock(&pt->pt_req_lock);
+
+ ptdma_info(1, "%s: wakeup thread\n", __func__);
+ wakeup(pt);
+
+copy_err:
+ /* Success also falls through here; ret == 0 skips the cleanup. */
+ if (ret < 0)
+ ptdma_put_work_request(req);
+
+ ptdma_info(1, "%s: ret %d\n", __func__, ret);
+ return (ret);
+}
+
+/*
+ * Fill the next free ring descriptor from `cmd` and kick the hardware
+ * queue.  Register-write order is significant; do not reorder.
+ * Caller holds cmd_comp_lock (see ptdma_process_request).
+ */
+static int ptdma_execute_cmd(struct ptdma_work_cmd *cmd)
+{
+ struct ptdma_softc *pt = cmd->pt;
+ struct ptdma_cmd_desc *desc;
+ uint32_t ctrl_reg;
+
+ desc = &pt->hw_desc_vaddr[pt->qidx];
+ memset(desc, 0, sizeof(*desc));
+ ptdma_info(2, "%s: qidx %d desc %p-%p\n", __func__, pt->qidx, desc,
+ desc+pt->qidx);
+
+ /* One-shot transfer: single message (som+eom), interrupt on completion. */
+ desc->dword0.hoc = 0;
+ desc->dword0.ioc = 1;
+ desc->dword0.eom = 1;
+ desc->dword0.som = 1;
+ desc->dword0.function = 0;
+ desc->dword0.engine = PTDMA_ENGINE_PASSTHRU;
+
+ desc->dword1.length = cmd->len;
+
+ desc->dword2.src_low = ptdma_low_address(cmd->src);
+ desc->dword3.src_high = ptdma_high_address(cmd->src);
+ desc->dword3.src_mem = 0;
+
+ desc->dword4.dest_low = ptdma_low_address(cmd->dest);
+ desc->dword5.dest_high = ptdma_high_address(cmd->dest);
+ desc->dword5.dest_mem = 0;
+
+ /* Advance to the next slot; the tail written below points one past
+ * the descriptor just filled, publishing it to the hardware. */
+ pt->qidx = ((pt->qidx + 1) % COMMANDS_PER_QUEUE);
+
+ ptdma_info(2, "%s: qidx %d paddr 0x%x-0x%zx\n",
+ __func__, pt->qidx, (uint32_t)pt->hw_desc_paddr,
+ ((uint32_t)(pt->hw_desc_paddr) + (pt->qidx * sizeof(*desc))));
+ wmb(); /* descriptor contents must be visible before the tail update */
+
+ ptdma_write(pt, PTDMA_Q_REG_TAIL_LO,
+ ((uint32_t)pt->hw_desc_paddr) + (pt->qidx * sizeof(*desc)));
+
+ /* Set the RUN bit if the queue is not already running. */
+ ctrl_reg = ptdma_read(pt, PTDMA_Q_CONTROL_REG);
+ if (!(ctrl_reg & PTDMA_VQ_CTRL_RUN)) {
+ ctrl_reg |= PTDMA_VQ_CTRL_RUN;
+ ptdma_write(pt, PTDMA_Q_CONTROL_REG, ctrl_reg);
+ }
+ rmb();
+
+ ptdma_info(2, "%s: pt->qidx %d\n", __func__, pt->qidx);
+
+ return (0);
+}
+
+/*
+ * Diagnostic dump of the queue status registers at debug level 0
+ * (always printed).  Reads are kept in this order; status-register reads
+ * on this hardware may have side effects — do not reorder.
+ */
+static void dump_reg_status(struct ptdma_softc *pt)
+{
+ ptdma_info(0, "CMD_INTR_STATUS: 0x%x\n",
+ ptdma_read(pt, PTDMA_Q_REG_INTR_STATUS));
+ ptdma_info(0, "CMD_STATUS: 0x%x\n",
+ ptdma_read(pt, PTDMA_Q_REG_STATUS));
+ ptdma_info(0, "CMD_QINTR_STATUS: 0x%x\n",
+ ptdma_read(pt, PTDMA_Q_REG_INT_STATUS));
+ ptdma_info(0, "CMD_QDMA_STATUS: 0x%x\n",
+ ptdma_read(pt, PTDMA_Q_REG_DMA_STATUS));
+ ptdma_info(0, "CMD_QDMA_READ_STATUS: 0x%x\n",
+ ptdma_read(pt, PTDMA_Q_REG_DMA_READ_STATUS));
+ ptdma_info(0, "CMD_QDMA_WRITE_STATUS: 0x%x\n",
+ ptdma_read(pt, PTDMA_Q_REG_DMA_WRITE_STATUS));
+}
+
+/*
+ * Taskqueue handler run on behalf of the hardware interrupt: record any
+ * error, acknowledge the interrupt, and wake the worker sleeping on the
+ * in-flight command.
+ *
+ * Fix: the old code logged "cur_cmd is null" when pt->curr_cmd was NULL
+ * but then unconditionally dereferenced it anyway.  The completion/wakeup
+ * now only happens when a command is actually in flight.
+ */
+static void ptdma_intr_work(void *data, int pending __unused)
+{
+	struct ptdma_softc *pt = (struct ptdma_softc *)data;
+	uint32_t status, reg_int_status, reg_status;
+
+	mtx_lock_spin(&pt->cmd_comp_lock);
+	/* Disable interrupts */
+	ptdma_write(pt, PTDMA_Q_REG_INT_ENABLE, 0);
+
+	status = ptdma_read(pt, PTDMA_Q_REG_INTR_STATUS);
+	ptdma_info(2, "%s: status 0x%x\n", __func__, status);
+	if (status) {
+		/* Read status registers */
+		reg_int_status = ptdma_read(pt, PTDMA_Q_REG_INT_STATUS);
+		reg_status = ptdma_read(pt, PTDMA_Q_REG_STATUS);
+		ptdma_info(2, "%s: int_status 0x%x status 0x%x\n", __func__,
+		    reg_int_status, reg_status);
+
+		atomic_add_int(&pt->intr_count, 1);
+		/* On error, only save the first error value */
+		if ((status & PTDMA_INT_ERROR) && !pt->cmd_error) {
+			pt->cmd_error = (reg_status & 0x0000003f);
+			if (pt->curr_cmd != NULL)
+				ptdma_error("%s: cmd_no %d cmd error %d\n",
+				    __func__, pt->curr_cmd->cmd_no,
+				    pt->cmd_error);
+			dump_reg_status(pt);
+		}
+
+		wmb();
+		ptdma_write(pt, PTDMA_Q_REG_HEAD_LO,
+		    ((uint32_t)pt->hw_desc_paddr) +
+		    (pt->qidx * sizeof(struct ptdma_cmd_desc)));
+
+		/* Acknowledge the interrupt and wake the thread*/
+		ptdma_write(pt, PTDMA_Q_REG_INTR_STATUS, status);
+		if (pt->curr_cmd == NULL) {
+			ptdma_info(0, "%s: status 0x%x for cur_cmd is null\n",
+			    __func__, status);
+		} else {
+			ptdma_info(2, "%s: status 0x%x curr_cmd %p cmd_no %d\n",
+			    __func__, status, pt->curr_cmd,
+			    pt->curr_cmd->cmd_no);
+			pt->curr_cmd->completion = true;
+			wakeup_one(pt->curr_cmd);
+		}
+	}
+
+	/* Enable Interrupts */
+	ptdma_write(pt, PTDMA_Q_REG_INT_ENABLE, PTDMA_SUPP_INTERRUPTS);
+	mtx_unlock_spin(&pt->cmd_comp_lock);
+}
+
+/*
+ * Hardware interrupt filter/handler: keep the interrupt context minimal
+ * and defer all register work to the taskqueue.
+ */
+static void pt_interrupt_handler(void *arg)
+{
+	struct ptdma_softc *pt = arg;
+
+	taskqueue_enqueue(pt->intr_workqueue, &pt->intr_task);
+}
+
+/*
+ * Worker-thread body for one request: execute each command on the work
+ * queue in order, sleeping (msleep_spin, 1s timeout) for the interrupt
+ * task to mark it complete, promoting one backlog command per completion,
+ * and finally invoking the client callback.
+ *
+ * NOTE(review): cmd_comp_lock (a spin mutex) is held from before
+ * ptdma_execute_cmd() until after completion; msleep_spin drops it while
+ * sleeping.  pt_cmd_lock is created MTX_SPIN but taken with mtx_lock()
+ * below and malloc/free happen under it — confirm the intended lock type.
+ */
+static void ptdma_process_request(struct ptdma_softc *pt)
+{
+ struct ptdma_work_request *req;
+ struct ptdma_work_cmd *cmd, *cmd_tmp;
+ int ret = 0, err = 0;
+
+ if (TAILQ_EMPTY(&pt->req_work_q)) {
+ ptdma_error("%s: pt->req_work_q empty\n", __func__);
+ return;
+ }
+
+ req = TAILQ_FIRST(&pt->req_work_q);
+ req->status = REQ_PROCESS;
+
+ TAILQ_FOREACH_SAFE(cmd, &req->cmd_work_q, entry, cmd_tmp) {
+
+ cmd->status = CMD_PROCESS;
+
+ /* Once one command fails, remaining commands are drained
+ * without being executed (status stays REQ_ERROR). */
+ if (req->status != REQ_ERROR) {
+ mtx_lock_spin(&pt->cmd_comp_lock);
+ pt->curr_cmd = cmd;
+ pt->curr_cmd->completion = false;
+ ret = ptdma_execute_cmd(cmd);
+ if (ret == 0) {
+ while (!pt->curr_cmd->completion && err == 0)
+ err = msleep_spin(pt->curr_cmd,
+ &pt->cmd_comp_lock, "cmd_xfer", hz);
+
+ /* NOTE(review): the second clause is redundant —
+ * any nonzero err (incl. EWOULDBLOCK) matches. */
+ if (err || (err == EWOULDBLOCK)) {
+ ptdma_error("%s: failed curr_cmd %p"
+ " cmd_no %d completion %d err %d\n",
+ __func__, pt->curr_cmd,
+ pt->curr_cmd->cmd_no,
+ pt->curr_cmd->completion, err);
+ dump_reg_status(pt);
+ pt->curr_cmd = NULL;
+ mtx_unlock_spin(&pt->cmd_comp_lock);
+ ret = -EWOULDBLOCK;
+ goto cmd_fail;
+ }
+ atomic_add_int(&pt->comp_count, 1);
+ pt->curr_cmd = NULL;
+ mtx_unlock_spin(&pt->cmd_comp_lock);
+ } else
+ ptdma_error("%s: execute_cmd fail %d\n",
+ __func__, ret);
+ }
+cmd_fail:
+
+ if (ret || pt->cmd_error) {
+ ptdma_error("%s: cmd error ret %d cmd_err %d\n",
+ __func__, ret, pt->cmd_error);
+ cmd->status = CMD_ERROR;
+ req->status = REQ_ERROR;
+ }
+
+ mtx_lock(&pt->pt_cmd_lock);
+
+ /* Retire the command and promote one backlog entry, if any. */
+ TAILQ_REMOVE(&req->cmd_work_q, cmd, entry);
+ cmd->status = CMD_IDLE;
+ cmd->src = 0;
+ cmd->dest = 0;
+ cmd->len = 0;
+ cmd->cmd_no = 0;
+ free(cmd, M_PTDMA);
+
+ if (!TAILQ_EMPTY(&req->cmd_back_q)) {
+ cmd_tmp = TAILQ_FIRST(&req->cmd_back_q);
+ TAILQ_REMOVE(&req->cmd_back_q, cmd_tmp, entry);
+ cmd_tmp->status = CMD_BUSY;
+ TAILQ_INSERT_TAIL(&req->cmd_work_q, cmd_tmp, entry);
+ }
+
+ mtx_unlock(&pt->pt_cmd_lock);
+ }
+
+ if (TAILQ_EMPTY(&req->cmd_work_q))
+ ptdma_info(3, "%s: req cmd work queue empty\n", __func__);
+
+ if (TAILQ_EMPTY(&req->cmd_back_q))
+ ptdma_info(3, "%s: req cmd back queue empty\n", __func__);
+
+ /* Report REQ_ERROR (nonzero) or success (0) to the client. */
+ req->callback_fn(req->callback_arg,
+ ((req->status == REQ_ERROR)? req->status : 0));
+
+ ptdma_put_work_request(req);
+}
+
+/*
+ * Per-channel worker kproc: sleeps on `pt` until ptdma_copy() queues a
+ * request and calls wakeup(pt), then drains req_work_q one request at a
+ * time; exits when PTDMA_DETACH is set by ptdma_destroy_work_queue().
+ *
+ * NOTE(review): wakeup(pt) in ptdma_copy() is issued without thr_lock
+ * held and req_work_q is guarded by pt_req_lock, not thr_lock — the
+ * empty-check/msleep pair below appears racy (possible lost wakeup);
+ * confirm against the intended locking model.
+ */
+static void ptdma_thread_work(void *data)
+{
+ struct ptdma_softc *pt = (struct ptdma_softc *)data;
+
+ ptdma_info(2, "%s: THREAD_ENTRY\n", __func__);
+
+ while (1) {
+
+ mtx_lock(&pt->thr_lock);
+
+ if (pt->thr_flags & PTDMA_DETACH) {
+ pt->thr_flags |= PTDMA_EXIT;
+ mtx_unlock(&pt->thr_lock);
+ kproc_exit(0);
+ }
+
+ if (TAILQ_EMPTY(&pt->req_work_q)) {
+ /* msleep drops thr_lock while asleep and retakes it. */
+ msleep(pt, &pt->thr_lock, 0, "workwait", 0);
+ mtx_unlock(&pt->thr_lock);
+ continue;
+ }
+
+ mtx_unlock(&pt->thr_lock);
+
+ ptdma_process_request(pt);
+ }
+}
+
+/*
+ * Initialize the request/command locks and queues and start the worker
+ * kproc.
+ *
+ * Fix: pt_req_lock and pt_cmd_lock were created MTX_SPIN but are taken
+ * everywhere with mtx_lock() and held across malloc(9) calls — invalid
+ * for a spin mutex.  They are now default mutexes.  cmd_comp_lock really
+ * is used with mtx_lock_spin()/msleep_spin() from the interrupt task, so
+ * it stays a spin mutex.
+ */
+static int ptdma_init_work_queue(struct ptdma_softc *pt)
+{
+	int ret;
+
+	mtx_init(&pt->pt_req_lock, "ptdma req lock", NULL, MTX_DEF);
+	TAILQ_INIT(&pt->req_work_q);
+
+	mtx_init(&pt->pt_cmd_lock, "ptdma cmd lock", NULL, MTX_DEF);
+	mtx_init(&pt->cmd_comp_lock, "ptdma cmd completion lock", NULL, MTX_SPIN);
+
+	mtx_init(&pt->thr_lock, "thread lock", NULL, MTX_DEF);
+	pt->thr_flags = 0;
+	ret = kproc_create(ptdma_thread_work, pt, &pt->procp, 0, 0, "%s", pt->name);
+	if (ret)
+		ptdma_error("ptdma_thread_work failed %d\n", ret);
+
+	return (ret);
+}
+
+/*
+ * Stop the worker kproc: flag PTDMA_DETACH, wake it, and poll (100ms
+ * msleep timeout) until it acknowledges with PTDMA_EXIT.  Any requests
+ * still on req_work_q are discarded by the TAILQ_INIT (not freed —
+ * NOTE(review): potential leak if called with work pending).
+ */
+static void ptdma_destroy_work_queue(struct ptdma_softc *pt)
+{
+ mtx_lock(&pt->thr_lock);
+
+ TAILQ_INIT(&pt->req_work_q);
+ pt->thr_flags |= PTDMA_DETACH;
+ wakeup(pt);
+ while (!(pt->thr_flags & PTDMA_EXIT))
+ msleep(pt, &pt->thr_lock, 0, "ptdmadestroy", hz/10);
+ mtx_unlock(&pt->thr_lock);
+ pt->thr_flags = 0;
+
+}
+
+/*
+ * bus_dmamap_load(9) callback for a single-segment load: store the bus
+ * address of the segment into the caller-supplied bus_addr_t.
+ */
+static void
+ptdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	KASSERT(error == 0, ("%s: error:%d", __func__, error));
+	*(bus_addr_t *)arg = segs->ds_addr;
+}
+
+/*
+ * Allocate the DMA descriptor ring and program the hardware queue
+ * registers (priority, timeout, head/tail, queue size, RUN bit), then
+ * enable interrupts.
+ *
+ * Fix: the original leaked the DMA tag (and DMA memory) when a later
+ * allocation step failed; error paths now unwind what was created.
+ */
+static int ptdma_queue_init(struct ptdma_softc *pt)
+{
+	uint32_t ctrl_reg, int_status, status;
+	int ret;
+
+	pt->ringlen = COMMANDS_PER_QUEUE;
+	pt->ringsz = sizeof(struct ptdma_cmd_desc) * pt->ringlen;
+	ptdma_info(1, "%s: ring len %d sz %zu\n", __func__, pt->ringlen,
+	    pt->ringsz);
+
+	ret = bus_dma_tag_create(bus_get_dma_tag(pt->dev),
+	    PAGE_SIZE, (bus_addr_t)1 << 32, BUS_SPACE_MAXADDR_48BIT,
+	    BUS_SPACE_MAXADDR, NULL, NULL, pt->ringsz, 1, pt->ringsz, 0, NULL,
+	    NULL, &pt->hw_desc_tag);
+	if (ret != 0)
+		return (ret);
+
+	ret = bus_dmamem_alloc(pt->hw_desc_tag, (void **)&pt->hw_desc_vaddr,
+	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &pt->hw_desc_map);
+	if (ret != 0)
+		goto fail_tag;
+
+	ret = bus_dmamap_load(pt->hw_desc_tag, pt->hw_desc_map, pt->hw_desc_vaddr,
+	    pt->ringsz, ptdma_dmamap_cb, &pt->hw_desc_paddr, BUS_DMA_NOWAIT);
+	if (ret)
+		goto fail_mem;
+
+	ptdma_info(1, "%s: vaddr %p paddr 0x%lx\n", __func__, pt->hw_desc_vaddr,
+	    pt->hw_desc_paddr);
+
+	/* Init Registers */
+	ctrl_reg = 0;
+	ptdma_write(pt, PTDMA_Q_CONTROL_REG, ctrl_reg);
+	ptdma_info(1, "%s: ctrl_reg1 0x%x\n", __func__, ctrl_reg);
+
+	/* Disable interrupts */
+	ptdma_write(pt, PTDMA_Q_REG_INT_ENABLE, 0);
+	ptdma_info(1, "%s: int_enable 0x%x\n", __func__,
+	    ptdma_read(pt, PTDMA_Q_REG_INT_ENABLE));
+
+	/* Read status registers */
+	int_status = ptdma_read(pt, PTDMA_Q_REG_INT_STATUS);
+	status = ptdma_read(pt, PTDMA_Q_REG_STATUS);
+	ptdma_info(1, "%s: int_status 0x%x status 0x%x\n", __func__, int_status,
+	    status);
+
+	ptdma_write(pt, PTDMA_CMD_PRIO_OFFSET, 0x6);
+	ptdma_write(pt, PTDMA_CMD_REQID_CFG_OFFSET, 0x0);
+	ptdma_write(pt, PTDMA_CMD_TIMEOUT_OFFSET, 0x0);
+	ptdma_write(pt, PTDMA_CMD_CONFIG_OFFSET, 0x1);
+	ptdma_write(pt, PTDMA_CMD_CLK_GATE_CTL_OFFSET, CMD_CLK_GATE_CONFIG);
+
+	/* Empty ring: head == tail == ring base. */
+	ptdma_write(pt, PTDMA_Q_REG_TAIL_LO, (uint32_t)pt->hw_desc_paddr);
+	ptdma_write(pt, PTDMA_Q_REG_HEAD_LO, (uint32_t)pt->hw_desc_paddr);
+	ptdma_info(1, "%s: tail 0x%x head 0x%x\n", __func__,
+	    ptdma_read(pt, PTDMA_Q_REG_TAIL_LO),
+	    ptdma_read(pt, PTDMA_Q_REG_HEAD_LO));
+
+	ptdma_info(1, "%s: ctrl_reg2 0x%x\n", __func__, ctrl_reg);
+	ctrl_reg |= ((ffs(COMMANDS_PER_QUEUE) - 2) << PTDMA_VQ_CTRL_QSIZE_SHIFT);
+	ctrl_reg |= (pt->hw_desc_paddr >> 32) << 16;
+	ctrl_reg |= PTDMA_VQ_CTRL_RUN;
+	ptdma_write(pt, PTDMA_Q_CONTROL_REG, ctrl_reg);
+	ptdma_info(1, "%s: ctrl_reg3 0x%x reg 0x%x\n", __func__, ctrl_reg,
+	    ptdma_read(pt, PTDMA_Q_CONTROL_REG));
+
+	/* Clear Interrupt status and enable interrupts */
+	ptdma_write(pt, PTDMA_Q_REG_INTR_STATUS, 0xF);
+	ptdma_write(pt, PTDMA_Q_REG_INT_ENABLE, PTDMA_SUPP_INTERRUPTS);
+	ptdma_info(1, "%s: intr_status 0x%x int_enable 0x%x\n", __func__,
+	    ptdma_read(pt, PTDMA_Q_REG_INTR_STATUS),
+	    ptdma_read(pt, PTDMA_Q_REG_INT_ENABLE));
+
+	return (0);
+
+fail_mem:
+	bus_dmamem_free(pt->hw_desc_tag, pt->hw_desc_vaddr, pt->hw_desc_map);
+	pt->hw_desc_vaddr = NULL;
+fail_tag:
+	bus_dma_tag_destroy(pt->hw_desc_tag);
+	pt->hw_desc_tag = NULL;
+	return (ret);
+}
+
+/*
+ * Quiesce the hardware queue (interrupts off, RUN bit cleared, status
+ * acknowledged) and release the descriptor-ring DMA resources.  The
+ * NULL/0 guards make this safe after a partially failed queue init.
+ */
+static void
+ptdma_queue_free(struct ptdma_softc *pt)
+{
+ uint32_t ctrl_reg, int_status, status;
+
+ /* Disable interrupts */
+ ptdma_write(pt, PTDMA_Q_REG_INT_ENABLE, 0);
+ ptdma_info(1, "%s: int_enable 0x%x\n", __func__,
+ ptdma_read(pt, PTDMA_Q_REG_INT_ENABLE));
+
+ /* Turn off the Run Bit */
+ ctrl_reg = ptdma_read(pt, PTDMA_Q_CONTROL_REG);
+ ctrl_reg &= ~PTDMA_VQ_CTRL_RUN;
+ ptdma_write(pt, PTDMA_Q_CONTROL_REG, ctrl_reg);
+ ptdma_info(1, "%s: ctrl_reg1 0x%x\n", __func__, ctrl_reg);
+
+ /* Clear the interrupt status */
+ ptdma_write(pt, PTDMA_Q_REG_INTR_STATUS, 0xFFFFFFFF);
+ ptdma_info(1, "%s: intr_status 0x%x\n", __func__,
+ ptdma_read(pt, PTDMA_Q_REG_INTR_STATUS));
+
+ /* Read status registers */
+ int_status = ptdma_read(pt, PTDMA_Q_REG_INT_STATUS);
+ status = ptdma_read(pt, PTDMA_Q_REG_STATUS);
+ ptdma_info(1, "%s: int_status 0x%x status 0x%x\n", __func__, int_status,
+ status);
+
+ if (pt->hw_desc_paddr != 0)
+ bus_dmamap_unload(pt->hw_desc_tag, pt->hw_desc_map);
+
+ if (pt->hw_desc_vaddr != NULL)
+ bus_dmamem_free(pt->hw_desc_tag, pt->hw_desc_vaddr,
+ pt->hw_desc_map);
+
+ if (pt->hw_desc_tag != NULL)
+ bus_dma_tag_destroy(pt->hw_desc_tag);
+}
+
+/*
+ * Allocate interrupt vectors with graceful degradation MSI-X -> MSI ->
+ * legacy INTx, hook the handler on each vector, and create the taskqueue
+ * that services the interrupt work.
+ *
+ * Fixes: the bus_alloc failure path called ptdma_error(0, "...") —
+ * passing 0 where the format string belongs (the macro takes only
+ * varargs); and the `!supp_count ||` clause was redundant with the
+ * `< PTDMA_MSIX_CNT` comparison.
+ */
+static int ptdma_setup_irq(struct ptdma_softc *pt)
+{
+	boolean_t use_msi = false, f_legacy = false;
+	uint32_t supp_count = 0, vector_count = 0;
+	int i, ret = 0, flags = 0;
+
+	supp_count = pci_msix_count(pt->dev);
+	if (supp_count < PTDMA_MSIX_CNT) {
+		ptdma_error("MSIX supp count %d\n", supp_count);
+		use_msi = true;
+		goto enable_msi_vec;
+	}
+
+	vector_count = min(PTDMA_MSIX_CNT, supp_count);
+	ptdma_info(1, "%s: MSIX supp %d vector %d\n", __func__, supp_count,
+	    vector_count);
+	ret = pci_alloc_msix(pt->dev, &vector_count);
+	if (ret != 0) {
+		ptdma_error("Error allocating msix vectors: %d\n", ret);
+		use_msi = true;
+	}
+
+enable_msi_vec:
+
+	if (use_msi) {
+		pci_release_msi(pt->dev);
+		supp_count = pci_msi_count(pt->dev);
+		if (!supp_count) {
+			ptdma_error("MSI supp count %d\n", supp_count);
+			use_msi = false;
+			f_legacy = true;
+			goto enable_legacy_intr;
+		}
+
+		vector_count = 1;
+		ptdma_info(0, "%s: MSI supp %d vector %d\n", __func__,
+		    supp_count, vector_count);
+		ret = pci_alloc_msi(pt->dev, &vector_count);
+		if (ret != 0) {
+			ptdma_error("Error allocating msi vector: %d\n", ret);
+			use_msi = false;
+			f_legacy = true;
+		}
+	}
+
+enable_legacy_intr:
+
+	if (f_legacy) {
+		pt->msix_vector_count = 0;
+		vector_count = 1;
+	} else
+		pt->msix_vector_count = vector_count;
+	ptdma_info(1, "%s: msix/msi vector %d\n", __func__,
+	    pt->msix_vector_count);
+
+	flags |= RF_ACTIVE;
+	if (f_legacy)
+		flags |= RF_SHAREABLE;
+
+	for (i = 0; i < vector_count; i++) {
+
+		/* RID should be 0 for legacy intr */
+		if (f_legacy)
+			pt->irq_info[i].irq_rid = i;
+		else
+			pt->irq_info[i].irq_rid = i + 1;
+
+		ptdma_info(1, "%s: i %d irq_rid %d\n", __func__, i,
+		    pt->irq_info[i].irq_rid);
+		pt->irq_info[i].irq_res = bus_alloc_resource_any(pt->dev,
+		    SYS_RES_IRQ, &pt->irq_info[i].irq_rid, flags);
+		if (pt->irq_info[i].irq_res == NULL) {
+			ptdma_error("bus alloc resource failed\n");
+			return (-ENOMEM);
+		}
+
+		pt->irq_info[i].irq_tag = NULL;
+		pt->irq_allocated++;
+
+		ret = bus_setup_intr(pt->dev, pt->irq_info[i].irq_res,
+		    INTR_MPSAFE | INTR_TYPE_MISC, NULL, pt_interrupt_handler,
+		    pt, &pt->irq_info[i].irq_tag);
+		if (ret != 0) {
+			ptdma_error("bus setup intr failed \n");
+			return (-ret);
+		}
+	}
+
+	TASK_INIT(&pt->intr_task, 0, ptdma_intr_work, pt);
+
+	pt->intr_workqueue = taskqueue_create("ptdma_intr_wq",
+	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &pt->intr_workqueue);
+
+	taskqueue_start_threads(&pt->intr_workqueue, 1, PI_DISK,
+	    "ptdma_intr_work_thread");
+
+	return (0);
+}
+
+/*
+ * Undo ptdma_setup_irq(): drain and free the taskqueue, tear down and
+ * release every allocated IRQ vector, and release MSI/MSI-X resources.
+ *
+ * Fix: the loop took `irq_info = &pt->irq_info[i]` but then indexed
+ * `irq_info[i]` — a double offset that read out of bounds for every
+ * i > 0.  Use the cursor directly.
+ */
+static int ptdma_teardown_intr(struct ptdma_softc *pt)
+{
+	struct pt_irq_info *irq_info;
+	int i;
+
+	taskqueue_drain(pt->intr_workqueue, &pt->intr_task);
+	taskqueue_free(pt->intr_workqueue);
+
+	for (i = 0; i < pt->irq_allocated; i++) {
+		irq_info = &pt->irq_info[i];
+		if (irq_info->irq_tag != NULL)
+			bus_teardown_intr(pt->dev, irq_info->irq_res,
+			    irq_info->irq_tag);
+
+		if (irq_info->irq_res != NULL)
+			bus_release_resource(pt->dev, SYS_RES_IRQ,
+			    rman_get_rid(irq_info->irq_res),
+			    irq_info->irq_res);
+	}
+
+	if (pt->msix_vector_count) {
+		pci_release_msi(pt->dev);
+		pt->msix_vector_count = 0;
+	}
+
+	return (0);
+}
+
+static int ptdma_map_pci_bar(struct ptdma_softc *pt)
+{
+	struct resource *res;
+	int bar;
+
+	/*
+	 * Map the two memory BARs named by the per-device variant data
+	 * and cache the handles needed later for MMIO access.
+	 */
+	for (bar = 0; bar < 2; bar++) {
+		pt->pci_resource_id[bar] = PCIR_BAR(pt->dev_vdata->bar[bar]);
+		ptdma_info(1, "pci resource_id %d\n", pt->pci_resource_id[bar]);
+
+		res = bus_alloc_resource_any(pt->dev, SYS_RES_MEMORY,
+		    &pt->pci_resource_id[bar], RF_ACTIVE);
+		if (res == NULL) {
+			ptdma_error("unable to allocate pci resource\n");
+			return (-ENODEV);
+		}
+		pt->pci_resource[bar] = res;
+
+		pt->pci_bus_tag[bar] = rman_get_bustag(res);
+		pt->pci_bus_handle[bar] = rman_get_bushandle(res);
+		pt->paddr[bar] = rman_get_start(res);
+		pt->vaddr[bar] = rman_get_virtual(res);
+		pt->bar_size[bar] = rman_get_size(res);
+	}
+
+	ptdma_info(1, "PCI bar map successful\n");
+
+	return (0);
+}
+
+/*
+ * PTDMA Driver linkage functions to system
+ */
+static int ptdma_probe(device_t dev)
+{
+	const struct pci_device_table *match;
+	struct ptdma_softc *pt;
+
+	/* Record the device handle, then match against the PCI ID table. */
+	pt = device_get_softc(dev);
+	pt->dev = dev;
+
+	match = PCI_MATCH(dev, ptdma_devs);
+	if (match == NULL)
+		return (ENXIO);
+
+	/* Stash the per-device variant data for use during attach. */
+	pt->dev_vdata = (struct pt_dev_vdata *)match->driver_data;
+	device_set_desc(dev, match->descr);
+
+	return (BUS_PROBE_GENERIC);
+}
+
+static int ptdma_attach(device_t dev)
+{
+	struct ptdma_softc *pt = device_get_softc(dev);
+	int ret = 0;
+
+	/* First device to attach creates the global channel-list lock. */
+	if (n_ptdma_dev == 0)
+		mtx_init(&pt_channel_lock, "PTDMA Channels", 0, MTX_DEF);
+
+	mtx_lock(&pt_channel_lock);
+	pt->chan_idx = n_ptdma_dev;
+	snprintf(pt->name, PTDMA_MAX_NAME_LEN, "%s", device_get_nameunit(dev));
+
+	/* Lazily initialize the global channel list on first use. */
+	if (pt_channel.tqh_last == NULL)
+		TAILQ_INIT(&pt_channel);
+	atomic_set_int(&pt->refcnt, 0);
+	atomic_add_int(&n_ptdma_dev, 1);
+	TAILQ_INSERT_TAIL(&pt_channel, pt, entry);
+	mtx_unlock(&pt_channel_lock);
+
+	/*
+	 * NOTE(review): the two early returns below leave this softc on
+	 * pt_channel and n_ptdma_dev incremented; confirm whether a failed
+	 * attach should unwind that (ptdma_detach() does the removal).
+	 */
+	ret = pci_enable_busmaster(dev);
+	if (ret != 0)
+		return (ret);
+
+	ret = ptdma_map_pci_bar(pt);
+	if (ret)
+		return (ret);
+
+	ret = ptdma_setup_irq(pt);
+	if (ret)
+		goto err;
+
+	ret = ptdma_queue_init(pt);
+	if (ret)
+		goto err;
+
+	ret = ptdma_init_work_queue(pt);
+	if (ret != 0)
+		goto err;	/* was "goto err;;" -- stray extra semicolon */
+
+	device_printf(pt->dev, "Driver attach successful\n");
+
+err:
+	/* Any failure past IRQ setup unwinds through a full detach. */
+	if (ret != 0)
+		ptdma_detach(dev);
+
+	return (ret);
+}
+
+static int ptdma_detach(device_t dev)
+{
+	struct ptdma_softc *pt = device_get_softc(dev), *pt_chan, *pt_chan_tmp;
+	int i;
+
+	/* Stop the device initiating DMA before tearing state down. */
+	pci_disable_busmaster(dev);
+
+	/* Undo ptdma_init_work_queue() / ptdma_queue_init() / setup_irq(). */
+	ptdma_destroy_work_queue(pt);
+	ptdma_info(1, "PTDMA work queue released\n");
+
+	ptdma_queue_free(pt);
+
+	ptdma_teardown_intr(pt);
+	ptdma_info(1, "PT IRQ released\n");
+
+	/* Release the two BAR mappings set up by ptdma_map_pci_bar(). */
+	for (i = 0 ; i < 2 ; i++) {
+		if (pt->pci_resource[i] != NULL)
+			bus_release_resource(dev, SYS_RES_MEMORY,
+			    pt->pci_resource_id[i], pt->pci_resource[i]);
+	}
+
+	/* Unlink this channel from the global list under the list lock. */
+	mtx_lock(&pt_channel_lock);
+	ptdma_info(1, "%s: name %s id %d num_dev %d\n", __func__, pt->name,
+	    pt->chan_idx, n_ptdma_dev);
+	TAILQ_FOREACH_SAFE(pt_chan, &pt_channel, entry, pt_chan_tmp) {
+		if (pt->chan_idx == pt_chan->chan_idx) {
+			TAILQ_REMOVE(&pt_channel, pt_chan, entry);
+		}
+	}
+	/*
+	 * NOTE(review): remaining devices keep their original chan_idx
+	 * values, and pt_channel_lock is never mtx_destroy()ed when the
+	 * last device goes away -- confirm this is intentional.
+	 */
+	pt->chan_idx = 0;
+	atomic_subtract_int(&n_ptdma_dev, 1);
+	mtx_unlock(&pt_channel_lock);
+
+	ptdma_info(1, "PTDMA Device Detached\n");
+
+	return (0);
+}
+
+/*
+ * PTDMA Driver interface structures to system
+ */
+static device_method_t ptdma_pci_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,		ptdma_probe),
+	DEVMETHOD(device_attach,	ptdma_attach),
+	DEVMETHOD(device_detach,	ptdma_detach),
+	DEVMETHOD_END
+};
+
+/* newbus driver description: name, methods, and softc size. */
+static driver_t ptdma_pci_driver = {
+	"ptdma",
+	ptdma_pci_methods,
+	sizeof(struct ptdma_softc),
+};
+
+/* Register the driver on the PCI bus and export module version 1. */
+static devclass_t ptdma_devclass;
+DRIVER_MODULE(ptdma, pci, ptdma_pci_driver, ptdma_devclass, 0, 0);
+MODULE_VERSION(ptdma, 1);
Index: sys/dev/dmaengine/ptdma/ptdma_hw.h
===================================================================
--- /dev/null
+++ sys/dev/dmaengine/ptdma/ptdma_hw.h
@@ -0,0 +1,151 @@
+/*-
+ * Copyright (c) 2020 Advanced Micro Devices Inc.,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Contact Information :
+ * Rajesh Kumar <rajesh1.kumar@amd.com>
+ * Arpan Palit <Arpan.Palit@amd.com>
+ */
+
+#ifndef __PT_INTERNAL_H__
+#define __PT_INTERNAL_H__
+
+/*
+ * Hardware register layout and access helpers for the AMD PTDMA
+ * pass-through DMA engine.  All registers are accessed as 32-bit
+ * words through BAR 0 (pci_resource[0]).
+ */
+
+/* Byte-order alias matching the Linux naming used by ported code. */
+#define cpu_to_le32 htole32
+
+#define ptdma_read(pt, offset) \
+	bus_read_4((pt)->pci_resource[0], (offset))
+
+#define ptdma_write(pt, offset, value) \
+	bus_write_4((pt)->pci_resource[0], (offset), (value))
+
+/* Register Masks, Shifts, and Bitfields. */
+#define PTDMA_CMD_PRIO_MASK			0x0003
+#define PTDMA_CMD_PRIO_DISABLE			0x0007
+#define PTDMA_CMD_PRIO_HIGHEST			0x0006
+#define PTDMA_CMD_PRIO_LOWEST			0x0000
+
+#define PTDMA_REQID_CFG_MASK			0x0003
+#define PTDMA_REQID_CFG_MAX			0x0005
+#define PTDMA_REQID_CFG_MIN			0x0001
+
+/* Virtual-queue control register fields (PTDMA_Q_CONTROL_REG). */
+#define PTDMA_VQ_CTRL_RUN_MASK			0x0001
+#define PTDMA_VQ_CTRL_RUN			0x0001
+#define PTDMA_VQ_CTRL_HALTED_MASK		0x0002
+#define PTDMA_VQ_CTRL_HALTED			0x0002
+#define PTDMA_VQ_CTRL_MEM_MASK			0x0004
+#define PTDMA_VQ_CTRL_QSIZE_MASK		0x0078
+#define PTDMA_VQ_CTRL_QSIZE_SHIFT		0x0003
+/*
+ * NOTE(review): HPTR mask 0xFFFF0000 with shift 0x8 looks inconsistent
+ * (a field at bit 16 would need shift 0x10) -- verify against the HW spec.
+ */
+#define PTDMA_VQ_CTRL_HPTR_MASK		0xFFFF0000
+#define PTDMA_VQ_CTRL_HPTR_SHIFT		0x0008
+
+/* Per-queue interrupt cause bits. */
+#define PTDMA_CMD_INT_IOC			0x0001
+#define PTDMA_CMD_INT_ERR			0x0002
+#define PTDMA_CMD_INT_QSTOP			0x0004
+#define PTDMA_CMD_INT_QEMPTY			0x0008
+
+#define PTDMA_CMD_DMA_STATUS_ERR_MASK		0x003F
+#define PTDMA_CMD_DMA_STATUS_JOBSTATUS_MASK	0x0380
+#define PTDMA_CMD_DMA_STATUS_ERRSRC_MASK	0x0C00
+#define PTDMA_CMD_DMA_STATUS_VLSBBLK_MASK	0x7000
+
+#define PTDMA_CMD_DMA_INT_STATUS_CNT_MASK	0x007F
+
+#define PTDMA_CMD_DMA_STATUS_WR_MASK		0x00003FF
+#define PTDMA_CMD_DMA_STATUS_WR_SHIFT		0x0000000
+#define PTDMA_CMD_DMA_STATUS_WR_MAX		0x0000400
+#define PTDMA_CMD_DMA_STATUS_RD_MASK		0x007FC00
+#define PTDMA_CMD_DMA_STATUS_RD_SHIFT		0x0000008
+#define PTDMA_CMD_DMA_STATUS_RD_MAX		0x0000100
+#define PTDMA_CMD_DMA_STATUS_STAT_MASK		0x0380000
+#define PTDMA_CMD_DMA_STATUS_STAT_SHIFT	0x0000013
+/* DMA engine state-machine values of the STAT field (after shift). */
+#define PTDMA_CMD_DMA_STATUS_STAT_IDLE		0x0000000
+#define PTDMA_CMD_DMA_STATUS_STAT_WAIT_BUF	0x0000001
+#define PTDMA_CMD_DMA_STATUS_STAT_ARB_REQ	0x0000002
+#define PTDMA_CMD_DMA_STATUS_STAT_ISSUE_REQ	0x0000003
+#define PTDMA_CMD_DMA_STATUS_STAT_WAIT_DATA	0x0000004
+
+#define PTDMA_CMD_DMA_ABORT_OFFSET_MASK	0x003FFFF
+#define PTDMA_CMD_DMA_ABORT_OFFSET_VALID_MASK	0x8000000
+
+/* AXI cache attribute fields (PTDMA_Q_REG_AXCACHE_BASE). */
+#define PTDMA_CMD_AXCACHE_WCACHE_MASK		0x0000F
+#define PTDMA_CMD_AXCACHE_RCACHE_MASK		0x000F0
+#define PTDMA_CMD_AXCACHE_RCACHE_SHIFT		0x00004
+#define PTDMA_CMD_AXCACHE_VQCACHE_MASK		0x00F00
+#define PTDMA_CMD_AXCACHE_VQCACHE_SHIFT	0x00008
+
+#define PTDMA_CMD_VLSB_LSB0_MASK		0x00000000F
+#define PTDMA_CMD_VLSB_LSB1_MASK		0x0000000F0
+#define PTDMA_CMD_VLSB_LSB2_MASK		0x000000F00
+#define PTDMA_CMD_VLSB_LSB3_MASK		0x00000F000
+#define PTDMA_CMD_VLSB_LSB4_MASK		0x0000F0000
+#define PTDMA_CMD_VLSB_LSB5_MASK		0x000F00000
+#define PTDMA_CMD_VLSB_LSB6_MASK		0x00F000000
+#define PTDMA_CMD_VLSB_LSB7_MASK		0x0F0000000
+
+#define PTDMA_CFG_ENABLE_MASK			0x0001
+
+/* Register addresses for queue */
+#define PTDMA_CMD_PRIO_OFFSET			0x0000
+#define PTDMA_CMD_REQID_CFG_OFFSET		0x0004
+#define PTDMA_CMD_TIMEOUT_OFFSET		0x0008
+#define PTDMA_CMD_PT_VERSION			0x0010
+
+#define PTDMA_Q_CONTROL_REG			0x1000
+#define PTDMA_Q_REG_TAIL_LO			0x1004
+#define PTDMA_Q_REG_HEAD_LO			0x1008
+#define PTDMA_Q_REG_INT_ENABLE			0x100c
+#define PTDMA_Q_REG_INTR_STATUS		0x1010
+#define PTDMA_Q_REG_STATUS			0x1100
+#define PTDMA_Q_REG_INT_STATUS			0x1104
+#define PTDMA_Q_REG_DMA_STATUS			0x1108
+#define PTDMA_Q_REG_DMA_READ_STATUS		0x110c
+#define PTDMA_Q_REG_DMA_WRITE_STATUS		0x1110
+#define PTDMA_Q_REG_ABORT_BASE			0x1114
+#define PTDMA_Q_REG_AXCACHE_BASE		0x1118
+#define PTDMA_CMD_CONFIG_OFFSET		0x1120
+#define PTDMA_CMD_CLK_GATE_CTL_OFFSET		0x6004
+
+/* Bit masks */
+#define CMD_CLK_DYN_GATING_EN			0x1
+#define CMD_CLK_DYN_GATING_DIS			0x0
+#define CMD_CLK_HW_GATE_MODE			0x1
+#define CMD_CLK_SW_GATE_MODE			0x0
+#define CMD_CLK_GATE_ON_DELAY			0x1000
+#define CMD_CLK_GATE_CTL			0x0
+#define CMD_CLK_GATE_OFF_DELAY			0x1000
+
+/* Composite value written to PTDMA_CMD_CLK_GATE_CTL_OFFSET. */
+#define CMD_CLK_GATE_CONFIG (CMD_CLK_DYN_GATING_EN | \
+	CMD_CLK_HW_GATE_MODE | CMD_CLK_GATE_ON_DELAY | \
+	CMD_CLK_GATE_CTL | CMD_CLK_GATE_OFF_DELAY)
+
+#define COMMANDS_PER_QUEUE 16
+
+/* Interrupt status bits as reported in PTDMA_Q_REG_INTR_STATUS. */
+#define PTDMA_INT_COMPLETION			0x1
+#define PTDMA_INT_ERROR			0x2
+#define PTDMA_INT_QUEUE_STOPPED		0x4
+#define PTDMA_INT_EMPTY_QUEUE			0x8
+
+#define PTDMA_SUPP_INTERRUPTS (PTDMA_INT_COMPLETION | PTDMA_INT_ERROR | \
+	PTDMA_INT_QUEUE_STOPPED | PTDMA_INT_EMPTY_QUEUE)
+#endif /* __PT_INTERNAL_H__ */
Index: sys/modules/dmaengine/Makefile
===================================================================
--- /dev/null
+++ sys/modules/dmaengine/Makefile
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+# Descend into the per-driver subdirectories of the dmaengine tree.
+SUBDIR= ptdma
+
+.include <bsd.subdir.mk>
Index: sys/modules/dmaengine/ptdma/Makefile
===================================================================
--- /dev/null
+++ sys/modules/dmaengine/ptdma/Makefile
@@ -0,0 +1,9 @@
+# $FreeBSD$
+
+# Build the AMD PTDMA pass-through DMA engine driver as a kernel module.
+.PATH: ${SRCTOP}/sys/dev/dmaengine/ptdma
+
+KMOD = ptdma
+SRCS = ptdma.c
+# Autogenerated kernel interface headers required by a newbus PCI driver.
+SRCS += device_if.h bus_if.h pci_if.h
+
+.include <bsd.kmod.mk>

File Metadata

Mime Type
text/plain
Expires
Tue, Apr 28, 4:48 AM (13 h, 22 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
32251458
Default Alt Text
D25409.id73511.diff (42 KB)

Event Timeline