Page Menu
Home
FreeBSD
Search
Configure Global Search
Log In
Files
F137996189
D8453.id22039.diff
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Flag For Later
Award Token
Size
9 KB
Referenced Files
None
Subscribers
None
D8453.id22039.diff
View Options
Index: sys/dev/nvme/nvme_ctrlr.c
===================================================================
--- sys/dev/nvme/nvme_ctrlr.c
+++ sys/dev/nvme/nvme_ctrlr.c
@@ -80,11 +80,12 @@
return (0);
}
-static void
+static int
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
struct nvme_qpair *qpair;
uint32_t num_entries;
+ int error;
qpair = &ctrlr->adminq;
@@ -105,12 +106,13 @@
* The admin queue's max xfer size is treated differently than the
* max I/O xfer size. 16KB is sufficient here - maybe even less?
*/
- nvme_qpair_construct(qpair,
- 0, /* qpair ID */
- 0, /* vector */
- num_entries,
- NVME_ADMIN_TRACKERS,
- ctrlr);
+ error = nvme_qpair_construct(qpair,
+ 0, /* qpair ID */
+ 0, /* vector */
+ num_entries,
+ NVME_ADMIN_TRACKERS,
+ ctrlr);
+ return (error);
}
static int
@@ -118,7 +120,7 @@
{
struct nvme_qpair *qpair;
union cap_lo_register cap_lo;
- int i, num_entries, num_trackers;
+ int i, error, num_entries, num_trackers;
num_entries = NVME_IO_ENTRIES;
TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
@@ -163,12 +165,14 @@
* For I/O queues, use the controller-wide max_xfer_size
* calculated in nvme_attach().
*/
- nvme_qpair_construct(qpair,
+ error = nvme_qpair_construct(qpair,
i+1, /* qpair ID */
ctrlr->msix_enabled ? i+1 : 0, /* vector */
num_entries,
num_trackers,
ctrlr);
+ if (error)
+ return (error);
/*
* Do not bother binding interrupts if we only have one I/O
@@ -1098,7 +1102,8 @@
nvme_ctrlr_setup_interrupts(ctrlr);
ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
- nvme_ctrlr_construct_admin_qpair(ctrlr);
+ if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
+ return (ENXIO);
ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));
Index: sys/dev/nvme/nvme_private.h
===================================================================
--- sys/dev/nvme/nvme_private.h
+++ sys/dev/nvme/nvme_private.h
@@ -172,9 +172,8 @@
bus_dmamap_t payload_dma_map;
uint16_t cid;
- uint64_t prp[NVME_MAX_PRP_LIST_ENTRIES];
+ uint64_t *prp;
bus_addr_t prp_bus_addr;
- bus_dmamap_t prp_dma_map;
};
struct nvme_qpair {
@@ -206,10 +205,8 @@
bus_dma_tag_t dma_tag;
bus_dma_tag_t dma_tag_payload;
- bus_dmamap_t cmd_dma_map;
+ bus_dmamap_t queuemem_map;
uint64_t cmd_bus_addr;
-
- bus_dmamap_t cpl_dma_map;
uint64_t cpl_bus_addr;
TAILQ_HEAD(, nvme_tracker) free_tr;
@@ -417,7 +414,7 @@
void nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
struct nvme_request *req);
-void nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
+int nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
uint16_t vector, uint32_t num_entries,
uint32_t num_trackers,
struct nvme_controller *ctrlr);
Index: sys/dev/nvme/nvme_qpair.c
===================================================================
--- sys/dev/nvme/nvme_qpair.c
+++ sys/dev/nvme/nvme_qpair.c
@@ -36,6 +36,7 @@
static void _nvme_qpair_submit_request(struct nvme_qpair *qpair,
struct nvme_request *req);
+static void nvme_qpair_destroy(struct nvme_qpair *qpair);
struct nvme_opcode_string {
@@ -290,22 +291,6 @@
}
static void
-nvme_qpair_construct_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
- uint16_t cid)
-{
-
- bus_dmamap_create(qpair->dma_tag_payload, 0, &tr->payload_dma_map);
- bus_dmamap_create(qpair->dma_tag, 0, &tr->prp_dma_map);
-
- bus_dmamap_load(qpair->dma_tag, tr->prp_dma_map, tr->prp,
- sizeof(tr->prp), nvme_single_map, &tr->prp_bus_addr, 0);
-
- callout_init(&tr->timer, 1);
- tr->cid = cid;
- tr->qpair = qpair;
-}
-
-static void
nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
struct nvme_completion *cpl, boolean_t print_on_error)
{
@@ -457,14 +442,16 @@
nvme_qpair_process_completions(qpair);
}
-void
+int
nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
uint16_t vector, uint32_t num_entries, uint32_t num_trackers,
struct nvme_controller *ctrlr)
{
struct nvme_tracker *tr;
- uint32_t i;
- int err;
+ size_t cmdsz, cplsz, prpsz, allocsz;
+ uint64_t queuemem_phys, prpmem_phys, list_phys;
+ uint8_t *queuemem, *prpmem;
+ int i, err;
qpair->id = id;
qpair->vector = vector;
@@ -495,40 +482,50 @@
BUS_SPACE_MAXADDR, NULL, NULL, NVME_MAX_XFER_SIZE,
(NVME_MAX_XFER_SIZE/PAGE_SIZE)+1, PAGE_SIZE, 0,
NULL, NULL, &qpair->dma_tag_payload);
- if (err != 0)
+ if (err != 0) {
nvme_printf(ctrlr, "payload tag create failed %d\n", err);
+ goto out;
+ }
+
+ /*
+ * Each component must be page aligned, and individual PRP lists
+ * cannot cross a page boundary.
+ */
+ cmdsz = qpair->num_entries * sizeof(struct nvme_command);
+ cmdsz = roundup2(cmdsz, PAGE_SIZE);
+ cplsz = qpair->num_entries * sizeof(struct nvme_completion);
+ cplsz = roundup2(cplsz, PAGE_SIZE);
+ prpsz = sizeof(uint64_t) * NVME_MAX_PRP_LIST_ENTRIES;
+ allocsz = cmdsz + cplsz + qpair->num_trackers * prpsz;
err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
- 4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
- BUS_SPACE_MAXSIZE, 1, BUS_SPACE_MAXSIZE, 0,
- NULL, NULL, &qpair->dma_tag);
- if (err != 0)
+ PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+ allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag);
+ if (err != 0) {
nvme_printf(ctrlr, "tag create failed %d\n", err);
+ goto out;
+ }
+
+ if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
+ BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
+ nvme_printf(ctrlr, "failed to alloc qpair memory\n");
+ goto out;
+ }
+
+ if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map,
+ queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) {
+ nvme_printf(ctrlr, "failed to load qpair memory\n");
+ goto out;
+ }
qpair->num_cmds = 0;
qpair->num_intr_handler_calls = 0;
-
- qpair->cmd = contigmalloc(qpair->num_entries *
- sizeof(struct nvme_command), M_NVME, M_ZERO,
- 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
- qpair->cpl = contigmalloc(qpair->num_entries *
- sizeof(struct nvme_completion), M_NVME, M_ZERO,
- 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
-
- err = bus_dmamap_create(qpair->dma_tag, 0, &qpair->cmd_dma_map);
- if (err != 0)
- nvme_printf(ctrlr, "cmd_dma_map create failed %d\n", err);
-
- err = bus_dmamap_create(qpair->dma_tag, 0, &qpair->cpl_dma_map);
- if (err != 0)
- nvme_printf(ctrlr, "cpl_dma_map create failed %d\n", err);
-
- bus_dmamap_load(qpair->dma_tag, qpair->cmd_dma_map,
- qpair->cmd, qpair->num_entries * sizeof(struct nvme_command),
- nvme_single_map, &qpair->cmd_bus_addr, 0);
- bus_dmamap_load(qpair->dma_tag, qpair->cpl_dma_map,
- qpair->cpl, qpair->num_entries * sizeof(struct nvme_completion),
- nvme_single_map, &qpair->cpl_bus_addr, 0);
+ qpair->cmd = (struct nvme_command *)queuemem;
+ qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz);
+ prpmem = (uint8_t *)(queuemem + cmdsz + cplsz);
+ qpair->cmd_bus_addr = queuemem_phys;
+ qpair->cpl_bus_addr = queuemem_phys + cmdsz;
+ prpmem_phys = queuemem_phys + cmdsz + cplsz;
qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[id].sq_tdbl);
qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[id].cq_hdbl);
@@ -538,13 +535,41 @@
STAILQ_INIT(&qpair->queued_req);
for (i = 0; i < qpair->num_trackers; i++) {
+
+ /*
+ * Make sure that the PRP list for this tracker doesn't
+ * overflow to another page.
+ */
+ list_phys = prpmem_phys + i * prpsz;
+ if (trunc_page(list_phys) !=
+ trunc_page(list_phys + prpsz - 1)) {
+ qpair->num_trackers = i;
+ break;
+ }
+
tr = malloc(sizeof(*tr), M_NVME, M_ZERO | M_WAITOK);
- nvme_qpair_construct_tracker(qpair, tr, i);
+ bus_dmamap_create(qpair->dma_tag_payload, 0,
+ &tr->payload_dma_map);
+ callout_init(&tr->timer, 1);
+ tr->cid = i;
+ tr->qpair = qpair;
+ tr->prp = (uint64_t *)(prpmem + i * prpsz);
+ tr->prp_bus_addr = prpmem_phys + i * prpsz;
TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
}
- qpair->act_tr = malloc(sizeof(struct nvme_tracker *) * qpair->num_entries,
- M_NVME, M_ZERO | M_WAITOK);
+ if (qpair->num_trackers == 0) {
+ nvme_printf(ctrlr, "failed to allocate enough trackers\n");
+ goto out;
+ }
+
+ qpair->act_tr = malloc(sizeof(struct nvme_tracker *) *
+ qpair->num_entries, M_NVME, M_ZERO | M_WAITOK);
+ return (0);
+
+out:
+ nvme_qpair_destroy(qpair);
+ return (ENOMEM);
}
static void
@@ -555,23 +580,17 @@
if (qpair->tag)
bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);
+ if (mtx_initialized(&qpair->lock))
+ mtx_destroy(&qpair->lock);
+
if (qpair->res)
bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
rman_get_rid(qpair->res), qpair->res);
- if (qpair->cmd) {
- bus_dmamap_unload(qpair->dma_tag, qpair->cmd_dma_map);
- bus_dmamap_destroy(qpair->dma_tag, qpair->cmd_dma_map);
- contigfree(qpair->cmd,
- qpair->num_entries * sizeof(struct nvme_command), M_NVME);
- }
-
- if (qpair->cpl) {
- bus_dmamap_unload(qpair->dma_tag, qpair->cpl_dma_map);
- bus_dmamap_destroy(qpair->dma_tag, qpair->cpl_dma_map);
- contigfree(qpair->cpl,
- qpair->num_entries * sizeof(struct nvme_completion),
- M_NVME);
+ if (qpair->cmd != NULL) {
+ bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map);
+ bus_dmamem_free(qpair->dma_tag, qpair->cmd,
+ qpair->queuemem_map);
}
if (qpair->dma_tag)
@@ -587,7 +606,6 @@
tr = TAILQ_FIRST(&qpair->free_tr);
TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
bus_dmamap_destroy(qpair->dma_tag, tr->payload_dma_map);
- bus_dmamap_destroy(qpair->dma_tag, tr->prp_dma_map);
free(tr, M_NVME);
}
}
Index: sys/modules/nvme/Makefile
===================================================================
--- sys/modules/nvme/Makefile
+++ sys/modules/nvme/Makefile
@@ -16,6 +16,7 @@
\
bus_if.h \
device_if.h \
+ opt_cam.h \
pci_if.h
.include <bsd.kmod.mk>
File Metadata
Details
Attached
Mime Type
text/plain
Expires
Sat, Nov 29, 12:09 AM (1 h, 51 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
26309120
Default Alt Text
D8453.id22039.diff (9 KB)
Attached To
Mode
D8453: Change the device shared memory allocation routines to use the busdma API. One side effect is that all memory, including the PRP segments, are now allocated in a single slab.
Attached
Detach File
Event Timeline
Log In to Comment