diff --git a/sys/dev/nvme/nvme_ctrlr.c b/sys/dev/nvme/nvme_ctrlr.c --- a/sys/dev/nvme/nvme_ctrlr.c +++ b/sys/dev/nvme/nvme_ctrlr.c @@ -730,6 +730,7 @@ aer->ctrlr = ctrlr; req = nvme_allocate_request_null(M_WAITOK, nvme_ctrlr_async_event_cb, aer); + req->payload_read = true; aer->req = req; aer->log_page_id = 0; /* Not a valid page */ @@ -1320,6 +1321,7 @@ *req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg); (*req)->payload = memdesc_vmpages(upages, len, addr & PAGE_MASK); (*req)->payload_valid = true; + (*req)->payload_read = is_read; return (0); } @@ -1373,9 +1375,11 @@ nvme_pt_done, pt); if (ret != 0) return (ret); - } else + } else { req = nvme_allocate_request_vaddr(pt->buf, pt->len, M_WAITOK, nvme_pt_done, pt); + req->payload_read = pt->is_read; + } } else req = nvme_allocate_request_null(M_WAITOK, nvme_pt_done, pt); @@ -1456,10 +1460,12 @@ &req, nvme_npc_done, npc); if (ret != 0) return (ret); - } else + } else { req = nvme_allocate_request_vaddr( (void *)(uintptr_t)npc->addr, npc->data_len, M_WAITOK, nvme_npc_done, npc); + req->payload_read = (npc->opcode & 0x2) != 0; + } } else req = nvme_allocate_request_null(M_WAITOK, nvme_npc_done, npc); diff --git a/sys/dev/nvme/nvme_ctrlr_cmd.c b/sys/dev/nvme/nvme_ctrlr_cmd.c --- a/sys/dev/nvme/nvme_ctrlr_cmd.c +++ b/sys/dev/nvme/nvme_ctrlr_cmd.c @@ -38,7 +38,7 @@ req = nvme_allocate_request_vaddr(payload, sizeof(struct nvme_controller_data), M_WAITOK, cb_fn, cb_arg); - + req->payload_read = true; cmd = &req->cmd; cmd->opc = NVME_OPC_IDENTIFY; @@ -60,6 +60,7 @@ req = nvme_allocate_request_vaddr(payload, sizeof(struct nvme_namespace_data), M_WAITOK, cb_fn, cb_arg); + req->payload_read = true; cmd = &req->cmd; cmd->opc = NVME_OPC_IDENTIFY; @@ -265,6 +266,7 @@ */ req = nvme_allocate_request_vaddr(payload, payload_size, M_NOWAIT, cb_fn, cb_arg); + req->payload_read = true; cmd = &req->cmd; cmd->opc = NVME_OPC_GET_LOG_PAGE; diff --git a/sys/dev/nvme/nvme_ns_cmd.c b/sys/dev/nvme/nvme_ns_cmd.c --- a/sys/dev/nvme/nvme_ns_cmd.c 
+++ b/sys/dev/nvme/nvme_ns_cmd.c @@ -39,6 +39,7 @@ lba_count * nvme_ns_get_sector_size(ns), M_NOWAIT, cb_fn, cb_arg); if (req == NULL) return (ENOMEM); + req->payload_read = true; nvme_ns_read_cmd(&req->cmd, ns->id, lba, lba_count); @@ -58,6 +59,7 @@ req = nvme_allocate_request_bio(bp, M_NOWAIT, cb_fn, cb_arg); if (req == NULL) return (ENOMEM); + req->payload_read = true; lba = bp->bio_offset / nvme_ns_get_sector_size(ns); lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns); nvme_ns_read_cmd(&req->cmd, ns->id, lba, lba_count); @@ -77,6 +79,7 @@ lba_count * nvme_ns_get_sector_size(ns), M_NOWAIT, cb_fn, cb_arg); if (req == NULL) return (ENOMEM); + req->payload_read = false; nvme_ns_write_cmd(&req->cmd, ns->id, lba, lba_count); @@ -96,6 +99,7 @@ req = nvme_allocate_request_bio(bp, M_NOWAIT, cb_fn, cb_arg); if (req == NULL) return (ENOMEM); + req->payload_read = false; lba = bp->bio_offset / nvme_ns_get_sector_size(ns); lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns); nvme_ns_write_cmd(&req->cmd, ns->id, lba, lba_count); @@ -117,6 +121,7 @@ cb_arg); if (req == NULL) return (ENOMEM); + req->payload_read = false; cmd = &req->cmd; cmd->opc = NVME_OPC_DATASET_MANAGEMENT; @@ -163,7 +168,7 @@ nvme_completion_poll_cb, &status); if (req == NULL) return (ENOMEM); - + req->payload_read = false; cmd = &req->cmd; if (len > 0) { diff --git a/sys/dev/nvme/nvme_private.h b/sys/dev/nvme/nvme_private.h --- a/sys/dev/nvme/nvme_private.h +++ b/sys/dev/nvme/nvme_private.h @@ -115,7 +115,7 @@ int32_t retries; bool payload_valid; bool timeout; - bool spare[2]; /* Future use */ + bool payload_read; STAILQ_ENTRY(nvme_request) stailq; }; diff --git a/sys/dev/nvme/nvme_qpair.c b/sys/dev/nvme/nvme_qpair.c --- a/sys/dev/nvme/nvme_qpair.c +++ b/sys/dev/nvme/nvme_qpair.c @@ -237,9 +237,10 @@ if (!retry) { if (req->payload_valid) { + bus_dmasync_op_t op = req->payload_read ? 
+ BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(qpair->dma_tag_payload, - tr->payload_dma_map, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + tr->payload_dma_map, op); } if (req->cb_fn) req->cb_fn(req->cb_arg, cpl); @@ -1116,8 +1117,11 @@ tr->req->cmd.prp2 = 0; } - bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + if (tr->req->payload_valid) { + bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map, + tr->req->payload_read ? + BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); + } nvme_qpair_submit_tracker(tr->qpair, tr); } diff --git a/sys/dev/nvme/nvme_sim.c b/sys/dev/nvme/nvme_sim.c --- a/sys/dev/nvme/nvme_sim.c +++ b/sys/dev/nvme/nvme_sim.c @@ -99,6 +99,7 @@ payload = nvmeio->data_ptr; size = nvmeio->dxfer_len; /* SG LIST ??? */ + /* We only support NONE, IN, or OUT transactions, but not BOTH */ if ((nvmeio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO) req = nvme_allocate_request_bio((struct bio *)payload, M_NOWAIT, nvme_sim_nvmeio_done, ccb); @@ -116,6 +117,8 @@ xpt_done(ccb); return; } + if (payload != NULL) + req->payload_read = (nvmeio->ccb_h.flags & CAM_DIR_IN) != 0; ccb->ccb_h.status |= CAM_SIM_QUEUED; memcpy(&req->cmd, &ccb->nvmeio.cmd, sizeof(ccb->nvmeio.cmd));