Index: sys/dev/nvme/nvme.h =================================================================== --- sys/dev/nvme/nvme.h +++ sys/dev/nvme/nvme.h @@ -1570,20 +1570,16 @@ int is_admin_cmd); /* Admin functions */ -void nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, - uint8_t feature, uint32_t cdw11, - uint32_t cdw12, uint32_t cdw13, - uint32_t cdw14, uint32_t cdw15, - void *payload, uint32_t payload_size, - nvme_cb_fn_t cb_fn, void *cb_arg); -void nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, - uint8_t feature, uint32_t cdw11, - void *payload, uint32_t payload_size, - nvme_cb_fn_t cb_fn, void *cb_arg); -void nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, - uint8_t log_page, uint32_t nsid, - void *payload, uint32_t payload_size, - nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, + uint8_t feature, uint32_t cdw11, uint32_t cdw12, uint32_t cdw13, + uint32_t cdw14, uint32_t cdw15, void *payload, uint32_t payload_size, + nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, + uint8_t feature, uint32_t cdw11, void *payload, uint32_t payload_size, + nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, + uint8_t log_page, uint32_t nsid, void *payload, uint32_t payload_size, + nvme_cb_fn_t cb_fn, void *cb_arg); /* NVM I/O functions */ int nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, Index: sys/dev/nvme/nvme_ctrlr.c =================================================================== --- sys/dev/nvme/nvme_ctrlr.c +++ sys/dev/nvme/nvme_ctrlr.c @@ -411,7 +411,7 @@ struct nvme_completion_poll_status status; status.done = 0; - nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata, + status.req = nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata, nvme_completion_poll_cb, &status); nvme_completion_poll(&status); if 
(nvme_completion_is_error(&status.cpl)) { @@ -440,7 +440,7 @@ int cq_allocated, sq_allocated; status.done = 0; - nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues, + status.req = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues, nvme_completion_poll_cb, &status); nvme_completion_poll(&status); if (nvme_completion_is_error(&status.cpl)) { @@ -480,7 +480,7 @@ qpair = &ctrlr->ioq[i]; status.done = 0; - nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, + status.req = nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, nvme_completion_poll_cb, &status); nvme_completion_poll(&status); if (nvme_completion_is_error(&status.cpl)) { @@ -489,7 +489,7 @@ } status.done = 0; - nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair, + status.req = nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair, nvme_completion_poll_cb, &status); nvme_completion_poll(&status); if (nvme_completion_is_error(&status.cpl)) { @@ -511,7 +511,7 @@ qpair = &ctrlr->ioq[i]; status.done = 0; - nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair, + status.req = nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair, nvme_completion_poll_cb, &status); nvme_completion_poll(&status); if (nvme_completion_is_error(&status.cpl)) { @@ -520,7 +520,7 @@ } status.done = 0; - nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair, + status.req = nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair, nvme_completion_poll_cb, &status); nvme_completion_poll(&status); if (nvme_completion_is_error(&status.cpl)) { @@ -802,7 +802,7 @@ ctrlr->async_event_config |= 0x300; status.done = 0; - nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD, + status.req = nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD, 0, NULL, 0, nvme_completion_poll_cb, &status); nvme_completion_poll(&status); if (nvme_completion_is_error(&status.cpl) || @@ -994,7 +994,7 @@ if (memret) cdw11 |= 2; status.done = 0; - nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_HOST_MEMORY_BUFFER, cdw11, + status.req = nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_HOST_MEMORY_BUFFER, cdw11, 
ctrlr->hmb_nchunks * ctrlr->hmb_chunk / 4096, ctrlr->hmb_desc_paddr, ctrlr->hmb_desc_paddr >> 32, ctrlr->hmb_nchunks, NULL, 0, nvme_completion_poll_cb, &status); Index: sys/dev/nvme/nvme_ctrlr_cmd.c =================================================================== --- sys/dev/nvme/nvme_ctrlr_cmd.c +++ sys/dev/nvme/nvme_ctrlr_cmd.c @@ -31,7 +31,7 @@ #include "nvme_private.h" -void +struct nvme_request * nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload, nvme_cb_fn_t cb_fn, void *cb_arg) { @@ -51,9 +51,10 @@ cmd->cdw10 = htole32(1); nvme_ctrlr_submit_admin_request(ctrlr, req); + return (req); } -void +struct nvme_request * nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint32_t nsid, void *payload, nvme_cb_fn_t cb_fn, void *cb_arg) { @@ -72,9 +73,10 @@ cmd->nsid = htole32(nsid); nvme_ctrlr_submit_admin_request(ctrlr, req); + return (req); } -void +struct nvme_request * nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr, struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg) { @@ -96,9 +98,10 @@ cmd->prp1 = htole64(io_que->cpl_bus_addr); nvme_ctrlr_submit_admin_request(ctrlr, req); + return (req); } -void +struct nvme_request * nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr, struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg) { @@ -120,9 +123,10 @@ cmd->prp1 = htole64(io_que->cmd_bus_addr); nvme_ctrlr_submit_admin_request(ctrlr, req); + return (req); } -void +struct nvme_request * nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr, struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg) { @@ -141,9 +145,10 @@ cmd->cdw10 = htole32(io_que->id); nvme_ctrlr_submit_admin_request(ctrlr, req); + return (req); } -void +struct nvme_request * nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr, struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg) { @@ -162,9 +167,10 @@ cmd->cdw10 = htole32(io_que->id); nvme_ctrlr_submit_admin_request(ctrlr, req); + return (req); } -void 
+struct nvme_request * nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature, uint32_t cdw11, uint32_t cdw12, uint32_t cdw13, uint32_t cdw14, uint32_t cdw15, void *payload, uint32_t payload_size, @@ -185,9 +191,10 @@ cmd->cdw15 = htole32(cdw15); nvme_ctrlr_submit_admin_request(ctrlr, req); + return (req); } -void +struct nvme_request * nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature, uint32_t cdw11, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg) @@ -203,32 +210,33 @@ cmd->cdw11 = htole32(cdw11); nvme_ctrlr_submit_admin_request(ctrlr, req); + return (req); } -void +struct nvme_request * nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr, uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg) { uint32_t cdw11; cdw11 = ((num_queues - 1) << 16) | (num_queues - 1); - nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_NUMBER_OF_QUEUES, cdw11, - 0, 0, 0, 0, NULL, 0, cb_fn, cb_arg); + return (nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_NUMBER_OF_QUEUES, cdw11, + 0, 0, 0, 0, NULL, 0, cb_fn, cb_arg)); } -void +struct nvme_request * nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr, uint32_t state, nvme_cb_fn_t cb_fn, void *cb_arg) { uint32_t cdw11; cdw11 = state; - nvme_ctrlr_cmd_set_feature(ctrlr, + return (nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0, 0, 0, 0, NULL, 0, - cb_fn, cb_arg); + cb_fn, cb_arg)); } -void +struct nvme_request * nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr, uint32_t microseconds, uint32_t threshold, nvme_cb_fn_t cb_fn, void *cb_arg) { @@ -249,11 +257,11 @@ } cdw11 = ((microseconds/100) << 8) | threshold; - nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_INTERRUPT_COALESCING, cdw11, - 0, 0, 0, 0, NULL, 0, cb_fn, cb_arg); + return (nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_INTERRUPT_COALESCING, cdw11, + 0, 0, 0, 0, NULL, 0, cb_fn, cb_arg)); } -void +struct nvme_request * nvme_ctrlr_cmd_get_log_page(struct 
nvme_controller *ctrlr, uint8_t log_page, uint32_t nsid, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg) @@ -271,9 +279,10 @@ cmd->cdw10 = htole32(cmd->cdw10); nvme_ctrlr_submit_admin_request(ctrlr, req); + return (req); } -void +struct nvme_request * nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr, struct nvme_error_information_entry *payload, uint32_t num_entries, nvme_cb_fn_t cb_fn, void *cb_arg) @@ -289,32 +298,32 @@ if (num_entries > (ctrlr->cdata.elpe + 1)) num_entries = ctrlr->cdata.elpe + 1; - nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_ERROR, + return (nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_ERROR, NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload) * num_entries, - cb_fn, cb_arg); + cb_fn, cb_arg)); } -void +struct nvme_request * nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr, uint32_t nsid, struct nvme_health_information_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg) { - nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION, - nsid, payload, sizeof(*payload), cb_fn, cb_arg); + return (nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION, + nsid, payload, sizeof(*payload), cb_fn, cb_arg)); } -void +struct nvme_request * nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr, struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg) { - nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT, + return (nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT, NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload), cb_fn, - cb_arg); + cb_arg)); } -void +struct nvme_request * nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid, uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg) { @@ -328,4 +337,5 @@ cmd->cdw10 = htole32((cid << 16) | sqid); nvme_ctrlr_submit_admin_request(ctrlr, req); + return (req); } Index: sys/dev/nvme/nvme_ns.c =================================================================== --- sys/dev/nvme/nvme_ns.c +++ 
sys/dev/nvme/nvme_ns.c @@ -528,7 +528,7 @@ mtx_init(&ns->lock, "nvme ns lock", NULL, MTX_DEF); status.done = 0; - nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data, + status.req = nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data, nvme_completion_poll_cb, &status); nvme_completion_poll(&status); if (nvme_completion_is_error(&status.cpl)) { Index: sys/dev/nvme/nvme_ns_cmd.c =================================================================== --- sys/dev/nvme/nvme_ns_cmd.c +++ sys/dev/nvme/nvme_ns_cmd.c @@ -167,8 +167,8 @@ int i; status.done = FALSE; - req = nvme_allocate_request_vaddr(virt, len, nvme_completion_poll_cb, - &status); + status.req = req = nvme_allocate_request_vaddr(virt, len, + nvme_completion_poll_cb, &status); if (req == NULL) return (ENOMEM); Index: sys/dev/nvme/nvme_private.h =================================================================== --- sys/dev/nvme/nvme_private.h +++ sys/dev/nvme/nvme_private.h @@ -118,7 +118,7 @@ extern bool nvme_verbose_cmd_dump; struct nvme_completion_poll_status { - + struct nvme_request *req; struct nvme_completion cpl; int done; }; @@ -362,51 +362,49 @@ void nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg); -void nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, - void *payload, - nvme_cb_fn_t cb_fn, void *cb_arg); -void nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, - uint32_t nsid, void *payload, - nvme_cb_fn_t cb_fn, void *cb_arg); -void nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr, - uint32_t microseconds, - uint32_t threshold, - nvme_cb_fn_t cb_fn, - void *cb_arg); -void nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr, - struct nvme_error_information_entry *payload, - uint32_t num_entries, /* 0 = max */ - nvme_cb_fn_t cb_fn, - void *cb_arg); -void nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr, - uint32_t nsid, - struct nvme_health_information_page *payload, - nvme_cb_fn_t cb_fn, - void 
*cb_arg); -void nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr, - struct nvme_firmware_page *payload, - nvme_cb_fn_t cb_fn, - void *cb_arg); -void nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr, - struct nvme_qpair *io_que, - nvme_cb_fn_t cb_fn, void *cb_arg); -void nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr, - struct nvme_qpair *io_que, - nvme_cb_fn_t cb_fn, void *cb_arg); -void nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr, - struct nvme_qpair *io_que, - nvme_cb_fn_t cb_fn, void *cb_arg); -void nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr, - struct nvme_qpair *io_que, - nvme_cb_fn_t cb_fn, void *cb_arg); -void nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr, - uint32_t num_queues, nvme_cb_fn_t cb_fn, - void *cb_arg); -void nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr, - uint32_t state, - nvme_cb_fn_t cb_fn, void *cb_arg); -void nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid, - uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_identify_controller( + struct nvme_controller *ctrlr, void *payload, + nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_identify_namespace( + struct nvme_controller *ctrlr, uint32_t nsid, void *payload, + nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_set_interrupt_coalescing( + struct nvme_controller *ctrlr, uint32_t microseconds, + uint32_t threshold, nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_get_error_page( + struct nvme_controller *ctrlr, + struct nvme_error_information_entry *payload, + uint32_t num_entries, /* 0 = max */ + nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_get_health_information_page( + struct nvme_controller *ctrlr, uint32_t nsid, + struct nvme_health_information_page *payload, + nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request 
*nvme_ctrlr_cmd_get_firmware_page( + struct nvme_controller *ctrlr, + struct nvme_firmware_page *payload, + nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_create_io_cq( + struct nvme_controller *ctrlr, struct nvme_qpair *io_que, + nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_create_io_sq( + struct nvme_controller *ctrlr, struct nvme_qpair *io_que, + nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_delete_io_cq( + struct nvme_controller *ctrlr, struct nvme_qpair *io_que, + nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_delete_io_sq( + struct nvme_controller *ctrlr, struct nvme_qpair *io_que, + nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_set_num_queues( + struct nvme_controller *ctrlr, uint32_t num_queues, + nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_set_async_event_config( + struct nvme_controller *ctrlr, uint32_t state, + nvme_cb_fn_t cb_fn, void *cb_arg); +struct nvme_request *nvme_ctrlr_cmd_abort( + struct nvme_controller *ctrlr, uint16_t cid, uint16_t sqid, + nvme_cb_fn_t cb_fn, void *cb_arg); void nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl); @@ -475,8 +473,16 @@ while (!atomic_load_acq_int(&status->done) && --sanity > 0) pause("nvme", 1); +#ifdef NVME_POLL_PANIC_ON_TIMEOUT if (sanity <= 0) panic("NVME polled command failed to complete within 1s."); +#else + if (sanity <= 0) { + struct nvme_request *req = status->req; /* NOTE(review): only valid if the submitter stored its request in status->req — audit every nvme_completion_poll() caller; unupdated callers leave this as uninitialized stack garbage */ + nvme_qpair_manual_complete_request(req->qpair, req, + NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST); + } +#endif } static __inline void