diff --git a/sys/cam/nvme/nvme_all.c b/sys/cam/nvme/nvme_all.c index 3f264e2ff6eb..4145aa16ed49 100644 --- a/sys/cam/nvme/nvme_all.c +++ b/sys/cam/nvme/nvme_all.c @@ -1,432 +1,417 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015 Netflix, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #ifdef _KERNEL #include "opt_scsi.h" #include #include #include #include #include #else #include #include #include #include #ifndef min #define min(a,b) (((a)<(b))?(a):(b)) #endif #endif #include #include #include #include #include #include #include #ifdef _KERNEL #include #include #include #include #endif /* XXX: This duplicates lists in nvme_qpair.c. 
*/ #define OPC_ENTRY(x) [NVME_OPC_ ## x] = #x static const char *admin_opcode[256] = { OPC_ENTRY(DELETE_IO_SQ), OPC_ENTRY(CREATE_IO_SQ), OPC_ENTRY(GET_LOG_PAGE), OPC_ENTRY(DELETE_IO_CQ), OPC_ENTRY(CREATE_IO_CQ), OPC_ENTRY(IDENTIFY), OPC_ENTRY(ABORT), OPC_ENTRY(SET_FEATURES), OPC_ENTRY(GET_FEATURES), OPC_ENTRY(ASYNC_EVENT_REQUEST), OPC_ENTRY(NAMESPACE_MANAGEMENT), OPC_ENTRY(FIRMWARE_ACTIVATE), OPC_ENTRY(FIRMWARE_IMAGE_DOWNLOAD), OPC_ENTRY(DEVICE_SELF_TEST), OPC_ENTRY(NAMESPACE_ATTACHMENT), OPC_ENTRY(KEEP_ALIVE), OPC_ENTRY(DIRECTIVE_SEND), OPC_ENTRY(DIRECTIVE_RECEIVE), OPC_ENTRY(VIRTUALIZATION_MANAGEMENT), OPC_ENTRY(NVME_MI_SEND), OPC_ENTRY(NVME_MI_RECEIVE), OPC_ENTRY(CAPACITY_MANAGEMENT), OPC_ENTRY(LOCKDOWN), OPC_ENTRY(DOORBELL_BUFFER_CONFIG), OPC_ENTRY(FABRICS_COMMANDS), OPC_ENTRY(FORMAT_NVM), OPC_ENTRY(SECURITY_SEND), OPC_ENTRY(SECURITY_RECEIVE), OPC_ENTRY(SANITIZE), OPC_ENTRY(GET_LBA_STATUS), }; static const char *nvm_opcode[256] = { OPC_ENTRY(FLUSH), OPC_ENTRY(WRITE), OPC_ENTRY(READ), OPC_ENTRY(WRITE_UNCORRECTABLE), OPC_ENTRY(COMPARE), OPC_ENTRY(WRITE_ZEROES), OPC_ENTRY(DATASET_MANAGEMENT), OPC_ENTRY(VERIFY), OPC_ENTRY(RESERVATION_REGISTER), OPC_ENTRY(RESERVATION_REPORT), OPC_ENTRY(RESERVATION_ACQUIRE), OPC_ENTRY(RESERVATION_RELEASE), OPC_ENTRY(COPY), }; #define SC_ENTRY(x) [NVME_SC_ ## x] = #x static const char *generic_status[256] = { SC_ENTRY(SUCCESS), SC_ENTRY(INVALID_OPCODE), SC_ENTRY(INVALID_FIELD), SC_ENTRY(COMMAND_ID_CONFLICT), SC_ENTRY(DATA_TRANSFER_ERROR), SC_ENTRY(ABORTED_POWER_LOSS), SC_ENTRY(INTERNAL_DEVICE_ERROR), SC_ENTRY(ABORTED_BY_REQUEST), SC_ENTRY(ABORTED_SQ_DELETION), SC_ENTRY(ABORTED_FAILED_FUSED), SC_ENTRY(ABORTED_MISSING_FUSED), SC_ENTRY(INVALID_NAMESPACE_OR_FORMAT), SC_ENTRY(COMMAND_SEQUENCE_ERROR), SC_ENTRY(INVALID_SGL_SEGMENT_DESCR), SC_ENTRY(INVALID_NUMBER_OF_SGL_DESCR), SC_ENTRY(DATA_SGL_LENGTH_INVALID), SC_ENTRY(METADATA_SGL_LENGTH_INVALID), SC_ENTRY(SGL_DESCRIPTOR_TYPE_INVALID), SC_ENTRY(INVALID_USE_OF_CMB), SC_ENTRY(PRP_OFFET_INVALID), SC_ENTRY(ATOMIC_WRITE_UNIT_EXCEEDED), SC_ENTRY(OPERATION_DENIED), SC_ENTRY(SGL_OFFSET_INVALID), SC_ENTRY(HOST_ID_INCONSISTENT_FORMAT), SC_ENTRY(KEEP_ALIVE_TIMEOUT_EXPIRED), SC_ENTRY(KEEP_ALIVE_TIMEOUT_INVALID), SC_ENTRY(ABORTED_DUE_TO_PREEMPT), SC_ENTRY(SANITIZE_FAILED), SC_ENTRY(SANITIZE_IN_PROGRESS), SC_ENTRY(SGL_DATA_BLOCK_GRAN_INVALID), SC_ENTRY(NOT_SUPPORTED_IN_CMB), SC_ENTRY(NAMESPACE_IS_WRITE_PROTECTED), SC_ENTRY(COMMAND_INTERRUPTED), SC_ENTRY(TRANSIENT_TRANSPORT_ERROR), SC_ENTRY(LBA_OUT_OF_RANGE), SC_ENTRY(CAPACITY_EXCEEDED), SC_ENTRY(NAMESPACE_NOT_READY), SC_ENTRY(RESERVATION_CONFLICT), SC_ENTRY(FORMAT_IN_PROGRESS), }; static const char *command_specific_status[256] = { SC_ENTRY(COMPLETION_QUEUE_INVALID), SC_ENTRY(INVALID_QUEUE_IDENTIFIER), SC_ENTRY(MAXIMUM_QUEUE_SIZE_EXCEEDED), SC_ENTRY(ABORT_COMMAND_LIMIT_EXCEEDED), SC_ENTRY(ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED), SC_ENTRY(INVALID_FIRMWARE_SLOT), SC_ENTRY(INVALID_FIRMWARE_IMAGE), SC_ENTRY(INVALID_INTERRUPT_VECTOR), SC_ENTRY(INVALID_LOG_PAGE), SC_ENTRY(INVALID_FORMAT), SC_ENTRY(FIRMWARE_REQUIRES_RESET), SC_ENTRY(INVALID_QUEUE_DELETION), SC_ENTRY(FEATURE_NOT_SAVEABLE), SC_ENTRY(FEATURE_NOT_CHANGEABLE), SC_ENTRY(FEATURE_NOT_NS_SPECIFIC), SC_ENTRY(FW_ACT_REQUIRES_NVMS_RESET), SC_ENTRY(FW_ACT_REQUIRES_RESET), SC_ENTRY(FW_ACT_REQUIRES_TIME), SC_ENTRY(FW_ACT_PROHIBITED), SC_ENTRY(OVERLAPPING_RANGE), SC_ENTRY(NS_INSUFFICIENT_CAPACITY), SC_ENTRY(NS_ID_UNAVAILABLE), SC_ENTRY(NS_ALREADY_ATTACHED), SC_ENTRY(NS_IS_PRIVATE), SC_ENTRY(NS_NOT_ATTACHED), 
    SC_ENTRY(THIN_PROV_NOT_SUPPORTED),
    SC_ENTRY(CTRLR_LIST_INVALID),
    SC_ENTRY(SELF_TEST_IN_PROGRESS),
    SC_ENTRY(BOOT_PART_WRITE_PROHIB),
    SC_ENTRY(INVALID_CTRLR_ID),
    SC_ENTRY(INVALID_SEC_CTRLR_STATE),
    SC_ENTRY(INVALID_NUM_OF_CTRLR_RESRC),
    SC_ENTRY(INVALID_RESOURCE_ID),
    SC_ENTRY(SANITIZE_PROHIBITED_WPMRE),
    SC_ENTRY(ANA_GROUP_ID_INVALID),
    SC_ENTRY(ANA_ATTACH_FAILED),
    SC_ENTRY(CONFLICTING_ATTRIBUTES),
    SC_ENTRY(INVALID_PROTECTION_INFO),
    SC_ENTRY(ATTEMPTED_WRITE_TO_RO_PAGE),
};

static const char *media_error_status[256] = {
    SC_ENTRY(WRITE_FAULTS),
    SC_ENTRY(UNRECOVERED_READ_ERROR),
    SC_ENTRY(GUARD_CHECK_ERROR),
    SC_ENTRY(APPLICATION_TAG_CHECK_ERROR),
    SC_ENTRY(REFERENCE_TAG_CHECK_ERROR),
    SC_ENTRY(COMPARE_FAILURE),
    SC_ENTRY(ACCESS_DENIED),
    SC_ENTRY(DEALLOCATED_OR_UNWRITTEN),
};

static const char *path_related_status[256] = {
    SC_ENTRY(INTERNAL_PATH_ERROR),
    SC_ENTRY(ASYMMETRIC_ACCESS_PERSISTENT_LOSS),
    SC_ENTRY(ASYMMETRIC_ACCESS_INACCESSIBLE),
    SC_ENTRY(ASYMMETRIC_ACCESS_TRANSITION),
    SC_ENTRY(CONTROLLER_PATHING_ERROR),
    SC_ENTRY(HOST_PATHING_ERROR),
    SC_ENTRY(COMMAND_ABORTED_BY_HOST),
};

void
nvme_ns_cmd(struct ccb_nvmeio *nvmeio, uint8_t cmd, uint32_t nsid,
    uint32_t cdw10, uint32_t cdw11, uint32_t cdw12, uint32_t cdw13,
    uint32_t cdw14, uint32_t cdw15)
{
    bzero(&nvmeio->cmd, sizeof(struct nvme_command));
    nvmeio->cmd.opc = cmd;
    nvmeio->cmd.nsid = htole32(nsid);
    nvmeio->cmd.cdw10 = htole32(cdw10);
    nvmeio->cmd.cdw11 = htole32(cdw11);
    nvmeio->cmd.cdw12 = htole32(cdw12);
    nvmeio->cmd.cdw13 = htole32(cdw13);
    nvmeio->cmd.cdw14 = htole32(cdw14);
    nvmeio->cmd.cdw15 = htole32(cdw15);
}

int
nvme_identify_match(caddr_t identbuffer, caddr_t table_entry)
{
    return 0;
}

void
nvme_print_ident(const struct nvme_controller_data *cdata,
    const struct nvme_namespace_data *data, struct sbuf *sb)
{
    nvme_print_ident_short(cdata, data, sb);
    sbuf_putc(sb, '\n');
}

void
nvme_print_ident_short(const struct nvme_controller_data *cdata,
    const struct nvme_namespace_data *data, struct sbuf *sb)
{
    sbuf_putc(sb, '<');
    cam_strvis_sbuf(sb, cdata->mn, sizeof(cdata->mn),
        CAM_STRVIS_FLAG_NONASCII_SPC);
    sbuf_putc(sb, ' ');
    cam_strvis_sbuf(sb, cdata->fr, sizeof(cdata->fr),
        CAM_STRVIS_FLAG_NONASCII_SPC);
    sbuf_putc(sb, ' ');
    cam_strvis_sbuf(sb, cdata->sn, sizeof(cdata->sn),
        CAM_STRVIS_FLAG_NONASCII_SPC);
    sbuf_putc(sb, '>');
}

const char *
-nvme_op_string(const struct nvme_command *cmd, int admin)
-{
-    const char *s;
-
-    if (admin)
-        s = admin_opcode[cmd->opc];
-    else
-        s = nvm_opcode[cmd->opc];
-    if (s == NULL)
-        return ("UNKNOWN");
-    else
-        return (s);
-}
-
-const char *
-nvme_cmd_string(const struct nvme_command *cmd, char *cmd_string, size_t len)
+nvme_command_string(struct ccb_nvmeio *nvmeio, char *cmd_string, size_t len)
{
    struct sbuf sb;
    int error;

    if (len == 0)
        return ("");

    sbuf_new(&sb, cmd_string, len, SBUF_FIXEDLEN);
-    nvme_cmd_sbuf(cmd, &sb);
+    nvme_command_sbuf(nvmeio, &sb);

    error = sbuf_finish(&sb);
    if (error != 0 &&
#ifdef _KERNEL
        error != ENOMEM)
#else
        errno != ENOMEM)
#endif
        return ("");

    return(sbuf_data(&sb));
}

void
nvme_opcode_sbuf(bool admin, uint8_t opc, struct sbuf *sb)
{
    const char *s, *type;

    if (admin) {
        s = admin_opcode[opc];
        type = "ADMIN";
    } else {
        s = nvm_opcode[opc];
        type = "NVM";
    }
    if (s == NULL)
        sbuf_printf(sb, "%s:0x%02x", type, opc);
    else
        sbuf_printf(sb, "%s", s);
}

void
nvme_cmd_sbuf(const struct nvme_command *cmd, struct sbuf *sb)
{
    /*
     * cid, rsvd areas and mptr not printed, since they are used
     * only internally by the SIM.
     */
    sbuf_printf(sb,
        "opc=%x fuse=%x nsid=%x prp1=%llx prp2=%llx cdw=%x %x %x %x %x %x",
        cmd->opc, cmd->fuse, cmd->nsid,
        (unsigned long long)cmd->prp1, (unsigned long long)cmd->prp2,
        cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
        cmd->cdw15);
}

/*
 * nvme_command_sbuf() returns 0 for success and -1 for failure.
 */
int
nvme_command_sbuf(struct ccb_nvmeio *nvmeio, struct sbuf *sb)
{
    nvme_opcode_sbuf(nvmeio->ccb_h.func_code == XPT_NVME_ADMIN,
        nvmeio->cmd.opc, sb);
    sbuf_cat(sb, ". NCB: ");
    nvme_cmd_sbuf(&nvmeio->cmd, sb);
    return(0);
}

void
nvme_cpl_sbuf(const struct nvme_completion *cpl, struct sbuf *sb)
{
    const char *s, *type;
    uint16_t status;

    status = le16toh(cpl->status);
    switch (NVME_STATUS_GET_SCT(status)) {
    case NVME_SCT_GENERIC:
        s = generic_status[NVME_STATUS_GET_SC(status)];
        type = "GENERIC";
        break;
    case NVME_SCT_COMMAND_SPECIFIC:
        s = command_specific_status[NVME_STATUS_GET_SC(status)];
        type = "COMMAND SPECIFIC";
        break;
    case NVME_SCT_MEDIA_ERROR:
        s = media_error_status[NVME_STATUS_GET_SC(status)];
        type = "MEDIA ERROR";
        break;
    case NVME_SCT_PATH_RELATED:
        s = path_related_status[NVME_STATUS_GET_SC(status)];
        type = "PATH RELATED";
        break;
    case NVME_SCT_VENDOR_SPECIFIC:
        s = NULL;
        type = "VENDOR SPECIFIC";
        break;
    default:
        s = "RESERVED";
        type = NULL;
        break;
    }
    if (s == NULL)
        sbuf_printf(sb, "%s:0x%02x", type, NVME_STATUS_GET_SC(status));
    else
        sbuf_printf(sb, "%s", s);
    if (NVME_STATUS_GET_M(status) != 0)
        sbuf_printf(sb, " M");
    if (NVME_STATUS_GET_DNR(status) != 0)
        sbuf_printf(sb, " DNR");
}

/*
 * nvme_status_sbuf() returns 0 for success and -1 for failure.
 */
int
nvme_status_sbuf(struct ccb_nvmeio *nvmeio, struct sbuf *sb)
{
    nvme_cpl_sbuf(&nvmeio->cpl, sb);
    return (0);
}

#ifdef _KERNEL
const void *
nvme_get_identify_cntrl(struct cam_periph *periph)
{
    struct cam_ed *device;

    device = periph->path->device;
    return device->nvme_cdata;
}

const void *
nvme_get_identify_ns(struct cam_periph *periph)
{
    struct cam_ed *device;

    device = periph->path->device;
    return device->nvme_data;
}
#endif
diff --git a/sys/cam/nvme/nvme_all.h b/sys/cam/nvme/nvme_all.h
index a32668ddc1fb..17c068b825be 100644
--- a/sys/cam/nvme/nvme_all.h
+++ b/sys/cam/nvme/nvme_all.h
@@ -1,55 +1,54 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CAM_NVME_NVME_ALL_H
#define CAM_NVME_NVME_ALL_H 1

#include

struct ccb_nvmeio;

void nvme_ns_cmd(struct ccb_nvmeio *nvmeio, uint8_t cmd, uint32_t nsid,
    uint32_t cdw10, uint32_t cdw11, uint32_t cdw12, uint32_t cdw13,
    uint32_t cdw14, uint32_t cdw15);

int nvme_identify_match(caddr_t identbuffer, caddr_t table_entry);

struct sbuf;

void nvme_print_ident(const struct nvme_controller_data *,
    const struct nvme_namespace_data *, struct sbuf *);
void nvme_print_ident_short(const struct nvme_controller_data *,
    const struct nvme_namespace_data *, struct sbuf *);
-const char *nvme_op_string(const struct nvme_command *, int admin);
-const char *nvme_cmd_string(const struct nvme_command *, char *, size_t);
void nvme_opcode_sbuf(bool admin, uint8_t opc, struct sbuf *sb);
void nvme_cmd_sbuf(const struct nvme_command *, struct sbuf *sb);
int nvme_command_sbuf(struct ccb_nvmeio *nvmeio, struct sbuf *sb);
+const char *nvme_command_string(struct ccb_nvmeio *nvmeio, char *, size_t);
void nvme_cpl_sbuf(const struct nvme_completion *cpl, struct sbuf *sbuf);
int nvme_status_sbuf(struct ccb_nvmeio *nvmeio, struct sbuf *sb);
const void *nvme_get_identify_cntrl(struct cam_periph *);
const void *nvme_get_identify_ns(struct cam_periph *);

#endif /* CAM_NVME_NVME_ALL_H */
diff --git a/sys/cam/nvme/nvme_xpt.c b/sys/cam/nvme/nvme_xpt.c
index 47c0bde1df37..bac9e6441040 100644
--- a/sys/cam/nvme/nvme_xpt.c
+++ b/sys/cam/nvme/nvme_xpt.c
@@ -1,846 +1,845 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
* * derived from ata_xpt.c: Copyright (c) 2009 Alexander Motin */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for xpt_print below */ struct nvme_quirk_entry { u_int quirks; #define CAM_QUIRK_MAXTAGS 1 u_int mintags; u_int maxtags; }; /* Not even sure why we need this */ static periph_init_t nvme_probe_periph_init; static struct periph_driver nvme_probe_driver = { nvme_probe_periph_init, "nvme_probe", TAILQ_HEAD_INITIALIZER(nvme_probe_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(nvme_probe, nvme_probe_driver); typedef enum { NVME_PROBE_IDENTIFY_CD, NVME_PROBE_IDENTIFY_NS, NVME_PROBE_DONE, NVME_PROBE_INVALID } nvme_probe_action; static char *nvme_probe_action_text[] = { "NVME_PROBE_IDENTIFY_CD", "NVME_PROBE_IDENTIFY_NS", "NVME_PROBE_DONE", "NVME_PROBE_INVALID" }; #define NVME_PROBE_SET_ACTION(softc, newaction) \ do { \ char **text; \ text = nvme_probe_action_text; \ CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE, \ ("Probe %s to %s\n", text[(softc)->action], \ text[(newaction)])); \ (softc)->action = (newaction); \ } while(0) typedef enum { NVME_PROBE_NO_ANNOUNCE = 0x04 } nvme_probe_flags; typedef struct { TAILQ_HEAD(, ccb_hdr) request_ccbs; union { struct nvme_controller_data cd; struct nvme_namespace_data ns; }; nvme_probe_action action; nvme_probe_flags flags; int restart; struct cam_periph *periph; } nvme_probe_softc; static struct nvme_quirk_entry nvme_quirk_table[] = { { // { // T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, // /*vendor*/"*", /*product*/"*", /*revision*/"*" // }, .quirks = 0, .mintags = 0, .maxtags = 0 }, }; static const int nvme_quirk_table_size = sizeof(nvme_quirk_table) / sizeof(*nvme_quirk_table); static cam_status nvme_probe_register(struct cam_periph *periph, void *arg); static void nvme_probe_schedule(struct cam_periph *nvme_probe_periph); static void nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb); static void nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb); static void nvme_probe_cleanup(struct cam_periph *periph); //static void nvme_find_quirk(struct cam_ed *device); static void nvme_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *ccb); static struct cam_ed * nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static void nvme_device_transport(struct cam_path *path); static void nvme_dev_async(uint32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static void nvme_action(union ccb *start_ccb); static void nvme_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb); static void nvme_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb); static void nvme_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb); static void nvme_proto_debug_out(union ccb *ccb); static struct xpt_xport_ops nvme_xport_ops = { .alloc_device = nvme_alloc_device, .action = nvme_action, .async = nvme_dev_async, .announce_sbuf = nvme_announce_periph_sbuf, }; #define NVME_XPT_XPORT(x, X) \ static struct xpt_xport nvme_xport_ ## x = { \ .xport = XPORT_ ## X, \ .name = #x, \ .ops = &nvme_xport_ops, \ }; \ CAM_XPT_XPORT(nvme_xport_ ## x); NVME_XPT_XPORT(nvme, NVME); NVME_XPT_XPORT(nvmf, NVMF); #undef NVME_XPT_XPORT static struct xpt_proto_ops nvme_proto_ops = { .announce_sbuf = 
nvme_proto_announce_sbuf, .denounce_sbuf = nvme_proto_denounce_sbuf, .debug_out = nvme_proto_debug_out, }; static struct xpt_proto nvme_proto = { .proto = PROTO_NVME, .name = "nvme", .ops = &nvme_proto_ops, }; CAM_XPT_PROTO(nvme_proto); static void nvme_probe_periph_init(void) { } static cam_status nvme_probe_register(struct cam_periph *periph, void *arg) { union ccb *request_ccb; /* CCB representing the probe request */ nvme_probe_softc *softc; request_ccb = (union ccb *)arg; if (request_ccb == NULL) { printf( "nvme_probe_register: no probe CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (nvme_probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT); if (softc == NULL) { printf( "nvme_probe_register: Unable to probe new device. Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } TAILQ_INIT(&softc->request_ccbs); TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->flags = 0; periph->softc = softc; softc->periph = periph; softc->action = NVME_PROBE_INVALID; if (cam_periph_acquire(periph) != 0) return (CAM_REQ_CMP_ERR); CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n")); // nvme_device_transport(periph->path); nvme_probe_schedule(periph); return(CAM_REQ_CMP); } static void nvme_probe_schedule(struct cam_periph *periph) { union ccb *ccb; nvme_probe_softc *softc; softc = (nvme_probe_softc *)periph->softc; ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD); if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) softc->flags |= NVME_PROBE_NO_ANNOUNCE; else softc->flags &= ~NVME_PROBE_NO_ANNOUNCE; xpt_schedule(periph, CAM_PRIORITY_XPT); } static void nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb) { struct ccb_nvmeio *nvmeio; nvme_probe_softc *softc; lun_id_t lun; CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_start\n")); softc = (nvme_probe_softc *)periph->softc; nvmeio = &start_ccb->nvmeio; lun = xpt_path_lun_id(periph->path); if (softc->restart) { softc->restart = 0; NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD); } switch (softc->action) { case NVME_PROBE_IDENTIFY_CD: cam_fill_nvmeadmin(nvmeio, 0, /* retries */ nvme_probe_done, /* cbfcnp */ CAM_DIR_IN, /* flags */ (uint8_t *)&softc->cd, /* data_ptr */ sizeof(softc->cd), /* dxfer_len */ 30 * 1000); /* timeout 30s */ nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, 0, 1, 0, 0, 0, 0, 0); break; case NVME_PROBE_IDENTIFY_NS: cam_fill_nvmeadmin(nvmeio, 0, /* retries */ nvme_probe_done, /* cbfcnp */ CAM_DIR_IN, /* flags */ (uint8_t *)&softc->ns, /* data_ptr */ sizeof(softc->ns), /* dxfer_len */ 30 * 1000); /* timeout 30s */ nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, lun, 0, 0, 0, 0, 0, 0); break; default: panic("nvme_probe_start: invalid action state 0x%x\n", softc->action); } start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; xpt_action(start_ccb); } static void nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb) { struct nvme_namespace_data *nvme_data; struct nvme_controller_data *nvme_cdata; nvme_probe_softc *softc; struct cam_path *path; struct scsi_vpd_device_id *did; struct scsi_vpd_id_descriptor *idd; uint32_t priority; int found = 1, e, g, len; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_done\n")); softc = (nvme_probe_softc *)periph->softc; path = done_ccb->ccb_h.path; priority = done_ccb->ccb_h.pinfo.priority; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, softc->restart ? 
(SF_NO_RECOVERY | SF_NO_RETRY) : 0 ) == ERESTART) { out: /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ cam_release_devq(path, 0, 0, 0, FALSE); return; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); } /* * If we get to this point, we got an error status back * from the inquiry and the error status doesn't require * automatically retrying the command. Therefore, the * inquiry failed. If we had inquiry information before * for this device, but this latest inquiry command failed, * the device has probably gone away. If this device isn't * already marked unconfigured, notify the peripheral * drivers that this device is no more. */ device_fail: if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) xpt_async(AC_LOST_DEVICE, path, NULL); NVME_PROBE_SET_ACTION(softc, NVME_PROBE_INVALID); found = 0; goto done; } if (softc->restart) goto done; switch (softc->action) { case NVME_PROBE_IDENTIFY_CD: nvme_controller_data_swapbytes(&softc->cd); nvme_cdata = path->device->nvme_cdata; if (nvme_cdata == NULL) { nvme_cdata = malloc(sizeof(*nvme_cdata), M_CAMXPT, M_NOWAIT); if (nvme_cdata == NULL) { xpt_print(path, "Can't allocate memory"); goto device_fail; } } bcopy(&softc->cd, nvme_cdata, sizeof(*nvme_cdata)); path->device->nvme_cdata = nvme_cdata; /* Save/update serial number. */ if (path->device->serial_num != NULL) { free(path->device->serial_num, M_CAMXPT); path->device->serial_num = NULL; path->device->serial_num_len = 0; } path->device->serial_num = (uint8_t *) malloc(NVME_SERIAL_NUMBER_LENGTH + 1, M_CAMXPT, M_NOWAIT); if (path->device->serial_num != NULL) { cam_strvis_flag(path->device->serial_num, nvme_cdata->sn, sizeof(nvme_cdata->sn), NVME_SERIAL_NUMBER_LENGTH + 1, CAM_STRVIS_FLAG_NONASCII_SPC); path->device->serial_num_len = strlen(path->device->serial_num); } // nvme_find_quirk(path->device); nvme_device_transport(path); NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_NS); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; case NVME_PROBE_IDENTIFY_NS: nvme_namespace_data_swapbytes(&softc->ns); /* Check that the namespace exists. */ if (softc->ns.nsze == 0) goto device_fail; nvme_data = path->device->nvme_data; if (nvme_data == NULL) { nvme_data = malloc(sizeof(*nvme_data), M_CAMXPT, M_NOWAIT); if (nvme_data == NULL) { xpt_print(path, "Can't allocate memory"); goto device_fail; } } bcopy(&softc->ns, nvme_data, sizeof(*nvme_data)); path->device->nvme_data = nvme_data; /* Save/update device_id based on NGUID and/or EUI64. 
*/ if (path->device->device_id != NULL) { free(path->device->device_id, M_CAMXPT); path->device->device_id = NULL; path->device->device_id_len = 0; } len = 0; for (g = 0; g < sizeof(nvme_data->nguid); g++) { if (nvme_data->nguid[g] != 0) break; } if (g < sizeof(nvme_data->nguid)) len += sizeof(struct scsi_vpd_id_descriptor) + 16; for (e = 0; e < sizeof(nvme_data->eui64); e++) { if (nvme_data->eui64[e] != 0) break; } if (e < sizeof(nvme_data->eui64)) len += sizeof(struct scsi_vpd_id_descriptor) + 8; if (len > 0) { path->device->device_id = (uint8_t *) malloc(SVPD_DEVICE_ID_HDR_LEN + len, M_CAMXPT, M_NOWAIT); } if (path->device->device_id != NULL) { did = (struct scsi_vpd_device_id *)path->device->device_id; did->device = SID_QUAL_LU_CONNECTED | T_DIRECT; did->page_code = SVPD_DEVICE_ID; scsi_ulto2b(len, did->length); idd = (struct scsi_vpd_id_descriptor *)(did + 1); if (g < sizeof(nvme_data->nguid)) { idd->proto_codeset = SVPD_ID_CODESET_BINARY; idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64; idd->length = 16; bcopy(nvme_data->nguid, idd->identifier, 16); idd = (struct scsi_vpd_id_descriptor *) &idd->identifier[16]; } if (e < sizeof(nvme_data->eui64)) { idd->proto_codeset = SVPD_ID_CODESET_BINARY; idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64; idd->length = 8; bcopy(nvme_data->eui64, idd->identifier, 8); } path->device->device_id_len = SVPD_DEVICE_ID_HDR_LEN + len; } if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, path, done_ccb); } NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE); break; default: panic("nvme_probe_done: invalid action state 0x%x\n", softc->action); } done: if (softc->restart) { softc->restart = 0; xpt_release_ccb(done_ccb); nvme_probe_schedule(periph); goto out; } xpt_release_ccb(done_ccb); CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n")); while ((done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) { TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe); done_ccb->ccb_h.status = found ? CAM_REQ_CMP : CAM_REQ_CMP_ERR; xpt_done(done_ccb); } /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. 
*/ cam_release_devq(path, 0, 0, 0, FALSE); cam_periph_invalidate(periph); cam_periph_release_locked(periph); } static void nvme_probe_cleanup(struct cam_periph *periph) { free(periph->softc, M_CAMXPT); } #if 0 /* XXX should be used, don't delete */ static void nvme_find_quirk(struct cam_ed *device) { struct nvme_quirk_entry *quirk; caddr_t match; match = cam_quirkmatch((caddr_t)&device->nvme_data, (caddr_t)nvme_quirk_table, nvme_quirk_table_size, sizeof(*nvme_quirk_table), nvme_identify_match); if (match == NULL) panic("xpt_find_quirk: device didn't match wildcard entry!!"); quirk = (struct nvme_quirk_entry *)match; device->quirk = quirk; if (quirk->quirks & CAM_QUIRK_MAXTAGS) { device->mintags = quirk->mintags; device->maxtags = quirk->maxtags; } } #endif static void nvme_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *request_ccb) { struct ccb_pathinq cpi; cam_status status; struct cam_periph *old_periph; int lock; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun\n")); xpt_path_inq(&cpi, path); if (cpi.ccb_h.status != CAM_REQ_CMP) { if (request_ccb != NULL) { request_ccb->ccb_h.status = cpi.ccb_h.status; xpt_done(request_ccb); } return; } if (xpt_path_lun_id(path) == CAM_LUN_WILDCARD) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun ignoring bus\n")); request_ccb->ccb_h.status = CAM_REQ_CMP; /* XXX signal error ? */ xpt_done(request_ccb); return; } lock = (xpt_path_owned(path) == 0); if (lock) xpt_path_lock(path); if ((old_periph = cam_periph_find(path, "nvme_probe")) != NULL) { if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) { nvme_probe_softc *softc; softc = (nvme_probe_softc *)old_periph->softc; TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->restart = 1; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("restarting nvme_probe device\n")); } else { request_ccb->ccb_h.status = CAM_REQ_CMP_ERR; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Failing to restart nvme_probe device\n")); xpt_done(request_ccb); } } else { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Adding nvme_probe device\n")); status = cam_periph_alloc(nvme_probe_register, NULL, nvme_probe_cleanup, nvme_probe_start, "nvme_probe", CAM_PERIPH_BIO, request_ccb->ccb_h.path, NULL, 0, request_ccb); if (status != CAM_REQ_CMP) { xpt_print(path, "xpt_scan_lun: cam_alloc_periph returned an error, can't continue probe\n"); request_ccb->ccb_h.status = status; xpt_done(request_ccb); } } if (lock) xpt_path_unlock(path); } static struct cam_ed * nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct nvme_quirk_entry *quirk; struct cam_ed *device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); /* * Take the default quirk entry until we have inquiry * data from nvme and can determine a better quirk to use. */ quirk = &nvme_quirk_table[nvme_quirk_table_size - 1]; device->quirk = (void *)quirk; device->mintags = 0; device->maxtags = 0; device->inq_flags = 0; device->queue_flags = 0; device->device_id = NULL; device->device_id_len = 0; device->serial_num = NULL; device->serial_num_len = 0; return (device); } static void nvme_device_transport(struct cam_path *path) { struct ccb_pathinq cpi; struct ccb_trans_settings cts; /* XXX get data from nvme namespace and other info ??? 
*/ /* Get transport information from the SIM */ xpt_path_inq(&cpi, path); path->device->transport = cpi.transport; path->device->transport_version = cpi.transport_version; path->device->protocol = cpi.protocol; path->device->protocol_version = cpi.protocol_version; /* Tell the controller what we think */ memset(&cts, 0, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.transport = path->device->transport; cts.transport_version = path->device->transport_version; cts.protocol = path->device->protocol; cts.protocol_version = path->device->protocol_version; cts.proto_specific.valid = 0; cts.xport_specific.valid = 0; xpt_action((union ccb *)&cts); } static void nvme_dev_advinfo(union ccb *start_ccb) { struct cam_ed *device; struct ccb_dev_advinfo *cdai; off_t amt; xpt_path_assert(start_ccb->ccb_h.path, MA_OWNED); start_ccb->ccb_h.status = CAM_REQ_INVALID; device = start_ccb->ccb_h.path->device; cdai = &start_ccb->cdai; switch(cdai->buftype) { case CDAI_TYPE_SCSI_DEVID: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->device_id_len; if (device->device_id_len == 0) break; amt = device->device_id_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->device_id, amt); break; case CDAI_TYPE_SERIAL_NUM: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->serial_num_len; if (device->serial_num_len == 0) break; amt = device->serial_num_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->serial_num, amt); break; case CDAI_TYPE_PHYS_PATH: if (cdai->flags & CDAI_FLAG_STORE) { if (device->physpath != NULL) { free(device->physpath, M_CAMXPT); device->physpath = NULL; device->physpath_len = 0; } /* Clear existing buffer if zero length */ if (cdai->bufsiz == 0) break; device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT); if (device->physpath == NULL) { start_ccb->ccb_h.status = CAM_REQ_ABORTED; return; } device->physpath_len = cdai->bufsiz; memcpy(device->physpath, cdai->buf, cdai->bufsiz); } else { cdai->provsiz = device->physpath_len; if (device->physpath_len == 0) break; amt = device->physpath_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->physpath, amt); } break; case CDAI_TYPE_NVME_CNTRL: if (cdai->flags & CDAI_FLAG_STORE) return; amt = sizeof(struct nvme_controller_data); cdai->provsiz = amt; if (amt > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->nvme_cdata, amt); break; case CDAI_TYPE_NVME_NS: if (cdai->flags & CDAI_FLAG_STORE) return; amt = sizeof(struct nvme_namespace_data); cdai->provsiz = amt; if (amt > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->nvme_data, amt); break; default: return; } start_ccb->ccb_h.status = CAM_REQ_CMP; if (cdai->flags & CDAI_FLAG_STORE) { xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path, (void *)(uintptr_t)cdai->buftype); } } static void nvme_action(union ccb *start_ccb) { CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_action: func= %#x\n", start_ccb->ccb_h.func_code)); switch (start_ccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_SCAN_TGT: case XPT_SCAN_LUN: nvme_scan_lun(start_ccb->ccb_h.path->periph, start_ccb->ccb_h.path, start_ccb->crcn.flags, start_ccb); break; case XPT_DEV_ADVINFO: nvme_dev_advinfo(start_ccb); break; default: xpt_action_default(start_ccb); break; } } /* * Handle any per-device event notifications that require action by the XPT. 
 */
static void
nvme_dev_async(uint32_t async_code, struct cam_eb *bus, struct cam_et *target,
    struct cam_ed *device, void *async_arg)
{
    /*
     * We only need to handle events for real devices.
     */
    if (target->target_id == CAM_TARGET_WILDCARD ||
        device->lun_id == CAM_LUN_WILDCARD)
        return;

    if (async_code == AC_LOST_DEVICE &&
        (device->flags & CAM_DEV_UNCONFIGURED) == 0) {
        device->flags |= CAM_DEV_UNCONFIGURED;
        xpt_release_device(device);
    }
}

static void
nvme_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
{
    struct ccb_pathinq cpi;
    struct ccb_trans_settings cts;
    struct cam_path *path = periph->path;
    struct ccb_trans_settings_nvme *nvmex;

    cam_periph_assert(periph, MA_OWNED);

    /* Ask the SIM for connection details */
    memset(&cts, 0, sizeof(cts));
    xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
    cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
    cts.type = CTS_TYPE_CURRENT_SETTINGS;
    xpt_action((union ccb*)&cts);
    if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
        return;

    /* Ask the SIM for its base transfer speed */
    xpt_path_inq(&cpi, periph->path);
    sbuf_printf(sb, "%s%d: nvme version %d.%d",
        periph->periph_name, periph->unit_number,
        NVME_MAJOR(cts.protocol_version),
        NVME_MINOR(cts.protocol_version));
    if (cts.transport == XPORT_NVME) {
        nvmex = &cts.proto_specific.nvme;
        if (nvmex->valid & CTS_NVME_VALID_LINK)
            sbuf_printf(sb,
                " x%d (max x%d) lanes PCIe Gen%d (max Gen%d) link",
                nvmex->lanes, nvmex->max_lanes,
                nvmex->speed, nvmex->max_speed);
    }
    sbuf_putc(sb, '\n');
}

static void
nvme_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb)
{
    nvme_print_ident(device->nvme_cdata, device->nvme_data, sb);
}

static void
nvme_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb)
{
    nvme_print_ident_short(device->nvme_cdata, device->nvme_data, sb);
}

static void
nvme_proto_debug_out(union ccb *ccb)
{
-    char cdb_str[(sizeof(struct nvme_command) * 3) + 1];
+    char command_str[128];

    if (ccb->ccb_h.func_code != XPT_NVME_IO &&
        ccb->ccb_h.func_code != XPT_NVME_ADMIN)
        return;

    CAM_DEBUG(ccb->ccb_h.path,
-        CAM_DEBUG_CDB,("%s. NCB: %s\n", nvme_op_string(&ccb->nvmeio.cmd,
-        ccb->ccb_h.func_code == XPT_NVME_ADMIN),
-        nvme_cmd_string(&ccb->nvmeio.cmd, cdb_str, sizeof(cdb_str))));
+        CAM_DEBUG_CDB,("%s\n", nvme_command_string(&ccb->nvmeio,
+        command_str, sizeof(command_str))));
}
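
With this change, a caller that previously paired nvme_op_string() with nvme_cmd_string() can produce the same one-line command description through the single nvme_command_string() helper, as nvme_proto_debug_out() now does; nvme_command_sbuf() picks the admin vs. NVM opcode table from the CCB's func_code. A minimal usage sketch follows: the function name example_log_nvme_ccb() and the printf() destination are illustrative only and not part of this patch, and the 128-byte buffer simply mirrors the size chosen above.

/*
 * Illustrative sketch (not part of the patch): format an NVMe CCB for a
 * one-line debug printout using the consolidated helper.  Works for both
 * I/O and admin CCBs because nvme_command_sbuf() keys off ccb_h.func_code.
 */
static void
example_log_nvme_ccb(union ccb *ccb)
{
    char command_str[128];

    if (ccb->ccb_h.func_code != XPT_NVME_IO &&
        ccb->ccb_h.func_code != XPT_NVME_ADMIN)
        return;

    printf("%s\n",
        nvme_command_string(&ccb->nvmeio, command_str,
        sizeof(command_str)));
}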