D44723: ctl_backend_block: Add support for NVMe
D44723.id136819.diff (23 KB)

diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c
--- a/sys/cam/ctl/ctl_backend_block.c
+++ b/sys/cam/ctl/ctl_backend_block.c
@@ -119,6 +119,7 @@
((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
#define ARGS(io) \
((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])
+#define DSM_RANGE(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].integer)
SDT_PROVIDER_DEFINE(cbb);
@@ -390,6 +391,39 @@
}
}
+static void
+ctl_be_block_io_error(union ctl_io *io, int bio_cmd, uint16_t retry_count)
+{
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ if (bio_cmd == BIO_FLUSH) {
+ /* XXX KDM is there a better error here? */
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 1,
+ retry_count);
+ } else {
+ ctl_set_medium_error(&io->scsiio, bio_cmd == BIO_READ);
+ }
+ break;
+ case CTL_IO_NVME:
+ switch (bio_cmd) {
+ case BIO_FLUSH:
+ case BIO_WRITE:
+ ctl_nvme_set_write_fault(&io->nvmeio);
+ break;
+ case BIO_READ:
+ ctl_nvme_set_unrecoverable_read_error(&io->nvmeio);
+ break;
+ default:
+ ctl_nvme_set_internal_error(&io->nvmeio);
+ break;
+ }
+ break;
+ default:
+ __assert_unreachable();
+ }
+}
+
static size_t
cmp(uint8_t *a, uint8_t *b, size_t size)
{
@@ -408,7 +442,6 @@
struct ctl_be_block_io *beio;
uint64_t off, res;
int i;
- uint8_t info[8];
beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
off = 0;
@@ -421,15 +454,9 @@
break;
}
if (i < beio->num_segs) {
- scsi_u64to8b(off, info);
- ctl_set_sense(&io->scsiio, /*current_error*/ 1,
- /*sense_key*/ SSD_KEY_MISCOMPARE,
- /*asc*/ 0x1D, /*ascq*/ 0x00,
- /*type*/ SSD_ELEM_INFO,
- /*size*/ sizeof(info), /*data*/ &info,
- /*type*/ SSD_ELEM_NONE);
+ ctl_io_set_compare_failure(io, off);
} else
- ctl_set_success(&io->scsiio);
+ ctl_io_set_success(io);
}
static int
@@ -442,7 +469,7 @@
beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
DPRINTF("entered\n");
- io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
+ ctl_add_kern_rel_offset(io, ctl_kern_data_len(io));
/*
* We set status at this point for read and compare commands.
@@ -451,7 +478,7 @@
(io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) {
lbalen = ARGS(io);
if (lbalen->flags & CTL_LLF_READ) {
- ctl_set_success(&io->scsiio);
+ ctl_io_set_success(io);
} else if (lbalen->flags & CTL_LLF_COMPARE) {
/* We have two data blocks ready for comparison. */
ctl_be_block_compare(io);
@@ -545,19 +572,14 @@
error = beio->first_error;
if (error != 0) {
if (error == EOPNOTSUPP) {
- ctl_set_invalid_opcode(&io->scsiio);
+ ctl_io_set_invalid_opcode(io);
} else if (error == ENOSPC || error == EDQUOT) {
- ctl_set_space_alloc_fail(&io->scsiio);
+ ctl_io_set_space_alloc_fail(io);
} else if (error == EROFS || error == EACCES) {
- ctl_set_hw_write_protected(&io->scsiio);
- } else if (beio->bio_cmd == BIO_FLUSH) {
- /* XXX KDM is there is a better error here? */
- ctl_set_internal_failure(&io->scsiio,
- /*sks_valid*/ 1,
- /*retry_count*/ 0xbad2);
+ ctl_io_set_hw_write_protected(io);
} else {
- ctl_set_medium_error(&io->scsiio,
- beio->bio_cmd == BIO_READ);
+ ctl_be_block_io_error(io, beio->bio_cmd,
+ /*retry_count*/ 0xbad2);
}
ctl_complete_beio(beio);
return;
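
Aside: the hunk above routes every errno that is not handled specifically (EOPNOTSUPP, ENOSPC/EDQUOT, EROFS/EACCES) through the new ctl_be_block_io_error() helper, which picks a SCSI or NVMe status from the I/O type and BIO command. A minimal stand-alone sketch of the resulting mapping follows; the sketch_* names are hypothetical and this is not CTL code.

/*
 * Illustrative sketch only -- not part of this diff.  Summarizes how the
 * completion paths translate an errno into a transport-level status
 * after this change.
 */
#include <errno.h>
#include <stdio.h>

enum sketch_cmd { CMD_READ, CMD_WRITE, CMD_FLUSH, CMD_DELETE };

static const char *
sketch_status(int error, int is_nvme, enum sketch_cmd cmd)
{
	if (error == EOPNOTSUPP)
		return ("invalid opcode");
	if (error == ENOSPC || error == EDQUOT)
		return ("space allocation failure");
	if (error == EROFS || error == EACCES)
		return ("hardware write protected");
	if (!is_nvme) {
		/* SCSI: flush errors become INTERNAL FAILURE, I/O errors MEDIUM ERROR. */
		return (cmd == CMD_FLUSH ? "internal failure" : "medium error");
	}
	/* NVMe: pick a command-specific status. */
	switch (cmd) {
	case CMD_FLUSH:
	case CMD_WRITE:
		return ("write fault");
	case CMD_READ:
		return ("unrecoverable read error");
	default:
		return ("internal error");
	}
}

int
main(void)
{
	printf("NVMe read EIO  -> %s\n", sketch_status(EIO, 1, CMD_READ));
	printf("SCSI flush EIO -> %s\n", sketch_status(EIO, 0, CMD_FLUSH));
	return (0);
}
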
@@ -571,12 +593,12 @@
|| (beio->bio_cmd == BIO_FLUSH)
|| (beio->bio_cmd == BIO_DELETE)
|| (ARGS(io)->flags & CTL_LLF_VERIFY)) {
- ctl_set_success(&io->scsiio);
+ ctl_io_set_success(io);
ctl_complete_beio(beio);
} else {
if ((ARGS(io)->flags & CTL_LLF_READ) &&
beio->beio_cont == NULL) {
- ctl_set_success(&io->scsiio);
+ ctl_io_set_success(io);
if (cbe_lun->serseq >= CTL_LUN_SERSEQ_SOFT)
ctl_serseq_done(io);
}
@@ -614,12 +636,10 @@
mtx_unlock(&be_lun->io_lock);
if (error == 0)
- ctl_set_success(&io->scsiio);
+ ctl_io_set_success(io);
else {
- /* XXX KDM is there is a better error here? */
- ctl_set_internal_failure(&io->scsiio,
- /*sks_valid*/ 1,
- /*retry_count*/ 0xbad1);
+ ctl_be_block_io_error(io, BIO_FLUSH,
+ /*retry_count*/ 0xbad1);
}
ctl_complete_beio(beio);
@@ -704,7 +724,7 @@
SDT_PROBE0(cbb, , read, file_done);
if (error == 0 && xuio.uio_resid > 0) {
/*
- * If we red less then requested (EOF), then
+ * If we read less than requested (EOF), then
* we should clean the rest of the buffer.
*/
s = beio->io_len - xuio.uio_resid;
@@ -759,12 +779,11 @@
*/
if (error != 0) {
if (error == ENOSPC || error == EDQUOT) {
- ctl_set_space_alloc_fail(&io->scsiio);
+ ctl_io_set_space_alloc_fail(io);
} else if (error == EROFS || error == EACCES) {
- ctl_set_hw_write_protected(&io->scsiio);
+ ctl_io_set_hw_write_protected(io);
} else {
- ctl_set_medium_error(&io->scsiio,
- beio->bio_cmd == BIO_READ);
+ ctl_be_block_io_error(io, beio->bio_cmd, 0);
}
ctl_complete_beio(beio);
return;
@@ -776,12 +795,12 @@
*/
if ((beio->bio_cmd == BIO_WRITE) ||
(ARGS(io)->flags & CTL_LLF_VERIFY)) {
- ctl_set_success(&io->scsiio);
+ ctl_io_set_success(io);
ctl_complete_beio(beio);
} else {
if ((ARGS(io)->flags & CTL_LLF_READ) &&
beio->beio_cont == NULL) {
- ctl_set_success(&io->scsiio);
+ ctl_io_set_success(io);
if (cbe_lun->serseq > CTL_LUN_SERSEQ_SOFT)
ctl_serseq_done(io);
}
@@ -801,6 +820,9 @@
DPRINTF("entered\n");
+ KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
+ ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type));
+
off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize;
vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
error = VOP_IOCTL(be_lun->vn, FIOSEEKHOLE, &off,
@@ -918,18 +940,18 @@
*/
switch (error) {
case 0:
- ctl_set_success(&io->scsiio);
+ ctl_io_set_success(io);
break;
case ENOSPC:
case EDQUOT:
- ctl_set_space_alloc_fail(&io->scsiio);
+ ctl_io_set_space_alloc_fail(io);
break;
case EROFS:
case EACCES:
- ctl_set_hw_write_protected(&io->scsiio);
+ ctl_io_set_hw_write_protected(io);
break;
default:
- ctl_set_medium_error(&io->scsiio, false);
+ ctl_be_block_io_error(io, BIO_DELETE, 0);
}
ctl_complete_beio(beio);
}
@@ -1008,12 +1030,11 @@
*/
if (error != 0) {
if (error == ENOSPC || error == EDQUOT) {
- ctl_set_space_alloc_fail(&io->scsiio);
+ ctl_io_set_space_alloc_fail(io);
} else if (error == EROFS || error == EACCES) {
- ctl_set_hw_write_protected(&io->scsiio);
+ ctl_io_set_hw_write_protected(io);
} else {
- ctl_set_medium_error(&io->scsiio,
- beio->bio_cmd == BIO_READ);
+ ctl_be_block_io_error(io, beio->bio_cmd, 0);
}
ctl_complete_beio(beio);
return;
@@ -1025,12 +1046,12 @@
*/
if ((beio->bio_cmd == BIO_WRITE) ||
(ARGS(io)->flags & CTL_LLF_VERIFY)) {
- ctl_set_success(&io->scsiio);
+ ctl_io_set_success(io);
ctl_complete_beio(beio);
} else {
if ((ARGS(io)->flags & CTL_LLF_READ) &&
beio->beio_cont == NULL) {
- ctl_set_success(&io->scsiio);
+ ctl_io_set_success(io);
if (cbe_lun->serseq > CTL_LUN_SERSEQ_SOFT)
ctl_serseq_done(io);
}
@@ -1052,6 +1073,9 @@
DPRINTF("entered\n");
+ KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
+ ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type));
+
csw = devvn_refthread(be_lun->vn, &dev, &ref);
if (csw == NULL) {
status = 0; /* unknown up to the end */
@@ -1306,6 +1330,39 @@
return (arg.value.off);
}
+static void
+ctl_be_block_namespace_data(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
+ struct nvme_namespace_data *nsdata;
+
+ nsdata = (struct nvme_namespace_data *)io->nvmeio.kern_data_ptr;
+ memset(nsdata, 0, sizeof(*nsdata));
+ nsdata->nsze = htole64(be_lun->size_blocks);
+ nsdata->ncap = nsdata->nsze;
+ nsdata->nuse = nsdata->nsze;
+ nsdata->nlbaf = 1 - 1;
+ nsdata->dlfeat = NVMEM(NVME_NS_DATA_DLFEAT_DWZ) |
+ NVMEF(NVME_NS_DATA_DLFEAT_READ, NVME_NS_DATA_DLFEAT_READ_00);
+ nsdata->flbas = NVMEF(NVME_NS_DATA_FLBAS_FORMAT, 0);
+ nsdata->lbaf[0] = NVMEF(NVME_NS_DATA_LBAF_LBADS,
+ ffs(cbe_lun->blocksize) - 1);
+
+ ctl_lun_nsdata_ids(cbe_lun, nsdata);
+ ctl_config_read_done(io);
+}
+
+static void
+ctl_be_block_nvme_ids(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
+
+ ctl_lun_nvme_ids(cbe_lun, io->nvmeio.kern_data_ptr);
+ ctl_config_read_done(io);
+}
+
static void
ctl_be_block_cw_dispatch_sync(struct ctl_be_block_lun *be_lun,
union ctl_io *io)
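
Aside: in ctl_be_block_namespace_data() above, nlbaf is a zero-based count (a single LBA format is reported as 0) and the LBADS field of lbaf[0] is the base-2 log of the logical block size, which ffs(blocksize) - 1 yields for a power-of-two block size. A stand-alone sketch of that computation (illustrative only, not part of the patch):

/*
 * Illustrative sketch only -- not part of this diff.  Shows the LBADS
 * computation used above: ffs() returns the 1-based index of the lowest
 * set bit, so ffs(blocksize) - 1 == log2(blocksize) for powers of two.
 */
#include <stdio.h>
#include <strings.h>

int
main(void)
{
	unsigned int sizes[] = { 512, 4096 };

	for (unsigned int i = 0; i < 2; i++) {
		unsigned int bs = sizes[i];
		int lbads = ffs(bs) - 1;

		/* 512 -> LBADS 9, 4096 -> LBADS 12 */
		printf("blocksize %u -> LBADS %d (1 << %d = %u)\n",
		    bs, lbads, lbads, 1U << lbads);
	}
	return (0);
}
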
@@ -1359,6 +1416,9 @@
DPRINTF("entered\n");
+ KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
+ ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type));
+
beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
lbalen = ARGS(io);
@@ -1449,13 +1509,16 @@
static void
ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun,
- union ctl_io *io)
+ union ctl_io *io)
{
struct ctl_be_block_io *beio;
struct ctl_ptr_len_flags *ptrlen;
DPRINTF("entered\n");
+ KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
+ ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type));
+
beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
@@ -1480,7 +1543,190 @@
}
static void
-ctl_be_block_cr_done(struct ctl_be_block_io *beio)
+ctl_be_block_cw_dispatch_flush(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
+{
+ struct ctl_be_block_io *beio;
+
+ DPRINTF("entered\n");
+ beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
+
+ beio->io_len = be_lun->size_bytes;
+ beio->io_offset = 0;
+ beio->io_arg = 1;
+ beio->bio_cmd = BIO_FLUSH;
+ beio->ds_trans_type = DEVSTAT_NO_DATA;
+ DPRINTF("FLUSH\n");
+ be_lun->lun_flush(be_lun, beio);
+}
+
+static void
+ctl_be_block_cw_dispatch_wu(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
+ struct ctl_be_block_io *beio;
+ struct ctl_lba_len_flags *lbalen;
+
+ KASSERT(io->io_hdr.io_type == CTL_IO_NVME,
+ ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type));
+
+ beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
+ lbalen = ARGS(io);
+
+ /*
+ * XXX: Not quite right as reads will return zeroes rather
+ * than failing.
+ */
+ beio->io_offset = lbalen->lba * cbe_lun->blocksize;
+ beio->io_len = (uint64_t)lbalen->len * cbe_lun->blocksize;
+ beio->bio_cmd = BIO_DELETE;
+ beio->ds_trans_type = DEVSTAT_FREE;
+
+ be_lun->unmap(be_lun, beio);
+}
+
+static void
+ctl_be_block_cw_dispatch_wz(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
+{
+ struct ctl_be_block_softc *softc = be_lun->softc;
+ struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
+ struct ctl_be_block_io *beio;
+ struct ctl_lba_len_flags *lbalen;
+ uint64_t len_left, lba;
+ uint32_t pb, pbo, adj;
+ int i, seglen;
+
+ DPRINTF("entered\n");
+
+ KASSERT(io->io_hdr.io_type == CTL_IO_NVME,
+ ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type));
+
+ beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
+ lbalen = ARGS(io);
+
+ if ((le32toh(io->nvmeio.cmd.cdw12) & (1U << 25)) != 0 &&
+ be_lun->unmap != NULL) {
+ beio->io_offset = lbalen->lba * cbe_lun->blocksize;
+ beio->io_len = (uint64_t)lbalen->len * cbe_lun->blocksize;
+ beio->bio_cmd = BIO_DELETE;
+ beio->ds_trans_type = DEVSTAT_FREE;
+
+ be_lun->unmap(be_lun, beio);
+ return;
+ }
+
+ beio->bio_cmd = BIO_WRITE;
+ beio->ds_trans_type = DEVSTAT_WRITE;
+
+ DPRINTF("WRITE ZEROES at LBA %jx len %u\n",
+ (uintmax_t)lbalen->lba, lbalen->len);
+
+ pb = cbe_lun->blocksize << be_lun->cbe_lun.pblockexp;
+ if (be_lun->cbe_lun.pblockoff > 0)
+ pbo = pb - cbe_lun->blocksize * be_lun->cbe_lun.pblockoff;
+ else
+ pbo = 0;
+ len_left = (uint64_t)lbalen->len * cbe_lun->blocksize;
+ for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) {
+ /*
+ * Setup the S/G entry for this chunk.
+ */
+ seglen = MIN(CTLBLK_MAX_SEG, len_left);
+ if (pb > cbe_lun->blocksize) {
+ adj = ((lbalen->lba + lba) * cbe_lun->blocksize +
+ seglen - pbo) % pb;
+ if (seglen > adj)
+ seglen -= adj;
+ else
+ seglen -= seglen % cbe_lun->blocksize;
+ } else
+ seglen -= seglen % cbe_lun->blocksize;
+ ctl_alloc_seg(softc, &beio->sg_segs[i], seglen);
+
+ DPRINTF("segment %d addr %p len %zd\n", i,
+ beio->sg_segs[i].addr, beio->sg_segs[i].len);
+
+ beio->num_segs++;
+ len_left -= seglen;
+
+ memset(beio->sg_segs[i].addr, 0, seglen);
+ lba += seglen / cbe_lun->blocksize;
+ }
+
+ beio->io_offset = lbalen->lba * cbe_lun->blocksize;
+ beio->io_len = lba * cbe_lun->blocksize;
+
+ /* We can not do all in one run. Correct and schedule rerun. */
+ if (len_left > 0) {
+ lbalen->lba += lba;
+ lbalen->len -= lba;
+ beio->beio_cont = ctl_be_block_cw_done_ws;
+ }
+
+ be_lun->dispatch(be_lun, beio);
+}
+
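
Illustrative aside: the loop in ctl_be_block_cw_dispatch_wz() above trims each zero-fill chunk so that, when the physical block (pb) is larger than the logical block, chunk boundaries fall on physical-block boundaries. A stand-alone worked example with made-up geometry (not CTL code):

/*
 * Illustrative sketch only -- not part of this diff.  Reproduces the
 * segment-trimming arithmetic from ctl_be_block_cw_dispatch_wz() with
 * made-up numbers: a WRITE ZEROES starting at an unaligned LBA is
 * trimmed so the first chunk ends on a physical-block boundary.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t blocksize = 512;	/* logical block size */
	uint32_t pblockexp = 3;		/* physical block = 512 << 3 = 4096 */
	uint32_t pblockoff = 0;
	uint64_t lba_start = 1;		/* deliberately unaligned */
	uint64_t lba = 0;		/* blocks already covered in this beio */
	uint32_t seglen = 64 * 1024;	/* candidate chunk size */

	uint32_t pb = blocksize << pblockexp;
	uint32_t pbo = (pblockoff > 0) ? pb - blocksize * pblockoff : 0;

	if (pb > blocksize) {
		uint32_t adj = ((lba_start + lba) * blocksize + seglen - pbo) % pb;

		if (seglen > adj)
			seglen -= adj;
		else
			seglen -= seglen % blocksize;
	} else
		seglen -= seglen % blocksize;

	/* Prints: seglen 65024 (127 blocks); chunk ends at byte 65536 */
	printf("seglen %u (%u blocks); chunk ends at byte %ju\n",
	    seglen, seglen / blocksize,
	    (uintmax_t)((lba_start + lba) * blocksize + seglen));
	return (0);
}
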
+static void
+ctl_be_block_cw_dispatch_dsm(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
+ struct ctl_be_block_io *beio;
+ struct nvme_dsm_range *r;
+ uint64_t lba;
+ uint32_t num_blocks;
+ u_int i, ranges;
+
+ KASSERT(io->io_hdr.io_type == CTL_IO_NVME,
+ ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type));
+
+ beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
+
+ if (be_lun->unmap == NULL) {
+ ctl_free_beio(beio);
+ ctl_nvme_set_success(&io->nvmeio);
+ ctl_config_write_done(io);
+ return;
+ }
+
+ ranges = le32toh(io->nvmeio.cmd.cdw10) & 0xff;
+ r = (struct nvme_dsm_range *)io->nvmeio.kern_data_ptr;
+
+ /* Find the next range to delete. */
+ for (i = DSM_RANGE(io); i < ranges; i++) {
+ if ((le32toh(r[i].attributes) & (1U << 2)) != 0)
+ break;
+ }
+
+ /* If no range to delete, complete the operation. */
+ if (i == ranges) {
+ ctl_free_beio(beio);
+ ctl_nvme_set_success(&io->nvmeio);
+ ctl_config_write_done(io);
+ return;
+ }
+
+ /* If this is not the last range, request a rerun after this range. */
+ if (i + 1 < ranges) {
+ DSM_RANGE(io) = i + 1;
+ beio->beio_cont = ctl_be_block_cw_done_ws;
+ }
+
+ lba = le64toh(r[i].starting_lba);
+ num_blocks = le32toh(r[i].length);
+
+ beio->io_offset = lba * cbe_lun->blocksize;
+ beio->io_len = (uint64_t)num_blocks * cbe_lun->blocksize;
+ beio->bio_cmd = BIO_DELETE;
+ beio->ds_trans_type = DEVSTAT_FREE;
+
+ be_lun->unmap(be_lun, beio);
+}
+
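
Aside: ctl_be_block_cw_dispatch_dsm() above deallocates one Dataset Management range per pass, using DSM_RANGE(io) as the resume index and skipping ranges whose Attribute Deallocate bit (bit 2 of the attributes word) is clear. A stand-alone sketch of that scan; the local struct merely mirrors the wire layout for illustration:

/*
 * Illustrative sketch only -- not part of this diff.  Scans a DSM range
 * list the way ctl_be_block_cw_dispatch_dsm() does: skip ranges without
 * the Attribute Deallocate bit (bit 2), report the ones to delete.  The
 * kernel code byte-swaps the little-endian fields with le32toh()/le64toh();
 * that is omitted here.
 */
#include <stdint.h>
#include <stdio.h>

struct sketch_dsm_range {
	uint32_t attributes;	/* bit 2 = AD (deallocate) */
	uint32_t length;	/* in logical blocks */
	uint64_t starting_lba;
};

int
main(void)
{
	struct sketch_dsm_range r[] = {
		{ .attributes = 0,      .length = 8,  .starting_lba = 0 },
		{ .attributes = 1 << 2, .length = 16, .starting_lba = 64 },
		{ .attributes = 1 << 2, .length = 32, .starting_lba = 128 },
	};
	unsigned int ranges = sizeof(r) / sizeof(r[0]);

	/* One range is handled per pass; DSM_RANGE(io) stores the resume index. */
	for (unsigned int i = 0; i < ranges; i++) {
		if ((r[i].attributes & (1U << 2)) == 0)
			continue;	/* nothing to deallocate */
		printf("BIO_DELETE: lba %ju, %u blocks\n",
		    (uintmax_t)r[i].starting_lba, r[i].length);
	}
	return (0);
}
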
+static void
+ctl_be_block_scsi_cr_done(struct ctl_be_block_io *beio)
{
union ctl_io *io;
@@ -1490,8 +1736,8 @@
}
static void
-ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun,
- union ctl_io *io)
+ctl_be_block_scsi_cr_dispatch(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
{
struct ctl_be_block_io *beio;
struct ctl_be_block_softc *softc;
@@ -1502,7 +1748,7 @@
beio = ctl_alloc_beio(softc);
beio->io = io;
beio->lun = be_lun;
- beio->beio_cont = ctl_be_block_cr_done;
+ beio->beio_cont = ctl_be_block_scsi_cr_done;
PRIV(io)->ptr = (void *)beio;
switch (io->scsiio.cdb[0]) {
@@ -1514,7 +1760,7 @@
if (be_lun->get_lba_status)
be_lun->get_lba_status(be_lun, beio);
else
- ctl_be_block_cr_done(beio);
+ ctl_be_block_scsi_cr_done(beio);
break;
default:
panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
@@ -1522,6 +1768,45 @@
}
}
+static void
+ctl_be_block_nvme_cr_dispatch(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
+{
+ uint8_t cns;
+
+ DPRINTF("entered\n");
+
+ MPASS(io->nvmeio.cmd.opc == NVME_OPC_IDENTIFY);
+
+ cns = le32toh(io->nvmeio.cmd.cdw10) & 0xff;
+ switch (cns) {
+ case 0:
+ ctl_be_block_namespace_data(be_lun, io);
+ break;
+ case 3:
+ ctl_be_block_nvme_ids(be_lun, io);
+ break;
+ default:
+ __assert_unreachable();
+ }
+}
+
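
Aside: ctl_be_block_nvme_cr_dispatch() above only ever sees the two Identify CNS values that ctl_be_block_nvme_config_read() queues, CNS 0 (Identify Namespace data structure) and CNS 3 (Namespace Identification Descriptor list); other values are rejected earlier with an invalid-field status. A tiny decode sketch (illustrative only, not part of the patch):

/*
 * Illustrative sketch only -- not part of this diff.  CNS lives in the
 * low byte of Identify CDW10; the backend serves CNS 0 and CNS 3 and
 * rejects the rest before they reach this dispatch.
 */
#include <stdint.h>
#include <stdio.h>

static const char *
sketch_identify_cns(uint32_t cdw10)
{
	switch (cdw10 & 0xff) {
	case 0:
		return ("Identify Namespace data");
	case 3:
		return ("Namespace Identification Descriptor list");
	default:
		return ("not served by this backend (invalid field)");
	}
}

int
main(void)
{
	printf("cdw10=0x00 -> %s\n", sketch_identify_cns(0x00));
	printf("cdw10=0x03 -> %s\n", sketch_identify_cns(0x03));
	printf("cdw10=0x01 -> %s\n", sketch_identify_cns(0x01));
	return (0);
}
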
+static void
+ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
+{
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ ctl_be_block_scsi_cr_dispatch(be_lun, io);
+ break;
+ case CTL_IO_NVME_ADMIN:
+ ctl_be_block_nvme_cr_dispatch(be_lun, io);
+ break;
+ default:
+ __assert_unreachable();
+ }
+}
+
static void
ctl_be_block_cw_done(struct ctl_be_block_io *beio)
{
@@ -1533,19 +1818,15 @@
}
static void
-ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
- union ctl_io *io)
+ctl_be_block_scsi_cw_dispatch(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
{
struct ctl_be_block_io *beio;
- struct ctl_be_block_softc *softc;
DPRINTF("entered\n");
- softc = be_lun->softc;
- beio = ctl_alloc_beio(softc);
- beio->io = io;
- beio->lun = be_lun;
- beio->beio_cont = ctl_be_block_cw_done;
+ beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
+
switch (io->scsiio.tag_type) {
case CTL_TAG_ORDERED:
beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
@@ -1560,7 +1841,6 @@
beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
break;
}
- PRIV(io)->ptr = (void *)beio;
switch (io->scsiio.cdb[0]) {
case SYNCHRONIZE_CACHE:
@@ -1580,6 +1860,61 @@
}
}
+static void
+ctl_be_block_nvme_cw_dispatch(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
+{
+ struct ctl_be_block_io *beio;
+
+ DPRINTF("entered\n");
+
+ beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
+ beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
+
+ switch (io->nvmeio.cmd.opc) {
+ case NVME_OPC_FLUSH:
+ ctl_be_block_cw_dispatch_flush(be_lun, io);
+ break;
+ case NVME_OPC_WRITE_UNCORRECTABLE:
+ ctl_be_block_cw_dispatch_wu(be_lun, io);
+ break;
+ case NVME_OPC_WRITE_ZEROES:
+ ctl_be_block_cw_dispatch_wz(be_lun, io);
+ break;
+ case NVME_OPC_DATASET_MANAGEMENT:
+ ctl_be_block_cw_dispatch_dsm(be_lun, io);
+ break;
+ default:
+ __assert_unreachable();
+ }
+}
+
+static void
+ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
+{
+ struct ctl_be_block_io *beio;
+ struct ctl_be_block_softc *softc;
+
+ softc = be_lun->softc;
+ beio = ctl_alloc_beio(softc);
+ beio->io = io;
+ beio->lun = be_lun;
+ beio->beio_cont = ctl_be_block_cw_done;
+ PRIV(io)->ptr = (void *)beio;
+
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ ctl_be_block_scsi_cw_dispatch(be_lun, io);
+ break;
+ case CTL_IO_NVME:
+ ctl_be_block_nvme_cw_dispatch(be_lun, io);
+ break;
+ default:
+ __assert_unreachable();
+ }
+}
+
SDT_PROBE_DEFINE1(cbb, , read, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, , write, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, , read, alloc_done, "uint64_t");
@@ -1639,19 +1974,28 @@
bptrlen = PRIV(io);
bptrlen->ptr = (void *)beio;
- switch (io->scsiio.tag_type) {
- case CTL_TAG_ORDERED:
- beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ switch (io->scsiio.tag_type) {
+ case CTL_TAG_ORDERED:
+ beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
+ break;
+ case CTL_TAG_HEAD_OF_QUEUE:
+ beio->ds_tag_type = DEVSTAT_TAG_HEAD;
+ break;
+ case CTL_TAG_UNTAGGED:
+ case CTL_TAG_SIMPLE:
+ case CTL_TAG_ACA:
+ default:
+ beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
+ break;
+ }
break;
- case CTL_TAG_HEAD_OF_QUEUE:
- beio->ds_tag_type = DEVSTAT_TAG_HEAD;
- break;
- case CTL_TAG_UNTAGGED:
- case CTL_TAG_SIMPLE:
- case CTL_TAG_ACA:
- default:
+ case CTL_IO_NVME:
beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
break;
+ default:
+ __assert_unreachable();
}
if (lbalen->flags & CTL_LLF_WRITE) {
@@ -1700,16 +2044,16 @@
}
if (bptrlen->len < lbalen->len)
beio->beio_cont = ctl_be_block_next;
- io->scsiio.be_move_done = ctl_be_block_move_done;
+ ctl_set_be_move_done(io, ctl_be_block_move_done);
/* For compare we have separate S/G lists for read and datamove. */
if (beio->two_sglists)
- io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS];
+ ctl_set_kern_data_ptr(io, &beio->sg_segs[CTLBLK_HALF_SEGS]);
else
- io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
- io->scsiio.kern_data_len = beio->io_len;
- io->scsiio.kern_sg_entries = beio->num_segs;
- io->scsiio.kern_data_ref = ctl_refcnt_beio;
- io->scsiio.kern_data_arg = beio;
+ ctl_set_kern_data_ptr(io, beio->sg_segs);
+ ctl_set_kern_data_len(io, beio->io_len);
+ ctl_set_kern_sg_entries(io, beio->num_segs);
+ ctl_set_kern_data_ref(io, ctl_refcnt_beio);
+ ctl_set_kern_data_arg(io, beio);
io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
/*
@@ -1749,7 +2093,7 @@
mtx_unlock(&be_lun->queue_lock);
beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
- ctl_set_busy(&io->scsiio);
+ ctl_io_set_busy(io);
ctl_complete_beio(beio);
continue;
}
@@ -1762,7 +2106,7 @@
STAILQ_REMOVE_HEAD(&be_lun->config_write_queue, links);
mtx_unlock(&be_lun->queue_lock);
if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
- ctl_set_busy(&io->scsiio);
+ ctl_io_set_busy(io);
ctl_config_write_done(io);
continue;
}
@@ -1775,7 +2119,7 @@
STAILQ_REMOVE_HEAD(&be_lun->config_read_queue, links);
mtx_unlock(&be_lun->queue_lock);
if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
- ctl_set_busy(&io->scsiio);
+ ctl_io_set_busy(io);
ctl_config_read_done(io);
continue;
}
@@ -1788,7 +2132,7 @@
STAILQ_REMOVE_HEAD(&be_lun->input_queue, links);
mtx_unlock(&be_lun->queue_lock);
if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
- ctl_set_busy(&io->scsiio);
+ ctl_io_set_busy(io);
ctl_data_submit_done(io);
continue;
}
@@ -1819,7 +2163,8 @@
be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io);
- KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
+ KASSERT(io->io_hdr.io_type == CTL_IO_SCSI ||
+ io->io_hdr.io_type == CTL_IO_NVME,
("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type));
PRIV(io)->len = 0;
@@ -2695,7 +3040,7 @@
}
static int
-ctl_be_block_config_write(union ctl_io *io)
+ctl_be_block_scsi_config_write(union ctl_io *io)
{
struct ctl_be_block_lun *be_lun;
struct ctl_be_lun *cbe_lun;
@@ -2780,7 +3125,50 @@
}
static int
-ctl_be_block_config_read(union ctl_io *io)
+ctl_be_block_nvme_config_write(union ctl_io *io)
+{
+ struct ctl_be_block_lun *be_lun;
+
+ DPRINTF("entered\n");
+
+ be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io);
+
+ switch (io->nvmeio.cmd.opc) {
+ case NVME_OPC_DATASET_MANAGEMENT:
+ DSM_RANGE(io) = 0;
+ /* FALLTHROUGH */
+ case NVME_OPC_FLUSH:
+ case NVME_OPC_WRITE_UNCORRECTABLE:
+ case NVME_OPC_WRITE_ZEROES:
+ mtx_lock(&be_lun->queue_lock);
+ STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
+ links);
+ mtx_unlock(&be_lun->queue_lock);
+ taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
+ break;
+ default:
+ ctl_nvme_set_invalid_opcode(&io->nvmeio);
+ ctl_config_write_done(io);
+ break;
+ }
+ return (CTL_RETVAL_COMPLETE);
+}
+
+static int
+ctl_be_block_config_write(union ctl_io *io)
+{
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ return (ctl_be_block_scsi_config_write(io));
+ case CTL_IO_NVME:
+ return (ctl_be_block_nvme_config_write(io));
+ default:
+ __assert_unreachable();
+ }
+}
+
+static int
+ctl_be_block_scsi_config_read(union ctl_io *io)
{
struct ctl_be_block_lun *be_lun;
int retval = 0;
@@ -2820,6 +3208,59 @@
return (retval);
}
+static int
+ctl_be_block_nvme_config_read(union ctl_io *io)
+{
+ struct ctl_be_block_lun *be_lun;
+
+ DPRINTF("entered\n");
+
+ be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io);
+
+ switch (io->nvmeio.cmd.opc) {
+ case NVME_OPC_IDENTIFY:
+ {
+ uint8_t cns;
+
+ cns = le32toh(io->nvmeio.cmd.cdw10) & 0xff;
+ switch (cns) {
+ case 0:
+ case 3:
+ mtx_lock(&be_lun->queue_lock);
+ STAILQ_INSERT_TAIL(&be_lun->config_read_queue,
+ &io->io_hdr, links);
+ mtx_unlock(&be_lun->queue_lock);
+ taskqueue_enqueue(be_lun->io_taskqueue,
+ &be_lun->io_task);
+ return (CTL_RETVAL_QUEUED);
+ default:
+ ctl_nvme_set_invalid_field(&io->nvmeio);
+ ctl_config_read_done(io);
+ break;
+ }
+ break;
+ }
+ default:
+ ctl_nvme_set_invalid_opcode(&io->nvmeio);
+ ctl_config_read_done(io);
+ break;
+ }
+ return (CTL_RETVAL_COMPLETE);
+}
+
+static int
+ctl_be_block_config_read(union ctl_io *io)
+{
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ return (ctl_be_block_scsi_config_read(io));
+ case CTL_IO_NVME_ADMIN:
+ return (ctl_be_block_nvme_config_read(io));
+ default:
+ __assert_unreachable();
+ }
+}
+
static int
ctl_be_block_lun_info(struct ctl_be_lun *cbe_lun, struct sbuf *sb)
{