Index: head/sys/dev/nvme/nvme.c
===================================================================
--- head/sys/dev/nvme/nvme.c
+++ head/sys/dev/nvme/nvme.c
@@ -90,6 +90,7 @@
 	int		match_subdevice;
 	uint16_t	subdevice;
 	const char	*desc;
+	uint32_t	quirks;
 } pci_ids[] = {
 	{ 0x01118086,	0, 0, "NVMe Controller" },
 	{ IDT32_PCI_ID,	0, 0, "IDT NVMe Controller (32 channel)" },
@@ -100,6 +101,11 @@
 	{ 0x09538086,	1, 0x3705, "DC P3500 SSD [2.5\" SFF]" },
 	{ 0x09538086,	1, 0x3709, "DC P3600 SSD [Add-in Card]" },
 	{ 0x09538086,	1, 0x370a, "DC P3600 SSD [2.5\" SFF]" },
+	{ 0x00031c58,	0, 0, "HGST SN100", QUIRK_DELAY_B4_CHK_RDY },
+	{ 0x00231c58,	0, 0, "WDC SN200", QUIRK_DELAY_B4_CHK_RDY },
+	{ 0x05401c5f,	0, 0, "Memblaze Pblaze4", QUIRK_DELAY_B4_CHK_RDY },
+	{ 0xa821144d,	0, 0, "Samsung PM1725", QUIRK_DELAY_B4_CHK_RDY },
+	{ 0xa822144d,	0, 0, "Samsung PM1725a", QUIRK_DELAY_B4_CHK_RDY },
 	{ 0x00000000,	0, 0, NULL }
 };
 
@@ -240,6 +246,19 @@
 {
 	struct nvme_controller *ctrlr = DEVICE2SOFTC(dev);
 	int status;
+	struct _pcsid *ep;
+	uint32_t devid;
+	uint16_t subdevice;
+
+	devid = pci_get_devid(dev);
+	subdevice = pci_get_subdevice(dev);
+	ep = pci_ids;
+	while (ep->devid) {
+		if (nvme_match(devid, subdevice, ep))
+			break;
+		++ep;
+	}
+	ctrlr->quirks = ep->quirks;
 
 	status = nvme_ctrlr_construct(ctrlr, dev);
 
Index: head/sys/dev/nvme/nvme_ctrlr.c
===================================================================
--- head/sys/dev/nvme/nvme_ctrlr.c
+++ head/sys/dev/nvme/nvme_ctrlr.c
@@ -46,6 +46,8 @@
 
 #include "nvme_private.h"
 
+#define B4_CHK_RDY_DELAY_MS	2300	/* work around controller bug */
+
 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
 						struct nvme_async_event_request *aer);
 static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);
@@ -241,49 +243,65 @@
 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
 {
 	int ms_waited;
-	union cc_register cc;
 	union csts_register csts;
 
-	cc.raw = nvme_mmio_read_4(ctrlr, cc);
 	csts.raw = nvme_mmio_read_4(ctrlr, csts);
 
-	if (cc.bits.en != desired_val) {
-		nvme_printf(ctrlr, "%s called with desired_val = %d "
-		    "but cc.en = %d\n", __func__, desired_val, cc.bits.en);
-		return (ENXIO);
-	}
-
 	ms_waited = 0;
-
 	while (csts.bits.rdy != desired_val) {
-		DELAY(1000);
 		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
 			nvme_printf(ctrlr, "controller ready did not become %d "
 			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
 			return (ENXIO);
 		}
+		DELAY(1000);
 		csts.raw = nvme_mmio_read_4(ctrlr, csts);
 	}
 
 	return (0);
 }
 
-static void
+static int
 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
 {
 	union cc_register cc;
 	union csts_register csts;
+	int err;
 
 	cc.raw = nvme_mmio_read_4(ctrlr, cc);
 	csts.raw = nvme_mmio_read_4(ctrlr, csts);
 
-	if (cc.bits.en == 1 && csts.bits.rdy == 0)
-		nvme_ctrlr_wait_for_ready(ctrlr, 1);
+	/*
+	 * Per 3.1.5 in the NVMe 1.3 spec, transitioning CC.EN from 0 to 1
+	 * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when
+	 * CSTS.RDY is 0 "has undefined results." So make sure CSTS.RDY
+	 * isn't the desired value. Short circuit if we're already disabled.
+	 */
+	if (cc.bits.en == 1) {
+		if (csts.bits.rdy == 0) {
+			/* EN == 1, wait for RDY == 1 or fail */
+			err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
+			if (err != 0)
+				return (err);
+		}
+	} else {
+		/* EN == 0 already; wait for RDY == 0 */
+		if (csts.bits.rdy == 0)
+			return (0);
+		else
+			return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
+	}
 
 	cc.bits.en = 0;
 	nvme_mmio_write_4(ctrlr, cc, cc.raw);
-	DELAY(5000);
-	nvme_ctrlr_wait_for_ready(ctrlr, 0);
+	/*
+	 * Some drives have issues with accessing the mmio after we
+	 * disable, so delay for a bit after we clear the enable bit to
+	 * cope with these issues.
+	 */
+	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
+		pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
+	return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
 }
 
 static int
@@ -292,15 +310,24 @@
 	union cc_register cc;
 	union csts_register csts;
 	union aqa_register aqa;
+	int err;
 
 	cc.raw = nvme_mmio_read_4(ctrlr, cc);
 	csts.raw = nvme_mmio_read_4(ctrlr, csts);
 
+	/*
+	 * See note in nvme_ctrlr_disable. Short circuit if we're already enabled.
+	 */
 	if (cc.bits.en == 1) {
 		if (csts.bits.rdy == 1)
 			return (0);
 		else
 			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
+	} else {
+		/* EN == 0 already; wait for RDY == 0 or fail */
+		err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
+		if (err != 0)
+			return (err);
 	}
 
 	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
@@ -326,7 +353,6 @@
 	cc.bits.mps = (PAGE_SIZE >> 13);
 
 	nvme_mmio_write_4(ctrlr, cc, cc.raw);
-	DELAY(5000);
 
 	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
 }
@@ -334,7 +360,7 @@
 int
 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
 {
-	int i;
+	int i, err;
 
 	nvme_admin_qpair_disable(&ctrlr->adminq);
 	/*
@@ -349,7 +375,9 @@
 
 	DELAY(100*1000);
 
-	nvme_ctrlr_disable(ctrlr);
+	err = nvme_ctrlr_disable(ctrlr);
+	if (err != 0)
+		return (err);
 
 	return (nvme_ctrlr_enable(ctrlr));
 }
Index: head/sys/dev/nvme/nvme_private.h
===================================================================
--- head/sys/dev/nvme/nvme_private.h
+++ head/sys/dev/nvme/nvme_private.h
@@ -246,6 +246,8 @@
 	struct mtx		lock;
 
 	uint32_t		ready_timeout_in_ms;
+	uint32_t		quirks;
+#define QUIRK_DELAY_B4_CHK_RDY	1	/* Can't touch MMIO on disable */
 
 	bus_space_tag_t		bus_tag;
 	bus_space_handle_t	bus_handle;