Changeset View
Changeset View
Standalone View
Standalone View
head/sys/dev/nvme/nvme_ctrlr.c
Show All 40 Lines | |||||
#include <sys/smp.h> | #include <sys/smp.h> | ||||
#include <sys/uio.h> | #include <sys/uio.h> | ||||
#include <dev/pci/pcireg.h> | #include <dev/pci/pcireg.h> | ||||
#include <dev/pci/pcivar.h> | #include <dev/pci/pcivar.h> | ||||
#include "nvme_private.h" | #include "nvme_private.h" | ||||
#define B4_CHK_RDY_DELAY_MS	2300		/* work around controller bug */
static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr, | static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr, | ||||
struct nvme_async_event_request *aer); | struct nvme_async_event_request *aer); | ||||
static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr); | static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr); | ||||
static int | static int | ||||
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr) | nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 179 Lines • ▼ Show 20 Lines | nvme_ctrlr_fail_req_task(void *arg, int pending) | ||||
} | } | ||||
mtx_unlock(&ctrlr->lock); | mtx_unlock(&ctrlr->lock); | ||||
} | } | ||||
static int | static int | ||||
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val) | nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val) | ||||
{ | { | ||||
int ms_waited; | int ms_waited; | ||||
union cc_register cc; | |||||
union csts_register csts; | union csts_register csts; | ||||
cc.raw = nvme_mmio_read_4(ctrlr, cc); | |||||
csts.raw = nvme_mmio_read_4(ctrlr, csts); | csts.raw = nvme_mmio_read_4(ctrlr, csts); | ||||
if (cc.bits.en != desired_val) { | |||||
nvme_printf(ctrlr, "%s called with desired_val = %d " | |||||
"but cc.en = %d\n", __func__, desired_val, cc.bits.en); | |||||
return (ENXIO); | |||||
} | |||||
ms_waited = 0; | ms_waited = 0; | ||||
while (csts.bits.rdy != desired_val) { | while (csts.bits.rdy != desired_val) { | ||||
DELAY(1000); | |||||
if (ms_waited++ > ctrlr->ready_timeout_in_ms) { | if (ms_waited++ > ctrlr->ready_timeout_in_ms) { | ||||
nvme_printf(ctrlr, "controller ready did not become %d " | nvme_printf(ctrlr, "controller ready did not become %d " | ||||
"within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms); | "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
DELAY(1000); | |||||
csts.raw = nvme_mmio_read_4(ctrlr, csts); | csts.raw = nvme_mmio_read_4(ctrlr, csts); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
static void | static int | ||||
nvme_ctrlr_disable(struct nvme_controller *ctrlr) | nvme_ctrlr_disable(struct nvme_controller *ctrlr) | ||||
{ | { | ||||
union cc_register cc; | union cc_register cc; | ||||
union csts_register csts; | union csts_register csts; | ||||
int err; | |||||
cc.raw = nvme_mmio_read_4(ctrlr, cc); | cc.raw = nvme_mmio_read_4(ctrlr, cc); | ||||
csts.raw = nvme_mmio_read_4(ctrlr, csts); | csts.raw = nvme_mmio_read_4(ctrlr, csts); | ||||
if (cc.bits.en == 1 && csts.bits.rdy == 0) | /* | ||||
nvme_ctrlr_wait_for_ready(ctrlr, 1); | * Per 3.1.5 in NVME 1.3 spec, transitioning CC.EN from 0 to 1 | ||||
* when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when | |||||
* CSTS.RDY is 0 "has undefined results" So make sure that CSTS.RDY | |||||
* isn't the desired value. Short circuit if we're already disabled. | |||||
*/ | |||||
if (cc.bits.en == 1) { | |||||
if (csts.bits.rdy == 0) { | |||||
/* EN == 1, wait for RDY == 1 or fail */ | |||||
err = nvme_ctrlr_wait_for_ready(ctrlr, 1); | |||||
if (err != 0) | |||||
return (err); | |||||
} | |||||
} else { | |||||
/* EN == 0 already wait for RDY == 0 */ | |||||
if (csts.bits.rdy == 0) | |||||
return (0); | |||||
else | |||||
return (nvme_ctrlr_wait_for_ready(ctrlr, 0)); | |||||
} | |||||
cc.bits.en = 0; | cc.bits.en = 0; | ||||
nvme_mmio_write_4(ctrlr, cc, cc.raw); | nvme_mmio_write_4(ctrlr, cc, cc.raw); | ||||
DELAY(5000); | /* | ||||
nvme_ctrlr_wait_for_ready(ctrlr, 0); | * Some drives have issues with accessing the mmio after we | ||||
* disable, so delay for a bit after we write the bit to | |||||
* cope with these issues. | |||||
*/ | |||||
if (ctrlr->quirks) | |||||
pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000); | |||||
return (nvme_ctrlr_wait_for_ready(ctrlr, 0)); | |||||
} | } | ||||
static int | static int | ||||
nvme_ctrlr_enable(struct nvme_controller *ctrlr) | nvme_ctrlr_enable(struct nvme_controller *ctrlr) | ||||
{ | { | ||||
union cc_register cc; | union cc_register cc; | ||||
union csts_register csts; | union csts_register csts; | ||||
union aqa_register aqa; | union aqa_register aqa; | ||||
int err; | |||||
cc.raw = nvme_mmio_read_4(ctrlr, cc); | cc.raw = nvme_mmio_read_4(ctrlr, cc); | ||||
csts.raw = nvme_mmio_read_4(ctrlr, csts); | csts.raw = nvme_mmio_read_4(ctrlr, csts); | ||||
/* | |||||
* See note in nvme_ctrlr_disable. Short circuit if we're already enabled. | |||||
*/ | |||||
if (cc.bits.en == 1) { | if (cc.bits.en == 1) { | ||||
if (csts.bits.rdy == 1) | if (csts.bits.rdy == 1) | ||||
return (0); | return (0); | ||||
else | else | ||||
return (nvme_ctrlr_wait_for_ready(ctrlr, 1)); | return (nvme_ctrlr_wait_for_ready(ctrlr, 1)); | ||||
} else { | |||||
/* EN == 0 already wait for RDY == 0 or fail */ | |||||
err = nvme_ctrlr_wait_for_ready(ctrlr, 0); | |||||
if (err != 0) | |||||
return (err); | |||||
} | } | ||||
nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr); | nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr); | ||||
DELAY(5000); | DELAY(5000); | ||||
nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr); | nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr); | ||||
DELAY(5000); | DELAY(5000); | ||||
aqa.raw = 0; | aqa.raw = 0; | ||||
Show All 9 Lines | nvme_ctrlr_enable(struct nvme_controller *ctrlr) | ||||
cc.bits.shn = 0; | cc.bits.shn = 0; | ||||
cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */ | cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */ | ||||
cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */ | cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */ | ||||
/* This evaluates to 0, which is according to spec. */ | /* This evaluates to 0, which is according to spec. */ | ||||
cc.bits.mps = (PAGE_SIZE >> 13); | cc.bits.mps = (PAGE_SIZE >> 13); | ||||
nvme_mmio_write_4(ctrlr, cc, cc.raw); | nvme_mmio_write_4(ctrlr, cc, cc.raw); | ||||
DELAY(5000); | |||||
return (nvme_ctrlr_wait_for_ready(ctrlr, 1)); | return (nvme_ctrlr_wait_for_ready(ctrlr, 1)); | ||||
} | } | ||||
int | int | ||||
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr) | nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr) | ||||
{ | { | ||||
int i; | int i, err; | ||||
nvme_admin_qpair_disable(&ctrlr->adminq); | nvme_admin_qpair_disable(&ctrlr->adminq); | ||||
/* | /* | ||||
* I/O queues are not allocated before the initial HW | * I/O queues are not allocated before the initial HW | ||||
* reset, so do not try to disable them. Use is_initialized | * reset, so do not try to disable them. Use is_initialized | ||||
* to determine if this is the initial HW reset. | * to determine if this is the initial HW reset. | ||||
*/ | */ | ||||
if (ctrlr->is_initialized) { | if (ctrlr->is_initialized) { | ||||
for (i = 0; i < ctrlr->num_io_queues; i++) | for (i = 0; i < ctrlr->num_io_queues; i++) | ||||
nvme_io_qpair_disable(&ctrlr->ioq[i]); | nvme_io_qpair_disable(&ctrlr->ioq[i]); | ||||
} | } | ||||
DELAY(100*1000); | DELAY(100*1000); | ||||
nvme_ctrlr_disable(ctrlr); | err = nvme_ctrlr_disable(ctrlr); | ||||
if (err != 0) | |||||
return err; | |||||
return (nvme_ctrlr_enable(ctrlr)); | return (nvme_ctrlr_enable(ctrlr)); | ||||
} | } | ||||
void | void | ||||
nvme_ctrlr_reset(struct nvme_controller *ctrlr) | nvme_ctrlr_reset(struct nvme_controller *ctrlr) | ||||
{ | { | ||||
int cmpset; | int cmpset; | ||||
▲ Show 20 Lines • Show All 911 Lines • Show Last 20 Lines |