Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/nvme/nvme_ctrlr.c
Show First 20 Lines • Show All 254 Lines • ▼ Show 20 Lines | while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) { | ||||
mtx_unlock(&ctrlr->lock); | mtx_unlock(&ctrlr->lock); | ||||
nvme_qpair_manual_complete_request(req->qpair, req, | nvme_qpair_manual_complete_request(req->qpair, req, | ||||
NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST); | NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST); | ||||
mtx_lock(&ctrlr->lock); | mtx_lock(&ctrlr->lock); | ||||
} | } | ||||
mtx_unlock(&ctrlr->lock); | mtx_unlock(&ctrlr->lock); | ||||
} | } | ||||
static void | |||||
nvme_ctrlr_post_update_ns_request(struct nvme_controller *ctrlr, | |||||
struct nvme_namespace *req) | |||||
{ | |||||
mtx_lock(&ctrlr->lock); | |||||
STAILQ_INSERT_TAIL(&ctrlr->update_ns_req, req, stailq); | |||||
mtx_unlock(&ctrlr->lock); | |||||
if (!ctrlr->is_dying) | |||||
taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->update_ns_task); | |||||
} | |||||
/*
 * Taskqueue handler for namespace update requests posted by
 * nvme_ctrlr_post_update_ns_request().  Drains ctrlr->update_ns_req,
 * re-running nvme_ns_construct() for each queued namespace and then
 * notifying consumers via nvme_notify_ns().
 *
 * The controller lock protects only the queue itself; it is dropped
 * around the construct/notify calls, which may sleep.
 *
 * NOTE(review): req->flags is cleared below without holding
 * ctrlr->lock — confirm that NVME_NS_FLAG_CHANGED updates are
 * serialized elsewhere.  Also confirm that a namespace cannot be
 * re-posted to the STAILQ while it is being processed here (a second
 * insert of the same element would corrupt the list).
 */
static void
nvme_ctrlr_update_ns_req_task(void *arg, int pending)
{
	struct nvme_controller *ctrlr = arg;
	struct nvme_namespace *req;

	mtx_lock(&ctrlr->lock);
	while ((req = STAILQ_FIRST(&ctrlr->update_ns_req)) != NULL) {
		STAILQ_REMOVE_HEAD(&ctrlr->update_ns_req, stailq);
		/* Drop the lock: construct/notify may sleep. */
		mtx_unlock(&ctrlr->lock);
		nvme_ns_construct(req, req->id, ctrlr, NVME_REASON_FLAGGED);
		nvme_notify_ns(ctrlr, req->id);
		/* Mark the namespace as no longer pending an update. */
		req->flags &= ~NVME_NS_FLAG_CHANGED;
		mtx_lock(&ctrlr->lock);
	}
	mtx_unlock(&ctrlr->lock);
}
/* | /* | ||||
* Wait for RDY to change. | * Wait for RDY to change. | ||||
* | * | ||||
* Starts sleeping for 1us and geometrically increases it the longer we wait, | * Starts sleeping for 1us and geometrically increases it the longer we wait, | ||||
* capped at 1ms. | * capped at 1ms. | ||||
*/ | */ | ||||
static int | static int | ||||
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val) | nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val) | ||||
▲ Show 20 Lines • Show All 310 Lines • ▼ Show 20 Lines | |||||
static int | static int | ||||
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr) | nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr) | ||||
{ | { | ||||
struct nvme_namespace *ns; | struct nvme_namespace *ns; | ||||
uint32_t i; | uint32_t i; | ||||
for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) { | for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) { | ||||
ns = &ctrlr->ns[i]; | ns = &ctrlr->ns[i]; | ||||
nvme_ns_construct(ns, i+1, ctrlr); | nvme_ns_construct(ns, i+1, ctrlr, NVME_REASON_RESET); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
static bool | static bool | ||||
is_log_page_id_valid(uint8_t page_id) | is_log_page_id_valid(uint8_t page_id) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 147 Lines • ▼ Show 20 Lines | if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) { | ||||
* clear the associated bits in the async event | * clear the associated bits in the async event | ||||
* config so that we do not receive repeated | * config so that we do not receive repeated | ||||
* notifications for the same event. | * notifications for the same event. | ||||
*/ | */ | ||||
aer->ctrlr->async_event_config &= | aer->ctrlr->async_event_config &= | ||||
~health_info->critical_warning; | ~health_info->critical_warning; | ||||
nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr, | nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr, | ||||
aer->ctrlr->async_event_config, NULL, NULL); | aer->ctrlr->async_event_config, NULL, NULL); | ||||
} else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE && | } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE) { | ||||
!nvme_use_nvd) { | |||||
nsl = (struct nvme_ns_list *)aer->log_page_buffer; | nsl = (struct nvme_ns_list *)aer->log_page_buffer; | ||||
for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) { | for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) { | ||||
if (nsl->ns[i] > NVME_MAX_NAMESPACES) | if (nsl->ns[i] > NVME_MAX_NAMESPACES) | ||||
break; | break; | ||||
nvme_notify_ns(aer->ctrlr, nsl->ns[i]); | nvme_ctrlr_post_update_ns_request(aer->ctrlr, | ||||
&aer->ctrlr->ns[nsl->ns[i]-1]); | |||||
} | } | ||||
imp: I think this is wrong on many levels.
| |||||
Done Inline Actions Since nvme_notify_ns() will query the namespace, calling it directly here could cause a kernel panic. Instead, we post a query request to the queue; another thread will query the device, reconstruct the namespace, and call nvme_notify_ns() afterwards. wanpengqian_gmail.com: Since notify_ns will query the namespace, that could cause a kernel panic. Here we post a query… |
} | } | ||||
/* | /* | ||||
* Pass the cpl data from the original async event completion, | * Pass the cpl data from the original async event completion, | ||||
* not the log page fetch. | * not the log page fetch. | ||||
*/ | */ | ||||
nvme_notify_async_consumers(aer->ctrlr, &aer->cpl, | nvme_notify_async_consumers(aer->ctrlr, &aer->cpl, | ||||
aer->log_page_id, aer->log_page_buffer, aer->log_page_size); | aer->log_page_id, aer->log_page_buffer, aer->log_page_size); | ||||
▲ Show 20 Lines • Show All 693 Lines • ▼ Show 20 Lines | nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev) | ||||
* | * | ||||
* We could partially solve this race by draining the failed request | * We could partially solve this race by draining the failed request | ||||
* queue before proceeding to free the sim, though nothing would stop | * queue before proceeding to free the sim, though nothing would stop | ||||
* new I/O from coming in after we do that drain, but before we reach | * new I/O from coming in after we do that drain, but before we reach | ||||
* cam_sim_free, so this big hammer is used instead. | * cam_sim_free, so this big hammer is used instead. | ||||
*/ | */ | ||||
ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK, | ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK, | ||||
taskqueue_thread_enqueue, &ctrlr->taskqueue); | taskqueue_thread_enqueue, &ctrlr->taskqueue); | ||||
taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq"); | taskqueue_start_threads(&ctrlr->taskqueue, 3, PI_DISK, "nvme taskq"); | ||||
ctrlr->is_resetting = 0; | ctrlr->is_resetting = 0; | ||||
ctrlr->is_initialized = 0; | ctrlr->is_initialized = 0; | ||||
ctrlr->notification_sent = 0; | ctrlr->notification_sent = 0; | ||||
TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr); | TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr); | ||||
TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr); | TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr); | ||||
TASK_INIT(&ctrlr->update_ns_task, 0, nvme_ctrlr_update_ns_req_task, ctrlr); | |||||
STAILQ_INIT(&ctrlr->fail_req); | STAILQ_INIT(&ctrlr->fail_req); | ||||
STAILQ_INIT(&ctrlr->update_ns_req); | |||||
ctrlr->is_failed = false; | ctrlr->is_failed = false; | ||||
make_dev_args_init(&md_args); | make_dev_args_init(&md_args); | ||||
md_args.mda_devsw = &nvme_ctrlr_cdevsw; | md_args.mda_devsw = &nvme_ctrlr_cdevsw; | ||||
md_args.mda_uid = UID_ROOT; | md_args.mda_uid = UID_ROOT; | ||||
md_args.mda_gid = GID_WHEEL; | md_args.mda_gid = GID_WHEEL; | ||||
md_args.mda_mode = 0600; | md_args.mda_mode = 0600; | ||||
md_args.mda_unit = device_get_unit(dev); | md_args.mda_unit = device_get_unit(dev); | ||||
▲ Show 20 Lines • Show All 234 Lines • Show Last 20 Lines |
I think this is wrong on many levels.