diff --git a/sys/dev/nvme/nvme_qpair.c b/sys/dev/nvme/nvme_qpair.c
--- a/sys/dev/nvme/nvme_qpair.c
+++ b/sys/dev/nvme/nvme_qpair.c
@@ -945,22 +945,38 @@
 {
 	struct nvme_tracker	*tr;
 
+	/*
+	 * nvme_qpair_manual_complete_tracker must be called without the qpair
+	 * lock held. It takes the lock to adjust the outstanding_tr list, so
+	 * make sure we don't have it yet (since this is a general purpose
+	 * routine). We take the lock to make the list traversal safe, but
+	 * have to drop the lock to complete any AER. We restart the list
+	 * scan when we do this to make this safe. There's interlock with the
+	 * ISR so we know this tracker won't be completed twice.
+	 */
+	mtx_assert(&qpair->lock, MA_UNOWNED);
+
+	mtx_lock(&qpair->lock);
 	tr = TAILQ_FIRST(&qpair->outstanding_tr);
 	while (tr != NULL) {
 		if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
+			mtx_unlock(&qpair->lock);
 			nvme_qpair_manual_complete_tracker(tr,
 			    NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
 			    ERROR_PRINT_NONE);
+			mtx_lock(&qpair->lock);
 			tr = TAILQ_FIRST(&qpair->outstanding_tr);
 		} else {
 			tr = TAILQ_NEXT(tr, tailq);
 		}
 	}
+	mtx_unlock(&qpair->lock);
 }
 
 void
 nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
 {
+	mtx_assert(&qpair->lock, MA_UNOWNED);
 
 	nvme_admin_qpair_abort_aers(qpair);
 	nvme_qpair_destroy(qpair);
@@ -1410,12 +1426,13 @@
 nvme_admin_qpair_disable(struct nvme_qpair *qpair)
 {
 	mtx_lock(&qpair->recovery);
-	mtx_lock(&qpair->lock);
+	mtx_lock(&qpair->lock);
 	nvme_qpair_disable(qpair);
+	mtx_unlock(&qpair->lock);
+
 	nvme_admin_qpair_abort_aers(qpair);
-	mtx_unlock(&qpair->lock);
 	mtx_unlock(&qpair->recovery);
 }
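
The comment added in the first hunk describes a drop-the-lock-and-rescan idiom: hold the qpair lock only while walking outstanding_tr, release it around any call that completes a tracker (since completion re-acquires the lock to unlink the tracker), then re-lock and restart the scan because the list may have changed while unlocked. A minimal standalone sketch of that idiom, separate from the patch, might look like the code below. The names entry, entry_list, wants_abort, complete_entry and abort_matching_entries are hypothetical placeholders; only the mtx(9) and queue(3) primitives are real kernel APIs.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

struct entry {
	TAILQ_ENTRY(entry)	tailq;
	bool			wants_abort;
};

TAILQ_HEAD(entry_list, entry);

/*
 * Stand-in for the completion path: it acquires the list lock itself to
 * unlink the entry, which is why the caller below must not hold that lock
 * across the call.
 */
static void
complete_entry(struct entry_list *list, struct mtx *lock, struct entry *e)
{
	mtx_lock(lock);
	TAILQ_REMOVE(list, e, tailq);
	mtx_unlock(lock);
}

static void
abort_matching_entries(struct entry_list *list, struct mtx *lock)
{
	struct entry *e;

	/* General purpose routine: insist the caller does not hold the lock. */
	mtx_assert(lock, MA_UNOWNED);

	mtx_lock(lock);
	e = TAILQ_FIRST(list);
	while (e != NULL) {
		if (e->wants_abort) {
			/*
			 * Drop the lock so complete_entry() can take it, then
			 * re-lock and restart the scan from the head: the list
			 * may have changed while it was unlocked, so a saved
			 * "next" pointer could be stale.
			 */
			mtx_unlock(lock);
			complete_entry(list, lock, e);
			mtx_lock(lock);
			e = TAILQ_FIRST(list);
		} else {
			e = TAILQ_NEXT(e, tailq);
		}
	}
	mtx_unlock(lock);
}

The second hunk applies the same rule from the caller's side: nvme_admin_qpair_disable() now releases qpair->lock before calling nvme_admin_qpair_abort_aers(), which is what the new MA_UNOWNED assertions check.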