D52440: ufshci: revisit controller reset path and add I/O timeout handling
D52440.id162330.diff (35 KB)
diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c
--- a/sys/dev/ufshci/ufshci_ctrlr.c
+++ b/sys/dev/ufshci/ufshci_ctrlr.c
@@ -12,8 +12,108 @@
#include "ufshci_private.h"
#include "ufshci_reg.h"
+static void
+ufshci_ctrlr_fail(struct ufshci_controller *ctrlr)
+{
+ ctrlr->is_failed = true;
+
+ ufshci_req_queue_fail(ctrlr,
+ ctrlr->task_mgmt_req_queue.qops.get_hw_queue(
+ &ctrlr->task_mgmt_req_queue));
+ ufshci_req_queue_fail(ctrlr,
+ ctrlr->transfer_req_queue.qops.get_hw_queue(
+ &ctrlr->transfer_req_queue));
+}
+
+static void
+ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting)
+{
+ TSENTER();
+
+ /*
+ * If `resetting` is true, we are on the reset path.
+ * Re-enable request queues here because ufshci_ctrlr_reset_task()
+ * disables them during reset.
+ */
+ if (resetting) {
+ if (ufshci_utmr_req_queue_enable(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+ if (ufshci_utr_req_queue_enable(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+ }
+
+ if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize UFS target device */
+ if (ufshci_dev_init(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize Reference Clock */
+ if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize unipro */
+ if (ufshci_dev_init_unipro(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /*
+ * Initialize UIC Power Mode
+ * QEMU UFS devices do not support unipro and power mode.
+ */
+ if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
+ ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize UFS Power Mode */
+ if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Read Controller Descriptor (Device, Geometry) */
+ if (ufshci_dev_get_descriptor(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ if (ufshci_dev_config_write_booster(ctrlr)) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* TODO: Configure Write Protect */
+
+ /* TODO: Configure Background Operations */
+
+ /*
+ * If we are resetting (e.g. due to a timeout), the controller is already
+ * attached to the SIM and does not need to be attached again.
+ */
+ if (!resetting && ufshci_sim_attach(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ TSEXIT();
+}
+
static int
-ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
+ufshci_ctrlr_disable_host_ctrlr(struct ufshci_controller *ctrlr)
{
int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
sbintime_t delta_t = SBT_1US;
@@ -27,6 +127,35 @@
ufshci_mmio_write_4(ctrlr, hce, hce);
}
+ /* Wait for the HCE flag to change */
+ while (1) {
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+ if (!UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
+ break;
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "host controller failed to disable "
+ "within %d ms\n",
+ ctrlr->device_init_timeout_in_ms);
+ return (ENXIO);
+ }
+
+ pause_sbt("ufshci_disable_hce", delta_t, 0, C_PREL(1));
+ delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
+{
+ int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
+ sbintime_t delta_t = SBT_1US;
+ uint32_t hce;
+
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+
/* Enable UFS host controller */
hce |= UFSHCIM(UFSHCI_HCE_REG_HCE);
ufshci_mmio_write_4(ctrlr, hce, hce);
@@ -36,7 +165,7 @@
* unstable, so we need to read the HCE value after some time after
* initialization is complete.
*/
- pause_sbt("ufshci_hce", ustosbt(100), 0, C_PREL(1));
+ pause_sbt("ufshci_enable_hce", ustosbt(100), 0, C_PREL(1));
/* Wait for the HCE flag to change */
while (1) {
@@ -51,17 +180,103 @@
return (ENXIO);
}
- pause_sbt("ufshci_hce", delta_t, 0, C_PREL(1));
+ pause_sbt("ufshci_enable_hce", delta_t, 0, C_PREL(1));
delta_t = min(SBT_1MS, delta_t * 3 / 2);
}
return (0);
}
+static int
+ufshci_ctrlr_disable(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ /* Disable all interrupts */
+ ufshci_mmio_write_4(ctrlr, ie, 0);
+
+ error = ufshci_ctrlr_disable_host_ctrlr(ctrlr);
+ return (error);
+}
+
+static int
+ufshci_ctrlr_enable(struct ufshci_controller *ctrlr)
+{
+ uint32_t ie, hcs;
+ int error;
+
+ error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
+ if (error)
+ return (error);
+
+ /* Send DME_LINKSTARTUP command to start the link startup procedure */
+ error = ufshci_uic_send_dme_link_startup(ctrlr);
+ if (error)
+ return (error);
+
+ /*
+ * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
+ * controller has successfully received a Link Startup UIC command
+ * response and the UFS device has found a physical link to the
+ * controller.
+ */
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
+ ufshci_printf(ctrlr, "UFS device not found\n");
+ return (ENXIO);
+ }
+
+ /* Enable additional interrupts by programming the IE register. */
+ ie = ufshci_mmio_read_4(ctrlr, ie);
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */
+ ufshci_mmio_write_4(ctrlr, ie, ie);
+
+ /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
+
+ return (0);
+}
+
+static int
+ufshci_ctrlr_hw_reset(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ error = ufshci_ctrlr_disable(ctrlr);
+ if (error)
+ return (error);
+
+ error = ufshci_ctrlr_enable(ctrlr);
+ return (error);
+}
+
+static void
+ufshci_ctrlr_reset_task(void *arg, int pending)
+{
+ struct ufshci_controller *ctrlr = arg;
+ int error;
+
+ /* Release resources */
+ ufshci_utmr_req_queue_disable(ctrlr);
+ ufshci_utr_req_queue_disable(ctrlr);
+
+ error = ufshci_ctrlr_hw_reset(ctrlr);
+ if (error)
+ return (ufshci_ctrlr_fail(ctrlr));
+
+ ufshci_ctrlr_start(ctrlr, true);
+}
+
int
ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
{
- uint32_t ver, cap, hcs, ie, ahit;
+ uint32_t ver, cap, ahit;
uint32_t timeout_period, retry_count;
int error;
@@ -114,16 +329,15 @@
TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count);
ctrlr->retry_count = retry_count;
- /* Disable all interrupts */
- ufshci_mmio_write_4(ctrlr, ie, 0);
-
- /* Enable Host Controller */
- error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
- if (error)
- return (error);
+ ctrlr->enable_aborts = 1;
+ if (ctrlr->quirks & UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK)
+ ctrlr->enable_aborts = 0;
+ else
+ TUNABLE_INT_FETCH("hw.ufshci.enable_aborts",
+ &ctrlr->enable_aborts);
- /* Send DME_LINKSTARTUP command to start the link startup procedure */
- error = ufshci_uic_send_dme_link_startup(ctrlr);
+ /* Reset the UFSHCI controller */
+ error = ufshci_ctrlr_hw_reset(ctrlr);
if (error)
return (error);
@@ -134,18 +348,6 @@
ahit = 0;
ufshci_mmio_write_4(ctrlr, ahit, ahit);
- /*
- * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
- * controller has successfully received a Link Startup UIC command
- * response and the UFS device has found a physical link to the
- * controller.
- */
- hcs = ufshci_mmio_read_4(ctrlr, hcs);
- if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
- ufshci_printf(ctrlr, "UFS device not found\n");
- return (ENXIO);
- }
-
/* Allocate and initialize UTP Task Management Request List. */
error = ufshci_utmr_req_queue_construct(ctrlr);
if (error)
@@ -156,27 +358,21 @@
if (error)
return (error);
- /* Enable additional interrupts by programming the IE register. */
- ie = ufshci_mmio_read_4(ctrlr, ie);
- ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */
- ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
- ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */
- ufshci_mmio_write_4(ctrlr, ie, ie);
-
- /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
-
/* TODO: Separate IO and Admin slot */
+
/*
* max_hw_pend_io is the number of slots in the transfer_req_queue.
* Reduce num_entries by one to reserve an admin slot.
*/
ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries - 1;
+ /* Create a thread for the taskqueue. */
+ ctrlr->taskqueue = taskqueue_create("ufshci_taskq", M_WAITOK,
+ taskqueue_thread_enqueue, &ctrlr->taskqueue);
+ taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "ufshci taskq");
+
+ TASK_INIT(&ctrlr->reset_task, 0, ufshci_ctrlr_reset_task, ctrlr);
+
return (0);
}
@@ -208,50 +404,21 @@
bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
ctrlr->resource);
nores:
+ KASSERT(!mtx_owned(&ctrlr->uic_cmd_lock),
+ ("destroying uic_cmd_lock while still owned"));
mtx_destroy(&ctrlr->uic_cmd_lock);
+
+ KASSERT(!mtx_owned(&ctrlr->sc_mtx),
+ ("destroying sc_mtx while still owned"));
mtx_destroy(&ctrlr->sc_mtx);
return;
}
-int
+void
ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
{
- uint32_t ie;
- int error;
-
- /* Backup and disable all interrupts */
- ie = ufshci_mmio_read_4(ctrlr, ie);
- ufshci_mmio_write_4(ctrlr, ie, 0);
-
- /* Release resources */
- ufshci_utmr_req_queue_destroy(ctrlr);
- ufshci_utr_req_queue_destroy(ctrlr);
-
- /* Reset Host Controller */
- error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
- if (error)
- return (error);
-
- /* Send DME_LINKSTARTUP command to start the link startup procedure */
- error = ufshci_uic_send_dme_link_startup(ctrlr);
- if (error)
- return (error);
-
- /* Enable interrupts */
- ufshci_mmio_write_4(ctrlr, ie, ie);
-
- /* Allocate and initialize UTP Task Management Request List. */
- error = ufshci_utmr_req_queue_construct(ctrlr);
- if (error)
- return (error);
-
- /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
- error = ufshci_utr_req_queue_construct(ctrlr);
- if (error)
- return (error);
-
- return (0);
+ taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}
int
@@ -295,84 +462,6 @@
return (0);
}
-static void
-ufshci_ctrlr_fail(struct ufshci_controller *ctrlr, bool admin_also)
-{
- printf("ufshci(4): ufshci_ctrlr_fail\n");
-
- ctrlr->is_failed = true;
-
- /* TODO: task_mgmt_req_queue should be handled as fail */
-
- ufshci_req_queue_fail(ctrlr,
- &ctrlr->transfer_req_queue.hwq[UFSHCI_SDB_Q]);
-}
-
-static void
-ufshci_ctrlr_start(struct ufshci_controller *ctrlr)
-{
- TSENTER();
-
- if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize UFS target drvice */
- if (ufshci_dev_init(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize Reference Clock */
- if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize unipro */
- if (ufshci_dev_init_unipro(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /*
- * Initialize UIC Power Mode
- * QEMU UFS devices do not support unipro and power mode.
- */
- if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
- ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize UFS Power Mode */
- if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Read Controller Descriptor (Device, Geometry) */
- if (ufshci_dev_get_descriptor(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- if (ufshci_dev_config_write_booster(ctrlr)) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* TODO: Configure Background Operations */
-
- if (ufshci_sim_attach(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- TSEXIT();
-}
-
void
ufshci_ctrlr_start_config_hook(void *arg)
{
@@ -382,9 +471,9 @@
if (ufshci_utmr_req_queue_enable(ctrlr) == 0 &&
ufshci_utr_req_queue_enable(ctrlr) == 0)
- ufshci_ctrlr_start(ctrlr);
+ ufshci_ctrlr_start(ctrlr, false);
else
- ufshci_ctrlr_fail(ctrlr, false);
+ ufshci_ctrlr_fail(ctrlr);
ufshci_sysctl_initialize_ctrlr(ctrlr);
config_intrhook_disestablish(&ctrlr->config_hook);
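
The hunks above move the controller reset out of the caller's context and onto a driver taskqueue, so the slow re-initialization work runs in a thread that may sleep while the watchdog or interrupt path only schedules it. A minimal sketch of that taskqueue(9) deferral pattern follows, using the same kernel calls the patch relies on; the foo_* names and softc layout are placeholders, not driver code.

/* Illustrative sketch (not part of the patch): deferring reset work. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

struct foo_softc {
	struct taskqueue	*taskqueue;
	struct task		reset_task;
};

/* Runs in the taskqueue thread; a safe place for slow recovery work. */
static void
foo_reset_task(void *arg, int pending __unused)
{
	struct foo_softc *sc = arg;

	/* disable queues, reset the hardware, re-initialize the device ... */
	(void)sc;
}

static void
foo_attach_reset_task(struct foo_softc *sc)
{
	sc->taskqueue = taskqueue_create("foo_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->taskqueue);
	taskqueue_start_threads(&sc->taskqueue, 1, PI_DISK, "foo taskq");
	TASK_INIT(&sc->reset_task, 0, foo_reset_task, sc);
}

/* Interrupt or timeout context only schedules the work and returns. */
static void
foo_request_reset(struct foo_softc *sc)
{
	taskqueue_enqueue(sc->taskqueue, &sc->reset_task);
}

Deferring the reset this way is what lets ufshci_ctrlr_reset() become a void, non-sleeping entry point that the timeout callout can call safely.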
diff --git a/sys/dev/ufshci/ufshci_ctrlr_cmd.c b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
--- a/sys/dev/ufshci/ufshci_ctrlr_cmd.c
+++ b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
@@ -15,7 +15,7 @@
struct ufshci_request *req;
struct ufshci_task_mgmt_request_upiu *upiu;
- req = ufshci_allocate_request_vaddr(NULL, 0, M_WAITOK, cb_fn, cb_arg);
+ req = ufshci_allocate_request_vaddr(NULL, 0, M_NOWAIT, cb_fn, cb_arg);
req->request_size = sizeof(struct ufshci_task_mgmt_request_upiu);
req->response_size = sizeof(struct ufshci_task_mgmt_response_upiu);
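
The change above switches the task-management request allocation from M_WAITOK to M_NOWAIT so it can be issued from contexts that must not sleep, such as the timeout path added later in this diff. With malloc(9), an M_NOWAIT allocation may fail, so such callers generally need a NULL check; a small generic sketch of that pattern (foo_* names are illustrative only, not the driver's API):

/* Illustrative sketch (not part of the patch): tolerating M_NOWAIT failure. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

MALLOC_DEFINE(M_FOO, "foo", "foo driver allocations");

struct foo_request {
	void	(*cb_fn)(void *);
	void	*cb_arg;
};

static struct foo_request *
foo_alloc_request(void (*cb_fn)(void *), void *cb_arg)
{
	struct foo_request *req;

	/* M_NOWAIT: may return NULL instead of sleeping for memory. */
	req = malloc(sizeof(*req), M_FOO, M_NOWAIT | M_ZERO);
	if (req == NULL)
		return (NULL);	/* caller must handle the allocation failure */
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;
	return (req);
}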
diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c
--- a/sys/dev/ufshci/ufshci_dev.c
+++ b/sys/dev/ufshci/ufshci_dev.c
@@ -774,4 +774,3 @@
ufshci_dev_disable_write_booster(ctrlr);
return (error);
}
-
diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c
--- a/sys/dev/ufshci/ufshci_pci.c
+++ b/sys/dev/ufshci/ufshci_pci.c
@@ -49,7 +49,8 @@
uint32_t ref_clk;
uint32_t quirks;
} pci_ids[] = { { 0x131b36, "QEMU UFS Host Controller", UFSHCI_REF_CLK_19_2MHz,
- UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE },
+ UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE |
+ UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK },
{ 0x98fa8086, "Intel Lakefield UFS Host Controller",
UFSHCI_REF_CLK_19_2MHz,
UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE |
diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h
--- a/sys/dev/ufshci/ufshci_private.h
+++ b/sys/dev/ufshci/ufshci_private.h
@@ -68,7 +68,6 @@
bool is_admin;
int32_t retries;
bool payload_valid;
- bool timeout;
bool spare[2]; /* Future use */
STAILQ_ENTRY(ufshci_request) stailq;
};
@@ -82,6 +81,7 @@
};
struct ufshci_tracker {
+ TAILQ_ENTRY(ufshci_tracker) tailq;
struct ufshci_request *req;
struct ufshci_req_queue *req_queue;
struct ufshci_hw_queue *hwq;
@@ -121,6 +121,8 @@
struct ufshci_req_queue *req_queue);
int (*enable)(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
+ void (*disable)(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
int (*reserve_slot)(struct ufshci_req_queue *req_queue,
struct ufshci_tracker **tr);
int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue,
@@ -137,16 +139,27 @@
#define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */
+enum ufshci_recovery {
+ RECOVERY_NONE = 0, /* Normal operations */
+ RECOVERY_WAITING, /* waiting for the reset to complete */
+};
+
/*
* Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ
* (ring buffer) modes. Fields are shared; some such as sq_head, sq_tail and
* cq_head are not used in SDB but used in MCQ.
*/
struct ufshci_hw_queue {
+ struct ufshci_controller *ctrlr;
+ struct ufshci_req_queue *req_queue;
uint32_t id;
int domain;
int cpu;
+ struct callout timer; /* recovery lock */
+ bool timer_armed; /* recovery lock */
+ enum ufshci_recovery recovery_state; /* recovery lock */
+
union {
struct ufshci_utp_xfer_req_desc *utrd;
struct ufshci_utp_task_mgmt_req_desc *utmrd;
@@ -161,6 +174,9 @@
uint32_t num_entries;
uint32_t num_trackers;
+ TAILQ_HEAD(, ufshci_tracker) free_tr;
+ TAILQ_HEAD(, ufshci_tracker) outstanding_tr;
+
/*
* A Request List using the single doorbell method uses a dedicated
* ufshci_tracker, one per slot.
@@ -177,7 +193,13 @@
int64_t num_retries;
int64_t num_failures;
+ /*
+ * Each lock may be acquired independently.
+ * When both are required, acquire them in this order to avoid
+ * deadlocks. (recovery_lock -> qlock)
+ */
struct mtx_padalign qlock;
+ struct mtx_padalign recovery_lock;
};
struct ufshci_req_queue {
@@ -242,6 +264,9 @@
4 /* Need to wait 1250us after power mode change */
#define UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY \
8 /* Need to change the number of lanes before changing HS-GEAR. */
+#define UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK \
+ 16 /* QEMU does not support Task Management Request */
+
uint32_t ref_clk;
struct cam_sim *ufshci_sim;
@@ -264,6 +289,9 @@
/* Fields for tracking progress during controller initialization. */
struct intr_config_hook config_hook;
+ struct task reset_task;
+ struct taskqueue *taskqueue;
+
/* For shared legacy interrupt. */
int rid;
struct resource *res;
@@ -272,6 +300,8 @@
uint32_t major_version;
uint32_t minor_version;
+ uint32_t enable_aborts;
+
uint32_t num_io_queues;
uint32_t max_hw_pend_io;
@@ -345,7 +375,7 @@
/* Controller */
int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev);
void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev);
-int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
+void ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void ufshci_ctrlr_start_config_hook(void *arg);
void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);
@@ -388,7 +418,9 @@
int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr);
void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr);
void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr);
+void ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr);
int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr);
+void ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr);
int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr);
void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
struct ufshci_hw_queue *hwq);
@@ -404,6 +436,8 @@
struct ufshci_req_queue *req_queue);
struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue(
struct ufshci_req_queue *req_queue);
+void ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
@@ -489,13 +523,12 @@
struct ufshci_request *req;
KASSERT(how == M_WAITOK || how == M_NOWAIT,
- ("nvme_allocate_request: invalid how %d", how));
+ ("ufshci_allocate_request: invalid how %d", how));
req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO);
if (req != NULL) {
req->cb_fn = cb_fn;
req->cb_arg = cb_arg;
- req->timeout = true;
}
return (req);
}
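
struct ufshci_tracker now carries a TAILQ_ENTRY, and each hardware queue keeps free_tr and outstanding_tr lists so the new watchdog can walk outstanding trackers in submission order. A minimal queue(3) sketch of that tracker lifecycle, using illustrative foo_* types rather than the driver's:

/* Illustrative sketch (not part of the patch): free/outstanding tracker lists. */
#include <sys/queue.h>

struct foo_tracker {
	TAILQ_ENTRY(foo_tracker) tailq;
};

TAILQ_HEAD(foo_tr_list, foo_tracker);

struct foo_queue {
	struct foo_tr_list free_tr;
	struct foo_tr_list outstanding_tr;
};

static void
foo_queue_init(struct foo_queue *q)
{
	TAILQ_INIT(&q->free_tr);
	TAILQ_INIT(&q->outstanding_tr);
}

/* Submission: take a tracker off the free list and append it to the
 * outstanding list, so a watchdog can walk trackers in submission order. */
static struct foo_tracker *
foo_tracker_submit(struct foo_queue *q)
{
	struct foo_tracker *tr;

	tr = TAILQ_FIRST(&q->free_tr);
	if (tr == NULL)
		return (NULL);
	TAILQ_REMOVE(&q->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&q->outstanding_tr, tr, tailq);
	return (tr);
}

/* Completion: move the tracker back to the head of the free list. */
static void
foo_tracker_complete(struct foo_queue *q, struct foo_tracker *tr)
{
	TAILQ_REMOVE(&q->outstanding_tr, tr, tailq);
	TAILQ_INSERT_HEAD(&q->free_tr, tr, tailq);
}

Keeping outstanding trackers in submission order is what allows the timeout handler to stop scanning at the first tracker whose deadline has not yet passed.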
diff --git a/sys/dev/ufshci/ufshci_req_queue.c b/sys/dev/ufshci/ufshci_req_queue.c
--- a/sys/dev/ufshci/ufshci_req_queue.c
+++ b/sys/dev/ufshci/ufshci_req_queue.c
@@ -24,6 +24,7 @@
.destroy = ufshci_req_sdb_destroy,
.get_hw_queue = ufshci_req_sdb_get_hw_queue,
.enable = ufshci_req_sdb_enable,
+ .disable = ufshci_req_sdb_disable,
.reserve_slot = ufshci_req_sdb_reserve_slot,
.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
.ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
@@ -38,6 +39,7 @@
.destroy = ufshci_req_sdb_destroy,
.get_hw_queue = ufshci_req_sdb_get_hw_queue,
.enable = ufshci_req_sdb_enable,
+ .disable = ufshci_req_sdb_disable,
.reserve_slot = ufshci_req_sdb_reserve_slot,
.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
.ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
@@ -74,6 +76,13 @@
&ctrlr->task_mgmt_req_queue);
}
+void
+ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr)
+{
+ ctrlr->task_mgmt_req_queue.qops.disable(ctrlr,
+ &ctrlr->task_mgmt_req_queue);
+}
+
int
ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
{
@@ -109,6 +118,13 @@
&ctrlr->transfer_req_queue);
}
+void
+ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr)
+{
+ ctrlr->transfer_req_queue.qops.disable(ctrlr,
+ &ctrlr->transfer_req_queue);
+}
+
int
ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
{
@@ -226,31 +242,30 @@
ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
{
struct ufshci_req_queue *req_queue = tr->req_queue;
+ struct ufshci_hw_queue *hwq = tr->hwq;
struct ufshci_request *req = tr->req;
struct ufshci_completion cpl;
uint8_t ocs;
bool retry, error, retriable;
- mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);
+ mtx_assert(&hwq->qlock, MA_NOTOWNED);
/* Copy the response from the Request Descriptor or UTP Command
* Descriptor. */
+ cpl.size = tr->response_size;
if (req_queue->is_task_mgmt) {
- cpl.size = tr->response_size;
memcpy(&cpl.response_upiu,
- (void *)tr->hwq->utmrd[tr->slot_num].response_upiu,
- cpl.size);
+ (void *)hwq->utmrd[tr->slot_num].response_upiu, cpl.size);
- ocs = tr->hwq->utmrd[tr->slot_num].overall_command_status;
+ ocs = hwq->utmrd[tr->slot_num].overall_command_status;
} else {
bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- cpl.size = tr->response_size;
memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
cpl.size);
- ocs = tr->hwq->utrd[tr->slot_num].overall_command_status;
+ ocs = hwq->utrd[tr->slot_num].overall_command_status;
}
error = ufshci_req_queue_response_is_error(req_queue, ocs,
@@ -262,9 +277,9 @@
retry = error && retriable &&
req->retries < req_queue->ctrlr->retry_count;
if (retry)
- tr->hwq->num_retries++;
+ hwq->num_retries++;
if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
- tr->hwq->num_failures++;
+ hwq->num_failures++;
KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
KASSERT(cpl.response_upiu.header.task_tag ==
@@ -282,7 +297,7 @@
req->cb_fn(req->cb_arg, &cpl, error);
}
- mtx_lock(&tr->hwq->qlock);
+ mtx_lock(&hwq->qlock);
/* Clear the UTRL Completion Notification register */
req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);
@@ -301,6 +316,9 @@
ufshci_free_request(req);
tr->req = NULL;
tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+
+ TAILQ_REMOVE(&hwq->outstanding_tr, tr, tailq);
+ TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
}
mtx_unlock(&tr->hwq->qlock);
@@ -309,7 +327,16 @@
bool
ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
{
- return (req_queue->qops.process_cpl(req_queue));
+ struct ufshci_hw_queue *hwq;
+ bool done;
+
+ hwq = req_queue->qops.get_hw_queue(req_queue);
+
+ mtx_lock(&hwq->recovery_lock);
+ done = req_queue->qops.process_cpl(req_queue);
+ mtx_unlock(&hwq->recovery_lock);
+
+ return (done);
}
static void
@@ -427,6 +454,225 @@
desc->prdt_length = prdt_entry_cnt;
}
+static void
+ufshci_req_queue_timeout_recovery(struct ufshci_controller *ctrlr,
+ struct ufshci_hw_queue *hwq)
+{
+ /* TODO: Step 2. Logical unit reset */
+ /* TODO: Step 3. Target device reset */
+ /* TODO: Step 4. Bus reset */
+
+ /*
+ * Step 5. All previous commands have timed out.
+ * Recovery failed; reset the host controller.
+ */
+ ufshci_printf(ctrlr,
+ "Recovery step 5: Resetting controller due to a timeout.\n");
+ hwq->recovery_state = RECOVERY_WAITING;
+
+ ufshci_ctrlr_reset(ctrlr);
+}
+
+static void
+ufshci_abort_complete(void *arg, const struct ufshci_completion *status,
+ bool error)
+{
+ struct ufshci_tracker *tr = arg;
+
+ /*
+ * We still need to check the active tracker array, to cover the race
+ * where an I/O timed out at the same time the controller was
+ * completing it. An abort request is always issued on the Task
+ * Management Request queue, but it affects either a Task Management
+ * Request or an I/O (UTRL) queue, so
+ * take the appropriate queue lock for the original command's queue,
+ * since we'll need it to avoid races with the completion code and to
+ * complete the command manually.
+ */
+ mtx_lock(&tr->hwq->qlock);
+ if (tr->slot_state != UFSHCI_SLOT_STATE_FREE) {
+ mtx_unlock(&tr->hwq->qlock);
+ /*
+ * An I/O has timed out, and the controller was unable to abort
+ * it for some reason. And we've not processed a completion for
+ * it yet. Construct a fake completion status, and then complete
+ * the I/O's tracker manually.
+ */
+ ufshci_printf(tr->hwq->ctrlr,
+ "abort task request failed, aborting task manually\n");
+ ufshci_req_queue_manual_complete_tracker(tr,
+ UFSHCI_DESC_ABORTED, UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
+
+ if ((status->response_upiu.task_mgmt_response_upiu
+ .output_param1 ==
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE) ||
+ (status->response_upiu.task_mgmt_response_upiu
+ .output_param1 ==
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED)) {
+ ufshci_printf(tr->hwq->ctrlr,
+ "Warning: the abort task request completed \
+ successfully, but the original task is still incomplete.");
+ return;
+ }
+
+ /* Abort Task failed. Perform recovery steps 2-5 */
+ ufshci_req_queue_timeout_recovery(tr->hwq->ctrlr, tr->hwq);
+ } else {
+ mtx_unlock(&tr->hwq->qlock);
+ }
+}
+
+static void
+ufshci_req_queue_timeout(void *arg)
+{
+ struct ufshci_hw_queue *hwq = arg;
+ struct ufshci_controller *ctrlr = hwq->ctrlr;
+ struct ufshci_tracker *tr;
+ sbintime_t now;
+ bool idle = true;
+ bool fast;
+
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+
+ /*
+ * If the controller is failed, then stop polling. This ensures that any
+ * failure processing that races with the hwq timeout will fail safely.
+ */
+ if (ctrlr->is_failed) {
+ ufshci_printf(ctrlr,
+ "Failed controller, stopping watchdog timeout.\n");
+ hwq->timer_armed = false;
+ return;
+ }
+
+ /*
+ * Shutdown condition: We set hwq->timer_armed to false in
+ * ufshci_req_sdb_destroy before calling callout_drain. When we call
+ * that, this routine might get called one last time. Exit w/o setting a
+ * timeout. None of the watchdog stuff needs to be done since we're
+ * destroying the hwq.
+ */
+ if (!hwq->timer_armed) {
+ ufshci_printf(ctrlr,
+ "Timeout fired during ufshci_utr_req_queue_destroy\n");
+ return;
+ }
+
+ switch (hwq->recovery_state) {
+ case RECOVERY_NONE:
+ /*
+ * See if there's any recovery needed. First, do a fast check to
+ * see if anything could have timed out. If not, then skip
+ * everything else.
+ */
+ fast = false;
+ mtx_lock(&hwq->qlock);
+ now = getsbinuptime();
+ TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
+ /*
+ * If the oldest outstanding transaction has not timed out,
+ * we're done. Otherwise, we try recovery.
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ fast = true;
+ break;
+ }
+ mtx_unlock(&hwq->qlock);
+ if (idle || fast)
+ break;
+
+ /*
+ * There's a stale transaction at the start of the queue whose
+ * deadline has passed. Poll the completions as a last-ditch
+ * effort in case an interrupt has been missed.
+ */
+ hwq->req_queue->qops.process_cpl(hwq->req_queue);
+
+ /*
+ * Now that we've run the ISR, re-check to see if there are any
+ * timed-out commands and abort them or reset the controller if so.
+ */
+ mtx_lock(&hwq->qlock);
+ idle = true;
+ TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
+ /*
+ * If we know this tracker hasn't timed out, we also
+ * know all subsequent ones haven't timed out. The tr
+ * queue is in submission order and all normal commands
+ * in a queue have the same timeout (or the timeout was
+ * changed by the user, in which case we still time out eventually).
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ break;
+
+ /*
+ * Timeout recovery is performed in five steps. If
+ * recovery fails at any step, the process continues
+ * with the next one:
+ * Step 1. Abort task
+ * Step 2. Logical unit reset (TODO)
+ * Step 3. Target device reset (TODO)
+ * Step 4. Bus reset (TODO)
+ * Step 5. Host controller reset
+ *
+ * If the timeout occurred in the Task Management
+ * Request queue, ignore Step 1.
+ */
+ if (ctrlr->enable_aborts &&
+ !hwq->req_queue->is_task_mgmt &&
+ tr->req->cb_fn != ufshci_abort_complete) {
+ /*
+ * Step 1. Timeout expired, abort the task.
+ *
+ * This isn't an abort command, ask for a
+ * hardware abort. This goes to the Task
+ * Management Request queue which will reset the
+ * task if it times out.
+ */
+ ufshci_printf(ctrlr,
+ "Recovery step 1: Timeout occurred. aborting the task(%d).\n",
+ tr->req->request_upiu.header.task_tag);
+ ufshci_ctrlr_cmd_send_task_mgmt_request(ctrlr,
+ ufshci_abort_complete, tr,
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK,
+ tr->req->request_upiu.header.lun,
+ tr->req->request_upiu.header.task_tag, 0);
+ } else {
+ /* Recovery Step 2-5 */
+ ufshci_req_queue_timeout_recovery(ctrlr, hwq);
+ idle = false;
+ break;
+ }
+ }
+ mtx_unlock(&hwq->qlock);
+ break;
+
+ case RECOVERY_WAITING:
+ /*
+ * These messages aren't interesting while we're suspended. We
+ * put the queues into waiting state while suspending.
+ * Suspending takes a while, so we'll see these during that time
+ * and they aren't diagnostic. At other times, they indicate a
+ * problem that's worth complaining about.
+ */
+ if (!device_is_suspended(ctrlr->dev))
+ ufshci_printf(ctrlr, "Waiting for reset to complete\n");
+ idle = false; /* We want to keep polling */
+ break;
+ }
+
+ /*
+ * Rearm the timeout.
+ */
+ if (!idle) {
+ callout_schedule_sbt(&hwq->timer, SBT_1S / 2, SBT_1S / 2, 0);
+ } else {
+ hwq->timer_armed = false;
+ }
+}
+
/*
* Submit the tracker to the hardware.
*/
@@ -436,13 +682,30 @@
{
struct ufshci_controller *ctrlr = req_queue->ctrlr;
struct ufshci_request *req = tr->req;
+ struct ufshci_hw_queue *hwq;
uint64_t ucd_paddr;
uint16_t request_len, response_off, response_len;
uint8_t slot_num = tr->slot_num;
+ int timeout;
- mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);
+ hwq = req_queue->qops.get_hw_queue(req_queue);
+
+ mtx_assert(&hwq->qlock, MA_OWNED);
- /* TODO: Check timeout */
+ if (req->cb_fn == ufshci_completion_poll_cb)
+ timeout = 1;
+ else
+ timeout = ctrlr->timeout_period;
+ tr->deadline = getsbinuptime() + timeout * SBT_1S;
+ if (!hwq->timer_armed) {
+ hwq->timer_armed = true;
+ /*
+ * The watchdog wakes up every 0.5 seconds to check whether the
+ * deadline has passed.
+ */
+ callout_reset_sbt_on(&hwq->timer, SBT_1S / 2, SBT_1S / 2,
+ ufshci_req_queue_timeout, hwq, hwq->cpu, 0);
+ }
if (req_queue->is_task_mgmt) {
/* Prepare UTP Task Management Request Descriptor. */
@@ -508,6 +771,9 @@
tr->deadline = SBT_MAX;
tr->req = req;
+ TAILQ_REMOVE(&tr->hwq->free_tr, tr, tailq);
+ TAILQ_INSERT_TAIL(&tr->hwq->outstanding_tr, tr, tailq);
+
ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);
return (0);
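
The submit path above stamps each tracker with a deadline (getsbinuptime() + timeout * SBT_1S) and arms a per-queue callout that fires every half second to compare the current time against the oldest deadline. A stripped-down sketch of that callout(9) watchdog pattern follows, assuming the timer was set up with callout_init_mtx() on the queue lock; the foo_* names are placeholders, not the driver's.

/* Illustrative sketch (not part of the patch): a half-second deadline watchdog. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <sys/time.h>

struct foo_queue {
	struct mtx	lock;
	struct callout	timer;		/* initialized with callout_init_mtx(&timer, &lock, 0) */
	bool		timer_armed;
	sbintime_t	deadline;	/* deadline of the oldest outstanding command */
};

/* Callout handler; the associated mutex is held on entry. */
static void
foo_watchdog(void *arg)
{
	struct foo_queue *q = arg;

	mtx_assert(&q->lock, MA_OWNED);
	if (!q->timer_armed)
		return;			/* being torn down, do not rearm */
	if (getsbinuptime() > q->deadline) {
		/* deadline passed: abort/reset recovery would start here */
	}
	callout_schedule_sbt(&q->timer, SBT_1S / 2, SBT_1S / 2, 0);
}

/* Called on submission with the queue lock held: stamp the deadline and
 * arm the watchdog only if it is not already running. */
static void
foo_arm_watchdog(struct foo_queue *q, int timeout_sec)
{
	q->deadline = getsbinuptime() + timeout_sec * SBT_1S;
	if (!q->timer_armed) {
		q->timer_armed = true;
		callout_reset_sbt(&q->timer, SBT_1S / 2, SBT_1S / 2,
		    foo_watchdog, q, 0);
	}
}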
diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c
--- a/sys/dev/ufshci/ufshci_req_sdb.c
+++ b/sys/dev/ufshci/ufshci_req_sdb.c
@@ -40,6 +40,8 @@
bus_dma_tag_destroy(req_queue->dma_tag_ucd);
req_queue->dma_tag_ucd = NULL;
}
+
+ free(req_queue->hwq->ucd_bus_addr, M_UFSHCI);
}
static void
@@ -74,6 +76,10 @@
uint8_t *ucdmem;
int i, error;
+ req_queue->hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
+ req_queue->num_trackers,
+ M_UFSHCI, M_ZERO | M_NOWAIT);
+
/*
* Each component must be page aligned, and individual PRP lists
* cannot cross a page boundary.
@@ -152,6 +158,9 @@
uint64_t queuemem_phys;
uint8_t *queuemem;
struct ufshci_tracker *tr;
+ const size_t lock_name_len = 32;
+ char qlock_name[lock_name_len], recovery_lock_name[lock_name_len];
+ char *base;
int i, error;
req_queue->ctrlr = ctrlr;
@@ -169,11 +178,21 @@
hwq = &req_queue->hwq[UFSHCI_SDB_Q];
hwq->num_entries = req_queue->num_entries;
hwq->num_trackers = req_queue->num_trackers;
- req_queue->hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
- req_queue->num_trackers,
- M_UFSHCI, M_ZERO | M_NOWAIT);
+ hwq->ctrlr = ctrlr;
+ hwq->req_queue = req_queue;
+
+ base = is_task_mgmt ? "ufshci utmrq" : "ufshci utrq";
+ snprintf(qlock_name, sizeof(qlock_name), "%s #%d lock", base,
+ UFSHCI_SDB_Q);
+ snprintf(recovery_lock_name, sizeof(recovery_lock_name),
+ "%s #%d recovery lock", base, UFSHCI_SDB_Q);
- mtx_init(&hwq->qlock, "ufshci req_queue lock", NULL, MTX_DEF);
+ mtx_init(&hwq->qlock, qlock_name, NULL, MTX_DEF);
+ mtx_init(&hwq->recovery_lock, recovery_lock_name, NULL, MTX_DEF);
+
+ callout_init_mtx(&hwq->timer, &hwq->recovery_lock, 0);
+ hwq->timer_armed = false;
+ hwq->recovery_state = RECOVERY_WAITING;
/*
* Allocate physical memory for request queue (UTP Transfer Request
@@ -219,6 +238,9 @@
req_queue->num_entries,
M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+ TAILQ_INIT(&hwq->free_tr);
+ TAILQ_INIT(&hwq->outstanding_tr);
+
for (i = 0; i < req_queue->num_trackers; i++) {
tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
@@ -226,6 +248,7 @@
tr->req_queue = req_queue;
tr->slot_num = i;
tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+ TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
hwq->act_tr[i] = tr;
}
@@ -255,8 +278,6 @@
ctrlr) != 0) {
ufshci_printf(ctrlr,
"failed to construct cmd descriptor memory\n");
- bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
- hwq->queuemem_map);
goto out;
}
@@ -280,6 +301,11 @@
struct ufshci_tracker *tr;
int i;
+ mtx_lock(&hwq->recovery_lock);
+ hwq->timer_armed = false;
+ mtx_unlock(&hwq->recovery_lock);
+ callout_drain(&hwq->timer);
+
if (!req_queue->is_task_mgmt)
ufshci_req_sdb_cmd_desc_destroy(&ctrlr->transfer_req_queue);
@@ -305,10 +331,11 @@
hwq->dma_tag_queue = NULL;
}
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_destroy(&hwq->recovery_lock);
if (mtx_initialized(&hwq->qlock))
mtx_destroy(&hwq->qlock);
- free(req_queue->hwq->ucd_bus_addr, M_UFSHCI);
free(req_queue->hwq, M_UFSHCI);
}
@@ -318,10 +345,36 @@
return &req_queue->hwq[UFSHCI_SDB_Q];
}
+void
+ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr, *tr_temp;
+
+ mtx_lock(&hwq->recovery_lock);
+ mtx_lock(&hwq->qlock);
+
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+ if (mtx_initialized(&hwq->qlock))
+ mtx_assert(&hwq->qlock, MA_OWNED);
+
+ hwq->recovery_state = RECOVERY_WAITING;
+ TAILQ_FOREACH_SAFE(tr, &hwq->outstanding_tr, tailq, tr_temp) {
+ tr->deadline = SBT_MAX;
+ }
+
+ mtx_unlock(&hwq->qlock);
+ mtx_unlock(&hwq->recovery_lock);
+}
+
int
ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue)
{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+
if (req_queue->is_task_mgmt) {
uint32_t hcs, utmrldbr, utmrlrsr;
@@ -373,6 +426,14 @@
ufshci_mmio_write_4(ctrlr, utrlrsr, utrlrsr);
}
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+ if (mtx_initialized(&hwq->qlock))
+ mtx_assert(&hwq->qlock, MA_OWNED);
+ KASSERT(!req_queue->ctrlr->is_failed, ("Enabling a failed hwq\n"));
+
+ hwq->recovery_state = RECOVERY_NONE;
+
return (0);
}
@@ -466,6 +527,8 @@
uint8_t slot;
bool done = false;
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+
hwq->num_intr_handler_calls++;
bus_dmamap_sync(hwq->dma_tag_queue, hwq->queuemem_map,
diff --git a/sys/dev/ufshci/ufshci_sim.c b/sys/dev/ufshci/ufshci_sim.c
--- a/sys/dev/ufshci/ufshci_sim.c
+++ b/sys/dev/ufshci/ufshci_sim.c
@@ -241,7 +241,6 @@
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_ABORT:
- /* TODO: Implement Task Management CMD*/
ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
break;
case XPT_SET_TRAN_SETTINGS: