diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c index 494313df95de..ce0da4cab907 100644 --- a/sys/dev/ufshci/ufshci_ctrlr.c +++ b/sys/dev/ufshci/ufshci_ctrlr.c @@ -1,678 +1,682 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. * Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #include #include #include #include "ufshci_private.h" #include "ufshci_reg.h" static void ufshci_ctrlr_fail(struct ufshci_controller *ctrlr) { ctrlr->is_failed = true; ufshci_req_queue_fail(ctrlr, ctrlr->task_mgmt_req_queue.qops.get_hw_queue( &ctrlr->task_mgmt_req_queue)); ufshci_req_queue_fail(ctrlr, ctrlr->transfer_req_queue.qops.get_hw_queue( &ctrlr->transfer_req_queue)); } static void ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting) { TSENTER(); /* * If `resetting` is true, we are on the reset path. * Re-enable request queues here because ufshci_ctrlr_reset_task() * disables them during reset. */ if (resetting) { if (ufshci_utmr_req_queue_enable(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr); return; } if (ufshci_utr_req_queue_enable(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr); return; } } if (ufshci_ctrlr_send_nop(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr); return; } /* Initialize UFS target drvice */ if (ufshci_dev_init(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr); return; } /* Initialize Reference Clock */ if (ufshci_dev_init_reference_clock(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr); return; } /* Initialize unipro */ if (ufshci_dev_init_unipro(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr); return; } /* * Initialize UIC Power Mode * QEMU UFS devices do not support unipro and power mode. */ if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) && ufshci_dev_init_uic_power_mode(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr); return; } ufshci_dev_init_uic_link_state(ctrlr); /* Read Controller Descriptor (Device, Geometry) */ if (ufshci_dev_get_descriptor(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr); return; } if (ufshci_dev_config_write_booster(ctrlr)) { ufshci_ctrlr_fail(ctrlr); return; } + ufshci_dev_init_auto_hibernate(ctrlr); + /* TODO: Configure Write Protect */ /* TODO: Configure Background Operations */ /* * If the reset is due to a timeout, it is already attached to the SIM * and does not need to be attached again. */ if (!resetting && ufshci_sim_attach(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr); return; } /* Initialize UFS Power Mode */ if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr); return; } TSEXIT(); } static int ufshci_ctrlr_disable_host_ctrlr(struct ufshci_controller *ctrlr) { int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms); sbintime_t delta_t = SBT_1US; uint32_t hce; hce = ufshci_mmio_read_4(ctrlr, hce); /* If UFS host controller is already enabled, disable it. 
*/ if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) { hce &= ~UFSHCIM(UFSHCI_HCE_REG_HCE); ufshci_mmio_write_4(ctrlr, hce, hce); } /* Wait for the HCE flag to change */ while (1) { hce = ufshci_mmio_read_4(ctrlr, hce); if (!UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) break; if (timeout - ticks < 0) { ufshci_printf(ctrlr, "host controller failed to disable " "within %d ms\n", ctrlr->device_init_timeout_in_ms); return (ENXIO); } pause_sbt("ufshci_disable_hce", delta_t, 0, C_PREL(1)); delta_t = min(SBT_1MS, delta_t * 3 / 2); } return (0); } static int ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr) { int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms); sbintime_t delta_t = SBT_1US; uint32_t hce; hce = ufshci_mmio_read_4(ctrlr, hce); /* Enable UFS host controller */ hce |= UFSHCIM(UFSHCI_HCE_REG_HCE); ufshci_mmio_write_4(ctrlr, hce, hce); /* * During the controller initialization, the value of the HCE bit is * unstable, so we need to read the HCE value after some time after * initialization is complete. */ pause_sbt("ufshci_enable_hce", ustosbt(100), 0, C_PREL(1)); /* Wait for the HCE flag to change */ while (1) { hce = ufshci_mmio_read_4(ctrlr, hce); if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) break; if (timeout - ticks < 0) { ufshci_printf(ctrlr, "host controller failed to enable " "within %d ms\n", ctrlr->device_init_timeout_in_ms); return (ENXIO); } pause_sbt("ufshci_enable_hce", delta_t, 0, C_PREL(1)); delta_t = min(SBT_1MS, delta_t * 3 / 2); } return (0); } int ufshci_ctrlr_disable(struct ufshci_controller *ctrlr) { int error; /* Disable all interrupts */ ufshci_mmio_write_4(ctrlr, ie, 0); error = ufshci_ctrlr_disable_host_ctrlr(ctrlr); return (error); } static int ufshci_ctrlr_enable(struct ufshci_controller *ctrlr) { uint32_t ie, hcs; int error; error = ufshci_ctrlr_enable_host_ctrlr(ctrlr); if (error) return (error); /* Send DME_LINKSTARTUP command to start the link startup procedure */ error = ufshci_uic_send_dme_link_startup(ctrlr); if (error) return (error); /* * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host * controller has successfully received a Link Startup UIC command * response and the UFS device has found a physical link to the * controller. */ hcs = ufshci_mmio_read_4(ctrlr, hcs); if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) { ufshci_printf(ctrlr, "UFS device not found\n"); return (ENXIO); } /* Enable additional interrupts by programming the IE register. 
*/ ie = ufshci_mmio_read_4(ctrlr, ie); ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */ ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */ ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */ ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */ ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */ ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */ ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */ ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */ ufshci_mmio_write_4(ctrlr, ie, ie); /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */ return (0); } static int ufshci_ctrlr_hw_reset(struct ufshci_controller *ctrlr) { int error; error = ufshci_ctrlr_disable(ctrlr); if (error) return (error); error = ufshci_ctrlr_enable(ctrlr); return (error); } static void ufshci_ctrlr_reset_task(void *arg, int pending) { struct ufshci_controller *ctrlr = arg; int error; /* Release resources */ ufshci_utmr_req_queue_disable(ctrlr); ufshci_utr_req_queue_disable(ctrlr); error = ufshci_ctrlr_hw_reset(ctrlr); if (error) return (ufshci_ctrlr_fail(ctrlr)); ufshci_ctrlr_start(ctrlr, true); } int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev) { uint32_t ver, cap, ahit; uint32_t timeout_period, retry_count; int error; ctrlr->device_init_timeout_in_ms = UFSHCI_DEVICE_INIT_TIMEOUT_MS; ctrlr->uic_cmd_timeout_in_ms = UFSHCI_UIC_CMD_TIMEOUT_MS; ctrlr->dev = dev; ctrlr->sc_unit = device_get_unit(dev); snprintf(ctrlr->sc_name, sizeof(ctrlr->sc_name), "%s", device_get_nameunit(dev)); mtx_init(&ctrlr->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF | MTX_RECURSE); mtx_init(&ctrlr->uic_cmd_lock, "ufshci ctrlr uic cmd lock", NULL, MTX_DEF); ver = ufshci_mmio_read_4(ctrlr, ver); ctrlr->major_version = UFSHCIV(UFSHCI_VER_REG_MJR, ver); ctrlr->minor_version = UFSHCIV(UFSHCI_VER_REG_MNR, ver); ufshci_printf(ctrlr, "UFSHCI Version: %d.%d\n", ctrlr->major_version, ctrlr->minor_version); /* Read Device Capabilities */ ctrlr->cap = cap = ufshci_mmio_read_4(ctrlr, cap); ctrlr->is_single_db_supported = UFSHCIV(UFSHCI_CAP_REG_LSDBS, cap); /* * TODO: This driver does not yet support multi-queue. * Check the UFSHCI_CAP_REG_MCQS bit in the future to determine if * multi-queue support is available. */ ctrlr->is_mcq_supported = false; if (!(ctrlr->is_single_db_supported == 0 || ctrlr->is_mcq_supported)) return (ENXIO); /* * The maximum transfer size supported by UFSHCI spec is 65535 * 256 KiB * However, we limit the maximum transfer size to 1MiB(256 * 4KiB) for * performance reason. 
*/ ctrlr->page_size = PAGE_SIZE; ctrlr->max_xfer_size = ctrlr->page_size * UFSHCI_MAX_PRDT_ENTRY_COUNT; timeout_period = UFSHCI_DEFAULT_TIMEOUT_PERIOD; TUNABLE_INT_FETCH("hw.ufshci.timeout_period", &timeout_period); timeout_period = min(timeout_period, UFSHCI_MAX_TIMEOUT_PERIOD); timeout_period = max(timeout_period, UFSHCI_MIN_TIMEOUT_PERIOD); ctrlr->timeout_period = timeout_period; retry_count = UFSHCI_DEFAULT_RETRY_COUNT; TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count); ctrlr->retry_count = retry_count; ctrlr->enable_aborts = 1; if (ctrlr->quirks & UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK) ctrlr->enable_aborts = 0; else TUNABLE_INT_FETCH("hw.ufshci.enable_aborts", &ctrlr->enable_aborts); /* Reset the UFSHCI controller */ error = ufshci_ctrlr_hw_reset(ctrlr); if (error) return (error); /* Read the UECPA register to clear */ ufshci_mmio_read_4(ctrlr, uecpa); /* Diable Auto-hibernate */ ahit = 0; ufshci_mmio_write_4(ctrlr, ahit, ahit); /* Allocate and initialize UTP Task Management Request List. */ error = ufshci_utmr_req_queue_construct(ctrlr); if (error) return (error); /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */ error = ufshci_utr_req_queue_construct(ctrlr); if (error) return (error); /* TODO: Separate IO and Admin slot */ /* * max_hw_pend_io is the number of slots in the transfer_req_queue. * Reduce num_entries by one to reserve an admin slot. */ ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries - 1; /* Create a thread for the taskqueue. */ ctrlr->taskqueue = taskqueue_create("ufshci_taskq", M_WAITOK, taskqueue_thread_enqueue, &ctrlr->taskqueue); taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "ufshci taskq"); TASK_INIT(&ctrlr->reset_task, 0, ufshci_ctrlr_reset_task, ctrlr); return (0); } void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev) { if (ctrlr->resource == NULL) goto nores; /* TODO: Flush In-flight IOs */ /* Release resources */ ufshci_utmr_req_queue_destroy(ctrlr); ufshci_utr_req_queue_destroy(ctrlr); if (ctrlr->tag) bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag); if (ctrlr->res) bus_release_resource(ctrlr->dev, SYS_RES_IRQ, rman_get_rid(ctrlr->res), ctrlr->res); mtx_lock(&ctrlr->sc_mtx); ufshci_sim_detach(ctrlr); mtx_unlock(&ctrlr->sc_mtx); bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id, ctrlr->resource); nores: KASSERT(!mtx_owned(&ctrlr->uic_cmd_lock), ("destroying uic_cmd_lock while still owned")); mtx_destroy(&ctrlr->uic_cmd_lock); KASSERT(!mtx_owned(&ctrlr->sc_mtx), ("destroying sc_mtx while still owned")); mtx_destroy(&ctrlr->sc_mtx); return; } void ufshci_ctrlr_reset(struct ufshci_controller *ctrlr) { taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task); } int ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr, struct ufshci_request *req) { return ( ufshci_req_queue_submit_request(&ctrlr->task_mgmt_req_queue, req, /*is_admin*/ false)); } int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr, struct ufshci_request *req) { return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req, /*is_admin*/ true)); } int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr, struct ufshci_request *req) { return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req, /*is_admin*/ false)); } int ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr) { struct ufshci_completion_poll_status status; status.done = 0; ufshci_ctrlr_cmd_send_nop(ctrlr, ufshci_completion_poll_cb, &status); ufshci_completion_poll(&status); if (status.error) { 
ufshci_printf(ctrlr, "ufshci_ctrlr_send_nop failed!\n"); return (ENXIO); } return (0); } void ufshci_ctrlr_start_config_hook(void *arg) { struct ufshci_controller *ctrlr = arg; TSENTER(); if (ufshci_utmr_req_queue_enable(ctrlr) == 0 && ufshci_utr_req_queue_enable(ctrlr) == 0) ufshci_ctrlr_start(ctrlr, false); else ufshci_ctrlr_fail(ctrlr); ufshci_sysctl_initialize_ctrlr(ctrlr); config_intrhook_disestablish(&ctrlr->config_hook); TSEXIT(); } /* * Poll all the queues enabled on the device for completion. */ void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr) { uint32_t is; is = ufshci_mmio_read_4(ctrlr, is); /* UIC error */ if (is & UFSHCIM(UFSHCI_IS_REG_UE)) { uint32_t uecpa, uecdl, uecn, uect, uecdme; /* UECPA for Host UIC Error Code within PHY Adapter Layer */ uecpa = ufshci_mmio_read_4(ctrlr, uecpa); if (uecpa & UFSHCIM(UFSHCI_UECPA_REG_ERR)) { ufshci_printf(ctrlr, "UECPA error code: 0x%x\n", UFSHCIV(UFSHCI_UECPA_REG_EC, uecpa)); } /* UECDL for Host UIC Error Code within Data Link Layer */ uecdl = ufshci_mmio_read_4(ctrlr, uecdl); if (uecdl & UFSHCIM(UFSHCI_UECDL_REG_ERR)) { ufshci_printf(ctrlr, "UECDL error code: 0x%x\n", UFSHCIV(UFSHCI_UECDL_REG_EC, uecdl)); } /* UECN for Host UIC Error Code within Network Layer */ uecn = ufshci_mmio_read_4(ctrlr, uecn); if (uecn & UFSHCIM(UFSHCI_UECN_REG_ERR)) { ufshci_printf(ctrlr, "UECN error code: 0x%x\n", UFSHCIV(UFSHCI_UECN_REG_EC, uecn)); } /* UECT for Host UIC Error Code within Transport Layer */ uect = ufshci_mmio_read_4(ctrlr, uect); if (uect & UFSHCIM(UFSHCI_UECT_REG_ERR)) { ufshci_printf(ctrlr, "UECT error code: 0x%x\n", UFSHCIV(UFSHCI_UECT_REG_EC, uect)); } /* UECDME for Host UIC Error Code within DME subcomponent */ uecdme = ufshci_mmio_read_4(ctrlr, uecdme); if (uecdme & UFSHCIM(UFSHCI_UECDME_REG_ERR)) { ufshci_printf(ctrlr, "UECDME error code: 0x%x\n", UFSHCIV(UFSHCI_UECDME_REG_EC, uecdme)); } ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UE)); } /* Device Fatal Error Status */ if (is & UFSHCIM(UFSHCI_IS_REG_DFES)) { ufshci_printf(ctrlr, "Device fatal error on ISR\n"); ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_DFES)); } /* UTP Error Status */ if (is & UFSHCIM(UFSHCI_IS_REG_UTPES)) { ufshci_printf(ctrlr, "UTP error on ISR\n"); ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTPES)); } /* Host Controller Fatal Error Status */ if (is & UFSHCIM(UFSHCI_IS_REG_HCFES)) { ufshci_printf(ctrlr, "Host controller fatal error on ISR\n"); ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_HCFES)); } /* System Bus Fatal Error Status */ if (is & UFSHCIM(UFSHCI_IS_REG_SBFES)) { ufshci_printf(ctrlr, "System bus fatal error on ISR\n"); ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_SBFES)); } /* Crypto Engine Fatal Error Status */ if (is & UFSHCIM(UFSHCI_IS_REG_CEFES)) { ufshci_printf(ctrlr, "Crypto engine fatal error on ISR\n"); ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CEFES)); } /* UTP Task Management Request Completion Status */ if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) { ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS)); ufshci_req_queue_process_completions( &ctrlr->task_mgmt_req_queue); } /* UTP Transfer Request Completion Status */ if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) { ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTRCS)); ufshci_req_queue_process_completions( &ctrlr->transfer_req_queue); } /* MCQ CQ Event Status */ if (is & UFSHCIM(UFSHCI_IS_REG_CQES)) { /* TODO: We need to process completion Queue Pairs */ ufshci_printf(ctrlr, "MCQ completion not yet implemented\n"); 
ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CQES)); } } /* * Poll the single-vector interrupt case: num_io_queues will be 1 and * there's only a single vector. While we're polling, we mask further * interrupts in the controller. */ void ufshci_ctrlr_shared_handler(void *arg) { struct ufshci_controller *ctrlr = arg; ufshci_ctrlr_poll(ctrlr); } void ufshci_reg_dump(struct ufshci_controller *ctrlr) { ufshci_printf(ctrlr, "========= UFSHCI Register Dump =========\n"); UFSHCI_DUMP_REG(ctrlr, cap); UFSHCI_DUMP_REG(ctrlr, mcqcap); UFSHCI_DUMP_REG(ctrlr, ver); UFSHCI_DUMP_REG(ctrlr, ext_cap); UFSHCI_DUMP_REG(ctrlr, hcpid); UFSHCI_DUMP_REG(ctrlr, hcmid); UFSHCI_DUMP_REG(ctrlr, ahit); UFSHCI_DUMP_REG(ctrlr, is); UFSHCI_DUMP_REG(ctrlr, ie); UFSHCI_DUMP_REG(ctrlr, hcsext); UFSHCI_DUMP_REG(ctrlr, hcs); UFSHCI_DUMP_REG(ctrlr, hce); UFSHCI_DUMP_REG(ctrlr, uecpa); UFSHCI_DUMP_REG(ctrlr, uecdl); UFSHCI_DUMP_REG(ctrlr, uecn); UFSHCI_DUMP_REG(ctrlr, uect); UFSHCI_DUMP_REG(ctrlr, uecdme); ufshci_printf(ctrlr, "========================================\n"); } int ufshci_ctrlr_suspend(struct ufshci_controller *ctrlr, enum power_stype stype) { int error; if (!ctrlr->ufs_dev.power_mode_supported) return (0); /* TODO: Need to flush the request queue */ if (ctrlr->ufs_device_wlun_periph) { ctrlr->ufs_dev.power_mode = power_map[stype].dev_pwr; error = ufshci_sim_send_ssu(ctrlr, /*start*/ false, power_map[stype].ssu_pc, /*immed*/ false); if (error) { ufshci_printf(ctrlr, "Failed to send SSU in suspend handler\n"); return (error); } } /* Change the link state */ error = ufshci_dev_link_state_transition(ctrlr, power_map[stype].link_state); if (error) { ufshci_printf(ctrlr, "Failed to transition link state in suspend handler\n"); return (error); } return (0); } int ufshci_ctrlr_resume(struct ufshci_controller *ctrlr, enum power_stype stype) { int error; if (!ctrlr->ufs_dev.power_mode_supported) return (0); /* Change the link state */ error = ufshci_dev_link_state_transition(ctrlr, power_map[stype].link_state); if (error) { ufshci_printf(ctrlr, "Failed to transition link state in resume handler\n"); return (error); } if (ctrlr->ufs_device_wlun_periph) { ctrlr->ufs_dev.power_mode = power_map[stype].dev_pwr; error = ufshci_sim_send_ssu(ctrlr, /*start*/ false, power_map[stype].ssu_pc, /*immed*/ false); if (error) { ufshci_printf(ctrlr, "Failed to send SSU in resume handler\n"); return (error); } } + ufshci_dev_enable_auto_hibernate(ctrlr); + return (0); } diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c index c4a5bda9c79a..38c6de9731a4 100644 --- a/sys/dev/ufshci/ufshci_dev.c +++ b/sys/dev/ufshci/ufshci_dev.c @@ -1,878 +1,905 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. 
* Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #include #include #include #include "ufshci_private.h" #include "ufshci_reg.h" static int ufshci_dev_read_descriptor(struct ufshci_controller *ctrlr, enum ufshci_descriptor_type desc_type, uint8_t index, uint8_t selector, void *desc, size_t desc_size) { struct ufshci_completion_poll_status status; struct ufshci_query_param param; param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST; param.opcode = UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR; param.type = desc_type; param.index = index; param.selector = selector; param.value = 0; param.desc_size = desc_size; status.done = 0; ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, &status, param); ufshci_completion_poll(&status); if (status.error) { ufshci_printf(ctrlr, "ufshci_dev_read_descriptor failed!\n"); return (ENXIO); } memcpy(desc, status.cpl.response_upiu.query_response_upiu.command_data, desc_size); return (0); } static int ufshci_dev_read_device_descriptor(struct ufshci_controller *ctrlr, struct ufshci_device_descriptor *desc) { return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_DEVICE, 0, 0, desc, sizeof(struct ufshci_device_descriptor))); } static int ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr, struct ufshci_geometry_descriptor *desc) { return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_GEOMETRY, 0, 0, desc, sizeof(struct ufshci_geometry_descriptor))); } static int ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun, struct ufshci_unit_descriptor *desc) { return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0, desc, sizeof(struct ufshci_unit_descriptor))); } static int ufshci_dev_read_flag(struct ufshci_controller *ctrlr, enum ufshci_flags flag_type, uint8_t *flag) { struct ufshci_completion_poll_status status; struct ufshci_query_param param; param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST; param.opcode = UFSHCI_QUERY_OPCODE_READ_FLAG; param.type = flag_type; param.index = 0; param.selector = 0; param.value = 0; status.done = 0; ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, &status, param); ufshci_completion_poll(&status); if (status.error) { ufshci_printf(ctrlr, "ufshci_dev_read_flag failed!\n"); return (ENXIO); } *flag = status.cpl.response_upiu.query_response_upiu.flag_value; return (0); } static int ufshci_dev_set_flag(struct ufshci_controller *ctrlr, enum ufshci_flags flag_type) { struct ufshci_completion_poll_status status; struct ufshci_query_param param; param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST; param.opcode = UFSHCI_QUERY_OPCODE_SET_FLAG; param.type = flag_type; param.index = 0; param.selector = 0; param.value = 0; status.done = 0; ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, &status, param); ufshci_completion_poll(&status); if (status.error) { ufshci_printf(ctrlr, "ufshci_dev_set_flag failed!\n"); return (ENXIO); } return (0); } static int ufshci_dev_clear_flag(struct ufshci_controller *ctrlr, enum ufshci_flags flag_type) { struct ufshci_completion_poll_status status; struct ufshci_query_param param; param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST; param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG; param.type = flag_type; param.index = 0; param.selector = 0; param.value = 0; status.done = 0; ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, &status, param); ufshci_completion_poll(&status); if (status.error) { ufshci_printf(ctrlr, "ufshci_dev_clear_flag 
failed!\n"); return (ENXIO); } return (0); } static int ufshci_dev_read_attribute(struct ufshci_controller *ctrlr, enum ufshci_attributes attr_type, uint8_t index, uint8_t selector, uint64_t *value) { struct ufshci_completion_poll_status status; struct ufshci_query_param param; param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST; param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE; param.type = attr_type; param.index = index; param.selector = selector; param.value = 0; status.done = 0; ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, &status, param); ufshci_completion_poll(&status); if (status.error) { ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n"); return (ENXIO); } *value = status.cpl.response_upiu.query_response_upiu.value_64; return (0); } static int ufshci_dev_write_attribute(struct ufshci_controller *ctrlr, enum ufshci_attributes attr_type, uint8_t index, uint8_t selector, uint64_t value) { struct ufshci_completion_poll_status status; struct ufshci_query_param param; param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST; param.opcode = UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE; param.type = attr_type; param.index = index; param.selector = selector; param.value = value; status.done = 0; ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, &status, param); ufshci_completion_poll(&status); if (status.error) { ufshci_printf(ctrlr, "ufshci_dev_write_attribute failed!\n"); return (ENXIO); } return (0); } int ufshci_dev_init(struct ufshci_controller *ctrlr) { int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms); sbintime_t delta_t = SBT_1US; uint8_t flag; int error; const uint8_t device_init_completed = 0; error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT); if (error) return (error); /* Wait for the UFSHCI_FLAG_F_DEVICE_INIT flag to change */ while (1) { error = ufshci_dev_read_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT, &flag); if (error) return (error); if (flag == device_init_completed) break; if (timeout - ticks < 0) { ufshci_printf(ctrlr, "device init did not become %d " "within %d ms\n", device_init_completed, ctrlr->device_init_timeout_in_ms); return (ENXIO); } pause_sbt("ufshciinit", delta_t, 0, C_PREL(1)); delta_t = min(SBT_1MS, delta_t * 3 / 2); } return (0); } int ufshci_dev_reset(struct ufshci_controller *ctrlr) { if (ufshci_uic_send_dme_endpoint_reset(ctrlr)) return (ENXIO); return (ufshci_dev_init(ctrlr)); } int ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr) { int error; uint8_t index, selector; index = 0; /* bRefClkFreq is device type attribute */ selector = 0; /* bRefClkFreq is device type attribute */ error = ufshci_dev_write_attribute(ctrlr, UFSHCI_ATTR_B_REF_CLK_FREQ, index, selector, ctrlr->ref_clk); if (error) return (error); return (0); } int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr) { uint32_t pa_granularity, peer_pa_granularity; uint32_t t_activate, pear_t_activate; /* * Unipro Version: * - 7~15 = Above 2.0, 6 = 2.0, 5 = 1.8, 4 = 1.61, 3 = 1.6, 2 = 1.41, * 1 = 1.40, 0 = Reserved */ if (ufshci_uic_send_dme_get(ctrlr, PA_LocalVerInfo, &ctrlr->unipro_version)) return (ENXIO); if (ufshci_uic_send_dme_get(ctrlr, PA_RemoteVerInfo, &ctrlr->ufs_dev.unipro_version)) return (ENXIO); /* * PA_Granularity: Granularity for PA_TActivate and PA_Hibern8Time * - 1=1us, 2=4us, 3=8us, 4=16us, 5=32us, 6=100us */ if (ufshci_uic_send_dme_get(ctrlr, PA_Granularity, &pa_granularity)) return (ENXIO); if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, &peer_pa_granularity)) return 
(ENXIO); /* * PA_TActivate: Time to wait before activating a burst in order to * wake-up peer M-RX * UniPro automatically sets timing information such as PA_TActivate * through the PACP_CAP_EXT1_ind command during Link Startup operation. */ if (ufshci_uic_send_dme_get(ctrlr, PA_TActivate, &t_activate)) return (ENXIO); if (ufshci_uic_send_dme_peer_get(ctrlr, PA_TActivate, &pear_t_activate)) return (ENXIO); if (ctrlr->quirks & UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE) { /* * Intel Lake-field UFSHCI has a quirk. We need to add 200us to * the PEER's PA_TActivate. */ if (pa_granularity == peer_pa_granularity) { pear_t_activate = t_activate + 2; if (ufshci_uic_send_dme_peer_set(ctrlr, PA_TActivate, pear_t_activate)) return (ENXIO); } } return (0); } int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr) { /* HSSerise: A = 1, B = 2 */ const uint32_t hs_series = 2; /* * TX/RX PWRMode: * - TX[3:0], RX[7:4] * - Fast Mode = 1, Slow Mode = 2, FastAuto Mode = 4, SlowAuto Mode = 5 */ const uint32_t fast_mode = 1; const uint32_t rx_bit_shift = 4; uint32_t power_mode, peer_granularity; /* Update lanes with available TX/RX lanes */ if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes, &ctrlr->max_tx_lanes)) return (ENXIO); if (ufshci_uic_send_dme_get(ctrlr, PA_AvailRxDataLanes, &ctrlr->max_rx_lanes)) return (ENXIO); /* Get max HS-GEAR value */ if (ufshci_uic_send_dme_get(ctrlr, PA_MaxRxHSGear, &ctrlr->max_rx_hs_gear)) return (ENXIO); /* Set the data lane to max */ ctrlr->tx_lanes = ctrlr->max_tx_lanes; ctrlr->rx_lanes = ctrlr->max_rx_lanes; if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveTxDataLanes, ctrlr->tx_lanes)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveRxDataLanes, ctrlr->rx_lanes)) return (ENXIO); if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) { /* Before changing gears, first change the number of lanes. */ if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode)) return (ENXIO); /* Wait for power mode changed. 
*/ if (ufshci_uic_power_mode_ready(ctrlr)) { ufshci_reg_dump(ctrlr); return (ENXIO); } } /* Set HS-GEAR to max gear */ ctrlr->hs_gear = ctrlr->max_rx_hs_gear; if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_RxGear, ctrlr->hs_gear)) return (ENXIO); /* * Set termination * - HS-MODE = ON / LS-MODE = OFF */ if (ufshci_uic_send_dme_set(ctrlr, PA_TxTermination, true)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_RxTermination, true)) return (ENXIO); /* Set HSSerise (A = 1, B = 2) */ if (ufshci_uic_send_dme_set(ctrlr, PA_HSSeries, hs_series)) return (ENXIO); /* Set Timeout values */ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData0, DL_FC0ProtectionTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData1, DL_TC0ReplayTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData2, DL_AFC0ReqTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData3, DL_FC0ProtectionTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData4, DL_TC0ReplayTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData5, DL_AFC0ReqTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, DME_LocalFC0ProtectionTimeOutVal, DL_FC0ProtectionTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, DME_LocalTC0ReplayTimeOutVal, DL_TC0ReplayTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, DME_LocalAFC0ReqTimeOutVal, DL_AFC0ReqTimeOutVal_Default)) return (ENXIO); /* Set TX/RX PWRMode */ power_mode = (fast_mode << rx_bit_shift) | fast_mode; if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode)) return (ENXIO); /* Wait for power mode changed. */ if (ufshci_uic_power_mode_ready(ctrlr)) { ufshci_reg_dump(ctrlr); return (ENXIO); } if (ctrlr->quirks & UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE) { /* * Intel Lake-field UFSHCI has a quirk. * We need to wait 1250us and clear dme error. */ pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1)); /* Test with dme_peer_get to make sure there are no errors. 
*/ if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, &peer_granularity)) return (ENXIO); } return (0); } +void +ufshci_dev_enable_auto_hibernate(struct ufshci_controller *ctrlr) +{ + if (!ctrlr->ufs_dev.auto_hibernation_supported) + return; + + ufshci_mmio_write_4(ctrlr, ahit, ctrlr->ufs_dev.ahit); +} + +void +ufshci_dev_init_auto_hibernate(struct ufshci_controller *ctrlr) +{ + ctrlr->ufs_dev.auto_hibernation_supported = + UFSHCIV(UFSHCI_CAP_REG_AUTOH8, ctrlr->cap) && + !(ctrlr->quirks & UFSHCI_QUIRK_BROKEN_AUTO_HIBERNATE); + + if (!ctrlr->ufs_dev.auto_hibernation_supported) + return; + + /* The default value for auto hibernation is 150 ms */ + ctrlr->ufs_dev.ahit = 0; + ctrlr->ufs_dev.ahit |= UFSHCIF(UFSHCI_AHIT_REG_AH8ITV, 150); + ctrlr->ufs_dev.ahit |= UFSHCIF(UFSHCI_AHIT_REG_TS, 3); + + ufshci_dev_enable_auto_hibernate(ctrlr); +} + void ufshci_dev_init_uic_link_state(struct ufshci_controller *ctrlr) { ctrlr->ufs_dev.link_state = UFSHCI_UIC_LINK_STATE_ACTIVE; } int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr) { ctrlr->ufs_dev.power_mode_supported = false; if (ctrlr->quirks & UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS) return (0); ctrlr->ufs_device_wlun_periph = ufshci_sim_find_periph(ctrlr, UFSHCI_WLUN_UFS_DEVICE); if (ctrlr->ufs_device_wlun_periph == NULL) { ufshci_printf(ctrlr, "Well-known LUN `UFS Device (0x50)` not found\n"); return (0); } ctrlr->ufs_dev.power_mode_supported = true; ctrlr->ufs_dev.power_mode = UFSHCI_DEV_PWR_ACTIVE; return (0); } int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr) { struct ufshci_device *device = &ctrlr->ufs_dev; /* * The kDeviceDensityUnit is defined in the spec as 512. * qTotalRawDeviceCapacity use big-endian byte ordering. */ const uint32_t device_density_unit = 512; uint32_t ver; int error; error = ufshci_dev_read_device_descriptor(ctrlr, &device->dev_desc); if (error) return (error); ver = be16toh(device->dev_desc.wSpecVersion); ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n", UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver), UFSHCIV(UFSHCI_VER_REG_VS, ver)); ufshci_printf(ctrlr, "%u enabled LUNs found\n", device->dev_desc.bNumberLU); error = ufshci_dev_read_geometry_descriptor(ctrlr, &device->geo_desc); if (error) return (error); if (device->geo_desc.bMaxNumberLU == 0) { device->max_lun_count = 8; } else if (device->geo_desc.bMaxNumberLU == 1) { device->max_lun_count = 32; } else { ufshci_printf(ctrlr, "Invalid Geometry Descriptor bMaxNumberLU value=%d\n", device->geo_desc.bMaxNumberLU); return (ENXIO); } ctrlr->max_lun_count = device->max_lun_count; ufshci_printf(ctrlr, "UFS device total size is %lu bytes\n", be64toh(device->geo_desc.qTotalRawDeviceCapacity) * device_density_unit); return (0); } static int ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr) { struct ufshci_device *dev = &ctrlr->ufs_dev; int error; /* Enable WriteBooster */ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN); if (error) { ufshci_printf(ctrlr, "Failed to enable WriteBooster\n"); return (error); } dev->is_wb_enabled = true; /* Enable WriteBooster buffer flush during hibernate */ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE); if (error) { ufshci_printf(ctrlr, "Failed to enable WriteBooster buffer flush during hibernate\n"); return (error); } /* Enable WriteBooster buffer flush */ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN); if (error) { ufshci_printf(ctrlr, "Failed to enable WriteBooster buffer flush\n"); return (error); 
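Referring back to ufshci_dev_init_auto_hibernate() above: the AHIT value it builds is a packed register, and the 150 ms default follows from the UFSHCI Auto-Hibern8 Idle Timer layout (AH8ITV in bits 9:0, TS timer scale in bits 12:10, with scale 3 selecting 1 ms units). The standalone sketch below shows that arithmetic with hard-coded shifts; the driver itself packs the same fields with its UFSHCIF() macro, and the shift values here come from the UFSHCI specification, not from the driver headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative AHIT field positions per the UFSHCI spec. */
#define AH8ITV_SHIFT	0	/* Auto-Hibern8 idle timer value, bits 9:0 */
#define TS_SHIFT	10	/* Timer scale, bits 12:10; 3 => 1 ms units */

int
main(void)
{
	uint32_t ahit = (150u << AH8ITV_SHIFT) | (3u << TS_SHIFT);

	/* 150 ticks x 1 ms per tick => the 150 ms idle timeout above. */
	printf("ahit = 0x%04x (idle timeout 150 ms)\n", ahit);
	return (0);
}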
}
	dev->is_wb_flush_enabled = true;

	return (0);
}

static int
ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr)
{
	struct ufshci_device *dev = &ctrlr->ufs_dev;
	int error;

	/* Disable WriteBooster buffer flush */
	error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
	if (error) {
		ufshci_printf(ctrlr,
		    "Failed to disable WriteBooster buffer flush\n");
		return (error);
	}
	dev->is_wb_flush_enabled = false;

	/* Disable WriteBooster buffer flush during hibernate */
	error = ufshci_dev_clear_flag(ctrlr,
	    UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
	if (error) {
		ufshci_printf(ctrlr,
		    "Failed to disable WriteBooster buffer flush during hibernate\n");
		return (error);
	}

	/* Disable WriteBooster */
	error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
	if (error) {
		ufshci_printf(ctrlr, "Failed to disable WriteBooster\n");
		return (error);
	}
	dev->is_wb_enabled = false;

	return (0);
}

static int
ufshci_dev_is_write_booster_buffer_life_time_left(
    struct ufshci_controller *ctrlr, bool *is_life_time_left)
{
	struct ufshci_device *dev = &ctrlr->ufs_dev;
	uint8_t buffer_lun;
	uint64_t life_time;
	uint32_t error;

	if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
		buffer_lun = dev->wb_dedicated_lu;
	else
		buffer_lun = 0;

	error = ufshci_dev_read_attribute(ctrlr,
	    UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time);
	if (error)
		return (error);

	*is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED);

	return (0);
}

/*
 * This function is not yet in use. It will be used when suspend/resume is
 * implemented.
 */
static __unused int
ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr,
    bool *need_flush)
{
	struct ufshci_device *dev = &ctrlr->ufs_dev;
	bool is_life_time_left = false;
	uint64_t available_buffer_size, current_buffer_size;
	uint8_t buffer_lun;
	uint32_t error;

	*need_flush = false;

	if (!dev->is_wb_enabled)
		return (0);

	error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
	    &is_life_time_left);
	if (error)
		return (error);

	if (!is_life_time_left)
		return (ufshci_dev_disable_write_booster(ctrlr));

	if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
		buffer_lun = dev->wb_dedicated_lu;
	else
		buffer_lun = 0;

	error = ufshci_dev_read_attribute(ctrlr,
	    UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0,
	    &available_buffer_size);
	if (error)
		return (error);

	switch (dev->wb_user_space_config_option) {
	case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION:
		*need_flush = (available_buffer_size <=
		    UFSHCI_ATTR_WB_AVAILABLE_10);
		break;
	case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE:
		/*
		 * In PRESERVE USER SPACE mode, flush should be performed when
		 * the current buffer is greater than 0 and the available
		 * buffer left is below write_booster_flush_threshold.
		 */
		error = ufshci_dev_read_attribute(ctrlr,
		    UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0,
		    &current_buffer_size);
		if (error)
			return (error);
		if (current_buffer_size == 0)
			return (0);
		*need_flush = (available_buffer_size <
		    dev->write_booster_flush_threshold);
		break;
	default:
		ufshci_printf(ctrlr,
		    "Invalid bWriteBoosterBufferPreserveUserSpaceEn value");
		return (EINVAL);
	}

	/*
	 * TODO: Need to handle WRITEBOOSTER_FLUSH_NEEDED exception case from
	 * wExceptionEventStatus attribute.
*/ return (0); } int ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr) { struct ufshci_device *dev = &ctrlr->ufs_dev; uint32_t extended_ufs_feature_support; uint32_t alloc_units; struct ufshci_unit_descriptor unit_desc; uint8_t lun; bool is_life_time_left; uint32_t mega_byte = 1024 * 1024; uint32_t error = 0; extended_ufs_feature_support = be32toh( dev->dev_desc.dExtendedUfsFeaturesSupport); if (!(extended_ufs_feature_support & UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) { /* This device does not support Write Booster */ return (0); } if (ufshci_dev_enable_write_booster(ctrlr)) return (0); /* Get WriteBooster buffer parameters */ dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType; dev->wb_user_space_config_option = dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn; /* * Find the size of the write buffer. * With LU-dedicated (00h), the WriteBooster buffer is assigned * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h) * uses a single device-wide buffer shared by multiple LUs. */ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) { alloc_units = be32toh( dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits); ufshci_printf(ctrlr, "WriteBooster buffer type = Shared, alloc_units=%d\n", alloc_units); } else if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) { ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n"); for (lun = 0; lun < ctrlr->max_lun_count; lun++) { /* Find a dedicated buffer using a unit descriptor */ if (ufshci_dev_read_unit_descriptor(ctrlr, lun, &unit_desc)) continue; alloc_units = be32toh( unit_desc.dLUNumWriteBoosterBufferAllocUnits); if (alloc_units) { dev->wb_dedicated_lu = lun; break; } } } else { ufshci_printf(ctrlr, "Not supported WriteBooster buffer type: 0x%x\n", dev->wb_buffer_type); goto out; } if (alloc_units == 0) { ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n"); goto out; } dev->wb_buffer_size_mb = alloc_units * dev->geo_desc.bAllocationUnitSize * (be32toh(dev->geo_desc.dSegmentSize)) / (mega_byte / UFSHCI_SECTOR_SIZE); /* Set to flush when 40% of the available buffer size remains */ dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40; /* * Check if WriteBooster Buffer lifetime is left. * WriteBooster Buffer lifetime — percent of life used based on P/E * cycles. If "preserve user space" is enabled, writes to normal user * space also consume WB life since the area is shared. 
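As a worked example of the wb_buffer_size_mb computation above, here is a standalone calculation with hypothetical descriptor values, assuming (per the UFS descriptors) that dSegmentSize is expressed in 512-byte sectors and bAllocationUnitSize in segments per allocation unit:

#include <stdint.h>
#include <stdio.h>

#define UFSHCI_SECTOR_SIZE	512u
#define MEGA_BYTE		(1024u * 1024u)

int
main(void)
{
	/* Hypothetical values: 8 allocation units of one 8192-sector segment. */
	uint32_t alloc_units = 8;
	uint32_t alloc_unit_size = 1;	/* bAllocationUnitSize, in segments */
	uint32_t segment_size = 8192;	/* dSegmentSize, in 512-byte sectors */
	uint32_t wb_buffer_size_mb;

	wb_buffer_size_mb = alloc_units * alloc_unit_size * segment_size /
	    (MEGA_BYTE / UFSHCI_SECTOR_SIZE);

	/* 8 * 1 * 8192 / 2048 = 32 MB of WriteBooster buffer. */
	printf("WriteBooster buffer = %u MB\n", wb_buffer_size_mb);
	return (0);
}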
*/ error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr, &is_life_time_left); if (error) goto out; if (!is_life_time_left) { ufshci_printf(ctrlr, "There is no WriteBooster buffer life time left.\n"); goto out; } ufshci_printf(ctrlr, "WriteBooster Enabled\n"); return (0); out: ufshci_dev_disable_write_booster(ctrlr); return (error); } int ufshci_dev_get_current_power_mode(struct ufshci_controller *ctrlr, uint8_t *power_mode) { uint64_t value; int err; err = ufshci_dev_read_attribute(ctrlr, UFSHCI_ATTR_B_CURRENT_POWER_MODE, /*index*/ 0, /*selector*/ 0, &value); if (err) return (err); *power_mode = (uint8_t)value; return (0); } static int ufshci_dev_hibernate_enter(struct ufshci_controller *ctrlr) { int error; error = ufshci_uic_send_dme_hibernate_enter(ctrlr); if (error) return (error); return (ufshci_uic_hibernation_ready(ctrlr)); } static int ufshci_dev_hibernate_exit(struct ufshci_controller *ctrlr) { int error; error = ufshci_uic_send_dme_hibernate_exit(ctrlr); if (error) return (error); return (ufshci_uic_hibernation_ready(ctrlr)); } int ufshci_dev_link_state_transition(struct ufshci_controller *ctrlr, enum ufshci_uic_link_state target_state) { struct ufshci_device *dev = &ctrlr->ufs_dev; int error = 0; if (dev->link_state == target_state) return (0); switch (target_state) { case UFSHCI_UIC_LINK_STATE_OFF: error = ufshci_dev_hibernate_enter(ctrlr); if (error) break; error = ufshci_ctrlr_disable(ctrlr); break; case UFSHCI_UIC_LINK_STATE_ACTIVE: if (dev->link_state == UFSHCI_UIC_LINK_STATE_HIBERNATE) error = ufshci_dev_hibernate_exit(ctrlr); else error = EINVAL; break; case UFSHCI_UIC_LINK_STATE_HIBERNATE: if (dev->link_state == UFSHCI_UIC_LINK_STATE_ACTIVE) error = ufshci_dev_hibernate_enter(ctrlr); else error = EINVAL; break; case UFSHCI_UIC_LINK_STATE_BROKEN: break; default: error = EINVAL; break; } if (error) return (error); dev->link_state = target_state; return (0); } diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c index 5fce14997784..b2a958f1cd1a 100644 --- a/sys/dev/ufshci/ufshci_pci.c +++ b/sys/dev/ufshci/ufshci_pci.c @@ -1,282 +1,283 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. 
* Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #include #include #include #include #include #include #include #include #include #include #include "ufshci_private.h" static int ufshci_pci_probe(device_t); static int ufshci_pci_attach(device_t); static int ufshci_pci_detach(device_t); static int ufshci_pci_suspend(device_t); static int ufshci_pci_resume(device_t); static int ufshci_pci_setup_interrupts(struct ufshci_controller *ctrlr); static device_method_t ufshci_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ufshci_pci_probe), DEVMETHOD(device_attach, ufshci_pci_attach), DEVMETHOD(device_detach, ufshci_pci_detach), DEVMETHOD(device_suspend, ufshci_pci_suspend), DEVMETHOD(device_resume, ufshci_pci_resume), { 0, 0 } }; static driver_t ufshci_pci_driver = { "ufshci", ufshci_pci_methods, sizeof(struct ufshci_controller), }; DRIVER_MODULE(ufshci, pci, ufshci_pci_driver, 0, 0); static struct _pcsid { uint32_t devid; const char *desc; uint32_t ref_clk; uint32_t quirks; } pci_ids[] = { { 0x131b36, "QEMU UFS Host Controller", UFSHCI_REF_CLK_19_2MHz, UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE | UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK | UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS }, { 0x98fa8086, "Intel Lakefield UFS Host Controller", UFSHCI_REF_CLK_19_2MHz, UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE | UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE | - UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY }, + UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY | + UFSHCI_QUIRK_BROKEN_AUTO_HIBERNATE }, { 0x54ff8086, "Intel UFS Host Controller", UFSHCI_REF_CLK_19_2MHz }, { 0x00000000, NULL } }; static int ufshci_pci_probe(device_t device) { struct ufshci_controller *ctrlr = device_get_softc(device); uint32_t devid = pci_get_devid(device); struct _pcsid *ep = pci_ids; while (ep->devid && ep->devid != devid) ++ep; if (ep->devid) { ctrlr->quirks = ep->quirks; ctrlr->ref_clk = ep->ref_clk; } if (ep->desc) { device_set_desc(device, ep->desc); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int ufshci_pci_allocate_bar(struct ufshci_controller *ctrlr) { ctrlr->resource_id = PCIR_BAR(0); ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY, &ctrlr->resource_id, RF_ACTIVE); if (ctrlr->resource == NULL) { ufshci_printf(ctrlr, "unable to allocate pci resource\n"); return (ENOMEM); } ctrlr->bus_tag = rman_get_bustag(ctrlr->resource); ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource); ctrlr->regs = (struct ufshci_registers *)ctrlr->bus_handle; return (0); } static int ufshci_pci_attach(device_t dev) { struct ufshci_controller *ctrlr = device_get_softc(dev); int status; ctrlr->dev = dev; status = ufshci_pci_allocate_bar(ctrlr); if (status != 0) goto bad; pci_enable_busmaster(dev); status = ufshci_pci_setup_interrupts(ctrlr); if (status != 0) goto bad; return (ufshci_attach(dev)); bad: if (ctrlr->resource != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id, ctrlr->resource); } if (ctrlr->tag) bus_teardown_intr(dev, ctrlr->res, ctrlr->tag); if (ctrlr->res) bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(ctrlr->res), ctrlr->res); if (ctrlr->msi_count > 0) pci_release_msi(dev); return (status); } static int ufshci_pci_detach(device_t dev) { struct ufshci_controller *ctrlr = device_get_softc(dev); int error; error = ufshci_detach(dev); if (ctrlr->msi_count > 0) pci_release_msi(dev); pci_disable_busmaster(dev); return (error); } static int ufshci_pci_setup_shared(struct ufshci_controller *ctrlr, int rid) { int error; ctrlr->num_io_queues = 1; ctrlr->rid = rid; ctrlr->res = 
bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ, &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE); if (ctrlr->res == NULL) { ufshci_printf(ctrlr, "unable to allocate shared interrupt\n"); return (ENOMEM); } error = bus_setup_intr(ctrlr->dev, ctrlr->res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, ufshci_ctrlr_shared_handler, ctrlr, &ctrlr->tag); if (error) { ufshci_printf(ctrlr, "unable to setup shared interrupt\n"); return (error); } return (0); } static int ufshci_pci_setup_interrupts(struct ufshci_controller *ctrlr) { device_t dev = ctrlr->dev; int force_intx = 0; int num_io_queues, per_cpu_io_queues, min_cpus_per_ioq; int num_vectors_requested; TUNABLE_INT_FETCH("hw.ufshci.force_intx", &force_intx); if (force_intx) goto intx; if (pci_msix_count(dev) == 0) goto msi; /* * Try to allocate one MSI-X per core for I/O queues, plus one * for admin queue, but accept single shared MSI-X if have to. * Fall back to MSI if can't get any MSI-X. */ /* * TODO: Need to implement MCQ(Multi Circular Queue) * Example: num_io_queues = mp_ncpus; */ num_io_queues = 1; TUNABLE_INT_FETCH("hw.ufshci.num_io_queues", &num_io_queues); if (num_io_queues < 1 || num_io_queues > mp_ncpus) num_io_queues = mp_ncpus; per_cpu_io_queues = 1; TUNABLE_INT_FETCH("hw.ufshci.per_cpu_io_queues", &per_cpu_io_queues); if (per_cpu_io_queues == 0) num_io_queues = 1; min_cpus_per_ioq = smp_threads_per_core; TUNABLE_INT_FETCH("hw.ufshci.min_cpus_per_ioq", &min_cpus_per_ioq); if (min_cpus_per_ioq > 1) { num_io_queues = min(num_io_queues, max(1, mp_ncpus / min_cpus_per_ioq)); } num_io_queues = min(num_io_queues, max(1, pci_msix_count(dev) - 1)); again: if (num_io_queues > vm_ndomains) num_io_queues -= num_io_queues % vm_ndomains; num_vectors_requested = min(num_io_queues + 1, pci_msix_count(dev)); ctrlr->msi_count = num_vectors_requested; if (pci_alloc_msix(dev, &ctrlr->msi_count) != 0) { ufshci_printf(ctrlr, "unable to allocate MSI-X\n"); ctrlr->msi_count = 0; goto msi; } if (ctrlr->msi_count == 1) return (ufshci_pci_setup_shared(ctrlr, 1)); if (ctrlr->msi_count != num_vectors_requested) { pci_release_msi(dev); num_io_queues = ctrlr->msi_count - 1; goto again; } ctrlr->num_io_queues = num_io_queues; return (0); msi: /* * Try to allocate 2 MSIs (admin and I/O queues), but accept single * shared if have to. Fall back to INTx if can't get any MSI. */ ctrlr->msi_count = min(pci_msi_count(dev), 2); if (ctrlr->msi_count > 0) { if (pci_alloc_msi(dev, &ctrlr->msi_count) != 0) { ufshci_printf(ctrlr, "unable to allocate MSI\n"); ctrlr->msi_count = 0; } else if (ctrlr->msi_count == 2) { ctrlr->num_io_queues = 1; return (0); } } intx: return (ufshci_pci_setup_shared(ctrlr, ctrlr->msi_count > 0 ? 1 : 0)); } static int ufshci_pci_suspend(device_t dev) { struct ufshci_controller *ctrlr = device_get_softc(dev); /* Currently, PCI-based ufshci only supports POWER_STYPE_STANDBY */ return (ufshci_ctrlr_suspend(ctrlr, POWER_STYPE_STANDBY)); } static int ufshci_pci_resume(device_t dev) { struct ufshci_controller *ctrlr = device_get_softc(dev); return (ufshci_ctrlr_resume(ctrlr, POWER_STYPE_AWAKE)); } diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h index 8a49c2a9bc2b..bcb2bcef0230 100644 --- a/sys/dev/ufshci/ufshci_private.h +++ b/sys/dev/ufshci/ufshci_private.h @@ -1,640 +1,649 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. 
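To make the vector-count clamping in ufshci_pci_setup_interrupts() above concrete, here is a simplified, standalone walk-through with hypothetical numbers (8 CPUs, 4 MSI-X vectors offered by the device). Today the driver pins num_io_queues to 1 pending MCQ support, so this clamping mostly matters for that future case; the per-domain rounding and tunables are omitted.

#include <stdio.h>

int
main(void)
{
	int mp_ncpus = 8;	/* hypothetical CPU count */
	int msix_count = 4;	/* hypothetical MSI-X vectors available */
	int num_io_queues, num_vectors_requested;

	/* Ideally one I/O queue per core... */
	num_io_queues = mp_ncpus;
	/* ...but keep one vector in reserve for the admin queue. */
	if (num_io_queues > msix_count - 1)
		num_io_queues = msix_count - 1;

	num_vectors_requested = num_io_queues + 1;
	/* Prints: request 4 vectors (3 I/O queues + 1 admin) */
	printf("request %d vectors (%d I/O queues + 1 admin)\n",
	    num_vectors_requested, num_io_queues);
	return (0);
}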
* Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #ifndef __UFSHCI_PRIVATE_H__ #define __UFSHCI_PRIVATE_H__ #ifdef _KERNEL #include #else /* !_KERNEL */ #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ufshci.h" MALLOC_DECLARE(M_UFSHCI); #define UFSHCI_DEVICE_INIT_TIMEOUT_MS (2000) /* in milliseconds */ #define UFSHCI_UIC_CMD_TIMEOUT_MS (500) /* in milliseconds */ #define UFSHCI_DEFAULT_TIMEOUT_PERIOD (10) /* in seconds */ #define UFSHCI_MIN_TIMEOUT_PERIOD (5) /* in seconds */ #define UFSHCI_MAX_TIMEOUT_PERIOD (120) /* in seconds */ #define UFSHCI_DEFAULT_RETRY_COUNT (4) #define UFSHCI_UTR_ENTRIES (32) #define UFSHCI_UTRM_ENTRIES (8) #define UFSHCI_SECTOR_SIZE (512) struct ufshci_controller; struct ufshci_completion_poll_status { struct ufshci_completion cpl; int done; bool error; }; struct ufshci_request { struct ufshci_upiu request_upiu; size_t request_size; size_t response_size; struct memdesc payload; enum ufshci_data_direction data_direction; ufshci_cb_fn_t cb_fn; void *cb_arg; bool is_admin; int32_t retries; bool payload_valid; bool spare[2]; /* Future use */ STAILQ_ENTRY(ufshci_request) stailq; }; enum ufshci_slot_state { UFSHCI_SLOT_STATE_FREE = 0x0, UFSHCI_SLOT_STATE_RESERVED = 0x1, UFSHCI_SLOT_STATE_SCHEDULED = 0x2, UFSHCI_SLOT_STATE_TIMEOUT = 0x3, UFSHCI_SLOT_STATE_NEED_ERROR_HANDLING = 0x4, }; struct ufshci_tracker { TAILQ_ENTRY(ufshci_tracker) tailq; struct ufshci_request *req; struct ufshci_req_queue *req_queue; struct ufshci_hw_queue *hwq; uint8_t slot_num; enum ufshci_slot_state slot_state; size_t response_size; sbintime_t deadline; bus_dmamap_t payload_dma_map; uint64_t payload_addr; struct ufshci_utp_cmd_desc *ucd; bus_addr_t ucd_bus_addr; uint16_t prdt_off; uint16_t prdt_entry_cnt; }; enum ufshci_queue_mode { UFSHCI_Q_MODE_SDB = 0x00, /* Single Doorbell Mode*/ UFSHCI_Q_MODE_MCQ = 0x01, /* Multi-Circular Queue Mode*/ }; /* * UFS uses slot-based Single Doorbell (SDB) mode for request submission by * default and additionally supports Multi-Circular Queue (MCQ) in UFS 4.0. To * minimize duplicated code between SDB and MCQ, mode dependent operations are * extracted into ufshci_qops. 
*/ struct ufshci_qops { int (*construct)(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt); void (*destroy)(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); struct ufshci_hw_queue *(*get_hw_queue)( struct ufshci_req_queue *req_queue); int (*enable)(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); void (*disable)(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); int (*reserve_slot)(struct ufshci_req_queue *req_queue, struct ufshci_tracker **tr); int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue, struct ufshci_tracker **tr); void (*ring_doorbell)(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); bool (*is_doorbell_cleared)(struct ufshci_controller *ctrlr, uint8_t slot); void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); bool (*process_cpl)(struct ufshci_req_queue *req_queue); int (*get_inflight_io)(struct ufshci_controller *ctrlr); }; #define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */ enum ufshci_recovery { RECOVERY_NONE = 0, /* Normal operations */ RECOVERY_WAITING, /* waiting for the reset to complete */ }; /* * Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ * (ring buffer) modes. Fields are shared; some such as sq_head, sq_tail and * cq_head are not used in SDB but used in MCQ. */ struct ufshci_hw_queue { struct ufshci_controller *ctrlr; struct ufshci_req_queue *req_queue; uint32_t id; int domain; int cpu; struct callout timer; /* recovery lock */ bool timer_armed; /* recovery lock */ enum ufshci_recovery recovery_state; /* recovery lock */ union { struct ufshci_utp_xfer_req_desc *utrd; struct ufshci_utp_task_mgmt_req_desc *utmrd; }; bus_dma_tag_t dma_tag_queue; bus_dmamap_t queuemem_map; bus_addr_t req_queue_addr; bus_addr_t *ucd_bus_addr; uint32_t num_entries; uint32_t num_trackers; TAILQ_HEAD(, ufshci_tracker) free_tr; TAILQ_HEAD(, ufshci_tracker) outstanding_tr; /* * A Request List using the single doorbell method uses a dedicated * ufshci_tracker, one per slot. */ struct ufshci_tracker **act_tr; uint32_t sq_head; /* MCQ mode */ uint32_t sq_tail; /* MCQ mode */ uint32_t cq_head; /* MCQ mode */ uint32_t phase; int64_t num_cmds; int64_t num_intr_handler_calls; int64_t num_retries; int64_t num_failures; /* * Each lock may be acquired independently. * When both are required, acquire them in this order to avoid * deadlocks. 
(recovery_lock -> qlock) */ struct mtx_padalign qlock; struct mtx_padalign recovery_lock; }; struct ufshci_req_queue { struct ufshci_controller *ctrlr; int domain; /* * queue_mode: active transfer scheme * UFSHCI_Q_MODE_SDB – legacy single‑doorbell list * UFSHCI_Q_MODE_MCQ – modern multi‑circular queue (UFSHCI 4.0+) */ enum ufshci_queue_mode queue_mode; uint8_t num_q; struct ufshci_hw_queue *hwq; struct ufshci_qops qops; bool is_task_mgmt; uint32_t num_entries; uint32_t num_trackers; /* Shared DMA resource */ struct ufshci_utp_cmd_desc *ucd; bus_dma_tag_t dma_tag_ucd; bus_dma_tag_t dma_tag_payload; bus_dmamap_t ucdmem_map; }; enum ufshci_dev_pwr { UFSHCI_DEV_PWR_ACTIVE = 0, UFSHCI_DEV_PWR_SLEEP, UFSHCI_DEV_PWR_POWERDOWN, UFSHCI_DEV_PWR_DEEPSLEEP, UFSHCI_DEV_PWR_COUNT, }; enum ufshci_uic_link_state { UFSHCI_UIC_LINK_STATE_OFF = 0, UFSHCI_UIC_LINK_STATE_ACTIVE, UFSHCI_UIC_LINK_STATE_HIBERNATE, UFSHCI_UIC_LINK_STATE_BROKEN, }; struct ufshci_power_entry { enum ufshci_dev_pwr dev_pwr; uint8_t ssu_pc; /* SSU Power Condition */ enum ufshci_uic_link_state link_state; }; /* SSU Power Condition 0x40 is defined in the UFS specification */ static const struct ufshci_power_entry power_map[POWER_STYPE_COUNT] = { [POWER_STYPE_AWAKE] = { UFSHCI_DEV_PWR_ACTIVE, SSS_PC_ACTIVE, UFSHCI_UIC_LINK_STATE_ACTIVE }, [POWER_STYPE_STANDBY] = { UFSHCI_DEV_PWR_SLEEP, SSS_PC_IDLE, UFSHCI_UIC_LINK_STATE_HIBERNATE }, [POWER_STYPE_SUSPEND_TO_MEM] = { UFSHCI_DEV_PWR_POWERDOWN, SSS_PC_STANDBY, UFSHCI_UIC_LINK_STATE_HIBERNATE }, [POWER_STYPE_SUSPEND_TO_IDLE] = { UFSHCI_DEV_PWR_SLEEP, SSS_PC_IDLE, UFSHCI_UIC_LINK_STATE_HIBERNATE }, [POWER_STYPE_HIBERNATE] = { UFSHCI_DEV_PWR_DEEPSLEEP, 0x40, UFSHCI_UIC_LINK_STATE_OFF }, [POWER_STYPE_POWEROFF] = { UFSHCI_DEV_PWR_POWERDOWN, SSS_PC_STANDBY, UFSHCI_UIC_LINK_STATE_OFF }, }; struct ufshci_device { uint32_t max_lun_count; struct ufshci_device_descriptor dev_desc; struct ufshci_geometry_descriptor geo_desc; uint32_t unipro_version; /* WriteBooster */ bool is_wb_enabled; bool is_wb_flush_enabled; uint32_t wb_buffer_type; uint32_t wb_buffer_size_mb; uint32_t wb_user_space_config_option; uint8_t wb_dedicated_lu; uint32_t write_booster_flush_threshold; /* Power mode */ bool power_mode_supported; enum ufshci_dev_pwr power_mode; enum ufshci_uic_link_state link_state; + + /* Auto Hibernation */ + bool auto_hibernation_supported; + uint32_t ahit; }; /* * One of these per allocated device. */ struct ufshci_controller { device_t dev; uint32_t quirks; #define UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE \ 1 /* QEMU does not support UIC POWER MODE */ #define UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE \ 2 /* Need an additional 200 ms of PA_TActivate */ #define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \ 4 /* Need to wait 1250us after power mode change */ #define UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY \ 8 /* Need to change the number of lanes before changing HS-GEAR. */ #define UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK \ 16 /* QEMU does not support Task Management Request */ #define UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS \ 32 /* QEMU does not support Well known logical units*/ +#define UFSHCI_QUIRK_BROKEN_AUTO_HIBERNATE \ + 64 /* Some controllers have the Auto hibernate feature enabled but it \ + does not work. 
*/ uint32_t ref_clk; struct cam_sim *ufshci_sim; struct cam_path *ufshci_path; struct cam_periph *ufs_device_wlun_periph; struct mtx ufs_device_wlun_mtx; struct mtx sc_mtx; uint32_t sc_unit; uint8_t sc_name[16]; struct ufshci_device ufs_dev; bus_space_tag_t bus_tag; bus_space_handle_t bus_handle; int resource_id; struct resource *resource; /* Currently, there is no UFSHCI that supports MSI, MSI-X. */ int msi_count; /* Fields for tracking progress during controller initialization. */ struct intr_config_hook config_hook; struct task reset_task; struct taskqueue *taskqueue; /* For shared legacy interrupt. */ int rid; struct resource *res; void *tag; uint32_t major_version; uint32_t minor_version; uint32_t enable_aborts; uint32_t num_io_queues; uint32_t max_hw_pend_io; /* Maximum logical unit number */ uint32_t max_lun_count; /* Maximum i/o size in bytes */ uint32_t max_xfer_size; /* Controller capacity */ uint32_t cap; /* Page size and log2(page_size) - 12 that we're currently using */ uint32_t page_size; /* Timeout value on device initialization */ uint32_t device_init_timeout_in_ms; /* Timeout value on UIC command */ uint32_t uic_cmd_timeout_in_ms; /* UTMR/UTR queue timeout period in seconds */ uint32_t timeout_period; /* UTMR/UTR queue retry count */ uint32_t retry_count; /* UFS Host Controller Interface Registers */ struct ufshci_registers *regs; /* UFS Transport Protocol Layer (UTP) */ struct ufshci_req_queue task_mgmt_req_queue; struct ufshci_req_queue transfer_req_queue; bool is_single_db_supported; /* 0 = supported */ bool is_mcq_supported; /* 1 = supported */ /* UFS Interconnect Layer (UIC) */ struct mtx uic_cmd_lock; uint32_t unipro_version; uint8_t hs_gear; uint32_t tx_lanes; uint32_t rx_lanes; uint32_t max_rx_hs_gear; uint32_t max_tx_lanes; uint32_t max_rx_lanes; bool is_failed; }; #define ufshci_mmio_offsetof(reg) offsetof(struct ufshci_registers, reg) #define ufshci_mmio_read_4(sc, reg) \ bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \ ufshci_mmio_offsetof(reg)) #define ufshci_mmio_write_4(sc, reg, val) \ bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \ ufshci_mmio_offsetof(reg), val) #define ufshci_printf(ctrlr, fmt, args...) \ device_printf(ctrlr->dev, fmt, ##args) /* UFSHCI */ void ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl, bool error); /* SIM */ uint8_t ufshci_sim_translate_scsi_to_ufs_lun(lun_id_t scsi_lun); uint64_t ufshci_sim_translate_ufs_to_scsi_lun(uint8_t ufs_lun); int ufshci_sim_attach(struct ufshci_controller *ctrlr); void ufshci_sim_detach(struct ufshci_controller *ctrlr); struct cam_periph *ufshci_sim_find_periph(struct ufshci_controller *ctrlr, uint8_t wlun); int ufshci_sim_send_ssu(struct ufshci_controller *ctrlr, bool start, uint8_t pwr_cond, bool immed); /* Controller */ int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev); void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev); void ufshci_ctrlr_reset(struct ufshci_controller *ctrlr); int ufshci_ctrlr_suspend(struct ufshci_controller *ctrlr, enum power_stype stype); int ufshci_ctrlr_resume(struct ufshci_controller *ctrlr, enum power_stype stype); int ufshci_ctrlr_disable(struct ufshci_controller *ctrlr); /* ctrlr defined as void * to allow use with config_intrhook. 
*/ void ufshci_ctrlr_start_config_hook(void *arg); void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr); int ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr, struct ufshci_request *req); int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr, struct ufshci_request *req); int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr, struct ufshci_request *req); int ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr); void ufshci_reg_dump(struct ufshci_controller *ctrlr); /* Device */ int ufshci_dev_init(struct ufshci_controller *ctrlr); int ufshci_dev_reset(struct ufshci_controller *ctrlr); int ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr); int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr); +void ufshci_dev_enable_auto_hibernate(struct ufshci_controller *ctrlr); +void ufshci_dev_init_auto_hibernate(struct ufshci_controller *ctrlr); int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr); void ufshci_dev_init_uic_link_state(struct ufshci_controller *ctrlr); int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr); int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr); int ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr); int ufshci_dev_get_current_power_mode(struct ufshci_controller *ctrlr, uint8_t *power_mode); int ufshci_dev_link_state_transition(struct ufshci_controller *ctrlr, enum ufshci_uic_link_state target_state); /* Controller Command */ void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun, uint8_t task_tag, uint8_t iid); void ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn, void *cb_arg); void ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn, void *cb_arg, struct ufshci_query_param param); void ufshci_ctrlr_cmd_send_scsi_command(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t *cmd_ptr, uint8_t cmd_len, uint32_t data_len, uint8_t lun, bool is_write); /* Request Queue */ bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue); int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr); int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr); void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr); void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr); void ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr); int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr); void ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr); int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr); void ufshci_req_queue_fail(struct ufshci_controller *ctrlr, struct ufshci_hw_queue *hwq); int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue, struct ufshci_request *req, bool is_admin); void ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr); /* Request Single Doorbell Queue */ int ufshci_req_sdb_construct(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt); void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue( struct ufshci_req_queue *req_queue); void ufshci_req_sdb_disable(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr, struct 
ufshci_req_queue *req_queue); int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue, struct ufshci_tracker **tr); void ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); void ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); bool ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr, uint8_t slot); bool ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr, uint8_t slot); void ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); void ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue); int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr); /* UIC Command */ int ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr); int ufshci_uic_hibernation_ready(struct ufshci_controller *ctrlr); int ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr); int ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr); int ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t *return_value); int ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t value); int ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t *return_value); int ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t value); int ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr); int ufshci_uic_send_dme_hibernate_enter(struct ufshci_controller *ctrlr); int ufshci_uic_send_dme_hibernate_exit(struct ufshci_controller *ctrlr); /* SYSCTL */ void ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr); int ufshci_attach(device_t dev); int ufshci_detach(device_t dev); /* * Wait for a command to complete using the ufshci_completion_poll_cb. Used in * limited contexts where the caller knows it's OK to block briefly while the * command runs. The ISR will run the callback which will set status->done to * true, usually within microseconds. If not, then after one second timeout * handler should reset the controller and abort all outstanding requests * including this polled one. If still not after ten seconds, then something is * wrong with the driver, and panic is the only way to recover. * * Most commands using this interface aren't actual I/O to the drive's media so * complete within a few microseconds. Adaptively spin for one tick to catch the * vast majority of these without waiting for a tick plus scheduling delays. * Since these are on startup, this drastically reduces startup time. 
*/ static __inline void ufshci_completion_poll(struct ufshci_completion_poll_status *status) { int timeout = ticks + 10 * hz; sbintime_t delta_t = SBT_1US; while (!atomic_load_acq_int(&status->done)) { if (timeout - ticks < 0) panic( "UFSHCI polled command failed to complete within 10s."); pause_sbt("ufshci_cpl", delta_t, 0, C_PREL(1)); delta_t = min(SBT_1MS, delta_t * 3 / 2); } } static __inline void ufshci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) { uint64_t *bus_addr = (uint64_t *)arg; KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg)); if (error != 0) printf("ufshci_single_map err %d\n", error); *bus_addr = seg[0].ds_addr; } static __inline struct ufshci_request * _ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg) { struct ufshci_request *req; KASSERT(how == M_WAITOK || how == M_NOWAIT, ("ufshci_allocate_request: invalid how %d", how)); req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO); if (req != NULL) { req->cb_fn = cb_fn; req->cb_arg = cb_arg; } return (req); } static __inline struct ufshci_request * ufshci_allocate_request_vaddr(void *payload, uint32_t payload_size, const int how, ufshci_cb_fn_t cb_fn, void *cb_arg) { struct ufshci_request *req; req = _ufshci_allocate_request(how, cb_fn, cb_arg); if (req != NULL) { if (payload_size) { req->payload = memdesc_vaddr(payload, payload_size); req->payload_valid = true; } } return (req); } static __inline struct ufshci_request * ufshci_allocate_request_bio(struct bio *bio, const int how, ufshci_cb_fn_t cb_fn, void *cb_arg) { struct ufshci_request *req; req = _ufshci_allocate_request(how, cb_fn, cb_arg); if (req != NULL) { req->payload = memdesc_bio(bio); req->payload_valid = true; } return (req); } #define ufshci_free_request(req) free(req, M_UFSHCI) void ufshci_ctrlr_shared_handler(void *arg); #endif /* __UFSHCI_PRIVATE_H__ */ diff --git a/sys/dev/ufshci/ufshci_sysctl.c b/sys/dev/ufshci/ufshci_sysctl.c index 30b0ccaeed13..495f087f3c50 100644 --- a/sys/dev/ufshci/ufshci_sysctl.c +++ b/sys/dev/ufshci/ufshci_sysctl.c @@ -1,260 +1,288 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. 
* Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #include #include #include #include #include "ufshci_private.h" +#include "ufshci_reg.h" static int ufshci_sysctl_timeout_period(SYSCTL_HANDLER_ARGS) { uint32_t *ptr = arg1; uint32_t newval = *ptr; int error = sysctl_handle_int(oidp, &newval, 0, req); if (error || (req->newptr == NULL)) return (error); if (newval > UFSHCI_MAX_TIMEOUT_PERIOD || newval < UFSHCI_MIN_TIMEOUT_PERIOD) { return (EINVAL); } else { *ptr = newval; } return (0); } static int ufshci_sysctl_num_cmds(SYSCTL_HANDLER_ARGS) { struct ufshci_controller *ctrlr = arg1; int64_t num_cmds = 0; int i; num_cmds = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_cmds; if (ctrlr->transfer_req_queue.hwq != NULL) { for (i = 0; i < ctrlr->num_io_queues; i++) num_cmds += ctrlr->transfer_req_queue.hwq[i].num_cmds; } return (sysctl_handle_64(oidp, &num_cmds, 0, req)); } static int ufshci_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS) { struct ufshci_controller *ctrlr = arg1; int64_t num_intr_handler_calls = 0; int i; num_intr_handler_calls = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_intr_handler_calls; if (ctrlr->transfer_req_queue.hwq != NULL) { for (i = 0; i < ctrlr->num_io_queues; i++) num_intr_handler_calls += ctrlr->transfer_req_queue .hwq[i] .num_intr_handler_calls; } return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req)); } static int ufshci_sysctl_num_retries(SYSCTL_HANDLER_ARGS) { struct ufshci_controller *ctrlr = arg1; int64_t num_retries = 0; int i; num_retries = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_retries; if (ctrlr->transfer_req_queue.hwq != NULL) { for (i = 0; i < ctrlr->num_io_queues; i++) num_retries += ctrlr->transfer_req_queue.hwq[i].num_retries; } return (sysctl_handle_64(oidp, &num_retries, 0, req)); } static int ufshci_sysctl_num_failures(SYSCTL_HANDLER_ARGS) { struct ufshci_controller *ctrlr = arg1; int64_t num_failures = 0; int i; num_failures = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_failures; if (ctrlr->transfer_req_queue.hwq != NULL) { for (i = 0; i < ctrlr->num_io_queues; i++) num_failures += ctrlr->transfer_req_queue.hwq[i].num_failures; } return (sysctl_handle_64(oidp, &num_failures, 0, req)); } +static int +ufshci_sysctl_ahit(SYSCTL_HANDLER_ARGS) +{ + struct ufshci_controller *ctrlr = arg1; + int64_t scale, timer; + const int64_t scale_factor = 10; + + scale = UFSHCIV(UFSHCI_AHIT_REG_TS, ctrlr->ufs_dev.ahit); + timer = UFSHCIV(UFSHCI_AHIT_REG_AH8ITV, ctrlr->ufs_dev.ahit); + + while (scale--) + timer *= scale_factor; + + return (sysctl_handle_64(oidp, &timer, 0, req)); +} + static void ufshci_sysctl_initialize_queue(struct ufshci_hw_queue *hwq, struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree) { struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree); SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries", CTLFLAG_RD, &hwq->num_entries, 0, "Number of entries in hardware queue"); SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers", CTLFLAG_RD, &hwq->num_trackers, 0, "Number of trackers pre-allocated for this queue pair"); SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head", CTLFLAG_RD, &hwq->sq_head, 0, "Current head of submission queue (as observed by driver)"); SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail", CTLFLAG_RD, &hwq->sq_tail, 0, "Current tail of submission queue (as observed by driver)"); SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head", CTLFLAG_RD, &hwq->cq_head, 0, "Current head of completion queue (as observed by driver)"); 
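The ufshci_sysctl_ahit handler just above reports the cached AHIT value in microseconds: it extracts the idle-timer field and multiplies it by 10 once per step of the timer-scale field. A minimal standalone sketch of that decode follows, assuming the UFSHCI AHIT layout of a 10-bit Auto-Hibernate Idle Timer Value (AH8ITV) in bits 9:0 and a 3-bit Timer Scale (TS) in bits 12:10; the ahit_to_microseconds() helper and its bit masks are illustrative and not part of this patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative decode of an AHIT register value into microseconds,
 * mirroring ufshci_sysctl_ahit(). Assumes AH8ITV in bits 9:0 and
 * TS (timer scale) in bits 12:10; each scale step multiplies by 10.
 */
static int64_t
ahit_to_microseconds(uint32_t ahit)
{
	int64_t timer = ahit & 0x3ff;		/* AH8ITV, bits 9:0 */
	int64_t scale = (ahit >> 10) & 0x7;	/* TS, bits 12:10 */

	while (scale--)
		timer *= 10;
	return (timer);
}

int
main(void)
{
	/* AH8ITV = 150, TS = 3 (10^3 us steps) -> 150000 us */
	printf("%lld us\n", (long long)ahit_to_microseconds((3u << 10) | 150));
	return (0);
}

Reporting the decoded value rather than the raw register means the new sysctl under dev.ufshci.<unit> shows a directly meaningful idle timeout.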
SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds", CTLFLAG_RD, &hwq->num_cmds, "Number of commands submitted"); SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls", CTLFLAG_RD, &hwq->num_intr_handler_calls, "Number of times interrupt handler was invoked (will typically be " "less than number of actual interrupts generated due to " "interrupt aggregation)"); SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_retries", CTLFLAG_RD, &hwq->num_retries, "Number of commands retried"); SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_failures", CTLFLAG_RD, &hwq->num_failures, "Number of commands ending in failure after all retries"); /* TODO: Implement num_ignored */ /* TODO: Implement recovery state */ /* TODO: Implement dump debug */ } void ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr) { struct sysctl_ctx_list *ctrlr_ctx; struct sysctl_oid *ctrlr_tree, *que_tree, *ioq_tree; struct sysctl_oid_list *ctrlr_list, *ioq_list; struct ufshci_device *dev = &ctrlr->ufs_dev; #define QUEUE_NAME_LENGTH 16 char queue_name[QUEUE_NAME_LENGTH]; int i; ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev); ctrlr_tree = device_get_sysctl_tree(ctrlr->dev); ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "major_version", CTLFLAG_RD, &ctrlr->major_version, 0, "UFS spec major version"); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "minor_version", CTLFLAG_RD, &ctrlr->minor_version, 0, "UFS spec minor version"); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "io_queue_mode", CTLFLAG_RD, &ctrlr->transfer_req_queue.queue_mode, 0, "Active host-side queuing scheme " "(Single-Doorbell or Multi-Circular-Queue)"); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_io_queues", CTLFLAG_RD, &ctrlr->num_io_queues, 0, "Number of I/O queue pairs"); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap", CTLFLAG_RD, &ctrlr->cap, 0, "Controller capabilities register value"); SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_enabled", CTLFLAG_RD, &dev->is_wb_enabled, 0, "WriteBooster enable/disable"); SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_flush_enabled", CTLFLAG_RD, &dev->is_wb_flush_enabled, 0, "WriteBooster flush enable/disable"); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_type", CTLFLAG_RD, &dev->wb_buffer_type, 0, "WriteBooster type"); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_size_mb", CTLFLAG_RD, &dev->wb_buffer_size_mb, 0, "WriteBooster buffer size in MB"); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_user_space_config_option", CTLFLAG_RD, &dev->wb_user_space_config_option, 0, "WriteBooster preserve user space mode"); SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "power_mode_supported", CTLFLAG_RD, &dev->power_mode_supported, 0, "Device power mode support"); + SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, + "auto_hibernation_supported", CTLFLAG_RD, + &dev->auto_hibernation_supported, 0, + "Device auto hibernation support"); + + SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, + "auto_hibernate_idle_timer_value", + CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0, + ufshci_sysctl_ahit, "IU", + "Auto-Hibernate Idle Timer Value (in microseconds)"); + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "power_mode", CTLFLAG_RD, &dev->power_mode, 0, "Current device power mode"); SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "timeout_period", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ctrlr->timeout_period, 0, ufshci_sysctl_timeout_period, "IU", "Timeout period for I/O queues (in
seconds)"); SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_cmds", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0, ufshci_sysctl_num_cmds, "IU", "Number of commands submitted"); SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_intr_handler_calls", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0, ufshci_sysctl_num_intr_handler_calls, "IU", "Number of times interrupt handler was invoked (will " "typically be less than number of actual interrupts " "generated due to coalescing)"); SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_retries", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0, ufshci_sysctl_num_retries, "IU", "Number of commands retried"); SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_failures", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0, ufshci_sysctl_num_failures, "IU", "Number of commands ending in failure after all retries"); que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "utmrq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "UTP Task Management Request Queue"); ufshci_sysctl_initialize_queue( &ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q], ctrlr_ctx, que_tree); /* * Make sure that we've constructed the I/O queues before setting up the * sysctls. Failed controllers won't allocate it, but we want the rest * of the sysctls to diagnose things. */ if (ctrlr->transfer_req_queue.hwq != NULL) { ioq_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "ioq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "UTP Transfer Request Queue (I/O Queue)"); ioq_list = SYSCTL_CHILDREN(ioq_tree); for (i = 0; i < ctrlr->num_io_queues; i++) { snprintf(queue_name, QUEUE_NAME_LENGTH, "%d", i); que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ioq_list, OID_AUTO, queue_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "IO Queue"); ufshci_sysctl_initialize_queue( &ctrlr->transfer_req_queue.hwq[i], ctrlr_ctx, que_tree); } } } diff --git a/sys/dev/ufshci/ufshci_uic_cmd.c b/sys/dev/ufshci/ufshci_uic_cmd.c index 29c143cec52c..c6e6afe3f688 100644 --- a/sys/dev/ufshci/ufshci_uic_cmd.c +++ b/sys/dev/ufshci/ufshci_uic_cmd.c @@ -1,316 +1,317 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. * Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #include #include #include #include "ufshci_private.h" #include "ufshci_reg.h" int ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr) { uint32_t is, hcs; int timeout; /* Wait for the IS flag to change */ timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms); while (1) { is = ufshci_mmio_read_4(ctrlr, is); if (UFSHCIV(UFSHCI_IS_REG_UPMS, is)) { /* Clear 'Power Mode completion status' */ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UPMS)); break; } if (timeout - ticks < 0) { ufshci_printf(ctrlr, "Power mode is not changed " "within %d ms\n", ctrlr->device_init_timeout_in_ms); return (ENXIO); } /* TODO: Replace busy-wait with interrupt-based pause. 
*/ DELAY(10); } /* Check HCS power mode change request status */ hcs = ufshci_mmio_read_4(ctrlr, hcs); if (UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs) != 0x01) { ufshci_printf(ctrlr, "Power mode change request status error: 0x%x\n", UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs)); return (ENXIO); } return (0); } int ufshci_uic_hibernation_ready(struct ufshci_controller *ctrlr) { uint32_t is, hcs; int timeout; /* Wait for the IS flag to change */ timeout = ticks + MSEC_2_TICKS(ctrlr->uic_cmd_timeout_in_ms); while (1) { is = ufshci_mmio_read_4(ctrlr, is); if (UFSHCIV(UFSHCI_IS_REG_UHES, is)) { /* Clear 'UIC Hibernate Enter Status' */ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UHES)); break; } if (UFSHCIV(UFSHCI_IS_REG_UHXS, is)) { /* Clear 'UIC Hibernate Exit Status' */ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UHXS)); break; } if (timeout - ticks < 0) { ufshci_printf(ctrlr, "Hibernation enter/exit are not completed " "within %d ms\n", ctrlr->uic_cmd_timeout_in_ms); return (ENXIO); } /* TODO: Replace busy-wait with interrupt-based pause. */ DELAY(10); } /* Check HCS power mode change request status */ hcs = ufshci_mmio_read_4(ctrlr, hcs); if (UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs) != 0x01) { ufshci_printf(ctrlr, "Hibernation enter/exit request status error: 0x%x\n", UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs)); return (ENXIO); } return (0); } int ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr) { uint32_t hcs; int timeout; /* Wait for the HCS flag to change */ timeout = ticks + MSEC_2_TICKS(ctrlr->uic_cmd_timeout_in_ms); while (1) { hcs = ufshci_mmio_read_4(ctrlr, hcs); if (UFSHCIV(UFSHCI_HCS_REG_UCRDY, hcs)) break; if (timeout - ticks < 0) { ufshci_printf(ctrlr, "UIC command is not ready " "within %d ms\n", ctrlr->uic_cmd_timeout_in_ms); return (ENXIO); } /* TODO: Replace busy-wait with interrupt-based pause. */ DELAY(10); } return (0); } static int ufshci_uic_wait_cmd(struct ufshci_controller *ctrlr, struct ufshci_uic_cmd *uic_cmd) { uint32_t is; int timeout; mtx_assert(&ctrlr->uic_cmd_lock, MA_OWNED); /* Wait for the IS flag to change */ timeout = ticks + MSEC_2_TICKS(ctrlr->uic_cmd_timeout_in_ms); int delta = 10; while (1) { is = ufshci_mmio_read_4(ctrlr, is); if (UFSHCIV(UFSHCI_IS_REG_UCCS, is)) { ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UCCS)); break; } if (timeout - ticks < 0) { ufshci_printf(ctrlr, "UIC command is not completed " "within %d ms\n", ctrlr->uic_cmd_timeout_in_ms); return (ENXIO); } DELAY(delta); delta = min(1000, delta * 2); } return (0); } static int ufshci_uic_send_cmd(struct ufshci_controller *ctrlr, struct ufshci_uic_cmd *uic_cmd, uint32_t *return_value) { int error; uint32_t config_result_code; mtx_lock(&ctrlr->uic_cmd_lock); error = ufshci_uic_cmd_ready(ctrlr); if (error) { mtx_unlock(&ctrlr->uic_cmd_lock); return (ENXIO); } ufshci_mmio_write_4(ctrlr, ucmdarg1, uic_cmd->argument1); ufshci_mmio_write_4(ctrlr, ucmdarg2, uic_cmd->argument2); ufshci_mmio_write_4(ctrlr, ucmdarg3, uic_cmd->argument3); ufshci_mmio_write_4(ctrlr, uiccmd, uic_cmd->opcode); error = ufshci_uic_wait_cmd(ctrlr, uic_cmd); mtx_unlock(&ctrlr->uic_cmd_lock); if (error) return (ENXIO); config_result_code = ufshci_mmio_read_4(ctrlr, ucmdarg2); if (config_result_code) { ufshci_printf(ctrlr, - "Failed to send UIC command. 
(config result code = 0x%x)\n", - config_result_code); + "Failed to send UIC command (Opcode: 0x%x" + ", config result code = 0x%x)\n", + uic_cmd->opcode, config_result_code); } if (return_value != NULL) *return_value = ufshci_mmio_read_4(ctrlr, ucmdarg3); return (0); } int ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_LINK_STARTUP; uic_cmd.argument1 = 0; uic_cmd.argument2 = 0; uic_cmd.argument3 = 0; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL)); } int ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t *return_value) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_GET; uic_cmd.argument1 = attribute << 16; uic_cmd.argument2 = 0; uic_cmd.argument3 = 0; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, return_value)); } int ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t value) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_SET; uic_cmd.argument1 = attribute << 16; /* This driver always sets only volatile values. */ uic_cmd.argument2 = UFSHCI_ATTR_SET_TYPE_NORMAL << 16; uic_cmd.argument3 = value; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL)); } int ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t *return_value) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_PEER_GET; uic_cmd.argument1 = attribute << 16; uic_cmd.argument2 = 0; uic_cmd.argument3 = 0; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, return_value)); } int ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t value) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_PEER_SET; uic_cmd.argument1 = attribute << 16; /* This driver always sets only volatile values. */ uic_cmd.argument2 = UFSHCI_ATTR_SET_TYPE_NORMAL << 16; uic_cmd.argument3 = value; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL)); } int ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_ENDPOINT_RESET; uic_cmd.argument1 = 0; uic_cmd.argument2 = 0; uic_cmd.argument3 = 0; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL)); } int ufshci_uic_send_dme_hibernate_enter(struct ufshci_controller *ctrlr) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_HIBERNATE_ENTER; uic_cmd.argument1 = 0; uic_cmd.argument2 = 0; uic_cmd.argument3 = 0; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL)); } int ufshci_uic_send_dme_hibernate_exit(struct ufshci_controller *ctrlr) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_HIBERNATE_EXIT; uic_cmd.argument1 = 0; uic_cmd.argument2 = 0; uic_cmd.argument3 = 0; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL)); }
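As a closing illustration of how the DME hibernate helpers above pair with ufshci_uic_hibernation_ready(), which polls IS.UHES/IS.UHXS and then validates HCS.UPMCRS, here is a short hypothetical wrapper. It is a sketch only and not part of this patch; the driver's real callers are the link-state transition and suspend/resume paths declared in ufshci_private.h.

/*
 * Hypothetical wrapper (not part of this patch): issue a manual DME
 * hibernate enter or exit, then wait for the host controller to report
 * completion. Assumes it is called from a context that may sleep.
 */
static int
ufshci_link_hibernate_sketch(struct ufshci_controller *ctrlr, bool enter)
{
	int error;

	error = enter ? ufshci_uic_send_dme_hibernate_enter(ctrlr) :
	    ufshci_uic_send_dme_hibernate_exit(ctrlr);
	if (error)
		return (error);

	/* Poll IS.UHES / IS.UHXS and check HCS.UPMCRS. */
	return (ufshci_uic_hibernation_ready(ctrlr));
}

When Auto-Hibernate is active (AHIT programmed with a non-zero idle timer), the host controller performs the equivalent enter/exit transitions in hardware after the configured idle period, without driver-issued DME commands.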