diff --git a/sys/dev/ufshci/ufshci.h b/sys/dev/ufshci/ufshci.h index b96d82ff836e..b055d2d2d769 100644 --- a/sys/dev/ufshci/ufshci.h +++ b/sys/dev/ufshci/ufshci.h @@ -1,1017 +1,1086 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. * Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #ifndef __UFSHCI_H__ #define __UFSHCI_H__ #include #include /* * Note: This driver currently assumes a little-endian architecture. * Big-endian support is not yet implemented. */ /* MIPI UniPro spec 2.0, section 5.8.1 "PHY Adapter Common Attributes" */ #define PA_AvailTxDataLanes 0x1520 #define PA_AvailRxDataLanes 0x1540 /* * MIPI UniPro spec 2.0, section 5.8.2 "PHY Adapter M-PHY-Specific * Attributes" */ #define PA_ConnectedTxDataLanes 0x1561 #define PA_ConnectedRxDataLanes 0x1581 #define PA_MaxRxHSGear 0x1587 #define PA_Granularity 0x15AA #define PA_TActivate 0x15A8 #define PA_RemoteVerInfo 0x15A0 #define PA_LocalVerInfo 0x15A9 /* UFSHCI spec 4.1, section 7.4 "UIC Power Mode Change" */ #define PA_ActiveTxDataLanes 0x1560 #define PA_ActiveRxDataLanes 0x1580 #define PA_TxGear 0x1568 #define PA_RxGear 0x1583 #define PA_TxTermination 0x1569 #define PA_RxTermination 0x1584 #define PA_HSSeries 0x156A #define PA_PWRModeUserData0 0x15B0 #define PA_PWRModeUserData1 0x15B1 #define PA_PWRModeUserData2 0x15B2 #define PA_PWRModeUserData3 0x15B3 #define PA_PWRModeUserData4 0x15B4 #define PA_PWRModeUserData5 0x15B5 #define PA_TxHsAdaptType 0x15D4 #define PA_PWRMode 0x1571 #define DME_LocalFC0ProtectionTimeOutVal 0xD041 #define DME_LocalTC0ReplayTimeOutVal 0xD042 #define DME_LocalAFC0ReqTimeOutVal 0xD043 /* Currently, UFS uses TC0 only. */ #define DL_FC0ProtectionTimeOutVal_Default 8191 #define DL_TC0ReplayTimeOutVal_Default 65535 #define DL_AFC0ReqTimeOutVal_Default 32767 /* UFS Spec 4.1, section 6.4 "Reference Clock" */ enum ufshci_attribute_reference_clock { UFSHCI_REF_CLK_19_2MHz = 0x0, UFSHCI_REF_CLK_26MHz = 0x1, UFSHCI_REF_CLK_38_4MHz = 0x2, UFSHCI_REF_CLK_OBSOLETE = 0x3, }; /* UFS spec 4.1, section 9 "UFS UIC Layer: MIPI Unipro" */ enum ufshci_uic_cmd_opcode { /* Configuration */ UFSHCI_DME_GET = 0x01, UFSHCI_DME_SET = 0x02, UFSHCI_DME_PEER_GET = 0x03, UFSHCI_DME_PEER_SET = 0x04, /* Control */ UFSHCI_DME_POWER_ON = 0x10, UFSHCI_DME_POWER_OFF = 0x11, UFSHCI_DME_ENABLE = 0x12, UFSHCI_DME_RESET = 0x14, UFSHCI_DME_ENDPOINT_RESET = 0x15, UFSHCI_DME_LINK_STARTUP = 0x16, UFSHCI_DME_HIBERNATE_ENTER = 0x17, UFSHCI_DME_HIBERNATE_EXIT = 0x18, UFSHCI_DME_TEST_MODE = 0x1a, }; /* UFSHCI spec 4.1, section 5.6.3 "Offset 98h: UICCMDARG2 – UIC Command * Argument" */ enum ufshci_uic_cmd_attr_set_type { UFSHCI_ATTR_SET_TYPE_NORMAL = 0, /* volatile value */ UFSHCI_ATTR_SET_TYPE_STATIC = 1, /* non-volatile reset value */ }; struct ufshci_uic_cmd { uint8_t opcode; uint32_t argument1; uint32_t argument2; uint32_t argument3; }; /* UFS spec 4.1, section 10.5 "UPIU Transactions" */ enum transaction_code { UFSHCI_UPIU_TRANSACTION_CODE_NOP_OUT = 0x00, UFSHCI_UPIU_TRANSACTION_CODE_COMMAND = 0x01, UFSHCI_UPIU_TRANSACTION_CODE_DATA_OUT = 0x02, UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_REQUEST = 0x04, UFSHCI_UPIU_TRANSACTION_CODE_QUERY_REQUEST = 0x16, UFSHCI_UPIU_TRANSACTION_CODE_NOP_IN = 0x20, UFSHCI_UPIU_TRANSACTION_CODE_RESPONSE = 0x21, UFSHCI_UPIU_TRANSACTION_CODE_DATA_IN = 0x22, UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_RESPONSE = 0x24, UFSHCI_UPIU_TRANSACTION_CODE_READY_TO_TRANSFER = 0x31, UFSHCI_UPIU_TRANSACTION_CODE_QUERY_RESPONSE = 0x36, UFSHCI_UPIU_TRANSACTION_CODE_REJECT_UPIU = 0x3f, }; enum 
overall_command_status { UFSHCI_DESC_SUCCESS = 0x0, UFSHCI_DESC_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01, UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES = 0x02, UFSHCI_DESC_MISMATCH_DATA_BUFFER_SIZE = 0x03, UFSHCI_DESC_MISMATCH_RESPONSE_UPIU_SIZE = 0x04, UFSHCI_DESC_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05, UFSHCI_DESC_ABORTED = 0x06, UFSHCI_DESC_HOST_CONTROLLER_FATAL_ERROR = 0x07, UFSHCI_DESC_DEVICEFATALERROR = 0x08, UFSHCI_DESC_INVALID_CRYPTO_CONFIGURATION = 0x09, UFSHCI_DESC_GENERAL_CRYPTO_ERROR = 0x0A, UFSHCI_DESC_INVALID = 0x0F, }; enum response_code { UFSHCI_RESPONSE_CODE_TARGET_SUCCESS = 0x00, UFSHCI_RESPONSE_CODE_TARGET_FAILURE = 0x01, UFSHCI_RESPONSE_CODE_PARAMETER_NOTREADABLE = 0xF6, UFSHCI_RESPONSE_CODE_PARAMETER_NOTWRITEABLE = 0xF7, UFSHCI_RESPONSE_CODE_PARAMETER_ALREADYWRITTEN = 0xF8, UFSHCI_RESPONSE_CODE_INVALID_LENGTH = 0xF9, UFSHCI_RESPONSE_CODE_INVALID_VALUE = 0xFA, UFSHCI_RESPONSE_CODE_INVALID_SELECTOR = 0xFB, UFSHCI_RESPONSE_CODE_INVALID_INDEX = 0xFC, UFSHCI_RESPONSE_CODE_INVALID_IDN = 0xFD, UFSHCI_RESPONSE_CODE_INVALID_OPCODE = 0xFE, UFSHCI_RESPONSE_CODE_GENERAL_FAILURE = 0xFF, }; /* UFSHCI spec 4.1, section 6.1.1 "UTP Transfer Request Descriptor" */ enum ufshci_command_type { UFSHCI_COMMAND_TYPE_UFS_STORAGE = 0x01, UFSHCI_COMMAND_TYPE_NULLIFIED_UTRD = 0x0F, }; enum ufshci_data_direction { UFSHCI_DATA_DIRECTION_NO_DATA_TRANSFER = 0x00, UFSHCI_DATA_DIRECTION_FROM_SYS_TO_TGT = 0x01, UFSHCI_DATA_DIRECTION_FROM_TGT_TO_SYS = 0b10, UFSHCI_DATA_DIRECTION_RESERVED = 0b11, }; enum ufshci_utr_overall_command_status { UFSHCI_UTR_OCS_SUCCESS = 0x0, UFSHCI_UTR_OCS_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01, UFSHCI_UTR_OCS_INVALID_PRDT_ATTRIBUTES = 0x02, UFSHCI_UTR_OCS_MISMATCH_DATA_BUFFER_SIZE = 0x03, UFSHCI_UTR_OCS_MISMATCH_RESPONSE_UPIU_SIZE = 0x04, UFSHCI_UTR_OCS_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05, UFSHCI_UTR_OCS_ABORTED = 0x06, UFSHCI_UTR_OCS_HOST_CONTROLLER_FATAL_ERROR = 0x07, UFSHCI_UTR_OCS_DEVICE_FATAL_ERROR = 0x08, UFSHCI_UTR_OCS_INVALID_CRYPTO_CONFIGURATION = 0x09, UFSHCI_UTR_OCS_GENERAL_CRYPTO_ERROR = 0x0A, UFSHCI_UTR_OCS_INVALID = 0xF, }; struct ufshci_utp_xfer_req_desc { /* dword 0 */ uint32_t cci : 8; /* [7:0] */ uint32_t total_ehs_length : 8; /* [15:8] */ uint32_t reserved0 : 7; /* [22:16] */ uint32_t ce : 1; /* [23] */ uint32_t interrupt : 1; /* [24] */ uint32_t data_direction : 2; /* [26:25] */ uint32_t reserved1 : 1; /* [27] */ uint32_t command_type : 4; /* [31:28] */ /* dword 1 */ uint32_t data_unit_number_lower; /* [31:0] */ /* dword 2 */ uint8_t overall_command_status; /* [7:0] */ uint8_t common_data_size; /* [15:8] */ uint16_t last_data_byte_count; /* [31:16] */ /* dword 3 */ uint32_t data_unit_number_upper; /* [31:0] */ /* dword 4 */ uint32_t utp_command_descriptor_base_address; /* [31:0] */ /* dword 5 */ uint32_t utp_command_descriptor_base_address_upper; /* [31:0] */ /* dword 6 */ uint16_t response_upiu_length; /* [15:0] */ uint16_t response_upiu_offset; /* [31:16] */ /* dword 7 */ uint16_t prdt_length; /* [15:0] */ uint16_t prdt_offset; /* [31:16] */ } __packed __aligned(8); _Static_assert(sizeof(struct ufshci_utp_xfer_req_desc) == 32, "ufshci_utp_xfer_req_desc must be 32 bytes"); /* * According to the UFSHCI specification, the size of the UTP command * descriptor is as follows. The size of the transfer request is not limited, * a transfer response can be as long as 65535 * dwords, and a PRDT can be as * long as 65535 * PRDT entry size (16 bytes). However, for ease of use, this * UFSHCI driver imposes the following limits. 
The size of the transfer * request and the transfer response is 1024 bytes or less. The PRDT region * limits the number of scatter/gather entries to 256 + 1, using a total of 4096 + * 16 bytes. Therefore, only 8KB is allocated for the UTP command * descriptor. */ #define UFSHCI_UTP_COMMAND_DESCRIPTOR_SIZE 8192 #define UFSHCI_UTP_XFER_REQ_SIZE 512 #define UFSHCI_UTP_XFER_RESP_SIZE 512 /* * To reduce the size of the UTP Command Descriptor (8KB), we must use only * 256 + 1 PRDT entries. The reason for adding the 1 is that if the data is * not aligned, one additional PRDT entry is used. */ #define UFSHCI_MAX_PRDT_ENTRY_COUNT (256 + 1) /* UFSHCI spec 4.1, section 6.1.2 "UTP Command Descriptor" */ struct ufshci_prdt_entry { /* dword 0 */ uint32_t data_base_address; /* [31:0] */ /* dword 1 */ uint32_t data_base_address_upper; /* [31:0] */ /* dword 2 */ uint32_t reserved; /* [31:0] */ /* dword 3 */ uint32_t data_byte_count; /* [17:0] Maximum byte * count is 256KB */ } __packed __aligned(8); _Static_assert(sizeof(struct ufshci_prdt_entry) == 16, "ufshci_prdt_entry must be 16 bytes"); struct ufshci_utp_cmd_desc { uint8_t command_upiu[UFSHCI_UTP_XFER_REQ_SIZE]; uint8_t response_upiu[UFSHCI_UTP_XFER_RESP_SIZE]; uint8_t prd_table[sizeof(struct ufshci_prdt_entry) * UFSHCI_MAX_PRDT_ENTRY_COUNT]; uint8_t padding[3072 - sizeof(struct ufshci_prdt_entry)]; } __packed __aligned(128); _Static_assert(sizeof(struct ufshci_utp_cmd_desc) == UFSHCI_UTP_COMMAND_DESCRIPTOR_SIZE, "ufshci_utp_cmd_desc must be 8192 bytes"); #define UFSHCI_UTP_TASK_MGMT_REQ_SIZE 32 #define UFSHCI_UTP_TASK_MGMT_RESP_SIZE 32 enum ufshci_utmr_overall_command_status { UFSHCI_UTMR_OCS_SUCCESS = 0x0, UFSHCI_UTMR_OCS_INVALID_TASK_MANAGEMENT_FUNCTION_ATTRIBUTES = 0x01, UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_REQUEST_SIZE = 0x02, UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_RESPONSE_SIZE = 0x03, UFSHCI_UTMR_OCS_PEER_COMMUNICATION_FAILURE = 0x04, UFSHCI_UTMR_OCS_ABORTED = 0x05, UFSHCI_UTMR_OCS_FATAL_ERROR = 0x06, UFSHCI_UTMR_OCS_DEVICE_FATAL_ERROR = 0x07, UFSHCI_UTMR_OCS_INVALID = 0xF, }; /* UFSHCI spec 4.1, section 6.3.1 "UTP Task Management Request Descriptor" */ struct ufshci_utp_task_mgmt_req_desc { /* dword 0 */ uint32_t reserved0 : 24; /* [23:0] */ uint32_t interrupt : 1; /* [24] */ uint32_t reserved1 : 7; /* [31:25] */ /* dword 1 */ uint32_t reserved2; /* [31:0] */ /* dword 2 */ uint8_t overall_command_status; /* [7:0] */ uint8_t reserved3; /* [15:8] */ uint16_t reserved4; /* [31:16] */ /* dword 3 */ uint32_t reserved5; /* [31:0] */ /* dword 4-11 */ uint8_t request_upiu[UFSHCI_UTP_TASK_MGMT_REQ_SIZE]; /* dword 12-19 */ uint8_t response_upiu[UFSHCI_UTP_TASK_MGMT_RESP_SIZE]; } __packed __aligned(8); _Static_assert(sizeof(struct ufshci_utp_task_mgmt_req_desc) == 80, "ufshci_utp_task_mgmt_req_desc must be 80 bytes"); /* UFS spec 4.1, section 10.6.2 "Basic Header Format" */ struct ufshci_upiu_header { /* dword 0 */ union { struct { uint8_t trans_code : 6; /* [5:0] */ uint8_t dd : 1; /* [6] */ uint8_t hd : 1; /* [7] */ }; uint8_t trans_type; }; union { struct { uint8_t task_attribute : 2; /* [1:0] */ uint8_t cp : 1; /* [2] */ uint8_t retransmit_indicator : 1; /* [3] */ #define UFSHCI_OPERATIONAL_FLAG_W 0x2 #define UFSHCI_OPERATIONAL_FLAG_R 0x4 uint8_t operational_flags : 4; /* [7:4] */ }; uint8_t flags; }; uint8_t lun; uint8_t task_tag; /* dword 1 */ #define UFSHCI_COMMAND_SET_TYPE_SCSI 0 uint8_t cmd_set_type : 4; /* [3:0] */ uint8_t iid : 4; /* [7:4] */ uint8_t ext_iid_or_function; uint8_t response; uint8_t ext_iid_or_status; /* dword 2 */ uint8_t 
ehs_length; uint8_t device_infomation; uint16_t data_segment_length; /* (Big-endian) */ } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_upiu_header) == 12, "ufshci_upiu_header must be 12 bytes"); #define UFSHCI_MAX_UPIU_SIZE 512 #define UFSHCI_UPIU_ALIGNMENT 8 /* UPIU requires 64-bit alignment. */ struct ufshci_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3-127 */ uint8_t reserved[UFSHCI_MAX_UPIU_SIZE - sizeof(struct ufshci_upiu_header)]; } __packed __aligned(8); _Static_assert(sizeof(struct ufshci_upiu) == 512, "ufshci_upiu must be 512 bytes"); /* UFS Spec 4.1, section 10.7.1 "COMMAND UPIU" */ struct ufshci_cmd_command_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3 */ uint32_t expected_data_transfer_length; /* (Big-endian) */ /* dword 4-7 */ uint8_t cdb[16]; } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_cmd_command_upiu) == 32, "bad size for ufshci_cmd_command_upiu"); _Static_assert(sizeof(struct ufshci_cmd_command_upiu) <= UFSHCI_UTP_XFER_REQ_SIZE, "bad size for ufshci_cmd_command_upiu"); _Static_assert(sizeof(struct ufshci_cmd_command_upiu) % UFSHCI_UPIU_ALIGNMENT == 0, "UPIU requires 64-bit alignment"); /* UFS Spec 4.1, section 10.7.2 "RESPONSE UPIU" */ struct ufshci_cmd_response_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3 */ uint32_t residual_transfer_count; /* (Big-endian) */ /* dword 4-7 */ uint8_t reserved[16]; /* Sense Data */ uint16_t sense_data_len; /* (Big-endian) */ uint8_t sense_data[18]; /* Add padding to satisfy UFSHCI_UPIU_ALIGNMENT. */ uint8_t padding[4]; } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_cmd_response_upiu) == 56, "bad size for ufshci_cmd_response_upiu"); _Static_assert(sizeof(struct ufshci_cmd_response_upiu) <= UFSHCI_UTP_XFER_RESP_SIZE, "bad size for ufshci_cmd_response_upiu"); _Static_assert(sizeof(struct ufshci_cmd_response_upiu) % UFSHCI_UPIU_ALIGNMENT == 0, "UPIU requires 64-bit alignment"); enum task_management_function { UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK = 0x01, UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK_SET = 0x02, UFSHCI_TASK_MGMT_FUNCTION_CLEAR_TASK_SET = 0x04, UFSHCI_TASK_MGMT_FUNCTION_LOGICAL_UNIT_RESET = 0x08, UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASK = 0x80, UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASKSET = 0x81, }; /* UFS Spec 4.1, section 10.7.6 "TASK MANAGEMENT REQUEST UPIU" */ struct ufshci_task_mgmt_request_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3 */ uint32_t input_param1; /* (Big-endian) */ /* dword 4 */ uint32_t input_param2; /* (Big-endian) */ /* dword 5 */ uint32_t input_param3; /* (Big-endian) */ /* dword 6-7 */ uint8_t reserved[8]; } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) == 32, "bad size for ufshci_task_mgmt_request_upiu"); _Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) <= UFSHCI_UTP_XFER_REQ_SIZE, "bad size for ufshci_task_mgmt_request_upiu"); _Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) % UFSHCI_UPIU_ALIGNMENT == 0, "UPIU requires 64-bit alignment"); enum task_management_service_response { UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE = 0x00, UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_NOT_SUPPORTED = 0x04, UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_FAILED = 0x05, UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED = 0x08, UFSHCI_TASK_MGMT_SERVICE_RESPONSE_INCORRECT_LUN = 0x09, }; /* UFS Spec 4.1, section 10.7.7 "TASK MANAGEMENT RESPONSE UPIU" */ struct ufshci_task_mgmt_response_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* 
dword 3 */ uint32_t output_param1; /* (Big-endian) */ /* dword 4 */ uint32_t output_param2; /* (Big-endian) */ /* dword 5-7 */ uint8_t reserved[12]; } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) == 32, "bad size for ufshci_task_mgmt_response_upiu"); _Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) <= UFSHCI_UTP_XFER_RESP_SIZE, "bad size for ufshci_task_mgmt_response_upiu"); _Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) % UFSHCI_UPIU_ALIGNMENT == 0, "UPIU requires 64-bit alignment"); /* UFS Spec 4.1, section 10.7.8 "QUERY REQUEST UPIU" */ enum ufshci_query_function { UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01, UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81, }; enum ufshci_query_opcode { UFSHCI_QUERY_OPCODE_NOP = 0, UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR, UFSHCI_QUERY_OPCODE_WRITE_DESCRIPTOR, UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE, UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE, UFSHCI_QUERY_OPCODE_READ_FLAG, UFSHCI_QUERY_OPCODE_SET_FLAG, UFSHCI_QUERY_OPCODE_CLEAR_FLAG, UFSHCI_QUERY_OPCODE_TOGGLE_FLAG, }; struct ufshci_query_param { enum ufshci_query_function function; enum ufshci_query_opcode opcode; uint8_t type; uint8_t index; uint8_t selector; uint64_t value; size_t desc_size; }; struct ufshci_query_request_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3 */ uint8_t opcode; uint8_t idn; uint8_t index; uint8_t selector; /* dword 4-5 */ union { /* The Write Attribute opcode uses a 64-bit value. */ uint64_t value_64; /* (Big-endian) */ struct { uint8_t reserved1[2]; uint16_t length; /* (Big-endian) */ uint32_t value_32; /* (Big-endian) */ }; } __packed __aligned(4); /* dword 6 */ uint32_t reserved2; /* dword 7 */ uint32_t reserved3; uint8_t command_data[256]; } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_query_request_upiu) == 288, "bad size for ufshci_query_request_upiu"); _Static_assert(sizeof(struct ufshci_query_request_upiu) <= UFSHCI_UTP_XFER_REQ_SIZE, "bad size for ufshci_query_request_upiu"); _Static_assert(sizeof(struct ufshci_query_request_upiu) % UFSHCI_UPIU_ALIGNMENT == 0, "UPIU requires 64-bit alignment"); /* UFS Spec 4.1, section 10.7.9 "QUERY RESPONSE UPIU" */ enum ufshci_query_response_code { UFSHCI_QUERY_RESP_CODE_SUCCESS = 0x00, UFSHCI_QUERY_RESP_CODE_PARAMETER_NOT_READABLE = 0xf6, UFSHCI_QUERY_RESP_CODE_PARAMETER_NOT_WRITEABLE = 0xf7, UFSHCI_QUERY_RESP_CODE_PARAMETER_ALREADY_WRITTEN = 0xf8, UFSHCI_QUERY_RESP_CODE_INVALID_LENGTH = 0xf9, UFSHCI_QUERY_RESP_CODE_INVALID_VALUE = 0xfa, UFSHCI_QUERY_RESP_CODE_INVALID_SELECTOR = 0xfb, UFSHCI_QUERY_RESP_CODE_INVALID_INDEX = 0xfc, UFSHCI_QUERY_RESP_CODE_INVALID_IDN = 0xfd, UFSHCI_QUERY_RESP_CODE_INVALID_OPCODE = 0xfe, UFSHCI_QUERY_RESP_CODE_GENERAL_FAILURE = 0xff, }; struct ufshci_query_response_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3 */ uint8_t opcode; uint8_t idn; uint8_t index; uint8_t selector; /* dword 4-5 */ union { /* The Read / Write Attribute opcodes use a 64-bit value. 
*/ uint64_t value_64; /* (Big-endian) */ struct { uint8_t reserved1[2]; uint16_t length; /* (Big-endian) */ union { uint32_t value_32; /* (Big-endian) */ struct { uint8_t reserved2[3]; uint8_t flag_value; }; }; }; } __packed __aligned(4); /* dword 6 */ uint8_t reserved3[4]; /* dword 7 */ uint8_t reserved4[4]; uint8_t command_data[256]; } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_query_response_upiu) == 288, "bad size for ufshci_query_response_upiu"); _Static_assert(sizeof(struct ufshci_query_response_upiu) <= UFSHCI_UTP_XFER_RESP_SIZE, "bad size for ufshci_query_response_upiu"); _Static_assert(sizeof(struct ufshci_query_response_upiu) % UFSHCI_UPIU_ALIGNMENT == 0, "UPIU requires 64-bit alignment"); /* UFS 4.1, section 10.7.11 "NOP OUT UPIU" */ struct ufshci_nop_out_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3-7 */ uint8_t reserved[20]; } __packed __aligned(8); _Static_assert(sizeof(struct ufshci_nop_out_upiu) == 32, "ufshci_upiu_nop_out must be 32 bytes"); /* UFS 4.1, section 10.7.12 "NOP IN UPIU" */ struct ufshci_nop_in_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3-7 */ uint8_t reserved[20]; } __packed __aligned(8); _Static_assert(sizeof(struct ufshci_nop_in_upiu) == 32, "ufshci_upiu_nop_in must be 32 bytes"); union ufshci_reponse_upiu { struct ufshci_upiu_header header; struct ufshci_cmd_response_upiu cmd_response_upiu; struct ufshci_query_response_upiu query_response_upiu; struct ufshci_task_mgmt_response_upiu task_mgmt_response_upiu; struct ufshci_nop_in_upiu nop_in_upiu; }; struct ufshci_completion { union ufshci_reponse_upiu response_upiu; size_t size; }; typedef void (*ufshci_cb_fn_t)(void *, const struct ufshci_completion *, bool); /* * UFS Spec 4.1, section 14.1 "UFS Descriptors" * All descriptors use big-endian byte ordering. */ enum ufshci_descriptor_type { UFSHCI_DESC_TYPE_DEVICE = 0x00, UFSHCI_DESC_TYPE_CONFIGURATION = 0x01, UFSHCI_DESC_TYPE_UNIT = 0x02, UFSHCI_DESC_TYPE_INTERCONNECT = 0x04, UFSHCI_DESC_TYPE_STRING = 0x05, UFSHCI_DESC_TYPE_GEOMETRY = 0x07, UFSHCI_DESC_TYPE_POWER = 0x08, UFSHCI_DESC_TYPE_DEVICE_HEALTH = 0x09, UFSHCI_DESC_TYPE_FBO_EXTENSION_SPECIFICATION = 0x0a, }; /* * UFS Spec 4.1, section 14.1.5.2 "Device Descriptor" * DeviceDescriptor uses big-endian byte ordering. 
*/ struct ufshci_device_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint8_t bDevice; uint8_t bDeviceClass; uint8_t bDeviceSubClass; uint8_t bProtocol; uint8_t bNumberLU; uint8_t bNumberWLU; uint8_t bBootEnable; uint8_t bDescrAccessEn; uint8_t bInitPowerMode; uint8_t bHighPriorityLUN; uint8_t bSecureRemovalType; uint8_t bSecurityLU; uint8_t bBackgroundOpsTermLat; uint8_t bInitActiveICCLevel; /* 0x10 */ uint16_t wSpecVersion; uint16_t wManufactureDate; uint8_t iManufacturerName; uint8_t iProductName; uint8_t iSerialNumber; uint8_t iOemID; uint16_t wManufacturerID; uint8_t bUD0BaseOffset; uint8_t bUDConfigPLength; uint8_t bDeviceRTTCap; uint16_t wPeriodicRTCUpdate; uint8_t bUfsFeaturesSupport; /* 0x20 */ uint8_t bFFUTimeout; uint8_t bQueueDepth; uint16_t wDeviceVersion; uint8_t bNumSecureWPArea; uint32_t dPSAMaxDataSize; uint8_t bPSAStateTimeout; uint8_t iProductRevisionLevel; uint8_t Reserved[5]; /* 0x2a */ /* 0x30 */ uint8_t ReservedUME[16]; /* 0x40 */ uint8_t ReservedHpb[3]; uint8_t Reserved2[12]; uint32_t dExtendedUfsFeaturesSupport; uint8_t bWriteBoosterBufferPreserveUserSpaceEn; uint8_t bWriteBoosterBufferType; uint32_t dNumSharedWriteBoosterBufferAllocUnits; } __packed; _Static_assert(sizeof(struct ufshci_device_descriptor) == 89, "bad size for ufshci_device_descriptor"); +/* Defines the bit fields of dExtendedUfsFeaturesSupport. */ +enum ufshci_desc_wb_ext_ufs_feature { + UFSHCI_DESC_EXT_UFS_FEATURE_FFU = (1 << 0), + UFSHCI_DESC_EXT_UFS_FEATURE_PSA = (1 << 1), + UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LIFE_SPAN = (1 << 2), + UFSHCI_DESC_EXT_UFS_FEATURE_REFRESH_OP = (1 << 3), + UFSHCI_DESC_EXT_UFS_FEATURE_TOO_HIGH_TEMP = (1 << 4), + UFSHCI_DESC_EXT_UFS_FEATURE_TOO_LOW_TEMP = (1 << 5), + UFSHCI_DESC_EXT_UFS_FEATURE_EXT_TEMP = (1 << 6), + UFSHCI_DESC_EXT_UFS_FEATURE_HPB_SUPPORT = (1 << 7), + UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER = (1 << 8), + UFSHCI_DESC_EXT_UFS_FEATURE_PERF_THROTTLING = (1 << 9), + UFSHCI_DESC_EXT_UFS_FEATURE_ADVANCED_RPMB = (1 << 10), + UFSHCI_DESC_EXT_UFS_FEATURE_ZONED_UFS_EXTENSION = (1 << 11), + UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LEVEL_EXCEPTION = (1 << 12), + UFSHCI_DESC_EXT_UFS_FEATURE_HID = (1 << 13), + UFSHCI_DESC_EXT_UFS_FEATURE_BARRIER = (1 << 14), + UFSHCI_DESC_EXT_UFS_FEATURE_CLEAR_ERROR_HISTORY = (1 << 15), + UFSHCI_DESC_EXT_UFS_FEATURE_EXT_IID = (1 << 16), + UFSHCI_DESC_EXT_UFS_FEATURE_FBO = (1 << 17), + UFSHCI_DESC_EXT_UFS_FEATURE_FAST_RECOVERY_MODE = (1 << 18), + UFSHCI_DESC_EXT_UFS_FEATURE_RPMB_VENDOR_CMD = (1 << 19), +}; + +/* Defines the values of bWriteBoosterBufferType. */ +enum ufshci_desc_wb_buffer_type { + UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED = 0x00, + UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED = 0x01, +}; + +/* Defines the values of bWriteBoosterBufferPreserveUserSpaceEn. */ +enum ufshci_desc_user_space_config { + UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION = 0x00, + UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE = 0x01, +}; + /* * UFS Spec 4.1, section 14.1.5.3 "Configuration Descriptor" * ConfigurationDescriptor uses big-endian byte ordering. 
*/ struct ufshci_unit_descriptor_configurable_parameters { uint8_t bLUEnable; uint8_t bBootLunID; uint8_t bLUWriteProtect; uint8_t bMemoryType; uint32_t dNumAllocUnits; uint8_t bDataReliability; uint8_t bLogicalBlockSize; uint8_t bProvisioningType; uint16_t wContextCapabilities; union { struct { uint8_t Reserved[3]; uint8_t ReservedHpb[6]; } __packed; uint16_t wZoneBufferAllocUnits; }; uint32_t dLUNumWriteBoosterBufferAllocUnits; } __packed; _Static_assert(sizeof(struct ufshci_unit_descriptor_configurable_parameters) == 27, "bad size for ufshci_unit_descriptor_configurable_parameters"); #define UFSHCI_CONFIGURATION_DESCEIPTOR_LU_NUM 8 struct ufshci_configuration_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint8_t bConfDescContinue; uint8_t bBootEnable; uint8_t bDescrAccessEn; uint8_t bInitPowerMode; uint8_t bHighPriorityLUN; uint8_t bSecureRemovalType; uint8_t bInitActiveICCLevel; uint16_t wPeriodicRTCUpdate; uint8_t Reserved; uint8_t bRPMBRegionEnable; uint8_t bRPMBRegion1Size; uint8_t bRPMBRegion2Size; uint8_t bRPMBRegion3Size; uint8_t bWriteBoosterBufferPreserveUserSpaceEn; uint8_t bWriteBoosterBufferType; uint32_t dNumSharedWriteBoosterBufferAllocUnits; /* 0x16 */ struct ufshci_unit_descriptor_configurable_parameters unit_config_params[UFSHCI_CONFIGURATION_DESCEIPTOR_LU_NUM]; } __packed; _Static_assert(sizeof(struct ufshci_configuration_descriptor) == (22 + 27 * 8), "bad size for ufshci_configuration_descriptor"); /* * UFS Spec 4.1, section 14.1.5.4 "Geometry Descriptor" * GeometryDescriptor use big-endian byte ordering. */ struct ufshci_geometry_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint8_t bMediaTechnology; uint8_t Reserved; uint64_t qTotalRawDeviceCapacity; uint8_t bMaxNumberLU; uint32_t dSegmentSize; /* 0x11 */ uint8_t bAllocationUnitSize; uint8_t bMinAddrBlockSize; uint8_t bOptimalReadBlockSize; uint8_t bOptimalWriteBlockSize; uint8_t bMaxInBufferSize; uint8_t bMaxOutBufferSize; uint8_t bRPMB_ReadWriteSize; uint8_t bDynamicCapacityResourcePolicy; uint8_t bDataOrdering; uint8_t bMaxContexIDNumber; uint8_t bSysDataTagUnitSize; uint8_t bSysDataTagResSize; uint8_t bSupportedSecRTypes; uint16_t wSupportedMemoryTypes; /* 0x20 */ uint32_t dSystemCodeMaxNAllocU; uint16_t wSystemCodeCapAdjFac; uint32_t dNonPersistMaxNAllocU; uint16_t wNonPersistCapAdjFac; uint32_t dEnhanced1MaxNAllocU; /* 0x30 */ uint16_t wEnhanced1CapAdjFac; uint32_t dEnhanced2MaxNAllocU; uint16_t wEnhanced2CapAdjFac; uint32_t dEnhanced3MaxNAllocU; uint16_t wEnhanced3CapAdjFac; uint32_t dEnhanced4MaxNAllocU; /* 0x42 */ uint16_t wEnhanced4CapAdjFac; uint32_t dOptimalLogicalBlockSize; uint8_t ReservedHpb[5]; uint8_t Reserved2[2]; uint32_t dWriteBoosterBufferMaxNAllocUnits; uint8_t bDeviceMaxWriteBoosterLUs; uint8_t bWriteBoosterBufferCapAdjFac; uint8_t bSupportedWriteBoosterBufferUserSpaceReductionTypes; uint8_t bSupportedWriteBoosterBufferTypes; } __packed; _Static_assert(sizeof(struct ufshci_geometry_descriptor) == 87, "bad size for ufshci_geometry_descriptor"); /* * UFS Spec 4.1, section 14.1.5.5 "Unit Descriptor" * UnitDescriptor use big-endian byte ordering. 
*/ struct ufshci_unit_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint8_t bUnitIndex; uint8_t bLUEnable; uint8_t bBootLunID; uint8_t bLUWriteProtect; uint8_t bLUQueueDepth; uint8_t bPSASensitive; uint8_t bMemoryType; uint8_t bDataReliability; uint8_t bLogicalBlockSize; uint64_t qLogicalBlockCount; /* 0x13 */ uint32_t dEraseBlockSize; uint8_t bProvisioningType; uint64_t qPhyMemResourceCount; /* 0x20 */ uint16_t wContextCapabilities; uint8_t bLargeUnitGranularity_M1; uint8_t ReservedHpb[6]; uint32_t dLUNumWriteBoosterBufferAllocUnits; } __packed; _Static_assert(sizeof(struct ufshci_unit_descriptor) == 45, "bad size for ufshci_unit_descriptor"); enum LUWriteProtect { kNoWriteProtect = 0x00, kPowerOnWriteProtect = 0x01, kPermanentWriteProtect = 0x02, }; /* * UFS Spec 4.1, section 14.1.5.6 "RPMB Unit Descriptor" * RpmbUnitDescriptor use big-endian byte ordering. */ struct ufshci_rpmb_unit_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint8_t bUnitIndex; uint8_t bLUEnable; uint8_t bBootLunID; uint8_t bLUWriteProtect; uint8_t bLUQueueDepth; uint8_t bPSASensitive; uint8_t bMemoryType; uint8_t Reserved; uint8_t bLogicalBlockSize; uint64_t qLogicalBlockCount; /* 0x13 */ uint32_t dEraseBlockSize; uint8_t bProvisioningType; uint64_t qPhyMemResourceCount; /* 0x20 */ uint8_t Reserved1[3]; } __packed; _Static_assert(sizeof(struct ufshci_rpmb_unit_descriptor) == 35, "bad size for RpmbUnitDescriptor"); /* * UFS Spec 4.1, section 14.1.5.7 "Power Parameters Descriptor" * PowerParametersDescriptor use big-endian byte ordering. */ struct ufshci_power_parameters_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint16_t wActiveICCLevelsVCC[16]; uint16_t wActiveICCLevelsVCCQ[16]; uint16_t wActiveICCLevelsVCCQ2[16]; } __packed; _Static_assert(sizeof(struct ufshci_power_parameters_descriptor) == 98, "bad size for PowerParametersDescriptor"); /* * UFS Spec 4.1, section 14.1.5.8 "Interconnect Descriptor" * InterconnectDescriptor use big-endian byte ordering. */ struct ufshci_interconnect_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint16_t bcdUniproVersion; uint16_t bcdMphyVersion; } __packed; _Static_assert(sizeof(struct ufshci_interconnect_descriptor) == 6, "bad size for InterconnectDescriptor"); /* * UFS Spec 4.1, section 14.1.5.9-13 "String Descriptor" * StringDescriptor use big-endian byte ordering. */ struct ufshci_string_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint16_t UC[126]; } __packed; _Static_assert(sizeof(struct ufshci_string_descriptor) == 254, "bad size for StringDescriptor"); /* * UFS Spec 4.1, section 14.1.5.14 "Device Health Descriptor" * DeviceHealthDescriptor use big-endian byte ordering. */ struct ufshci_device_healthd_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint8_t bPreEOLInfo; uint8_t bDeviceLifeTimeEstA; uint8_t bDeviceLifeTimeEstB; uint8_t VendorPropInfo[32]; uint32_t dRefreshTotalCount; uint32_t dRefreshProgress; } __packed; _Static_assert(sizeof(struct ufshci_device_healthd_descriptor) == 45, "bad size for DeviceHealthDescriptor"); /* * UFS Spec 4.1, section 14.1.5.15 "Vendor Specific Descriptor" * VendorSpecificDescriptor use big-endian byte ordering. 
*/ struct ufshci_vendor_specific_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint8_t DATA[254]; } __packed; _Static_assert(sizeof(struct ufshci_vendor_specific_descriptor) == 256, "bad size for VendorSpecificDescriptor"); /* UFS Spec 4.1, section 14.2 "Flags" */ enum ufshci_flags { UFSHCI_FLAG_F_RESERVED = 0x00, UFSHCI_FLAG_F_DEVICE_INIT = 0x01, UFSHCI_FLAG_F_PERMANENT_WP_EN = 0x02, UFSHCI_FLAS_F_POWER_ON_WP_EN = 0x03, UFSHCI_FLAG_F_BACKGROUND_OPS_EN = 0x04, UFSHCI_FLAG_F_DEVICE_LIFE_SPAN_MODE_EN = 0x05, UFSHCI_FLAG_F_PURGE_ENABLE = 0x06, UFSHCI_FLAG_F_REFRESH_ENABLE = 0x07, UFSHCI_FLAG_F_PHY_RESOURCE_REMOVAL = 0x08, UFSHCI_FLAG_F_BUSY_RTC = 0x09, UFSHCI_FLAG_F_PERMANENTLY_DISABLE_FW_UPDATE = 0x0b, UFSHCI_FLAG_F_WRITE_BOOSTER_EN = 0x0e, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN = 0x0f, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE = 0x10, UFSHCI_FLAG_F_UNPIN_EN = 0x13, }; /* UFS Spec 4.1, section 14.3 "Attributes" */ enum ufshci_attributes { UFSHCI_ATTR_B_BOOT_LUN_EN = 0x00, UFSHCI_ATTR_B_CURRENT_POWER_MODE = 0x02, UFSHCI_ATTR_B_ACTIVE_ICC_LEVEL = 0x03, UFSHCI_ATTR_B_OUT_OF_ORDER_DATA_EN = 0x04, UFSHCI_ATTR_B_BACKGROUND_OP_STATUS = 0x05, UFSHCI_ATTR_B_PURGE_STATUS = 0x06, UFSHCI_ATTR_B_MAX_DATA_IN_SIZE = 0x07, UFSHCI_ATTR_B_MAX_DATA_OUT_SIZE = 0x08, UFSHCI_ATTR_D_DYN_CAP_NEEDED = 0x09, UFSHCI_ATTR_B_REF_CLK_FREQ = 0x0a, UFSHCI_ATTR_B_CONFIG_DESCR_LOCK = 0x0b, UFSHCI_ATTR_B_MAX_NUM_OF_RTT = 0x0c, UFSHCI_ATTR_W_EXCEPTION_EVENT_CONTROL = 0x0d, UFSHCI_ATTR_W_EXCEPTION_EVENT_STATUS = 0x0e, UFSHCI_ATTR_D_SECONDS_PASSED = 0x0f, UFSHCI_ATTR_W_CONTEXT_CONF = 0x10, UFSHCI_ATTR_B_DEVICE_FFU_STATUS = 0x14, UFSHCI_ATTR_B_PSA_STATE = 0x15, UFSHCI_ATTR_D_PSA_DATA_SIZE = 0x16, UFSHCI_ATTR_B_REF_CLK_GATING_WAIT_TIME = 0x17, UFSHCI_ATTR_B_DEVICE_CASE_ROUGH_TEMPERAURE = 0x18, UFSHCI_ATTR_B_DEVICE_TOO_HIGH_TEMP_BOUNDARY = 0x19, UFSHCI_ATTR_B_DEVICE_TOO_LOW_TEMP_BOUNDARY = 0x1a, UFSHCI_ATTR_B_THROTTLING_STATUS = 0x1b, UFSHCI_ATTR_B_WB_BUFFER_FLUSH_STATUS = 0x1c, UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE = 0x1d, UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST = 0x1e, UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE = 0x1f, UFSHCI_ATTR_B_REFRESH_STATUS = 0x2c, UFSHCI_ATTR_B_REFRESH_FREQ = 0x2d, UFSHCI_ATTR_B_REFRESH_UNIT = 0x2e, UFSHCI_ATTR_B_REFRESH_METHOD = 0x2f, }; +/* bAvailableWriteBoosterBufferSize codes (UFS WriteBooster available buffer + * left %) */ +enum ufshci_wb_available_buffer_size { + UFSHCI_ATTR_WB_AVAILABLE_0 = 0x00, /* 0% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_10 = 0x01, /* 10% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_20 = 0x02, /* 20% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_30 = 0x03, /* 30% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_40 = 0x04, /* 40% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_50 = 0x05, /* 50% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_60 = 0x06, /* 60% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_70 = 0x07, /* 70% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_80 = 0x08, /* 80% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_90 = 0x09, /* 90% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_100 = 0x0A, /* 100% buffer remains */ +}; + +/* bWriteBoosterBufferLifeTimeEst codes (UFS WriteBooster buffer life %) */ +enum ufshci_wb_lifetime { + UFSHCI_ATTR_WB_LIFE_DISABLED = 0x00, /* Info not available */ + UFSHCI_ATTR_WB_LIFE_0_10 = 0x01, /* 0%–10% used */ + UFSHCI_ATTR_WB_LIFE_10_20 = 0x02, /* 10%–20% used */ + UFSHCI_ATTR_WB_LIFE_20_30 = 0x03, /* 20%–30% used */ + UFSHCI_ATTR_WB_LIFE_30_40 = 0x04, /* 30%–40% used */ + UFSHCI_ATTR_WB_LIFE_40_50 = 0x05, /* 40%–50% used */ + 
UFSHCI_ATTR_WB_LIFE_50_60 = 0x06, /* 50%–60% used */ + UFSHCI_ATTR_WB_LIFE_60_70 = 0x07, /* 60%–70% used */ + UFSHCI_ATTR_WB_LIFE_70_80 = 0x08, /* 70%–80% used */ + UFSHCI_ATTR_WB_LIFE_80_90 = 0x09, /* 80%–90% used */ + UFSHCI_ATTR_WB_LIFE_90_100 = 0x0A, /* 90%–100% used */ + UFSHCI_ATTR_WB_LIFE_EXCEEDED = + 0x0B, /* Exceeded estimated life (treat as WB disabled) */ +}; + #endif /* __UFSHCI_H__ */ diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c index 37bd32665b2b..7bebfd2b0f9c 100644 --- a/sys/dev/ufshci/ufshci_ctrlr.c +++ b/sys/dev/ufshci/ufshci_ctrlr.c @@ -1,512 +1,520 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. * Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #include #include #include #include "ufshci_private.h" #include "ufshci_reg.h" static int ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr) { int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms); sbintime_t delta_t = SBT_1US; uint32_t hce; hce = ufshci_mmio_read_4(ctrlr, hce); /* If UFS host controller is already enabled, disable it. */ if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) { hce &= ~UFSHCIM(UFSHCI_HCE_REG_HCE); ufshci_mmio_write_4(ctrlr, hce, hce); } /* Enable UFS host controller */ hce |= UFSHCIM(UFSHCI_HCE_REG_HCE); ufshci_mmio_write_4(ctrlr, hce, hce); /* * During the controller initialization, the value of the HCE bit is * unstable, so we wait a short time before reading the HCE value * again. */ pause_sbt("ufshci_hce", ustosbt(100), 0, C_PREL(1)); /* Wait for the HCE flag to change */ while (1) { hce = ufshci_mmio_read_4(ctrlr, hce); if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) break; if (timeout - ticks < 0) { ufshci_printf(ctrlr, "host controller failed to enable " "within %d ms\n", ctrlr->device_init_timeout_in_ms); return (ENXIO); } pause_sbt("ufshci_hce", delta_t, 0, C_PREL(1)); delta_t = min(SBT_1MS, delta_t * 3 / 2); } return (0); } int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev) { - uint32_t ver, cap, hcs, ie; + uint32_t ver, cap, hcs, ie, ahit; uint32_t timeout_period, retry_count; int error; ctrlr->device_init_timeout_in_ms = UFSHCI_DEVICE_INIT_TIMEOUT_MS; ctrlr->uic_cmd_timeout_in_ms = UFSHCI_UIC_CMD_TIMEOUT_MS; ctrlr->dev = dev; ctrlr->sc_unit = device_get_unit(dev); snprintf(ctrlr->sc_name, sizeof(ctrlr->sc_name), "%s", device_get_nameunit(dev)); mtx_init(&ctrlr->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF | MTX_RECURSE); mtx_init(&ctrlr->uic_cmd_lock, "ufshci ctrlr uic cmd lock", NULL, MTX_DEF); ver = ufshci_mmio_read_4(ctrlr, ver); ctrlr->major_version = UFSHCIV(UFSHCI_VER_REG_MJR, ver); ctrlr->minor_version = UFSHCIV(UFSHCI_VER_REG_MNR, ver); ufshci_printf(ctrlr, "UFSHCI Version: %d.%d\n", ctrlr->major_version, ctrlr->minor_version); /* Read Device Capabilities */ ctrlr->cap = cap = ufshci_mmio_read_4(ctrlr, cap); ctrlr->is_single_db_supported = UFSHCIV(UFSHCI_CAP_REG_LSDBS, cap); /* * TODO: This driver does not yet support multi-queue. * Check the UFSHCI_CAP_REG_MCQS bit in the future to determine if * multi-queue support is available. */ ctrlr->is_mcq_supported = false; if (!(ctrlr->is_single_db_supported == 0 || ctrlr->is_mcq_supported)) return (ENXIO); /* * The maximum transfer size supported by UFSHCI spec is 65535 * 256 KiB. * However, we limit the maximum transfer size to 1MiB (256 * 4KiB) for * performance reasons. 
*/ ctrlr->page_size = PAGE_SIZE; ctrlr->max_xfer_size = ctrlr->page_size * UFSHCI_MAX_PRDT_ENTRY_COUNT; timeout_period = UFSHCI_DEFAULT_TIMEOUT_PERIOD; TUNABLE_INT_FETCH("hw.ufshci.timeout_period", &timeout_period); timeout_period = min(timeout_period, UFSHCI_MAX_TIMEOUT_PERIOD); timeout_period = max(timeout_period, UFSHCI_MIN_TIMEOUT_PERIOD); ctrlr->timeout_period = timeout_period; retry_count = UFSHCI_DEFAULT_RETRY_COUNT; TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count); ctrlr->retry_count = retry_count; /* Disable all interrupts */ ufshci_mmio_write_4(ctrlr, ie, 0); /* Enable Host Controller */ error = ufshci_ctrlr_enable_host_ctrlr(ctrlr); if (error) return (error); /* Send DME_LINKSTARTUP command to start the link startup procedure */ error = ufshci_uic_send_dme_link_startup(ctrlr); if (error) return (error); + /* Read the UECPA register to clear it */ + ufshci_mmio_read_4(ctrlr, uecpa); + + /* Disable Auto-hibernate */ + ahit = 0; + ufshci_mmio_write_4(ctrlr, ahit, ahit); + /* * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host * controller has successfully received a Link Startup UIC command * response and the UFS device has found a physical link to the * controller. */ hcs = ufshci_mmio_read_4(ctrlr, hcs); if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) { ufshci_printf(ctrlr, "UFS device not found\n"); return (ENXIO); } /* Enable additional interrupts by programming the IE register. */ ie = ufshci_mmio_read_4(ctrlr, ie); ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */ ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */ ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */ ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */ ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */ ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */ ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */ ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */ ufshci_mmio_write_4(ctrlr, ie, ie); /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */ /* Allocate and initialize UTP Task Management Request List. */ error = ufshci_utmr_req_queue_construct(ctrlr); if (error) return (error); /* Allocate and initialize UTP Transfer Request List or SQ/CQ. 
*/ error = ufshci_utr_req_queue_construct(ctrlr); if (error) return (error); /* TODO: Separate IO and Admin slot */ /* max_hw_pend_io is the number of slots in the transfer_req_queue */ ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries; return (0); } void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev) { if (ctrlr->resource == NULL) goto nores; /* TODO: Flush In-flight IOs */ /* Release resources */ ufshci_utmr_req_queue_destroy(ctrlr); ufshci_utr_req_queue_destroy(ctrlr); if (ctrlr->tag) bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag); if (ctrlr->res) bus_release_resource(ctrlr->dev, SYS_RES_IRQ, rman_get_rid(ctrlr->res), ctrlr->res); mtx_lock(&ctrlr->sc_mtx); ufshci_sim_detach(ctrlr); mtx_unlock(&ctrlr->sc_mtx); bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id, ctrlr->resource); nores: mtx_destroy(&ctrlr->uic_cmd_lock); mtx_destroy(&ctrlr->sc_mtx); return; } int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr) { uint32_t ie; int error; /* Backup and disable all interrupts */ ie = ufshci_mmio_read_4(ctrlr, ie); ufshci_mmio_write_4(ctrlr, ie, 0); /* Release resources */ ufshci_utmr_req_queue_destroy(ctrlr); ufshci_utr_req_queue_destroy(ctrlr); /* Reset Host Controller */ error = ufshci_ctrlr_enable_host_ctrlr(ctrlr); if (error) return (error); /* Send DME_LINKSTARTUP command to start the link startup procedure */ error = ufshci_uic_send_dme_link_startup(ctrlr); if (error) return (error); /* Enable interrupts */ ufshci_mmio_write_4(ctrlr, ie, ie); /* Allocate and initialize UTP Task Management Request List. */ error = ufshci_utmr_req_queue_construct(ctrlr); if (error) return (error); /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */ error = ufshci_utr_req_queue_construct(ctrlr); if (error) return (error); return (0); } int ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr, struct ufshci_request *req) { return ( ufshci_req_queue_submit_request(&ctrlr->task_mgmt_req_queue, req, /*is_admin*/ false)); } int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr, struct ufshci_request *req) { return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req, /*is_admin*/ true)); } int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr, struct ufshci_request *req) { return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req, /*is_admin*/ false)); } int ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr) { struct ufshci_completion_poll_status status; status.done = 0; ufshci_ctrlr_cmd_send_nop(ctrlr, ufshci_completion_poll_cb, &status); ufshci_completion_poll(&status); if (status.error) { ufshci_printf(ctrlr, "ufshci_ctrlr_send_nop failed!\n"); return (ENXIO); } return (0); } static void ufshci_ctrlr_fail(struct ufshci_controller *ctrlr, bool admin_also) { printf("ufshci(4): ufshci_ctrlr_fail\n"); ctrlr->is_failed = true; /* TODO: task_mgmt_req_queue should be handled as fail */ ufshci_req_queue_fail(ctrlr, &ctrlr->transfer_req_queue.hwq[UFSHCI_SDB_Q]); } static void ufshci_ctrlr_start(struct ufshci_controller *ctrlr) { TSENTER(); if (ufshci_ctrlr_send_nop(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr, false); return; } /* Initialize UFS target device */ if (ufshci_dev_init(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr, false); return; } /* Initialize Reference Clock */ if (ufshci_dev_init_reference_clock(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr, false); return; } /* Initialize UniPro */ if (ufshci_dev_init_unipro(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr, false); return; } /* * 
Initialize UIC Power Mode * QEMU UFS devices do not support unipro and power mode. */ if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) && ufshci_dev_init_uic_power_mode(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr, false); return; } /* Initialize UFS Power Mode */ if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr, false); return; } - /* Read Controller Descriptor (Device, Geometry)*/ + /* Read Controller Descriptor (Device, Geometry) */ if (ufshci_dev_get_descriptor(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr, false); return; } - /* TODO: Configure Write Protect */ + if (ufshci_dev_config_write_booster(ctrlr)) { + ufshci_ctrlr_fail(ctrlr, false); + return; + } /* TODO: Configure Background Operations */ - /* TODO: Configure Write Booster */ - if (ufshci_sim_attach(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr, false); return; } TSEXIT(); } void ufshci_ctrlr_start_config_hook(void *arg) { struct ufshci_controller *ctrlr = arg; TSENTER(); if (ufshci_utmr_req_queue_enable(ctrlr) == 0 && ufshci_utr_req_queue_enable(ctrlr) == 0) ufshci_ctrlr_start(ctrlr); else ufshci_ctrlr_fail(ctrlr, false); ufshci_sysctl_initialize_ctrlr(ctrlr); config_intrhook_disestablish(&ctrlr->config_hook); TSEXIT(); } /* * Poll all the queues enabled on the device for completion. */ void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr) { uint32_t is; is = ufshci_mmio_read_4(ctrlr, is); /* UIC error */ if (is & UFSHCIM(UFSHCI_IS_REG_UE)) { uint32_t uecpa, uecdl, uecn, uect, uecdme; /* UECPA for Host UIC Error Code within PHY Adapter Layer */ uecpa = ufshci_mmio_read_4(ctrlr, uecpa); if (uecpa & UFSHCIM(UFSHCI_UECPA_REG_ERR)) { ufshci_printf(ctrlr, "UECPA error code: 0x%x\n", UFSHCIV(UFSHCI_UECPA_REG_EC, uecpa)); } /* UECDL for Host UIC Error Code within Data Link Layer */ uecdl = ufshci_mmio_read_4(ctrlr, uecdl); if (uecdl & UFSHCIM(UFSHCI_UECDL_REG_ERR)) { ufshci_printf(ctrlr, "UECDL error code: 0x%x\n", UFSHCIV(UFSHCI_UECDL_REG_EC, uecdl)); } /* UECN for Host UIC Error Code within Network Layer */ uecn = ufshci_mmio_read_4(ctrlr, uecn); if (uecn & UFSHCIM(UFSHCI_UECN_REG_ERR)) { ufshci_printf(ctrlr, "UECN error code: 0x%x\n", UFSHCIV(UFSHCI_UECN_REG_EC, uecn)); } /* UECT for Host UIC Error Code within Transport Layer */ uect = ufshci_mmio_read_4(ctrlr, uect); if (uect & UFSHCIM(UFSHCI_UECT_REG_ERR)) { ufshci_printf(ctrlr, "UECT error code: 0x%x\n", UFSHCIV(UFSHCI_UECT_REG_EC, uect)); } /* UECDME for Host UIC Error Code within DME subcomponent */ uecdme = ufshci_mmio_read_4(ctrlr, uecdme); if (uecdme & UFSHCIM(UFSHCI_UECDME_REG_ERR)) { ufshci_printf(ctrlr, "UECDME error code: 0x%x\n", UFSHCIV(UFSHCI_UECDME_REG_EC, uecdme)); } ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UE)); } /* Device Fatal Error Status */ if (is & UFSHCIM(UFSHCI_IS_REG_DFES)) { ufshci_printf(ctrlr, "Device fatal error on ISR\n"); ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_DFES)); } /* UTP Error Status */ if (is & UFSHCIM(UFSHCI_IS_REG_UTPES)) { ufshci_printf(ctrlr, "UTP error on ISR\n"); ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTPES)); } /* Host Controller Fatal Error Status */ if (is & UFSHCIM(UFSHCI_IS_REG_HCFES)) { ufshci_printf(ctrlr, "Host controller fatal error on ISR\n"); ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_HCFES)); } /* System Bus Fatal Error Status */ if (is & UFSHCIM(UFSHCI_IS_REG_SBFES)) { ufshci_printf(ctrlr, "System bus fatal error on ISR\n"); ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_SBFES)); } /* Crypto Engine Fatal Error Status */ if (is & 
UFSHCIM(UFSHCI_IS_REG_CEFES)) { ufshci_printf(ctrlr, "Crypto engine fatal error on ISR\n"); ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CEFES)); } /* UTP Task Management Request Completion Status */ if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) { ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS)); ufshci_req_queue_process_completions( &ctrlr->task_mgmt_req_queue); } /* UTP Transfer Request Completion Status */ if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) { ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTRCS)); ufshci_req_queue_process_completions( &ctrlr->transfer_req_queue); } /* MCQ CQ Event Status */ if (is & UFSHCIM(UFSHCI_IS_REG_CQES)) { /* TODO: We need to process completion Queue Pairs */ ufshci_printf(ctrlr, "MCQ completion not yet implemented\n"); ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CQES)); } } /* * Poll the single-vector interrupt case: num_io_queues will be 1 and * there's only a single vector. While we're polling, we mask further * interrupts in the controller. */ void ufshci_ctrlr_shared_handler(void *arg) { struct ufshci_controller *ctrlr = arg; ufshci_ctrlr_poll(ctrlr); } void ufshci_reg_dump(struct ufshci_controller *ctrlr) { ufshci_printf(ctrlr, "========= UFSHCI Register Dump =========\n"); UFSHCI_DUMP_REG(ctrlr, cap); UFSHCI_DUMP_REG(ctrlr, mcqcap); UFSHCI_DUMP_REG(ctrlr, ver); UFSHCI_DUMP_REG(ctrlr, ext_cap); UFSHCI_DUMP_REG(ctrlr, hcpid); UFSHCI_DUMP_REG(ctrlr, hcmid); UFSHCI_DUMP_REG(ctrlr, ahit); UFSHCI_DUMP_REG(ctrlr, is); UFSHCI_DUMP_REG(ctrlr, ie); UFSHCI_DUMP_REG(ctrlr, hcsext); UFSHCI_DUMP_REG(ctrlr, hcs); UFSHCI_DUMP_REG(ctrlr, hce); UFSHCI_DUMP_REG(ctrlr, uecpa); UFSHCI_DUMP_REG(ctrlr, uecdl); UFSHCI_DUMP_REG(ctrlr, uecn); UFSHCI_DUMP_REG(ctrlr, uect); UFSHCI_DUMP_REG(ctrlr, uecdme); ufshci_printf(ctrlr, "========================================\n"); } diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c index a0e32914e2aa..dd196b1d638b 100644 --- a/sys/dev/ufshci/ufshci_dev.c +++ b/sys/dev/ufshci/ufshci_dev.c @@ -1,428 +1,777 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. 
* Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #include #include #include #include "ufshci_private.h" #include "ufshci_reg.h" static int ufshci_dev_read_descriptor(struct ufshci_controller *ctrlr, enum ufshci_descriptor_type desc_type, uint8_t index, uint8_t selector, void *desc, size_t desc_size) { struct ufshci_completion_poll_status status; struct ufshci_query_param param; param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST; param.opcode = UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR; param.type = desc_type; param.index = index; param.selector = selector; param.value = 0; param.desc_size = desc_size; status.done = 0; ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, &status, param); ufshci_completion_poll(&status); if (status.error) { ufshci_printf(ctrlr, "ufshci_dev_read_descriptor failed!\n"); return (ENXIO); } memcpy(desc, status.cpl.response_upiu.query_response_upiu.command_data, desc_size); return (0); } static int ufshci_dev_read_device_descriptor(struct ufshci_controller *ctrlr, struct ufshci_device_descriptor *desc) { return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_DEVICE, 0, 0, desc, sizeof(struct ufshci_device_descriptor))); } static int ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr, struct ufshci_geometry_descriptor *desc) { return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_GEOMETRY, 0, 0, desc, sizeof(struct ufshci_geometry_descriptor))); } +static int +ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun, + struct ufshci_unit_descriptor *desc) +{ + return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0, + desc, sizeof(struct ufshci_unit_descriptor))); +} + static int ufshci_dev_read_flag(struct ufshci_controller *ctrlr, enum ufshci_flags flag_type, uint8_t *flag) { struct ufshci_completion_poll_status status; struct ufshci_query_param param; param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST; param.opcode = UFSHCI_QUERY_OPCODE_READ_FLAG; param.type = flag_type; param.index = 0; param.selector = 0; param.value = 0; status.done = 0; ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, &status, param); ufshci_completion_poll(&status); if (status.error) { ufshci_printf(ctrlr, "ufshci_dev_read_flag failed!\n"); return (ENXIO); } *flag = status.cpl.response_upiu.query_response_upiu.flag_value; return (0); } static int ufshci_dev_set_flag(struct ufshci_controller *ctrlr, enum ufshci_flags flag_type) { struct ufshci_completion_poll_status status; struct ufshci_query_param param; param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST; param.opcode = UFSHCI_QUERY_OPCODE_SET_FLAG; param.type = flag_type; param.index = 0; param.selector = 0; param.value = 0; status.done = 0; ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, &status, param); ufshci_completion_poll(&status); if (status.error) { ufshci_printf(ctrlr, "ufshci_dev_set_flag failed!\n"); return (ENXIO); } return (0); } +static int +ufshci_dev_clear_flag(struct ufshci_controller *ctrlr, + enum ufshci_flags flag_type) +{ + struct ufshci_completion_poll_status status; + struct ufshci_query_param param; + + param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST; + param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG; + param.type = flag_type; + param.index = 0; + param.selector = 0; + param.value = 0; + + status.done = 0; + ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, + &status, param); + ufshci_completion_poll(&status); + if (status.error) { + 
ufshci_printf(ctrlr, "ufshci_dev_clear_flag failed!\n"); + return (ENXIO); + } + + return (0); +} + +static int +ufshci_dev_read_attribute(struct ufshci_controller *ctrlr, + enum ufshci_attributes attr_type, uint8_t index, uint8_t selector, + uint64_t *value) +{ + struct ufshci_completion_poll_status status; + struct ufshci_query_param param; + + param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST; + param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE; + param.type = attr_type; + param.index = index; + param.selector = selector; + param.value = 0; + + status.done = 0; + ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, + &status, param); + ufshci_completion_poll(&status); + if (status.error) { + ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n"); + return (ENXIO); + } + + *value = status.cpl.response_upiu.query_response_upiu.value_64; + + return (0); +} + static int ufshci_dev_write_attribute(struct ufshci_controller *ctrlr, enum ufshci_attributes attr_type, uint8_t index, uint8_t selector, uint64_t value) { struct ufshci_completion_poll_status status; struct ufshci_query_param param; param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST; param.opcode = UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE; param.type = attr_type; param.index = index; param.selector = selector; param.value = value; status.done = 0; ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, &status, param); ufshci_completion_poll(&status); if (status.error) { ufshci_printf(ctrlr, "ufshci_dev_write_attribute failed!\n"); return (ENXIO); } return (0); } int ufshci_dev_init(struct ufshci_controller *ctrlr) { int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms); sbintime_t delta_t = SBT_1US; uint8_t flag; int error; const uint8_t device_init_completed = 0; error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT); if (error) return (error); /* Wait for the UFSHCI_FLAG_F_DEVICE_INIT flag to change */ while (1) { error = ufshci_dev_read_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT, &flag); if (error) return (error); if (flag == device_init_completed) break; if (timeout - ticks < 0) { ufshci_printf(ctrlr, "device init did not become %d " "within %d ms\n", device_init_completed, ctrlr->device_init_timeout_in_ms); return (ENXIO); } pause_sbt("ufshciinit", delta_t, 0, C_PREL(1)); delta_t = min(SBT_1MS, delta_t * 3 / 2); } return (0); } int ufshci_dev_reset(struct ufshci_controller *ctrlr) { if (ufshci_uic_send_dme_endpoint_reset(ctrlr)) return (ENXIO); return (ufshci_dev_init(ctrlr)); } int ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr) { int error; uint8_t index, selector; index = 0; /* bRefClkFreq is device type attribute */ selector = 0; /* bRefClkFreq is device type attribute */ error = ufshci_dev_write_attribute(ctrlr, UFSHCI_ATTR_B_REF_CLK_FREQ, index, selector, ctrlr->ref_clk); if (error) return (error); return (0); } int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr) { uint32_t pa_granularity, peer_pa_granularity; uint32_t t_activate, pear_t_activate; /* * Unipro Version: * - 7~15 = Above 2.0, 6 = 2.0, 5 = 1.8, 4 = 1.61, 3 = 1.6, 2 = 1.41, * 1 = 1.40, 0 = Reserved */ if (ufshci_uic_send_dme_get(ctrlr, PA_LocalVerInfo, &ctrlr->unipro_version)) return (ENXIO); if (ufshci_uic_send_dme_get(ctrlr, PA_RemoteVerInfo, &ctrlr->ufs_dev.unipro_version)) return (ENXIO); /* * PA_Granularity: Granularity for PA_TActivate and PA_Hibern8Time * - 1=1us, 2=4us, 3=8us, 4=16us, 5=32us, 6=100us */ if (ufshci_uic_send_dme_get(ctrlr, PA_Granularity, 
&pa_granularity)) return (ENXIO); if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, &peer_pa_granularity)) return (ENXIO); /* * PA_TActivate: Time to wait before activating a burst in order to * wake-up peer M-RX * UniPro automatically sets timing information such as PA_TActivate * through the PACP_CAP_EXT1_ind command during Link Startup operation. */ if (ufshci_uic_send_dme_get(ctrlr, PA_TActivate, &t_activate)) return (ENXIO); if (ufshci_uic_send_dme_peer_get(ctrlr, PA_TActivate, &pear_t_activate)) return (ENXIO); if (ctrlr->quirks & UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE) { /* * Intel Lake-field UFSHCI has a quirk. We need to add 200us to * the PEER's PA_TActivate. */ if (pa_granularity == peer_pa_granularity) { pear_t_activate = t_activate + 2; if (ufshci_uic_send_dme_peer_set(ctrlr, PA_TActivate, pear_t_activate)) return (ENXIO); } } return (0); } int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr) { /* HSSeries: A = 1, B = 2 */ const uint32_t hs_series = 2; /* * TX/RX PWRMode: * - TX[3:0], RX[7:4] * - Fast Mode = 1, Slow Mode = 2, FastAuto Mode = 4, SlowAuto Mode = 5 */ const uint32_t fast_mode = 1; const uint32_t rx_bit_shift = 4; - const uint32_t power_mode = (fast_mode << rx_bit_shift) | fast_mode; + uint32_t power_mode, peer_granularity; /* Update lanes with available TX/RX lanes */ if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes, &ctrlr->max_tx_lanes)) return (ENXIO); if (ufshci_uic_send_dme_get(ctrlr, PA_AvailRxDataLanes, &ctrlr->max_rx_lanes)) return (ENXIO); /* Get max HS-GEAR value */ if (ufshci_uic_send_dme_get(ctrlr, PA_MaxRxHSGear, &ctrlr->max_rx_hs_gear)) return (ENXIO); /* Set the data lane to max */ ctrlr->tx_lanes = ctrlr->max_tx_lanes; ctrlr->rx_lanes = ctrlr->max_rx_lanes; if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveTxDataLanes, ctrlr->tx_lanes)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveRxDataLanes, ctrlr->rx_lanes)) return (ENXIO); + if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) { + /* Before changing gears, first change the number of lanes. */ + if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode)) + return (ENXIO); + if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode)) + return (ENXIO); + + /* Wait for the power mode change. 
*/ + if (ufshci_uic_power_mode_ready(ctrlr)) { + ufshci_reg_dump(ctrlr); + return (ENXIO); + } + } + /* Set HS-GEAR to max gear */ ctrlr->hs_gear = ctrlr->max_rx_hs_gear; if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_RxGear, ctrlr->hs_gear)) return (ENXIO); /* * Set termination * - HS-MODE = ON / LS-MODE = OFF */ if (ufshci_uic_send_dme_set(ctrlr, PA_TxTermination, true)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_RxTermination, true)) return (ENXIO); /* Set HSSeries (A = 1, B = 2) */ if (ufshci_uic_send_dme_set(ctrlr, PA_HSSeries, hs_series)) return (ENXIO); /* Set Timeout values */ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData0, DL_FC0ProtectionTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData1, DL_TC0ReplayTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData2, DL_AFC0ReqTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData3, DL_FC0ProtectionTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData4, DL_TC0ReplayTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData5, DL_AFC0ReqTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, DME_LocalFC0ProtectionTimeOutVal, DL_FC0ProtectionTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, DME_LocalTC0ReplayTimeOutVal, DL_TC0ReplayTimeOutVal_Default)) return (ENXIO); if (ufshci_uic_send_dme_set(ctrlr, DME_LocalAFC0ReqTimeOutVal, DL_AFC0ReqTimeOutVal_Default)) return (ENXIO); /* Set TX/RX PWRMode */ + power_mode = (fast_mode << rx_bit_shift) | fast_mode; if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode)) return (ENXIO); /* Wait for the power mode change. */ if (ufshci_uic_power_mode_ready(ctrlr)) { ufshci_reg_dump(ctrlr); return (ENXIO); } /* Clear 'Power Mode completion status' */ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UPMS)); if (ctrlr->quirks & UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE) { /* * Intel Lake-field UFSHCI has a quirk. * We need to wait 1250us and clear dme error. */ pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1)); /* Test with dme_peer_get to make sure there are no errors. */ - if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, NULL)) + if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, + &peer_granularity)) return (ENXIO); } return (0); } int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr) { /* TODO: Need to implement */ return (0); } int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr) { struct ufshci_device *device = &ctrlr->ufs_dev; /* * The device density unit is defined in the spec as 512. * qTotalRawDeviceCapacity uses big-endian byte ordering. 
*/ const uint32_t device_density_unit = 512; uint32_t ver; int error; error = ufshci_dev_read_device_descriptor(ctrlr, &device->dev_desc); if (error) return (error); ver = be16toh(device->dev_desc.wSpecVersion); - ufshci_printf(ctrlr, "UFS device spec version %u.%u%u\n", + ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n", UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver), UFSHCIV(UFSHCI_VER_REG_VS, ver)); ufshci_printf(ctrlr, "%u enabled LUNs found\n", device->dev_desc.bNumberLU); error = ufshci_dev_read_geometry_descriptor(ctrlr, &device->geo_desc); if (error) return (error); if (device->geo_desc.bMaxNumberLU == 0) { device->max_lun_count = 8; } else if (device->geo_desc.bMaxNumberLU == 1) { device->max_lun_count = 32; } else { ufshci_printf(ctrlr, "Invalid Geometry Descriptor bMaxNumberLU value=%d\n", device->geo_desc.bMaxNumberLU); return (ENXIO); } ctrlr->max_lun_count = device->max_lun_count; ufshci_printf(ctrlr, "UFS device total size is %lu bytes\n", be64toh(device->geo_desc.qTotalRawDeviceCapacity) * device_density_unit); return (0); } + +static int +ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr) +{ + struct ufshci_device *dev = &ctrlr->ufs_dev; + int error; + + /* Enable WriteBooster */ + error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN); + if (error) { + ufshci_printf(ctrlr, "Failed to enable WriteBooster\n"); + return (error); + } + dev->is_wb_enabled = true; + + /* Enable WriteBooster buffer flush during hibernate */ + error = ufshci_dev_set_flag(ctrlr, + UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE); + if (error) { + ufshci_printf(ctrlr, + "Failed to enable WriteBooster buffer flush during hibernate\n"); + return (error); + } + + /* Enable WriteBooster buffer flush */ + error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN); + if (error) { + ufshci_printf(ctrlr, + "Failed to enable WriteBooster buffer flush\n"); + return (error); + } + dev->is_wb_flush_enabled = true; + + return (0); +} + +static int +ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr) +{ + struct ufshci_device *dev = &ctrlr->ufs_dev; + int error; + + /* Disable WriteBooster buffer flush */ + error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN); + if (error) { + ufshci_printf(ctrlr, + "Failed to disable WriteBooster buffer flush\n"); + return (error); + } + dev->is_wb_flush_enabled = false; + + /* Disable WriteBooster buffer flush during hibernate */ + error = ufshci_dev_clear_flag(ctrlr, + UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE); + if (error) { + ufshci_printf(ctrlr, + "Failed to disable WriteBooster buffer flush during hibernate\n"); + return (error); + } + + /* Disable WriteBooster */ + error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN); + if (error) { + ufshci_printf(ctrlr, "Failed to disable WriteBooster\n"); + return (error); + } + dev->is_wb_enabled = false; + + return (0); +} + +static int +ufshci_dev_is_write_booster_buffer_life_time_left( + struct ufshci_controller *ctrlr, bool *is_life_time_left) +{ + struct ufshci_device *dev = &ctrlr->ufs_dev; + uint8_t buffer_lun; + uint64_t life_time; + uint32_t error; + + if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) + buffer_lun = dev->wb_dedicated_lu; + else + buffer_lun = 0; + + error = ufshci_dev_read_attribute(ctrlr, + UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time); + if (error) + return (error); + + *is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED); + + return (0); +} + 
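For orientation, here is a sketch of how the lifetime helper above and the enable/disable pair are meant to combine. The periodic health-check hook and its name are illustrative only and not part of this change; the driver's real consumer is ufshci_dev_config_write_booster() further below:

static void
ufshci_dev_wb_health_check(struct ufshci_controller *ctrlr)
{
	bool is_life_time_left;

	if (!ctrlr->ufs_dev.is_wb_enabled)
		return;
	/* Read the lifetime estimate; on error, simply try again later. */
	if (ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
	    &is_life_time_left))
		return;
	/* Once the buffer's P/E cycles are spent, retire WriteBooster. */
	if (!is_life_time_left)
		(void)ufshci_dev_disable_write_booster(ctrlr);
}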
+/* + * This function is not yet in use. It will be used when suspend/resume is + * implemented. + */ +static __unused int +ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr, + bool *need_flush) +{ + struct ufshci_device *dev = &ctrlr->ufs_dev; + bool is_life_time_left = false; + uint64_t available_buffer_size, current_buffer_size; + uint8_t buffer_lun; + uint32_t error; + + *need_flush = false; + + if (!dev->is_wb_enabled) + return (0); + + error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr, + &is_life_time_left); + if (error) + return (error); + + if (!is_life_time_left) + return (ufshci_dev_disable_write_booster(ctrlr)); + + if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) + buffer_lun = dev->wb_dedicated_lu; + else + buffer_lun = 0; + + error = ufshci_dev_read_attribute(ctrlr, + UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0, + &available_buffer_size); + if (error) + return (error); + + switch (dev->wb_user_space_config_option) { + case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION: + *need_flush = (available_buffer_size <= + UFSHCI_ATTR_WB_AVAILABLE_10); + break; + case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE: + /* + * In PRESERVE USER SPACE mode, a flush should be performed + * when the current buffer size is greater than zero and the + * available buffer size has fallen below + * write_booster_flush_threshold. + */ + error = ufshci_dev_read_attribute(ctrlr, + UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0, + &current_buffer_size); + if (error) + return (error); + + if (current_buffer_size == 0) + return (0); + + *need_flush = (available_buffer_size < + dev->write_booster_flush_threshold); + break; + default: + ufshci_printf(ctrlr, + "Invalid bWriteBoosterBufferPreserveUserSpaceEn value\n"); + return (EINVAL); + } + + /* + * TODO: Need to handle WRITEBOOSTER_FLUSH_NEEDED exception case from + * wExceptionEventStatus attribute. + */ + + return (0); +} + +int +ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr) +{ + struct ufshci_device *dev = &ctrlr->ufs_dev; + uint32_t extended_ufs_feature_support; + uint32_t alloc_units = 0; + struct ufshci_unit_descriptor unit_desc; + uint8_t lun; + bool is_life_time_left; + uint32_t mega_byte = 1024 * 1024; + uint32_t error = 0; + + extended_ufs_feature_support = be32toh( + dev->dev_desc.dExtendedUfsFeaturesSupport); + if (!(extended_ufs_feature_support & + UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) { + /* This device does not support WriteBooster */ + return (0); + } + + if (ufshci_dev_enable_write_booster(ctrlr)) + return (0); + + /* Get WriteBooster buffer parameters */ + dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType; + dev->wb_user_space_config_option = + dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn; + + /* + * Find the size of the write buffer. + * With LU-dedicated (00h), the WriteBooster buffer is assigned + * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h) + * uses a single device-wide buffer shared by multiple LUs. 
+ */ + if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) { + alloc_units = be32toh( + dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits); + ufshci_printf(ctrlr, + "WriteBooster buffer type = Shared, alloc_units=%u\n", + alloc_units); + } else if (dev->wb_buffer_type == + UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) { + ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n"); + for (lun = 0; lun < ctrlr->max_lun_count; lun++) { + /* Find a dedicated buffer using a unit descriptor */ + if (ufshci_dev_read_unit_descriptor(ctrlr, lun, + &unit_desc)) + continue; + + alloc_units = be32toh( + unit_desc.dLUNumWriteBoosterBufferAllocUnits); + if (alloc_units) { + dev->wb_dedicated_lu = lun; + break; + } + } + } else { + ufshci_printf(ctrlr, + "Unsupported WriteBooster buffer type: 0x%x\n", + dev->wb_buffer_type); + goto out; + } + + if (alloc_units == 0) { + ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n"); + goto out; + } + + dev->wb_buffer_size_mb = alloc_units * + dev->geo_desc.bAllocationUnitSize * + (be32toh(dev->geo_desc.dSegmentSize)) / + (mega_byte / UFSHCI_SECTOR_SIZE); + + /* Set to flush when 40% of the available buffer size remains */ + dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40; + + /* + * Check whether any WriteBooster buffer lifetime is left. + * The lifetime estimate is the percentage of buffer life used, based + * on P/E cycles. If "preserve user space" is enabled, writes to normal + * user space also consume WB life since the area is shared. + */ + error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr, + &is_life_time_left); + if (error) + goto out; + + if (!is_life_time_left) { + ufshci_printf(ctrlr, + "There is no WriteBooster buffer lifetime left.\n"); + goto out; + } + + ufshci_printf(ctrlr, "WriteBooster Enabled\n"); + return (0); +out: + ufshci_dev_disable_write_booster(ctrlr); + return (error); +} + diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c index 65a69ee0b518..d64b7526f713 100644 --- a/sys/dev/ufshci/ufshci_pci.c +++ b/sys/dev/ufshci/ufshci_pci.c @@ -1,260 +1,261 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. 
* Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #include #include #include #include #include #include #include #include #include #include #include "ufshci_private.h" static int ufshci_pci_probe(device_t); static int ufshci_pci_attach(device_t); static int ufshci_pci_detach(device_t); static int ufshci_pci_setup_interrupts(struct ufshci_controller *ctrlr); static device_method_t ufshci_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ufshci_pci_probe), DEVMETHOD(device_attach, ufshci_pci_attach), DEVMETHOD(device_detach, ufshci_pci_detach), /* TODO: Implement Suspend, Resume */ { 0, 0 } }; static driver_t ufshci_pci_driver = { "ufshci", ufshci_pci_methods, sizeof(struct ufshci_controller), }; DRIVER_MODULE(ufshci, pci, ufshci_pci_driver, 0, 0); static struct _pcsid { uint32_t devid; const char *desc; uint32_t ref_clk; uint32_t quirks; } pci_ids[] = { { 0x131b36, "QEMU UFS Host Controller", UFSHCI_REF_CLK_19_2MHz, UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE }, { 0x98fa8086, "Intel Lakefield UFS Host Controller", UFSHCI_REF_CLK_19_2MHz, UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE | - UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE }, + UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE | + UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY }, { 0x54ff8086, "Intel UFS Host Controller", UFSHCI_REF_CLK_19_2MHz }, { 0x00000000, NULL } }; static int ufshci_pci_probe(device_t device) { struct ufshci_controller *ctrlr = device_get_softc(device); uint32_t devid = pci_get_devid(device); struct _pcsid *ep = pci_ids; while (ep->devid && ep->devid != devid) ++ep; if (ep->devid) { ctrlr->quirks = ep->quirks; ctrlr->ref_clk = ep->ref_clk; } if (ep->desc) { device_set_desc(device, ep->desc); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int ufshci_pci_allocate_bar(struct ufshci_controller *ctrlr) { ctrlr->resource_id = PCIR_BAR(0); ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY, &ctrlr->resource_id, RF_ACTIVE); if (ctrlr->resource == NULL) { ufshci_printf(ctrlr, "unable to allocate pci resource\n"); return (ENOMEM); } ctrlr->bus_tag = rman_get_bustag(ctrlr->resource); ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource); ctrlr->regs = (struct ufshci_registers *)ctrlr->bus_handle; return (0); } static int ufshci_pci_attach(device_t dev) { struct ufshci_controller *ctrlr = device_get_softc(dev); int status; ctrlr->dev = dev; status = ufshci_pci_allocate_bar(ctrlr); if (status != 0) goto bad; pci_enable_busmaster(dev); status = ufshci_pci_setup_interrupts(ctrlr); if (status != 0) goto bad; return (ufshci_attach(dev)); bad: if (ctrlr->resource != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id, ctrlr->resource); } if (ctrlr->tag) bus_teardown_intr(dev, ctrlr->res, ctrlr->tag); if (ctrlr->res) bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(ctrlr->res), ctrlr->res); if (ctrlr->msi_count > 0) pci_release_msi(dev); return (status); } static int ufshci_pci_detach(device_t dev) { struct ufshci_controller *ctrlr = device_get_softc(dev); int error; error = ufshci_detach(dev); if (ctrlr->msi_count > 0) pci_release_msi(dev); pci_disable_busmaster(dev); return (error); } static int ufshci_pci_setup_shared(struct ufshci_controller *ctrlr, int rid) { int error; ctrlr->num_io_queues = 1; ctrlr->rid = rid; ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ, &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE); if (ctrlr->res == NULL) { ufshci_printf(ctrlr, "unable to allocate shared interrupt\n"); return (ENOMEM); } error = bus_setup_intr(ctrlr->dev, ctrlr->res, 
INTR_TYPE_MISC | INTR_MPSAFE, NULL, ufshci_ctrlr_shared_handler, ctrlr, &ctrlr->tag); if (error) { ufshci_printf(ctrlr, "unable to setup shared interrupt\n"); return (error); } return (0); } static int ufshci_pci_setup_interrupts(struct ufshci_controller *ctrlr) { device_t dev = ctrlr->dev; int force_intx = 0; int num_io_queues, per_cpu_io_queues, min_cpus_per_ioq; int num_vectors_requested; TUNABLE_INT_FETCH("hw.ufshci.force_intx", &force_intx); if (force_intx) goto intx; if (pci_msix_count(dev) == 0) goto msi; /* * Try to allocate one MSI-X per core for I/O queues, plus one * for admin queue, but accept single shared MSI-X if have to. * Fall back to MSI if can't get any MSI-X. */ /* * TODO: Need to implement MCQ(Multi Circular Queue) * Example: num_io_queues = mp_ncpus; */ num_io_queues = 1; TUNABLE_INT_FETCH("hw.ufshci.num_io_queues", &num_io_queues); if (num_io_queues < 1 || num_io_queues > mp_ncpus) num_io_queues = mp_ncpus; per_cpu_io_queues = 1; TUNABLE_INT_FETCH("hw.ufshci.per_cpu_io_queues", &per_cpu_io_queues); if (per_cpu_io_queues == 0) num_io_queues = 1; min_cpus_per_ioq = smp_threads_per_core; TUNABLE_INT_FETCH("hw.ufshci.min_cpus_per_ioq", &min_cpus_per_ioq); if (min_cpus_per_ioq > 1) { num_io_queues = min(num_io_queues, max(1, mp_ncpus / min_cpus_per_ioq)); } num_io_queues = min(num_io_queues, max(1, pci_msix_count(dev) - 1)); again: if (num_io_queues > vm_ndomains) num_io_queues -= num_io_queues % vm_ndomains; num_vectors_requested = min(num_io_queues + 1, pci_msix_count(dev)); ctrlr->msi_count = num_vectors_requested; if (pci_alloc_msix(dev, &ctrlr->msi_count) != 0) { ufshci_printf(ctrlr, "unable to allocate MSI-X\n"); ctrlr->msi_count = 0; goto msi; } if (ctrlr->msi_count == 1) return (ufshci_pci_setup_shared(ctrlr, 1)); if (ctrlr->msi_count != num_vectors_requested) { pci_release_msi(dev); num_io_queues = ctrlr->msi_count - 1; goto again; } ctrlr->num_io_queues = num_io_queues; return (0); msi: /* * Try to allocate 2 MSIs (admin and I/O queues), but accept single * shared if have to. Fall back to INTx if can't get any MSI. */ ctrlr->msi_count = min(pci_msi_count(dev), 2); if (ctrlr->msi_count > 0) { if (pci_alloc_msi(dev, &ctrlr->msi_count) != 0) { ufshci_printf(ctrlr, "unable to allocate MSI\n"); ctrlr->msi_count = 0; } else if (ctrlr->msi_count == 2) { ctrlr->num_io_queues = 1; return (0); } } intx: return (ufshci_pci_setup_shared(ctrlr, ctrlr->msi_count > 0 ? 1 : 0)); } diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h index 1a2742ae2e80..2e033f84c373 100644 --- a/sys/dev/ufshci/ufshci_private.h +++ b/sys/dev/ufshci/ufshci_private.h @@ -1,524 +1,537 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. 
* Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #ifndef __UFSHCI_PRIVATE_H__ #define __UFSHCI_PRIVATE_H__ #ifdef _KERNEL #include #else /* !_KERNEL */ #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ufshci.h" MALLOC_DECLARE(M_UFSHCI); #define UFSHCI_DEVICE_INIT_TIMEOUT_MS (2000) /* in milliseconds */ #define UFSHCI_UIC_CMD_TIMEOUT_MS (500) /* in milliseconds */ #define UFSHCI_DEFAULT_TIMEOUT_PERIOD (10) /* in seconds */ #define UFSHCI_MIN_TIMEOUT_PERIOD (5) /* in seconds */ #define UFSHCI_MAX_TIMEOUT_PERIOD (120) /* in seconds */ #define UFSHCI_DEFAULT_RETRY_COUNT (4) #define UFSHCI_UTR_ENTRIES (32) #define UFSHCI_UTRM_ENTRIES (8) +#define UFSHCI_SECTOR_SIZE (512) + struct ufshci_controller; struct ufshci_completion_poll_status { struct ufshci_completion cpl; int done; bool error; }; struct ufshci_request { struct ufshci_upiu request_upiu; size_t request_size; size_t response_size; struct memdesc payload; enum ufshci_data_direction data_direction; ufshci_cb_fn_t cb_fn; void *cb_arg; bool is_admin; int32_t retries; bool payload_valid; bool timeout; bool spare[2]; /* Future use */ STAILQ_ENTRY(ufshci_request) stailq; }; enum ufshci_slot_state { UFSHCI_SLOT_STATE_FREE = 0x0, UFSHCI_SLOT_STATE_RESERVED = 0x1, UFSHCI_SLOT_STATE_SCHEDULED = 0x2, UFSHCI_SLOT_STATE_TIMEOUT = 0x3, UFSHCI_SLOT_STATE_NEED_ERROR_HANDLING = 0x4, }; struct ufshci_tracker { struct ufshci_request *req; struct ufshci_req_queue *req_queue; struct ufshci_hw_queue *hwq; uint8_t slot_num; enum ufshci_slot_state slot_state; size_t response_size; sbintime_t deadline; bus_dmamap_t payload_dma_map; uint64_t payload_addr; struct ufshci_utp_cmd_desc *ucd; bus_addr_t ucd_bus_addr; uint16_t prdt_off; uint16_t prdt_entry_cnt; }; enum ufshci_queue_mode { UFSHCI_Q_MODE_SDB = 0x00, /* Single Doorbell Mode*/ UFSHCI_Q_MODE_MCQ = 0x01, /* Multi-Circular Queue Mode*/ }; /* * UFS uses slot-based Single Doorbell (SDB) mode for request submission by * default and additionally supports Multi-Circular Queue (MCQ) in UFS 4.0. To * minimize duplicated code between SDB and MCQ, mode dependent operations are * extracted into ufshci_qops. */ struct ufshci_qops { int (*construct)(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt); void (*destroy)(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); struct ufshci_hw_queue *(*get_hw_queue)( struct ufshci_req_queue *req_queue); int (*enable)(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); int (*reserve_slot)(struct ufshci_req_queue *req_queue, struct ufshci_tracker **tr); int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue, struct ufshci_tracker **tr); void (*ring_doorbell)(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); bool (*is_doorbell_cleared)(struct ufshci_controller *ctrlr, uint8_t slot); void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); bool (*process_cpl)(struct ufshci_req_queue *req_queue); int (*get_inflight_io)(struct ufshci_controller *ctrlr); }; #define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */ /* * Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ * (ring buffer) modes. Fields are shared; some such as sq_head, sq_tail and * cq_head are not used in SDB but used in MCQ. 
*/ struct ufshci_hw_queue { uint32_t id; int domain; int cpu; union { struct ufshci_utp_xfer_req_desc *utrd; struct ufshci_utp_task_mgmt_req_desc *utmrd; }; bus_dma_tag_t dma_tag_queue; bus_dmamap_t queuemem_map; bus_addr_t req_queue_addr; bus_addr_t *ucd_bus_addr; uint32_t num_entries; uint32_t num_trackers; /* * A Request List using the single doorbell method uses a dedicated * ufshci_tracker, one per slot. */ struct ufshci_tracker **act_tr; uint32_t sq_head; /* MCQ mode */ uint32_t sq_tail; /* MCQ mode */ uint32_t cq_head; /* MCQ mode */ uint32_t phase; int64_t num_cmds; int64_t num_intr_handler_calls; int64_t num_retries; int64_t num_failures; struct mtx_padalign qlock; }; struct ufshci_req_queue { struct ufshci_controller *ctrlr; int domain; /* * queue_mode: active transfer scheme * UFSHCI_Q_MODE_SDB - legacy single-doorbell list * UFSHCI_Q_MODE_MCQ - modern multi-circular queue (UFSHCI 4.0+) */ enum ufshci_queue_mode queue_mode; uint8_t num_q; struct ufshci_hw_queue *hwq; struct ufshci_qops qops; bool is_task_mgmt; uint32_t num_entries; uint32_t num_trackers; /* Shared DMA resource */ struct ufshci_utp_cmd_desc *ucd; bus_dma_tag_t dma_tag_ucd; bus_dma_tag_t dma_tag_payload; bus_dmamap_t ucdmem_map; }; struct ufshci_device { uint32_t max_lun_count; struct ufshci_device_descriptor dev_desc; struct ufshci_geometry_descriptor geo_desc; uint32_t unipro_version; + + /* WriteBooster */ + bool is_wb_enabled; + bool is_wb_flush_enabled; + uint32_t wb_buffer_type; + uint32_t wb_buffer_size_mb; + uint32_t wb_user_space_config_option; + uint8_t wb_dedicated_lu; + uint32_t write_booster_flush_threshold; }; /* * One of these per allocated device. */ struct ufshci_controller { device_t dev; uint32_t quirks; #define UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE \ 1 /* QEMU does not support UIC POWER MODE */ #define UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE \ 2 /* Need an additional 200 us of PA_TActivate */ #define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \ 4 /* Need to wait 1250us after power mode change */ - +#define UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY \ + 8 /* Need to change the number of lanes before changing HS-GEAR. */ uint32_t ref_clk; struct cam_sim *ufshci_sim; struct cam_path *ufshci_path; struct mtx sc_mtx; uint32_t sc_unit; uint8_t sc_name[16]; struct ufshci_device ufs_dev; bus_space_tag_t bus_tag; bus_space_handle_t bus_handle; int resource_id; struct resource *resource; /* Currently, there is no UFSHCI that supports MSI or MSI-X. */ int msi_count; /* Fields for tracking progress during controller initialization. */ struct intr_config_hook config_hook; /* For shared legacy interrupt. 
*/ int rid; struct resource *res; void *tag; uint32_t major_version; uint32_t minor_version; uint32_t num_io_queues; uint32_t max_hw_pend_io; /* Maximum logical unit number */ uint32_t max_lun_count; /* Maximum i/o size in bytes */ uint32_t max_xfer_size; /* Controller capacity */ uint32_t cap; /* Page size and log2(page_size) - 12 that we're currently using */ uint32_t page_size; /* Timeout value on device initialization */ uint32_t device_init_timeout_in_ms; /* Timeout value on UIC command */ uint32_t uic_cmd_timeout_in_ms; /* UTMR/UTR queue timeout period in seconds */ uint32_t timeout_period; /* UTMR/UTR queue retry count */ uint32_t retry_count; /* UFS Host Controller Interface Registers */ struct ufshci_registers *regs; /* UFS Transport Protocol Layer (UTP) */ struct ufshci_req_queue task_mgmt_req_queue; struct ufshci_req_queue transfer_req_queue; bool is_single_db_supported; /* 0 = supported */ bool is_mcq_supported; /* 1 = supported */ /* UFS Interconnect Layer (UIC) */ struct mtx uic_cmd_lock; uint32_t unipro_version; uint8_t hs_gear; uint32_t tx_lanes; uint32_t rx_lanes; uint32_t max_rx_hs_gear; uint32_t max_tx_lanes; uint32_t max_rx_lanes; bool is_failed; }; #define ufshci_mmio_offsetof(reg) offsetof(struct ufshci_registers, reg) #define ufshci_mmio_read_4(sc, reg) \ bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \ ufshci_mmio_offsetof(reg)) #define ufshci_mmio_write_4(sc, reg, val) \ bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \ ufshci_mmio_offsetof(reg), val) #define ufshci_printf(ctrlr, fmt, args...) \ device_printf(ctrlr->dev, fmt, ##args) /* UFSHCI */ void ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl, bool error); /* SIM */ int ufshci_sim_attach(struct ufshci_controller *ctrlr); void ufshci_sim_detach(struct ufshci_controller *ctrlr); /* Controller */ int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev); void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev); int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr); /* ctrlr defined as void * to allow use with config_intrhook. 
*/ void ufshci_ctrlr_start_config_hook(void *arg); void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr); int ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr, struct ufshci_request *req); int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr, struct ufshci_request *req); int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr, struct ufshci_request *req); int ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr); void ufshci_reg_dump(struct ufshci_controller *ctrlr); /* Device */ int ufshci_dev_init(struct ufshci_controller *ctrlr); int ufshci_dev_reset(struct ufshci_controller *ctrlr); int ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr); int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr); int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr); int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr); int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr); +int ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr); /* Controller Command */ void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun, uint8_t task_tag, uint8_t iid); void ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn, void *cb_arg); void ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn, void *cb_arg, struct ufshci_query_param param); void ufshci_ctrlr_cmd_send_scsi_command(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t *cmd_ptr, uint8_t cmd_len, uint32_t data_len, uint8_t lun, bool is_write); /* Request Queue */ bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue); int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr); int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr); void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr); void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr); int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr); int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr); void ufshci_req_queue_fail(struct ufshci_controller *ctrlr, struct ufshci_hw_queue *hwq); int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue, struct ufshci_request *req, bool is_admin); void ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr); /* Request Single Doorbell Queue */ int ufshci_req_sdb_construct(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt); void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue( struct ufshci_req_queue *req_queue); int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue, struct ufshci_tracker **tr); void ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); void ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); bool ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr, uint8_t slot); bool ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr, uint8_t slot); void ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); void 
ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue); int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr); /* UIC Command */ int ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr); int ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr); int ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr); int ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t *return_value); int ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t value); int ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t *return_value); int ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t value); int ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr); /* SYSCTL */ void ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr); int ufshci_attach(device_t dev); int ufshci_detach(device_t dev); /* * Wait for a command to complete using the ufshci_completion_poll_cb. Used in * limited contexts where the caller knows it's OK to block briefly while the * command runs. The ISR will run the callback which will set status->done to * true, usually within microseconds. If not, then after one second timeout * handler should reset the controller and abort all outstanding requests * including this polled one. If still not after ten seconds, then something is * wrong with the driver, and panic is the only way to recover. * * Most commands using this interface aren't actual I/O to the drive's media so * complete within a few microseconds. Adaptively spin for one tick to catch the * vast majority of these without waiting for a tick plus scheduling delays. * Since these are on startup, this drastically reduces startup time. 
*/ static __inline void ufshci_completion_poll(struct ufshci_completion_poll_status *status) { int timeout = ticks + 10 * hz; sbintime_t delta_t = SBT_1US; while (!atomic_load_acq_int(&status->done)) { if (timeout - ticks < 0) panic( "UFSHCI polled command failed to complete within 10s."); pause_sbt("ufshci_cpl", delta_t, 0, C_PREL(1)); delta_t = min(SBT_1MS, delta_t * 3 / 2); } } static __inline void ufshci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) { uint64_t *bus_addr = (uint64_t *)arg; KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg)); if (error != 0) printf("ufshci_single_map err %d\n", error); *bus_addr = seg[0].ds_addr; } static __inline struct ufshci_request * _ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg) { struct ufshci_request *req; KASSERT(how == M_WAITOK || how == M_NOWAIT, ("_ufshci_allocate_request: invalid how %d", how)); req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO); if (req != NULL) { req->cb_fn = cb_fn; req->cb_arg = cb_arg; req->timeout = true; } return (req); } static __inline struct ufshci_request * ufshci_allocate_request_vaddr(void *payload, uint32_t payload_size, const int how, ufshci_cb_fn_t cb_fn, void *cb_arg) { struct ufshci_request *req; req = _ufshci_allocate_request(how, cb_fn, cb_arg); if (req != NULL) { if (payload_size) { req->payload = memdesc_vaddr(payload, payload_size); req->payload_valid = true; } } return (req); } static __inline struct ufshci_request * ufshci_allocate_request_bio(struct bio *bio, const int how, ufshci_cb_fn_t cb_fn, void *cb_arg) { struct ufshci_request *req; req = _ufshci_allocate_request(how, cb_fn, cb_arg); if (req != NULL) { req->payload = memdesc_bio(bio); req->payload_valid = true; } return (req); } #define ufshci_free_request(req) free(req, M_UFSHCI) void ufshci_ctrlr_shared_handler(void *arg); #endif /* __UFSHCI_PRIVATE_H__ */ diff --git a/sys/dev/ufshci/ufshci_reg.h b/sys/dev/ufshci/ufshci_reg.h index 6c9b3e2c8c04..6d5768505102 100644 --- a/sys/dev/ufshci/ufshci_reg.h +++ b/sys/dev/ufshci/ufshci_reg.h @@ -1,469 +1,469 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. 
* Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #ifndef __UFSHCI_REG_H__ #define __UFSHCI_REG_H__ #include #include /* UFSHCI 4.1, section 5.1 Register Map */ struct ufshci_registers { /* Host Capabilities (00h) */ uint32_t cap; /* Host Controller Capabilities */ uint32_t mcqcap; /* Multi-Circular Queue Capability Register */ uint32_t ver; /* UFS Version */ uint32_t ext_cap; /* Extended Controller Capabilities */ uint32_t hcpid; /* Product ID */ uint32_t hcmid; /* Manufacturer ID */ uint32_t ahit; /* Auto-Hibernate Idle Timer */ uint32_t reserved1; /* Operation and Runtime (20h) */ uint32_t is; /* Interrupt Status */ uint32_t ie; /* Interrupt Enable */ uint32_t reserved2; uint32_t hcsext; /* Host Controller Status Extended */ uint32_t hcs; /* Host Controller Status */ uint32_t hce; /* Host Controller Enable */ uint32_t uecpa; /* Host UIC Error Code PHY Adapter Layer */ uint32_t uecdl; /* Host UIC Error Code Data Link Layer */ uint32_t uecn; /* Host UIC Error Code Network Layer */ uint32_t uect; /* Host UIC Error Code Transport Layer */ uint32_t uecdme; /* Host UIC Error Code DME */ uint32_t utriacr; /* Interrupt Aggregation Control */ /* UTP Transfer (50h) */ uint32_t utrlba; /* UTRL Base Address */ uint32_t utrlbau; /* UTRL Base Address Upper 32-Bits */ uint32_t utrldbr; /* UTRL DoorBell Register */ uint32_t utrlclr; /* UTRL Clear Register */ uint32_t utrlrsr; /* UTRL Run-Stop Register */ uint32_t utrlcnr; /* UTRL Completion Notification */ uint64_t reserved3; /* UTP Task Management (70h) */ uint32_t utmrlba; /* UTMRL Base Address */ uint32_t utmrlbau; /* UTMRL Base Address Upper 32-Bits */ uint32_t utmrldbr; /* UTMRL DoorBell Register */ uint32_t utmrlclr; /* UTMRL Clear Register */ uint32_t utmrlrsr; /* UTMRL Run-Stop Register */ uint8_t reserved4[12]; /* UIC Command (90h) */ uint32_t uiccmd; /* UIC Command Register */ uint32_t ucmdarg1; /* UIC Command Argument 1 */ uint32_t ucmdarg2; /* UIC Command Argument 2 */ uint32_t ucmdarg3; /* UIC Command Argument 3 */ uint8_t reserved5[16]; /* UMA (B0h) */ uint8_t reserved6[16]; /* Reserved for Unified Memory Extension */ /* Vendor Specific (C0h) */ uint8_t vendor[64]; /* Vendor Specific Registers */ /* Crypto (100h) */ uint32_t ccap; /* Crypto Capability */ uint32_t reserved7[511]; /* Config (300h) */ uint32_t config; /* Global Configuration */ uint8_t reserved9[124]; /* MCQ Configuration (380h) */ uint32_t mcqconfig; /* MCQ Config Register */ /* Event Specific Interrupt Lower Base Address */ uint32_t esilba; /* Event Specific Interrupt Upper Base Address */ uint32_t esiuba; /* TODO: Need to define SQ/CQ registers */ }; /* Register field definitions */ #define UFSHCI__REG__SHIFT (0) #define UFSHCI__REG__MASK (0) /* * UFSHCI 4.1, section 5.2.1, Offset 00h: CAP * Controller Capabilities */ #define UFSHCI_CAP_REG_NUTRS_SHIFT (0) #define UFSHCI_CAP_REG_NUTRS_MASK (0xFF) #define UFSHCI_CAP_REG_NORTT_SHIFT (8) #define UFSHCI_CAP_REG_NORTT_MASK (0xFF) #define UFSHCI_CAP_REG_NUTMRS_SHIFT (16) #define UFSHCI_CAP_REG_NUTMRS_MASK (0x7) #define UFSHCI_CAP_REG_EHSLUTRDS_SHIFT (22) #define UFSHCI_CAP_REG_EHSLUTRDS_MASK (0x1) #define UFSHCI_CAP_REG_AUTOH8_SHIFT (23) #define UFSHCI_CAP_REG_AUTOH8_MASK (0x1) #define UFSHCI_CAP_REG_64AS_SHIFT (24) #define UFSHCI_CAP_REG_64AS_MASK (0x1) #define UFSHCI_CAP_REG_OODDS_SHIFT (25) #define UFSHCI_CAP_REG_OODDS_MASK (0x1) #define UFSHCI_CAP_REG_UICDMETMS_SHIFT (26) #define UFSHCI_CAP_REG_UICDMETMS_MASK (0x1) #define UFSHCI_CAP_REG_CS_SHIFT (28) #define UFSHCI_CAP_REG_CS_MASK (0x1) #define 
UFSHCI_CAP_REG_LSDBS_SHIFT (29) #define UFSHCI_CAP_REG_LSDBS_MASK (0x1) #define UFSHCI_CAP_REG_MCQS_SHIFT (30) #define UFSHCI_CAP_REG_MCQS_MASK (0x1) #define UFSHCI_CAP_REG_EIS_SHIFT (31) #define UFSHCI_CAP_REG_EIS_MASK (0x1) /* * UFSHCI 4.1, section 5.2.2, Offset 04h: MCQCAP * Multi-Circular Queue Capability Register */ #define UFSHCI_MCQCAP_REG_MAXQ_SHIFT (0) #define UFSHCI_MCQCAP_REG_MAXQ_MASK (0xFF) #define UFSHCI_MCQCAP_REG_SP_SHIFT (8) #define UFSHCI_MCQCAP_REG_SP_MASK (0x1) #define UFSHCI_MCQCAP_REG_RRP_SHIFT (9) #define UFSHCI_MCQCAP_REG_RRP_MASK (0x1) #define UFSHCI_MCQCAP_REG_EIS_SHIFT (10) #define UFSHCI_MCQCAP_REG_EIS_MASK (0x1) #define UFSHCI_MCQCAP_REG_QCFGPTR_SHIFT (16) #define UFSHCI_MCQCAP_REG_QCFGPTR_MASK (0xFF) #define UFSHCI_MCQCAP_REG_MIAG_SHIFT (24) #define UFSHCI_MCQCAP_REG_MIAG_MASK (0xFF) /* * UFSHCI 4.1, section 5.2.3, Offset 08h: VER * UFS Version */ #define UFSHCI_VER_REG_VS_SHIFT (0) #define UFSHCI_VER_REG_VS_MASK (0xF) #define UFSHCI_VER_REG_MNR_SHIFT (4) #define UFSHCI_VER_REG_MNR_MASK (0xF) #define UFSHCI_VER_REG_MJR_SHIFT (8) #define UFSHCI_VER_REG_MJR_MASK (0xFF) /* * UFSHCI 4.1, section 5.2.4, Offset 0Ch: EXT_CAP * Extended Controller Capabilities */ #define UFSHCI_EXTCAP_REG_HOST_HINT_CACHE_SIZE_SHIFT (0) #define UFSHCI_EXTCAP_REG_HOST_HINT_CACHE_SIZE_MASK (0xFFFF) /* * UFSHCI 4.1, section 5.2.5, Offset 10h: HCPID * Host Controller Identification Descriptor – Product ID */ #define UFSHCI_HCPID_REG_PID_SHIFT (0) #define UFSHCI_HCPID_REG_PID_MASK (0xFFFFFFFF) /* * UFSHCI 4.1, section 5.2.6, Offset 14h: HCMID * Host Controller Identification Descriptor – Manufacturer ID */ #define UFSHCI_HCMID_REG_MIC_SHIFT (0) #define UFSHCI_HCMID_REG_MIC_MASK (0xFFFF) #define UFSHCI_HCMID_REG_BI_SHIFT (8) #define UFSHCI_HCMID_REG_BI_MASK (0xFFFF) /* * UFSHCI 4.1, section 5.2.7, Offset 18h: AHIT * Auto-Hibernate Idle Timer */ #define UFSHCI_AHIT_REG_AH8ITV_SHIFT (0) #define UFSHCI_AHIT_REG_AH8ITV_MASK (0x3FF) #define UFSHCI_AHIT_REG_TS_SHIFT (10) #define UFSHCI_AHIT_REG_TS_MASK (0x7) /* * UFSHCI 4.1, section 5.3.1, Offset 20h: IS * Interrupt Status */ #define UFSHCI_IS_REG_UTRCS_SHIFT (0) #define UFSHCI_IS_REG_UTRCS_MASK (0x1) #define UFSHCI_IS_REG_UDEPRI_SHIFT (1) #define UFSHCI_IS_REG_UDEPRI_MASK (0x1) #define UFSHCI_IS_REG_UE_SHIFT (2) #define UFSHCI_IS_REG_UE_MASK (0x1) #define UFSHCI_IS_REG_UTMS_SHIFT (3) #define UFSHCI_IS_REG_UTMS_MASK (0x1) #define UFSHCI_IS_REG_UPMS_SHIFT (4) #define UFSHCI_IS_REG_UPMS_MASK (0x1) #define UFSHCI_IS_REG_UHXS_SHIFT (5) #define UFSHCI_IS_REG_UHXS_MASK (0x1) #define UFSHCI_IS_REG_UHES_SHIFT (6) #define UFSHCI_IS_REG_UHES_MASK (0x1) #define UFSHCI_IS_REG_ULLS_SHIFT (7) #define UFSHCI_IS_REG_ULLS_MASK (0x1) #define UFSHCI_IS_REG_ULSS_SHIFT (8) #define UFSHCI_IS_REG_ULSS_MASK (0x1) #define UFSHCI_IS_REG_UTMRCS_SHIFT (9) #define UFSHCI_IS_REG_UTMRCS_MASK (0x1) #define UFSHCI_IS_REG_UCCS_SHIFT (10) #define UFSHCI_IS_REG_UCCS_MASK (0x1) #define UFSHCI_IS_REG_DFES_SHIFT (11) #define UFSHCI_IS_REG_DFES_MASK (0x1) #define UFSHCI_IS_REG_UTPES_SHIFT (12) #define UFSHCI_IS_REG_UTPES_MASK (0x1) #define UFSHCI_IS_REG_HCFES_SHIFT (16) #define UFSHCI_IS_REG_HCFES_MASK (0x1) #define UFSHCI_IS_REG_SBFES_SHIFT (17) #define UFSHCI_IS_REG_SBFES_MASK (0x1) #define UFSHCI_IS_REG_CEFES_SHIFT (18) #define UFSHCI_IS_REG_CEFES_MASK (0x1) #define UFSHCI_IS_REG_SQES_SHIFT (19) #define UFSHCI_IS_REG_SQES_MASK (0x1) #define UFSHCI_IS_REG_CQES_SHIFT (20) #define UFSHCI_IS_REG_CQES_MASK (0x1) #define UFSHCI_IS_REG_IAGES_SHIFT (21) #define UFSHCI_IS_REG_IAGES_MASK 
(0x1) /* * UFSHCI 4.1, section 5.3.2, Offset 24h: IE * Interrupt Enable */ #define UFSHCI_IE_REG_UTRCE_SHIFT (0) #define UFSHCI_IE_REG_UTRCE_MASK (0x1) #define UFSHCI_IE_REG_UDEPRIE_SHIFT (1) #define UFSHCI_IE_REG_UDEPRIE_MASK (0x1) #define UFSHCI_IE_REG_UEE_SHIFT (2) #define UFSHCI_IE_REG_UEE_MASK (0x1) #define UFSHCI_IE_REG_UTMSE_SHIFT (3) #define UFSHCI_IE_REG_UTMSE_MASK (0x1) #define UFSHCI_IE_REG_UPMSE_SHIFT (4) #define UFSHCI_IE_REG_UPMSE_MASK (0x1) #define UFSHCI_IE_REG_UHXSE_SHIFT (5) #define UFSHCI_IE_REG_UHXSE_MASK (0x1) #define UFSHCI_IE_REG_UHESE_SHIFT (6) #define UFSHCI_IE_REG_UHESE_MASK (0x1) #define UFSHCI_IE_REG_ULLSE_SHIFT (7) #define UFSHCI_IE_REG_ULLSE_MASK (0x1) #define UFSHCI_IE_REG_ULSSE_SHIFT (8) #define UFSHCI_IE_REG_ULSSE_MASK (0x1) #define UFSHCI_IE_REG_UTMRCE_SHIFT (9) #define UFSHCI_IE_REG_UTMRCE_MASK (0x1) #define UFSHCI_IE_REG_UCCE_SHIFT (10) #define UFSHCI_IE_REG_UCCE_MASK (0x1) #define UFSHCI_IE_REG_DFEE_SHIFT (11) #define UFSHCI_IE_REG_DFEE_MASK (0x1) #define UFSHCI_IE_REG_UTPEE_SHIFT (12) #define UFSHCI_IE_REG_UTPEE_MASK (0x1) #define UFSHCI_IE_REG_HCFEE_SHIFT (16) #define UFSHCI_IE_REG_HCFEE_MASK (0x1) #define UFSHCI_IE_REG_SBFEE_SHIFT (17) #define UFSHCI_IE_REG_SBFEE_MASK (0x1) #define UFSHCI_IE_REG_CEFEE_SHIFT (18) #define UFSHCI_IE_REG_CEFEE_MASK (0x1) #define UFSHCI_IE_REG_SQEE_SHIFT (19) #define UFSHCI_IE_REG_SQEE_MASK (0x1) #define UFSHCI_IE_REG_CQEE_SHIFT (20) #define UFSHCI_IE_REG_CQEE_MASK (0x1) #define UFSHCI_IE_REG_IAGEE_SHIFT (21) #define UFSHCI_IE_REG_IAGEE_MASK (0x1) /* * UFSHCI 4.1, section 5.3.3, Offset 2Ch: HCSEXT * Host Controller Status Extended */ #define UFSHCI_HCSEXT_IIDUTPE_SHIFT (0) #define UFSHCI_HCSEXT_IIDUTPE_MASK (0xF) #define UFSHCI_HCSEXT_EXT_IIDUTPE_SHIFT (4) #define UFSHCI_HCSEXT_EXT_IIDUTPE_MASK (0xF) /* * UFSHCI 4.1, section 5.3.4, Offset 30h: HCS * Host Controller Status */ #define UFSHCI_HCS_REG_DP_SHIFT (0) #define UFSHCI_HCS_REG_DP_MASK (0x1) #define UFSHCI_HCS_REG_UTRLRDY_SHIFT (1) #define UFSHCI_HCS_REG_UTRLRDY_MASK (0x1) #define UFSHCI_HCS_REG_UTMRLRDY_SHIFT (2) #define UFSHCI_HCS_REG_UTMRLRDY_MASK (0x1) #define UFSHCI_HCS_REG_UCRDY_SHIFT (3) #define UFSHCI_HCS_REG_UCRDY_MASK (0x1) -#define UFSHCI_HCS_REG_UPMCRS_SHIFT (7) +#define UFSHCI_HCS_REG_UPMCRS_SHIFT (8) #define UFSHCI_HCS_REG_UPMCRS_MASK (0x7) #define UFSHCI_HCS_REG_UTPEC_SHIFT (12) #define UFSHCI_HCS_REG_UTPEC_MASK (0xF) #define UFSHCI_HCS_REG_TTAGUTPE_SHIFT (16) #define UFSHCI_HCS_REG_TTAGUTPE_MASK (0xFF) #define UFSHCI_HCS_REG_TLUNUTPE_SHIFT (24) #define UFSHCI_HCS_REG_TLUNUTPE_MASK (0xFF) /* * UFSHCI 4.1, section 5.3.5, Offset 34h: HCE * Host Controller Enable */ #define UFSHCI_HCE_REG_HCE_SHIFT (0) #define UFSHCI_HCE_REG_HCE_MASK (0x1) #define UFSHCI_HCE_REG_CGE_SHIFT (1) #define UFSHCI_HCE_REG_CGE_MASK (0x1) /* * UFSHCI 4.1, section 5.3.6, Offset 38h: UECPA * Host UIC Error Code PHY Adapter Layer */ #define UFSHCI_UECPA_REG_EC_SHIFT (0) #define UFSHCI_UECPA_REG_EC_MASK (0xF) #define UFSHCI_UECPA_REG_ERR_SHIFT (31) #define UFSHCI_UECPA_REG_ERR_MASK (0x1) /* * UFSHCI 4.1, section 5.3.7, Offset 3Ch: UECDL * Host UIC Error Code Data Link Layer */ #define UFSHCI_UECDL_REG_EC_SHIFT (0) #define UFSHCI_UECDL_REG_EC_MASK (0xFFFF) #define UFSHCI_UECDL_REG_ERR_SHIFT (31) #define UFSHCI_UECDL_REG_ERR_MASK (0x1) /* * UFSHCI 4.1, section 5.3.8, Offset 40h: UECN * Host UIC Error Code Network Layer */ #define UFSHCI_UECN_REG_EC_SHIFT (0) #define UFSHCI_UECN_REG_EC_MASK (0x7) #define UFSHCI_UECN_REG_ERR_SHIFT (31) #define UFSHCI_UECN_REG_ERR_MASK (0x1) /* * UFSHCI 
4.1, section 5.3.9, Offset 44h: UECT * Host UIC Error Code Transport Layer */ #define UFSHCI_UECT_REG_EC_SHIFT (0) #define UFSHCI_UECT_REG_EC_MASK (0x7F) #define UFSHCI_UECT_REG_ERR_SHIFT (31) #define UFSHCI_UECT_REG_ERR_MASK (0x1) /* * UFSHCI 4.1, section 5.3.10, Offset 48h: UECDME * Host UIC Error Code */ #define UFSHCI_UECDME_REG_EC_SHIFT (0) #define UFSHCI_UECDME_REG_EC_MASK (0xF) #define UFSHCI_UECDME_REG_ERR_SHIFT (31) #define UFSHCI_UECDME_REG_ERR_MASK (0x1) /* * UFSHCI 4.1, section 5.4.1, Offset 50h: UTRLBA * UTP Transfer Request List Base Address */ #define UFSHCI_UTRLBA_REG_UTRLBA_SHIFT (0) #define UFSHCI_UTRLBA_REG_UTRLBA_MASK (0xFFFFFFFF) /* * UFSHCI 4.1, section 5.4.2, Offset 54h: UTRLBAU * UTP Transfer Request List Base Address Upper 32-bits */ #define UFSHCI_UTRLBAU_REG_UTRLBAU_SHIFT (0) #define UFSHCI_UTRLBAU_REG_UTRLBAU_MASK (0xFFFFFFFF) /* * UFSHCI 4.1, section 5.4.3, Offset 58h: UTRLDBR * UTP Transfer Request List Door Bell Register */ #define UFSHCI_UTRLDBR_REG_UTRLDBR_SHIFT (0) #define UFSHCI_UTRLDBR_REG_UTRLDBR_MASK (0xFFFFFFFF) /* * UFSHCI 4.1, section 5.4.4, Offset 5Ch: UTRLCLR * UTP Transfer Request List Clear Register */ #define UFSHCI_UTRLCLR_REG_UTRLCLR_SHIFT (0) #define UFSHCI_UTRLCLR_REG_UTRLCLR_MASK (0xFFFFFFFF) /* * UFSHCI 4.1, section 5.4.5, Offset 60h: UTRLRSR * UTP Transfer Request List Run Stop Register */ #define UFSHCI_UTRLRSR_REG_UTRLRSR_SHIFT (0) #define UFSHCI_UTRLRSR_REG_UTRLRSR_MASK (0x1) /* * UFSHCI 4.1, section 5.4.6, Offset 64h: UTRLCNR * UTP Transfer Request List Completion Notification Register */ #define UFSHCI_UTRLCNR_REG_UTRLCNR_SHIFT (0) #define UFSHCI_UTRLCNR_REG_UTRLCNR_MASK (0xFFFFFFFF) /* * UFSHCI 4.1, section 5.5.1, Offset 70h: UTMRLBA * UTP Task Management Request List Base Address */ #define UFSHCI_UTMRLBA_REG_UTMRLBA_SHIFT (0) #define UFSHCI_UTMRLBA_REG_UTMRLBA_MASK (0xFFFFFFFF) /* * UFSHCI 4.1, section 5.5.2, Offset 74h: UTMRLBAU * UTP Task Management Request List Base Address Upper 32-bits */ #define UFSHCI_UTMRLBAU_REG_UTMRLBAU_SHIFT (0) #define UFSHCI_UTMRLBAU_REG_UTMRLBAU_MASK (0xFFFFFFFF) /* * UFSHCI 4.1, section 5.5.3, Offset 78h: UTMRLDBR * UTP Task Management Request List Door Bell Register */ #define UFSHCI_UTMRLDBR_REG_UTMRLDBR_SHIFT (0) #define UFSHCI_UTMRLDBR_REG_UTMRLDBR_MASK (0xFF) /* * UFSHCI 4.1, section 5.5.4, Offset 7Ch: UTMRLCLR * UTP Task Management Request List CLear Register */ #define UFSHCI_UTMRLCLR_REG_UTMRLCLR_SHIFT (0) #define UFSHCI_UTMRLCLR_REG_UTMRLCLR_MASK (0xFF) /* * UFSHCI 4.1, section 5.5.5, Offset 80h: UTMRLRSR * UTP Task Management Request List Run Stop Register */ #define UFSHCI_UTMRLRSR_REG_UTMRLRSR_SHIFT (0) #define UFSHCI_UTMRLRSR_REG_UTMRLRSR_MASK (0xFF) /* * UFSHCI 4.1, section 5.6.1 * Offset 90h: UICCMD – UIC Command */ #define UFSHCI_UICCMD_REG_CMDOP_SHIFT (0) #define UFSHCI_UICCMD_REG_CMDOP_MASK (0xFF) /* * UFSHCI 4.1, section 5.6.2 * Offset 94h: UICCMDARG1 – UIC Command Argument 1 */ #define UFSHCI_UICCMDARG1_REG_ARG1_SHIFT (0) #define UFSHCI_UICCMDARG1_REG_ARG1_MASK (0xFFFFFFFF) #define UFSHCI_UICCMDARG1_REG_GEN_SELECTOR_INDEX_SHIFT (0) #define UFSHCI_UICCMDARG1_REG_GEN_SELECTOR_INDEX_MASK (0xFFFF) #define UFSHCI_UICCMDARG1_REG_MIB_ATTR_SHIFT (16) #define UFSHCI_UICCMDARG1_REG_MIB_ATTR_MASK (0xFFFF) /* * UFSHCI 4.1, section 5.6.3 * Offset 98h: UICCMDARG2 – UIC Command Argument 2 */ #define UFSHCI_UICCMDARG2_REG_ARG2_SHIFT (0) #define UFSHCI_UICCMDARG2_REG_ARG2_MASK (0xFFFFFFFF) #define UFSHCI_UICCMDARG2_REG_ERROR_CODE_SHIFT (0) #define UFSHCI_UICCMDARG2_REG_ERROR_CODE_MASK (0xFF) 
#define UFSHCI_UICCMDARG2_REG_ATTR_SET_TYPE_SHIFT (16) #define UFSHCI_UICCMDARG2_REG_ATTR_SET_TYPE_MASK (0xFF) /* * UFSHCI 4.1, section 5.6.4 * Offset 9Ch: UICCMDARG3 – UIC Command Argument 3 */ #define UFSHCI_UICCMDARG3_REG_ARG3_SHIFT (0) #define UFSHCI_UICCMDARG3_REG_ARG3_MASK (0xFFFFFFFF) /* Helper macro to combine *_MASK and *_SHIFT defines */ #define UFSHCIM(name) (name##_MASK << name##_SHIFT) /* Helper macro to extract value from x */ #define UFSHCIV(name, x) (((x) >> name##_SHIFT) & name##_MASK) /* Helper macro to construct a field value */ #define UFSHCIF(name, x) (((x)&name##_MASK) << name##_SHIFT) #define UFSHCI_DUMP_REG(ctrlr, member) \ do { \ uint32_t _val = ufshci_mmio_read_4(ctrlr, member); \ ufshci_printf(ctrlr, " %-15s (0x%03lx) : 0x%08x\n", #member, \ ufshci_mmio_offsetof(member), _val); \ } while (0) #endif /* __UFSHCI_REG_H__ */ diff --git a/sys/dev/ufshci/ufshci_sysctl.c b/sys/dev/ufshci/ufshci_sysctl.c index 5e5069f12e5f..56bc06b13f3c 100644 --- a/sys/dev/ufshci/ufshci_sysctl.c +++ b/sys/dev/ufshci/ufshci_sysctl.c @@ -1,233 +1,253 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. * Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #include #include #include #include #include "ufshci_private.h" static int ufshci_sysctl_timeout_period(SYSCTL_HANDLER_ARGS) { uint32_t *ptr = arg1; uint32_t newval = *ptr; int error = sysctl_handle_int(oidp, &newval, 0, req); if (error || (req->newptr == NULL)) return (error); if (newval > UFSHCI_MAX_TIMEOUT_PERIOD || newval < UFSHCI_MIN_TIMEOUT_PERIOD) { return (EINVAL); } else { *ptr = newval; } return (0); } static int ufshci_sysctl_num_cmds(SYSCTL_HANDLER_ARGS) { struct ufshci_controller *ctrlr = arg1; int64_t num_cmds = 0; int i; num_cmds = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_cmds; if (ctrlr->transfer_req_queue.hwq != NULL) { for (i = 0; i < ctrlr->num_io_queues; i++) num_cmds += ctrlr->transfer_req_queue.hwq[i].num_cmds; } return (sysctl_handle_64(oidp, &num_cmds, 0, req)); } static int ufshci_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS) { struct ufshci_controller *ctrlr = arg1; int64_t num_intr_handler_calls = 0; int i; num_intr_handler_calls = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_intr_handler_calls; if (ctrlr->transfer_req_queue.hwq != NULL) { for (i = 0; i < ctrlr->num_io_queues; i++) num_intr_handler_calls += ctrlr->transfer_req_queue .hwq[i] .num_intr_handler_calls; } return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req)); } static int ufshci_sysctl_num_retries(SYSCTL_HANDLER_ARGS) { struct ufshci_controller *ctrlr = arg1; int64_t num_retries = 0; int i; num_retries = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_retries; if (ctrlr->transfer_req_queue.hwq != NULL) { for (i = 0; i < ctrlr->num_io_queues; i++) num_retries += ctrlr->transfer_req_queue.hwq[i].num_retries; } return (sysctl_handle_64(oidp, &num_retries, 0, req)); } static int ufshci_sysctl_num_failures(SYSCTL_HANDLER_ARGS) { struct ufshci_controller *ctrlr = arg1; int64_t num_failures = 0; int i; num_failures = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_failures; if (ctrlr->transfer_req_queue.hwq != NULL) { for (i = 0; i < ctrlr->num_io_queues; i++) num_failures += ctrlr->transfer_req_queue.hwq[i].num_failures; } return (sysctl_handle_64(oidp, &num_failures, 0, req)); } static void ufshci_sysctl_initialize_queue(struct ufshci_hw_queue *hwq, struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree) { struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree); SYSCTL_ADD_UINT(ctrlr_ctx, 
que_list, OID_AUTO, "num_entries", CTLFLAG_RD, &hwq->num_entries, 0, "Number of entries in hardware queue"); SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers", CTLFLAG_RD, &hwq->num_trackers, 0, "Number of trackers pre-allocated for this queue pair"); SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head", CTLFLAG_RD, &hwq->sq_head, 0, "Current head of submission queue (as observed by driver)"); SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail", CTLFLAG_RD, &hwq->sq_tail, 0, "Current tail of submission queue (as observed by driver)"); SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head", CTLFLAG_RD, &hwq->cq_head, 0, "Current head of completion queue (as observed by driver)"); SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds", CTLFLAG_RD, &hwq->num_cmds, "Number of commands submitted"); SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls", CTLFLAG_RD, &hwq->num_intr_handler_calls, "Number of times interrupt handler was invoked (will typically be " "less than number of actual interrupts generated due to " "interrupt aggregation)"); SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_retries", CTLFLAG_RD, &hwq->num_retries, "Number of commands retried"); SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_failures", CTLFLAG_RD, &hwq->num_failures, "Number of commands ending in failure after all retries"); /* TODO: Implement num_ignored */ /* TODO: Implement recovery state */ /* TODO: Implement dump debug */ } void ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr) { struct sysctl_ctx_list *ctrlr_ctx; struct sysctl_oid *ctrlr_tree, *que_tree, *ioq_tree; struct sysctl_oid_list *ctrlr_list, *ioq_list; + struct ufshci_device *dev = &ctrlr->ufs_dev; #define QUEUE_NAME_LENGTH 16 char queue_name[QUEUE_NAME_LENGTH]; int i; ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev); ctrlr_tree = device_get_sysctl_tree(ctrlr->dev); ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "major_version", CTLFLAG_RD, &ctrlr->major_version, 0, "UFS spec major version"); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "minor_version", CTLFLAG_RD, &ctrlr->minor_version, 0, "UFS spec minor version"); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "io_queue_mode", CTLFLAG_RD, &ctrlr->transfer_req_queue.queue_mode, 0, "Active host-side queuing scheme " "(Single-Doorbell or Multi-Circular-Queue)"); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_io_queues", CTLFLAG_RD, &ctrlr->num_io_queues, 0, "Number of I/O queue pairs"); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap", CTLFLAG_RD, &ctrlr->cap, 0, "Host controller capabilities (CAP register)"); + SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_enabled", + CTLFLAG_RD, &dev->is_wb_enabled, 0, "WriteBooster enable/disable"); + + SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_flush_enabled", + CTLFLAG_RD, &dev->is_wb_flush_enabled, 0, + "WriteBooster flush enable/disable"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_type", + CTLFLAG_RD, &dev->wb_buffer_type, 0, "WriteBooster buffer type"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_size_mb", + CTLFLAG_RD, &dev->wb_buffer_size_mb, 0, + "WriteBooster buffer size in MB"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, + "wb_user_space_config_option", CTLFLAG_RD, + &dev->wb_user_space_config_option, 0, + "WriteBooster preserve user space mode"); + SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "timeout_period", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ctrlr->timeout_period, 
0, ufshci_sysctl_timeout_period, "IU", "Timeout period for I/O queues (in seconds)"); SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_cmds", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0, ufshci_sysctl_num_cmds, "IU", "Number of commands submitted"); SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_intr_handler_calls", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0, ufshci_sysctl_num_intr_handler_calls, "IU", "Number of times interrupt handler was invoked (will " "typically be less than number of actual interrupts " "generated due to coalescing)"); SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_retries", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0, ufshci_sysctl_num_retries, "IU", "Number of commands retried"); SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_failures", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0, ufshci_sysctl_num_failures, "IU", "Number of commands ending in failure after all retries"); que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "utmrq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "UTP Task Management Request Queue"); ufshci_sysctl_initialize_queue( &ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q], ctrlr_ctx, que_tree); /* * Make sure that we've constructed the I/O queues before setting up the * sysctls. Failed controllers won't allocate them, but we want the rest * of the sysctls to diagnose things. */ if (ctrlr->transfer_req_queue.hwq != NULL) { ioq_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "ioq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "UTP Transfer Request Queue (I/O Queue)"); ioq_list = SYSCTL_CHILDREN(ioq_tree); for (i = 0; i < ctrlr->num_io_queues; i++) { snprintf(queue_name, QUEUE_NAME_LENGTH, "%d", i); que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ioq_list, OID_AUTO, queue_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "IO Queue"); ufshci_sysctl_initialize_queue( &ctrlr->transfer_req_queue.hwq[i], ctrlr_ctx, que_tree); } } } diff --git a/sys/dev/ufshci/ufshci_uic_cmd.c b/sys/dev/ufshci/ufshci_uic_cmd.c index 2c5f635dc11e..b9c867ff7065 100644 --- a/sys/dev/ufshci/ufshci_uic_cmd.c +++ b/sys/dev/ufshci/ufshci_uic_cmd.c @@ -1,224 +1,241 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. * Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #include #include #include #include "ufshci_private.h" #include "ufshci_reg.h" int ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr) { - uint32_t is; + uint32_t is, hcs; int timeout; /* Wait for the IS flag to change */ timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms); while (1) { is = ufshci_mmio_read_4(ctrlr, is); if (UFSHCIV(UFSHCI_IS_REG_UPMS, is)) { ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UPMS)); break; } if (timeout - ticks < 0) { ufshci_printf(ctrlr, "Power mode was not changed " "within %d ms\n", ctrlr->device_init_timeout_in_ms); return (ENXIO); } /* TODO: Replace busy-wait with interrupt-based pause. */ DELAY(10); } + /* Check HCS power mode change request status */ + hcs = ufshci_mmio_read_4(ctrlr, hcs); + if (UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs) != 0x01) { + ufshci_printf(ctrlr, + "Power mode change request status error: 0x%x\n", + UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs)); + return (ENXIO); + } + return (0); } 
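For reference, the UPMCRS field checked above reports the result of the most recent power mode change request (UFSHCI 4.1, section 5.3.4, HCS register), and the code accepts only 0x01, i.e. a successfully completed local request. The enum below is an illustrative transcription of those codes; the driver itself does not define these names:

enum ufshci_hcs_upmcrs {
	UFSHCI_UPMCRS_PWR_OK = 0x0,		/* no request in progress */
	UFSHCI_UPMCRS_PWR_LOCAL = 0x1,		/* local request succeeded */
	UFSHCI_UPMCRS_PWR_REMOTE = 0x2,		/* remote request succeeded */
	UFSHCI_UPMCRS_PWR_BUSY = 0x3,		/* request still in progress */
	UFSHCI_UPMCRS_PWR_ERROR_CAP = 0x4,	/* capability mismatch */
	UFSHCI_UPMCRS_PWR_FATAL_ERROR = 0x5,	/* fatal link error */
};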
int ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr) { uint32_t hcs; int timeout; /* Wait for the HCS flag to change */ timeout = ticks + MSEC_2_TICKS(ctrlr->uic_cmd_timeout_in_ms); while (1) { hcs = ufshci_mmio_read_4(ctrlr, hcs); if (UFSHCIV(UFSHCI_HCS_REG_UCRDY, hcs)) break; if (timeout - ticks < 0) { ufshci_printf(ctrlr, "UIC command was not ready " "within %d ms\n", ctrlr->uic_cmd_timeout_in_ms); return (ENXIO); } /* TODO: Replace busy-wait with interrupt-based pause. */ DELAY(10); } return (0); } static int ufshci_uic_wait_cmd(struct ufshci_controller *ctrlr, struct ufshci_uic_cmd *uic_cmd) { uint32_t is; int timeout; mtx_assert(&ctrlr->uic_cmd_lock, MA_OWNED); /* Wait for the IS flag to change */ timeout = ticks + MSEC_2_TICKS(ctrlr->uic_cmd_timeout_in_ms); int delta = 10; while (1) { is = ufshci_mmio_read_4(ctrlr, is); if (UFSHCIV(UFSHCI_IS_REG_UCCS, is)) { ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UCCS)); break; } if (timeout - ticks < 0) { ufshci_printf(ctrlr, "UIC command was not completed " "within %d ms\n", ctrlr->uic_cmd_timeout_in_ms); return (ENXIO); } DELAY(delta); delta = min(1000, delta * 2); } return (0); } static int ufshci_uic_send_cmd(struct ufshci_controller *ctrlr, struct ufshci_uic_cmd *uic_cmd, uint32_t *return_value) { int error; + uint32_t config_result_code; mtx_lock(&ctrlr->uic_cmd_lock); error = ufshci_uic_cmd_ready(ctrlr); if (error) { mtx_unlock(&ctrlr->uic_cmd_lock); return (ENXIO); } ufshci_mmio_write_4(ctrlr, ucmdarg1, uic_cmd->argument1); ufshci_mmio_write_4(ctrlr, ucmdarg2, uic_cmd->argument2); ufshci_mmio_write_4(ctrlr, ucmdarg3, uic_cmd->argument3); ufshci_mmio_write_4(ctrlr, uiccmd, uic_cmd->opcode); error = ufshci_uic_wait_cmd(ctrlr, uic_cmd); mtx_unlock(&ctrlr->uic_cmd_lock); if (error) return (ENXIO); + config_result_code = ufshci_mmio_read_4(ctrlr, ucmdarg2); + if (config_result_code) { + ufshci_printf(ctrlr, + "Failed to send UIC command. (config result code = 0x%x)\n", + config_result_code); + } + if (return_value != NULL) *return_value = ufshci_mmio_read_4(ctrlr, ucmdarg3); return (0); } int ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_LINK_STARTUP; uic_cmd.argument1 = 0; uic_cmd.argument2 = 0; uic_cmd.argument3 = 0; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL)); } int ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t *return_value) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_GET; uic_cmd.argument1 = attribute << 16; uic_cmd.argument2 = 0; uic_cmd.argument3 = 0; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, return_value)); } int ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t value) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_SET; uic_cmd.argument1 = attribute << 16; /* This driver always sets only volatile values. 
*/ uic_cmd.argument2 = UFSHCI_ATTR_SET_TYPE_NORMAL << 16; uic_cmd.argument3 = value; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL)); } int ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t *return_value) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_PEER_GET; uic_cmd.argument1 = attribute << 16; uic_cmd.argument2 = 0; uic_cmd.argument3 = 0; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, return_value)); } int ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr, uint16_t attribute, uint32_t value) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_PEER_SET; uic_cmd.argument1 = attribute << 16; /* This driver always sets only volatile values. */ uic_cmd.argument2 = UFSHCI_ATTR_SET_TYPE_NORMAL << 16; uic_cmd.argument3 = value; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL)); } int ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr) { struct ufshci_uic_cmd uic_cmd; uic_cmd.opcode = UFSHCI_DME_ENDPOINT_RESET; uic_cmd.argument1 = 0; uic_cmd.argument2 = 0; uic_cmd.argument3 = 0; return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL)); }
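As a usage note for the UFSHCIM()/UFSHCIV() helpers this file leans on: UFSHCIM() builds a positioned mask (for writing or clearing a field) and UFSHCIV() extracts a field from a register snapshot. A small self-contained illustration with a made-up HCS value; the function and value are illustrative only and not part of the driver (assumes ufshci_reg.h and the kernel KASSERT() macro are in scope):

static __unused void
ufshci_reg_macro_example(void)
{
	uint32_t hcs = 0x108;	/* hypothetical: UCRDY (bit 3) set, UPMCRS = 1 */

	KASSERT(UFSHCIV(UFSHCI_HCS_REG_UCRDY, hcs) == 1, ("UCRDY decode"));
	KASSERT(UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs) == 1, ("UPMCRS decode"));
	KASSERT(UFSHCIM(UFSHCI_IS_REG_UPMS) == (1u << 4), ("IS.UPMS mask"));
}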