diff --git a/sys/dev/ufshci/ufshci.h b/sys/dev/ufshci/ufshci.h
index 188f8c41def1..766d8de0535b 100644
--- a/sys/dev/ufshci/ufshci.h
+++ b/sys/dev/ufshci/ufshci.h
@@ -1,1088 +1,1096 @@
/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#ifndef __UFSHCI_H__
#define __UFSHCI_H__

#include
#include

/*
 * Note: This driver currently assumes a little-endian architecture.
 * Big-endian support is not yet implemented.
 */

/* MIPI UniPro spec 2.0, section 5.8.1 "PHY Adapter Common Attributes" */
#define PA_AvailTxDataLanes 0x1520
#define PA_AvailRxDataLanes 0x1540

/*
 * MIPI UniPro spec 2.0, section 5.8.2 "PHY Adapter M-PHY-Specific
 * Attributes"
 */
#define PA_ConnectedTxDataLanes 0x1561
#define PA_ConnectedRxDataLanes 0x1581
#define PA_MaxRxHSGear 0x1587
#define PA_Granularity 0x15AA
#define PA_TActivate 0x15A8
#define PA_RemoteVerInfo 0x15A0
#define PA_LocalVerInfo 0x15A9

/* UFSHCI spec 4.1, section 7.4 "UIC Power Mode Change" */
#define PA_ActiveTxDataLanes 0x1560
#define PA_ActiveRxDataLanes 0x1580
#define PA_TxGear 0x1568
#define PA_RxGear 0x1583
#define PA_TxTermination 0x1569
#define PA_RxTermination 0x1584
#define PA_HSSeries 0x156A
#define PA_PWRModeUserData0 0x15B0
#define PA_PWRModeUserData1 0x15B1
#define PA_PWRModeUserData2 0x15B2
#define PA_PWRModeUserData3 0x15B3
#define PA_PWRModeUserData4 0x15B4
#define PA_PWRModeUserData5 0x15B5
#define PA_TxHsAdaptType 0x15D4
#define PA_PWRMode 0x1571

#define DME_LocalFC0ProtectionTimeOutVal 0xD041
#define DME_LocalTC0ReplayTimeOutVal 0xD042
#define DME_LocalAFC0ReqTimeOutVal 0xD043

/* Currently, UFS uses TC0 only. */
#define DL_FC0ProtectionTimeOutVal_Default 8191
#define DL_TC0ReplayTimeOutVal_Default 65535
#define DL_AFC0ReqTimeOutVal_Default 32767

/* UFS Spec 4.1, section 6.4 "Reference Clock" */
enum ufshci_attribute_reference_clock {
	UFSHCI_REF_CLK_19_2MHz = 0x0,
	UFSHCI_REF_CLK_26MHz = 0x1,
	UFSHCI_REF_CLK_38_4MHz = 0x2,
	UFSHCI_REF_CLK_OBSOLETE = 0x3,
};

/* UFS spec 4.1, section 9 "UFS UIC Layer: MIPI Unipro" */
enum ufshci_uic_cmd_opcode {
	/* Configuration */
	UFSHCI_DME_GET = 0x01,
	UFSHCI_DME_SET = 0x02,
	UFSHCI_DME_PEER_GET = 0x03,
	UFSHCI_DME_PEER_SET = 0x04,
	/* Control */
	UFSHCI_DME_POWER_ON = 0x10,
	UFSHCI_DME_POWER_OFF = 0x11,
	UFSHCI_DME_ENABLE = 0x12,
	UFSHCI_DME_RESET = 0x14,
	UFSHCI_DME_ENDPOINT_RESET = 0x15,
	UFSHCI_DME_LINK_STARTUP = 0x16,
	UFSHCI_DME_HIBERNATE_ENTER = 0x17,
	UFSHCI_DME_HIBERNATE_EXIT = 0x18,
	UFSHCI_DME_TEST_MODE = 0x1a,
};

/* UFSHCI spec 4.1, section 5.6.3 "Offset 98h: UICCMDARG2 – UIC Command
 * Argument" */
enum ufshci_uic_cmd_attr_set_type {
	UFSHCI_ATTR_SET_TYPE_NORMAL = 0, /* volatile value */
	UFSHCI_ATTR_SET_TYPE_STATIC = 1, /* non-volatile reset value */
};

struct ufshci_uic_cmd {
	uint8_t opcode;
	uint32_t argument1;
	uint32_t argument2;
	uint32_t argument3;
};

/* UFS spec 4.1, section 10.5 "UPIU Transactions" */
enum transaction_code {
	UFSHCI_UPIU_TRANSACTION_CODE_NOP_OUT = 0x00,
	UFSHCI_UPIU_TRANSACTION_CODE_COMMAND = 0x01,
	UFSHCI_UPIU_TRANSACTION_CODE_DATA_OUT = 0x02,
	UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_REQUEST = 0x04,
	UFSHCI_UPIU_TRANSACTION_CODE_QUERY_REQUEST = 0x16,
	UFSHCI_UPIU_TRANSACTION_CODE_NOP_IN = 0x20,
	UFSHCI_UPIU_TRANSACTION_CODE_RESPONSE = 0x21,
	UFSHCI_UPIU_TRANSACTION_CODE_DATA_IN = 0x22,
	UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_RESPONSE = 0x24,
	UFSHCI_UPIU_TRANSACTION_CODE_READY_TO_TRANSFER = 0x31,
	UFSHCI_UPIU_TRANSACTION_CODE_QUERY_RESPONSE = 0x36,
	UFSHCI_UPIU_TRANSACTION_CODE_REJECT_UPIU = 0x3f,
};

enum overall_command_status {
	UFSHCI_DESC_SUCCESS = 0x0,
	UFSHCI_DESC_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
	UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES = 0x02,
	UFSHCI_DESC_MISMATCH_DATA_BUFFER_SIZE = 0x03,
	UFSHCI_DESC_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
	UFSHCI_DESC_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
	UFSHCI_DESC_ABORTED = 0x06,
	UFSHCI_DESC_HOST_CONTROLLER_FATAL_ERROR = 0x07,
	UFSHCI_DESC_DEVICEFATALERROR = 0x08,
	UFSHCI_DESC_INVALID_CRYPTO_CONFIGURATION = 0x09,
	UFSHCI_DESC_GENERAL_CRYPTO_ERROR = 0x0A,
	UFSHCI_DESC_INVALID = 0x0F,
};

enum response_code {
	UFSHCI_RESPONSE_CODE_TARGET_SUCCESS = 0x00,
	UFSHCI_RESPONSE_CODE_TARGET_FAILURE = 0x01,
	UFSHCI_RESPONSE_CODE_PARAMETER_NOTREADABLE = 0xF6,
	UFSHCI_RESPONSE_CODE_PARAMETER_NOTWRITEABLE = 0xF7,
	UFSHCI_RESPONSE_CODE_PARAMETER_ALREADYWRITTEN = 0xF8,
	UFSHCI_RESPONSE_CODE_INVALID_LENGTH = 0xF9,
	UFSHCI_RESPONSE_CODE_INVALID_VALUE = 0xFA,
	UFSHCI_RESPONSE_CODE_INVALID_SELECTOR = 0xFB,
	UFSHCI_RESPONSE_CODE_INVALID_INDEX = 0xFC,
	UFSHCI_RESPONSE_CODE_INVALID_IDN = 0xFD,
	UFSHCI_RESPONSE_CODE_INVALID_OPCODE = 0xFE,
	UFSHCI_RESPONSE_CODE_GENERAL_FAILURE = 0xFF,
};

/* UFSHCI spec 4.1, section 6.1.1 "UTP Transfer Request Descriptor" */
enum ufshci_command_type {
	UFSHCI_COMMAND_TYPE_UFS_STORAGE = 0x01,
	UFSHCI_COMMAND_TYPE_NULLIFIED_UTRD = 0x0F,
};

enum ufshci_data_direction {
	UFSHCI_DATA_DIRECTION_NO_DATA_TRANSFER = 0x00,
	UFSHCI_DATA_DIRECTION_FROM_SYS_TO_TGT = 0x01,
	UFSHCI_DATA_DIRECTION_FROM_TGT_TO_SYS = 0x10,
	UFSHCI_DATA_DIRECTION_RESERVED = 0b11,
};

enum ufshci_utr_overall_command_status {
	UFSHCI_UTR_OCS_SUCCESS = 0x0,
	UFSHCI_UTR_OCS_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
	UFSHCI_UTR_OCS_INVALID_PRDT_ATTRIBUTES = 0x02,
	UFSHCI_UTR_OCS_MISMATCH_DATA_BUFFER_SIZE = 0x03,
	UFSHCI_UTR_OCS_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
	UFSHCI_UTR_OCS_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
	UFSHCI_UTR_OCS_ABORTED = 0x06,
	UFSHCI_UTR_OCS_HOST_CONTROLLER_FATAL_ERROR = 0x07,
	UFSHCI_UTR_OCS_DEVICE_FATAL_ERROR = 0x08,
	UFSHCI_UTR_OCS_INVALID_CRYPTO_CONFIGURATION = 0x09,
	UFSHCI_UTR_OCS_GENERAL_CRYPTO_ERROR = 0x0A,
	UFSHCI_UTR_OCS_INVALID = 0xF,
};

struct ufshci_utp_xfer_req_desc {
	/* dword 0 */
	uint32_t cci : 8;		/* [7:0] */
	uint32_t total_ehs_length : 8;	/* [15:8] */
	uint32_t reserved0 : 7;		/* [22:16] */
	uint32_t ce : 1;		/* [23] */
	uint32_t interrupt : 1;		/* [24] */
	uint32_t data_direction : 2;	/* [26:25] */
	uint32_t reserved1 : 1;		/* [27] */
	uint32_t command_type : 4;	/* [31:28] */

	/* dword 1 */
	uint32_t data_unit_number_lower;	/* [31:0] */

	/* dword 2 */
	uint8_t overall_command_status;	/* [7:0] */
	uint8_t common_data_size;	/* [15:8] */
	uint16_t last_data_byte_count;	/* [31:16] */

	/* dword 3 */
	uint32_t data_unit_number_upper;	/* [31:0] */

	/* dword 4 */
	uint32_t utp_command_descriptor_base_address;	/* [31:0] */

	/* dword 5 */
	uint32_t utp_command_descriptor_base_address_upper;	/* [31:0] */

	/* dword 6 */
	uint16_t response_upiu_length;	/* [15:0] */
	uint16_t response_upiu_offset;	/* [31:16] */

	/* dword 7 */
	uint16_t prdt_length;	/* [15:0] */
	uint16_t prdt_offset;	/* [31:16] */
} __packed __aligned(8);

_Static_assert(sizeof(struct ufshci_utp_xfer_req_desc) == 32,
    "ufshci_utp_xfer_req_desc must be 32 bytes");

/*
 * According to the UFSHCI specification, the size of the UTP command
 * descriptor is as follows: the size of the transfer request is not limited,
 * a transfer response can be as long as 65535 dwords, and a PRDT can be as
 * long as 65535 * PRDT entry size (16 bytes). However, for ease of use, this
 * UFSHCI driver imposes the following limits. The size of the transfer
 * request and the transfer response is 1024 bytes or less. The PRDT region
 * limits the number of scatter/gather entries to 256 + 1, using a total of
 * 4096 + 16 bytes. Therefore, only 8KB (512 + 512 + 4112 = 5136 bytes,
 * padded up to 8192) is allocated for the UTP command descriptor.
 */
#define UFSHCI_UTP_COMMAND_DESCRIPTOR_SIZE 8192
#define UFSHCI_UTP_XFER_REQ_SIZE 512
#define UFSHCI_UTP_XFER_RESP_SIZE 512

/*
 * To reduce the size of the UTP Command Descriptor (8KB), we must use only
 * 256 + 1 PRDT entries. The extra entry is needed because a data buffer that
 * is not aligned consumes one additional PRDT entry.
 */
#define UFSHCI_MAX_PRDT_ENTRY_COUNT (256 + 1)

/* UFSHCI spec 4.1, section 6.1.2 "UTP Command Descriptor" */
struct ufshci_prdt_entry {
	/* dword 0 */
	uint32_t data_base_address;	/* [31:0] */

	/* dword 1 */
	uint32_t data_base_address_upper;	/* [31:0] */

	/* dword 2 */
	uint32_t reserved;	/* [31:0] */

	/* dword 3 */
	uint32_t data_byte_count;	/* [17:0] Maximum byte
					 * count is 256KB */
} __packed __aligned(8);

_Static_assert(sizeof(struct ufshci_prdt_entry) == 16,
    "ufshci_prdt_entry must be 16 bytes");

struct ufshci_utp_cmd_desc {
	uint8_t command_upiu[UFSHCI_UTP_XFER_REQ_SIZE];
	uint8_t response_upiu[UFSHCI_UTP_XFER_RESP_SIZE];
	uint8_t prd_table[sizeof(struct ufshci_prdt_entry) *
	    UFSHCI_MAX_PRDT_ENTRY_COUNT];
	uint8_t padding[3072 - sizeof(struct ufshci_prdt_entry)];
} __packed __aligned(128);

_Static_assert(sizeof(struct ufshci_utp_cmd_desc) ==
	UFSHCI_UTP_COMMAND_DESCRIPTOR_SIZE,
    "ufshci_utp_cmd_desc must be 8192 bytes");

#define UFSHCI_UTP_TASK_MGMT_REQ_SIZE 32
#define UFSHCI_UTP_TASK_MGMT_RESP_SIZE 32

enum ufshci_utmr_overall_command_status {
	UFSHCI_UTMR_OCS_SUCCESS = 0x0,
	UFSHCI_UTMR_OCS_INVALID_TASK_MANAGEMENT_FUNCTION_ATTRIBUTES = 0x01,
	UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_REQUEST_SIZE = 0x02,
	UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_RESPONSE_SIZE = 0x03,
	UFSHCI_UTMR_OCS_PEER_COMMUNICATION_FAILURE = 0x04,
	UFSHCI_UTMR_OCS_ABORTED = 0x05,
	UFSHCI_UTMR_OCS_FATAL_ERROR = 0x06,
	UFSHCI_UTMR_OCS_DEVICE_FATAL_ERROR = 0x07,
	UFSHCI_UTMR_OCS_INVALID = 0xF,
};

/* UFSHCI spec 4.1, section 6.3.1 "UTP Task Management Request Descriptor" */
struct ufshci_utp_task_mgmt_req_desc {
	/* dword 0 */
	uint32_t reserved0 : 24;	/* [23:0] */
	uint32_t interrupt : 1;		/* [24] */
	uint32_t reserved1 : 7;		/* [31:25] */

	/* dword 1 */
	uint32_t reserved2;	/* [31:0] */

	/* dword 2 */
	uint8_t overall_command_status;	/* [7:0] */
	uint8_t reserved3;		/* [15:8] */
	uint16_t reserved4;		/* [31:16] */

	/* dword 3 */
	uint32_t reserved5;	/* [31:0] */

	/* dword 4-11 */
	uint8_t request_upiu[UFSHCI_UTP_TASK_MGMT_REQ_SIZE];

	/* dword 12-19 */
	uint8_t response_upiu[UFSHCI_UTP_TASK_MGMT_RESP_SIZE];
} __packed __aligned(8);

_Static_assert(sizeof(struct ufshci_utp_task_mgmt_req_desc) == 80,
    "ufshci_utp_task_mgmt_req_desc must be 80 bytes");

/* UFS spec 4.1, section 10.6.2 "Basic Header Format" */
struct ufshci_upiu_header {
	/* dword 0 */
	union {
		struct {
			uint8_t trans_code : 6;	/* [5:0] */
			uint8_t dd : 1;		/* [6] */
			uint8_t hd : 1;		/* [7] */
		};
		uint8_t trans_type;
	};
	union {
		struct {
			uint8_t task_attribute : 2;		/* [1:0] */
			uint8_t cp : 1;				/* [2] */
			uint8_t retransmit_indicator : 1;	/* [3] */
#define UFSHCI_OPERATIONAL_FLAG_W 0x2
#define UFSHCI_OPERATIONAL_FLAG_R 0x4
			uint8_t operational_flags : 4;	/* [7:4] */
		};
		uint8_t flags;
	};
	uint8_t lun;
#define UFSHCI_UPIU_UNIT_NUMBER_ID_MASK 0x7f
#define UFSHCI_UPIU_WLUN_ID_MASK 0x80
	uint8_t task_tag;

	/* dword 1 */
#define UFSHCI_COMMAND_SET_TYPE_SCSI 0
	uint8_t cmd_set_type : 4;	/* [3:0] */
	uint8_t iid : 4;		/* [7:4] */
	uint8_t
ext_iid_or_function; uint8_t response; uint8_t ext_iid_or_status; /* dword 2 */ uint8_t ehs_length; uint8_t device_infomation; uint16_t data_segment_length; /* (Big-endian) */ } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_upiu_header) == 12, "ufshci_upiu_header must be 12 bytes"); #define UFSHCI_MAX_UPIU_SIZE 512 #define UFSHCI_UPIU_ALIGNMENT 8 /* UPIU requires 64-bit alignment. */ struct ufshci_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3-127 */ uint8_t reserved[UFSHCI_MAX_UPIU_SIZE - sizeof(struct ufshci_upiu_header)]; } __packed __aligned(8); _Static_assert(sizeof(struct ufshci_upiu) == 512, "ufshci_upiu must be 512 bytes"); /* UFS Spec 4.1, section 10.7.1 "COMMAND UPIU" */ struct ufshci_cmd_command_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3 */ uint32_t expected_data_transfer_length; /* (Big-endian) */ /* dword 4-7 */ uint8_t cdb[16]; } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_cmd_command_upiu) == 32, "bad size for ufshci_cmd_command_upiu"); _Static_assert(sizeof(struct ufshci_cmd_command_upiu) <= UFSHCI_UTP_XFER_REQ_SIZE, "bad size for ufshci_cmd_command_upiu"); _Static_assert(sizeof(struct ufshci_cmd_command_upiu) % UFSHCI_UPIU_ALIGNMENT == 0, "UPIU requires 64-bit alignment"); /* UFS Spec 4.1, section 10.7.2 "RESPONSE UPIU" */ struct ufshci_cmd_response_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3 */ uint32_t residual_transfer_count; /* (Big-endian) */ /* dword 4-7 */ uint8_t reserved[16]; /* Sense Data */ uint16_t sense_data_len; /* (Big-endian) */ uint8_t sense_data[18]; /* Add padding to align the kUpiuAlignment. */ uint8_t padding[4]; } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_cmd_response_upiu) == 56, "bad size for ufshci_cmd_response_upiu"); _Static_assert(sizeof(struct ufshci_cmd_response_upiu) <= UFSHCI_UTP_XFER_RESP_SIZE, "bad size for ufshci_cmd_response_upiu"); _Static_assert(sizeof(struct ufshci_cmd_response_upiu) % UFSHCI_UPIU_ALIGNMENT == 0, "UPIU requires 64-bit alignment"); enum task_management_function { UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK = 0x01, UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK_SET = 0x02, UFSHCI_TASK_MGMT_FUNCTION_CLEAR_TASK_SET = 0x04, UFSHCI_TASK_MGMT_FUNCTION_LOGICAL_UNIT_RESET = 0x08, UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASK = 0x80, UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASKSET = 0x81, }; /* UFS Spec 4.1, section 10.7.6 "TASK MANAGEMENT REQUEST UPIU" */ struct ufshci_task_mgmt_request_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3 */ uint32_t input_param1; /* (Big-endian) */ /* dword 4 */ uint32_t input_param2; /* (Big-endian) */ /* dword 5 */ uint32_t input_param3; /* (Big-endian) */ /* dword 6-7 */ uint8_t reserved[8]; } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) == 32, "bad size for ufshci_task_mgmt_request_upiu"); _Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) <= UFSHCI_UTP_XFER_RESP_SIZE, "bad size for ufshci_task_mgmt_request_upiu"); _Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) % UFSHCI_UPIU_ALIGNMENT == 0, "UPIU requires 64-bit alignment"); enum task_management_service_response { UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE = 0x00, UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_NOT_SUPPORTED = 0x04, UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_FAILED = 0x05, UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED = 0x08, UFSHCI_TASK_MGMT_SERVICE_RESPONSE_INCORRECT_LUN = 0x09, }; /* UFS Spec 4.1, section 10.7.7 "TASK MANAGEMENT RESPONSE UPIU" */ struct 
ufshci_task_mgmt_response_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3 */ uint32_t output_param1; /* (Big-endian) */ /* dword 4 */ uint32_t output_param2; /* (Big-endian) */ /* dword 5-7 */ uint8_t reserved[12]; } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) == 32, "bad size for ufshci_task_mgmt_response_upiu"); _Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) <= UFSHCI_UTP_XFER_RESP_SIZE, "bad size for ufshci_task_mgmt_response_upiu"); _Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) % UFSHCI_UPIU_ALIGNMENT == 0, "UPIU requires 64-bit alignment"); /* UFS Spec 4.1, section 10.7.8 "QUERY REQUEST UPIU" */ enum ufshci_query_function { UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01, UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81, }; enum ufshci_query_opcode { UFSHCI_QUERY_OPCODE_NOP = 0, UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR, UFSHCI_QUERY_OPCODE_WRITE_DESCRIPTOR, UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE, UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE, UFSHCI_QUERY_OPCODE_READ_FLAG, UFSHCI_QUERY_OPCODE_SET_FLAG, UFSHCI_QUERY_OPCODE_CLEAR_FLAG, UFSHCI_QUERY_OPCODE_TOGGLE_FLAG, }; struct ufshci_query_param { enum ufshci_query_function function; enum ufshci_query_opcode opcode; uint8_t type; uint8_t index; uint8_t selector; uint64_t value; size_t desc_size; }; struct ufshci_query_request_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3 */ uint8_t opcode; uint8_t idn; uint8_t index; uint8_t selector; /* dword 4-5 */ union { /* The Write Attribute opcode uses 64 - bit value. */ uint64_t value_64; /* (Big-endian) */ struct { uint8_t reserved1[2]; uint16_t length; /* (Big-endian) */ uint32_t value_32; /* (Big-endian) */ }; } __packed __aligned(4); /* dword 6 */ uint32_t reserved2; /* dword 7 */ uint32_t reserved3; uint8_t command_data[256]; } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_query_request_upiu) == 288, "bad size for ufshci_query_request_upiu"); _Static_assert(sizeof(struct ufshci_query_request_upiu) <= UFSHCI_UTP_XFER_REQ_SIZE, "bad size for ufshci_query_request_upiu"); _Static_assert(sizeof(struct ufshci_query_request_upiu) % UFSHCI_UPIU_ALIGNMENT == 0, "UPIU requires 64-bit alignment"); /* UFS Spec 4.1, section 10.7.9 "QUERY RESPONSE UPIU" */ enum ufshci_query_response_code { UFSHCI_QUERY_RESP_CODE_SUCCESS = 0x00, UFSHCI_QUERY_RESP_CODE_PARAMETER_NOT_READABLE = 0xf6, UFSHCI_QUERY_RESP_CODE_PARAMETER_NOT_WRITEABLE = 0xf7, UFSHCI_QUERY_RESP_CODE_PARAMETER_ALREADY_WRITTEN = 0xf8, UFSHCI_QUERY_RESP_CODE_INVALID_LENGTH = 0xf9, UFSHCI_QUERY_RESP_CODE_INVALID_VALUE = 0xfa, UFSHCI_QUERY_RESP_CODE_INVALID_SELECTOR = 0xfb, UFSHCI_QUERY_RESP_CODE_INVALID_INDEX = 0xfc, UFSHCI_QUERY_RESP_CODE_INVALID_IDN = 0xfd, UFSHCI_QUERY_RESP_CODE_INVALID_OPCODE = 0xfe, UFSHCI_QUERY_RESP_CODE_GENERAL_FAILURE = 0xff, }; struct ufshci_query_response_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3 */ uint8_t opcode; uint8_t idn; uint8_t index; uint8_t selector; /* dword 4-5 */ union { /* The Read / Write Attribute opcodes use 64 - bit value. 
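		 * (Editor's note: like the other fields marked
		 * big-endian in this file, an assumed consumer converts
		 * on access, e.g. be64toh(value_64) for an attribute
		 * read, or be32toh(value_32) for 4-byte values.)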
*/ uint64_t value_64; /* (Big-endian) */ struct { uint8_t reserved1[2]; uint16_t length; /* (Big-endian) */ union { uint32_t value_32; /* (Big-endian) */ struct { uint8_t reserved2[3]; uint8_t flag_value; }; }; }; } __packed __aligned(4); /* dword 6 */ uint8_t reserved3[4]; /* dword 7 */ uint8_t reserved4[4]; uint8_t command_data[256]; } __packed __aligned(4); _Static_assert(sizeof(struct ufshci_query_response_upiu) == 288, "bad size for ufshci_query_response_upiu"); _Static_assert(sizeof(struct ufshci_query_response_upiu) <= UFSHCI_UTP_XFER_RESP_SIZE, "bad size for ufshci_query_response_upiu"); _Static_assert(sizeof(struct ufshci_query_response_upiu) % UFSHCI_UPIU_ALIGNMENT == 0, "UPIU requires 64-bit alignment"); /* UFS 4.1, section 10.7.11 "NOP OUT UPIU" */ struct ufshci_nop_out_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3-7 */ uint8_t reserved[20]; } __packed __aligned(8); _Static_assert(sizeof(struct ufshci_nop_out_upiu) == 32, "ufshci_upiu_nop_out must be 32 bytes"); /* UFS 4.1, section 10.7.12 "NOP IN UPIU" */ struct ufshci_nop_in_upiu { /* dword 0-2 */ struct ufshci_upiu_header header; /* dword 3-7 */ uint8_t reserved[20]; } __packed __aligned(8); _Static_assert(sizeof(struct ufshci_nop_in_upiu) == 32, "ufshci_upiu_nop_in must be 32 bytes"); union ufshci_reponse_upiu { struct ufshci_upiu_header header; struct ufshci_cmd_response_upiu cmd_response_upiu; struct ufshci_query_response_upiu query_response_upiu; struct ufshci_task_mgmt_response_upiu task_mgmt_response_upiu; struct ufshci_nop_in_upiu nop_in_upiu; }; struct ufshci_completion { union ufshci_reponse_upiu response_upiu; size_t size; }; typedef void (*ufshci_cb_fn_t)(void *, const struct ufshci_completion *, bool); +/* UFS 4.1, section 10.8.5 "Well Known Logical Unit Defined in UFS" */ +enum ufshci_well_known_luns { + UFSHCI_WLUN_REPORT_LUNS = 0x81, + UFSHCI_WLUN_BOOT = 0xb0, + UFSHCI_WLUN_RPMB = 0xc4, + UFSHCI_WLUN_UFS_DEVICE = 0xd0, +}; + /* * UFS Spec 4.1, section 14.1 "UFS Descriptors" * All descriptors use big-endian byte ordering. */ enum ufshci_descriptor_type { UFSHCI_DESC_TYPE_DEVICE = 0x00, UFSHCI_DESC_TYPE_CONFIGURATION = 0x01, UFSHCI_DESC_TYPE_UNIT = 0x02, UFSHCI_DESC_TYPE_INTERCONNECT = 0x04, UFSHCI_DESC_TYPE_STRING = 0x05, UFSHCI_DESC_TYPE_GEOMETRY = 0X07, UFSHCI_DESC_TYPE_POWER = 0x08, UFSHCI_DESC_TYPE_DEVICE_HEALTH = 0x09, UFSHCI_DESC_TYPE_FBO_EXTENSION_SPECIFICATION = 0x0a, }; /* * UFS Spec 4.1, section 14.1.5.2 "Device Descriptor" * DeviceDescriptor use big-endian byte ordering. 
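 *
 * Editor's illustrative sketch (a hypothetical consumer "dev_desc", not
 * code from this driver): multi-byte fields must be converted on access,
 * e.g.
 *
 *	uint16_t spec = be16toh(dev_desc.wSpecVersion);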
*/ struct ufshci_device_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint8_t bDevice; uint8_t bDeviceClass; uint8_t bDeviceSubClass; uint8_t bProtocol; uint8_t bNumberLU; uint8_t bNumberWLU; uint8_t bBootEnable; uint8_t bDescrAccessEn; uint8_t bInitPowerMode; uint8_t bHighPriorityLUN; uint8_t bSecureRemovalType; uint8_t bSecurityLU; uint8_t bBackgroundOpsTermLat; uint8_t bInitActiveICCLevel; /* 0x10 */ uint16_t wSpecVersion; uint16_t wManufactureDate; uint8_t iManufacturerName; uint8_t iProductName; uint8_t iSerialNumber; uint8_t iOemID; uint16_t wManufacturerID; uint8_t bUD0BaseOffset; uint8_t bUDConfigPLength; uint8_t bDeviceRTTCap; uint16_t wPeriodicRTCUpdate; uint8_t bUfsFeaturesSupport; /* 0x20 */ uint8_t bFFUTimeout; uint8_t bQueueDepth; uint16_t wDeviceVersion; uint8_t bNumSecureWPArea; uint32_t dPSAMaxDataSize; uint8_t bPSAStateTimeout; uint8_t iProductRevisionLevel; uint8_t Reserved[5]; /* 0x2a */ /* 0x30 */ uint8_t ReservedUME[16]; /* 0x40 */ uint8_t ReservedHpb[3]; uint8_t Reserved2[12]; uint32_t dExtendedUfsFeaturesSupport; uint8_t bWriteBoosterBufferPreserveUserSpaceEn; uint8_t bWriteBoosterBufferType; uint32_t dNumSharedWriteBoosterBufferAllocUnits; } __packed; _Static_assert(sizeof(struct ufshci_device_descriptor) == 89, "bad size for ufshci_device_descriptor"); /* Defines the bit field of dExtendedUfsFeaturesSupport. */ enum ufshci_desc_wb_ext_ufs_feature { UFSHCI_DESC_EXT_UFS_FEATURE_FFU = (1 << 0), UFSHCI_DESC_EXT_UFS_FEATURE_PSA = (1 << 1), UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LIFE_SPAN = (1 << 2), UFSHCI_DESC_EXT_UFS_FEATURE_REFRESH_OP = (1 << 3), UFSHCI_DESC_EXT_UFS_FEATURE_TOO_HIGH_TEMP = (1 << 4), UFSHCI_DESC_EXT_UFS_FEATURE_TOO_LOW_TEMP = (1 << 5), UFSHCI_DESC_EXT_UFS_FEATURE_EXT_TEMP = (1 << 6), UFSHCI_DESC_EXT_UFS_FEATURE_HPB_SUPPORT = (1 << 7), UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER = (1 << 8), UFSHCI_DESC_EXT_UFS_FEATURE_PERF_THROTTLING = (1 << 9), UFSHCI_DESC_EXT_UFS_FEATURE_ADVANCED_RPMB = (1 << 10), UFSHCI_DESC_EXT_UFS_FEATURE_ZONED_UFS_EXTENSION = (1 << 11), UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LEVEL_EXCEPTION = (1 << 12), UFSHCI_DESC_EXT_UFS_FEATURE_HID = (1 << 13), UFSHCI_DESC_EXT_UFS_FEATURE_BARRIER = (1 << 14), UFSHCI_DESC_EXT_UFS_FEATURE_CLEAR_ERROR_HISTORY = (1 << 15), UFSHCI_DESC_EXT_UFS_FEATURE_EXT_IID = (1 << 16), UFSHCI_DESC_EXT_UFS_FEATURE_FBO = (1 << 17), UFSHCI_DESC_EXT_UFS_FEATURE_FAST_RECOVERY_MODE = (1 << 18), UFSHCI_DESC_EXT_UFS_FEATURE_RPMB_VENDOR_CMD = (1 << 19), }; /* Defines the bit field of bWriteBoosterBufferType. */ enum ufshci_desc_wb_buffer_type { UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED = 0x00, UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED = 0x01, }; /* Defines the bit field of bWriteBoosterBufferPreserveUserSpaceEn. */ enum ufshci_desc_user_space_config { UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION = 0x00, UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE = 0x01, }; /* * UFS Spec 4.1, section 14.1.5.3 "Configuration Descriptor" * ConfigurationDescriptor use big-endian byte ordering. 
*/ struct ufshci_unit_descriptor_configurable_parameters { uint8_t bLUEnable; uint8_t bBootLunID; uint8_t bLUWriteProtect; uint8_t bMemoryType; uint32_t dNumAllocUnits; uint8_t bDataReliability; uint8_t bLogicalBlockSize; uint8_t bProvisioningType; uint16_t wContextCapabilities; union { struct { uint8_t Reserved[3]; uint8_t ReservedHpb[6]; } __packed; uint16_t wZoneBufferAllocUnits; }; uint32_t dLUNumWriteBoosterBufferAllocUnits; } __packed; _Static_assert(sizeof(struct ufshci_unit_descriptor_configurable_parameters) == 27, "bad size for ufshci_unit_descriptor_configurable_parameters"); #define UFSHCI_CONFIGURATION_DESCEIPTOR_LU_NUM 8 struct ufshci_configuration_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint8_t bConfDescContinue; uint8_t bBootEnable; uint8_t bDescrAccessEn; uint8_t bInitPowerMode; uint8_t bHighPriorityLUN; uint8_t bSecureRemovalType; uint8_t bInitActiveICCLevel; uint16_t wPeriodicRTCUpdate; uint8_t Reserved; uint8_t bRPMBRegionEnable; uint8_t bRPMBRegion1Size; uint8_t bRPMBRegion2Size; uint8_t bRPMBRegion3Size; uint8_t bWriteBoosterBufferPreserveUserSpaceEn; uint8_t bWriteBoosterBufferType; uint32_t dNumSharedWriteBoosterBufferAllocUnits; /* 0x16 */ struct ufshci_unit_descriptor_configurable_parameters unit_config_params[UFSHCI_CONFIGURATION_DESCEIPTOR_LU_NUM]; } __packed; _Static_assert(sizeof(struct ufshci_configuration_descriptor) == (22 + 27 * 8), "bad size for ufshci_configuration_descriptor"); /* * UFS Spec 4.1, section 14.1.5.4 "Geometry Descriptor" * GeometryDescriptor use big-endian byte ordering. */ struct ufshci_geometry_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint8_t bMediaTechnology; uint8_t Reserved; uint64_t qTotalRawDeviceCapacity; uint8_t bMaxNumberLU; uint32_t dSegmentSize; /* 0x11 */ uint8_t bAllocationUnitSize; uint8_t bMinAddrBlockSize; uint8_t bOptimalReadBlockSize; uint8_t bOptimalWriteBlockSize; uint8_t bMaxInBufferSize; uint8_t bMaxOutBufferSize; uint8_t bRPMB_ReadWriteSize; uint8_t bDynamicCapacityResourcePolicy; uint8_t bDataOrdering; uint8_t bMaxContexIDNumber; uint8_t bSysDataTagUnitSize; uint8_t bSysDataTagResSize; uint8_t bSupportedSecRTypes; uint16_t wSupportedMemoryTypes; /* 0x20 */ uint32_t dSystemCodeMaxNAllocU; uint16_t wSystemCodeCapAdjFac; uint32_t dNonPersistMaxNAllocU; uint16_t wNonPersistCapAdjFac; uint32_t dEnhanced1MaxNAllocU; /* 0x30 */ uint16_t wEnhanced1CapAdjFac; uint32_t dEnhanced2MaxNAllocU; uint16_t wEnhanced2CapAdjFac; uint32_t dEnhanced3MaxNAllocU; uint16_t wEnhanced3CapAdjFac; uint32_t dEnhanced4MaxNAllocU; /* 0x42 */ uint16_t wEnhanced4CapAdjFac; uint32_t dOptimalLogicalBlockSize; uint8_t ReservedHpb[5]; uint8_t Reserved2[2]; uint32_t dWriteBoosterBufferMaxNAllocUnits; uint8_t bDeviceMaxWriteBoosterLUs; uint8_t bWriteBoosterBufferCapAdjFac; uint8_t bSupportedWriteBoosterBufferUserSpaceReductionTypes; uint8_t bSupportedWriteBoosterBufferTypes; } __packed; _Static_assert(sizeof(struct ufshci_geometry_descriptor) == 87, "bad size for ufshci_geometry_descriptor"); /* * UFS Spec 4.1, section 14.1.5.5 "Unit Descriptor" * UnitDescriptor use big-endian byte ordering. 
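 *
 * Editor's illustrative sketch (hypothetical consumer "ud", based on the
 * UFS 4.1 rule that bLogicalBlockSize holds log2 of the block size): an
 * LU's capacity in bytes would be
 *
 *	uint64_t bytes = be64toh(ud.qLogicalBlockCount) <<
 *	    ud.bLogicalBlockSize;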
*/ struct ufshci_unit_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint8_t bUnitIndex; uint8_t bLUEnable; uint8_t bBootLunID; uint8_t bLUWriteProtect; uint8_t bLUQueueDepth; uint8_t bPSASensitive; uint8_t bMemoryType; uint8_t bDataReliability; uint8_t bLogicalBlockSize; uint64_t qLogicalBlockCount; /* 0x13 */ uint32_t dEraseBlockSize; uint8_t bProvisioningType; uint64_t qPhyMemResourceCount; /* 0x20 */ uint16_t wContextCapabilities; uint8_t bLargeUnitGranularity_M1; uint8_t ReservedHpb[6]; uint32_t dLUNumWriteBoosterBufferAllocUnits; } __packed; _Static_assert(sizeof(struct ufshci_unit_descriptor) == 45, "bad size for ufshci_unit_descriptor"); enum LUWriteProtect { kNoWriteProtect = 0x00, kPowerOnWriteProtect = 0x01, kPermanentWriteProtect = 0x02, }; /* * UFS Spec 4.1, section 14.1.5.6 "RPMB Unit Descriptor" * RpmbUnitDescriptor use big-endian byte ordering. */ struct ufshci_rpmb_unit_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint8_t bUnitIndex; uint8_t bLUEnable; uint8_t bBootLunID; uint8_t bLUWriteProtect; uint8_t bLUQueueDepth; uint8_t bPSASensitive; uint8_t bMemoryType; uint8_t Reserved; uint8_t bLogicalBlockSize; uint64_t qLogicalBlockCount; /* 0x13 */ uint32_t dEraseBlockSize; uint8_t bProvisioningType; uint64_t qPhyMemResourceCount; /* 0x20 */ uint8_t Reserved1[3]; } __packed; _Static_assert(sizeof(struct ufshci_rpmb_unit_descriptor) == 35, "bad size for RpmbUnitDescriptor"); /* * UFS Spec 4.1, section 14.1.5.7 "Power Parameters Descriptor" * PowerParametersDescriptor use big-endian byte ordering. */ struct ufshci_power_parameters_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint16_t wActiveICCLevelsVCC[16]; uint16_t wActiveICCLevelsVCCQ[16]; uint16_t wActiveICCLevelsVCCQ2[16]; } __packed; _Static_assert(sizeof(struct ufshci_power_parameters_descriptor) == 98, "bad size for PowerParametersDescriptor"); /* * UFS Spec 4.1, section 14.1.5.8 "Interconnect Descriptor" * InterconnectDescriptor use big-endian byte ordering. */ struct ufshci_interconnect_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint16_t bcdUniproVersion; uint16_t bcdMphyVersion; } __packed; _Static_assert(sizeof(struct ufshci_interconnect_descriptor) == 6, "bad size for InterconnectDescriptor"); /* * UFS Spec 4.1, section 14.1.5.9-13 "String Descriptor" * StringDescriptor use big-endian byte ordering. */ struct ufshci_string_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint16_t UC[126]; } __packed; _Static_assert(sizeof(struct ufshci_string_descriptor) == 254, "bad size for StringDescriptor"); /* * UFS Spec 4.1, section 14.1.5.14 "Device Health Descriptor" * DeviceHealthDescriptor use big-endian byte ordering. */ struct ufshci_device_healthd_descriptor { uint8_t bLength; uint8_t bDescriptorIDN; uint8_t bPreEOLInfo; uint8_t bDeviceLifeTimeEstA; uint8_t bDeviceLifeTimeEstB; uint8_t VendorPropInfo[32]; uint32_t dRefreshTotalCount; uint32_t dRefreshProgress; } __packed; _Static_assert(sizeof(struct ufshci_device_healthd_descriptor) == 45, "bad size for DeviceHealthDescriptor"); /* * UFS Spec 4.1, section 14.1.5.15 "Vendor Specific Descriptor" * VendorSpecificDescriptor use big-endian byte ordering. 
 */
struct ufshci_vendor_specific_descriptor {
	uint8_t bLength;
	uint8_t bDescriptorIDN;
	uint8_t DATA[254];
} __packed;

_Static_assert(sizeof(struct ufshci_vendor_specific_descriptor) == 256,
    "bad size for VendorSpecificDescriptor");

/* UFS Spec 4.1, section 14.2 "Flags" */
enum ufshci_flags {
	UFSHCI_FLAG_F_RESERVED = 0x00,
	UFSHCI_FLAG_F_DEVICE_INIT = 0x01,
	UFSHCI_FLAG_F_PERMANENT_WP_EN = 0x02,
	UFSHCI_FLAS_F_POWER_ON_WP_EN = 0x03,
	UFSHCI_FLAG_F_BACKGROUND_OPS_EN = 0x04,
	UFSHCI_FLAG_F_DEVICE_LIFE_SPAN_MODE_EN = 0x05,
	UFSHCI_FLAG_F_PURGE_ENABLE = 0x06,
	UFSHCI_FLAG_F_REFRESH_ENABLE = 0x07,
	UFSHCI_FLAG_F_PHY_RESOURCE_REMOVAL = 0x08,
	UFSHCI_FLAG_F_BUSY_RTC = 0x09,
	UFSHCI_FLAG_F_PERMANENTLY_DISABLE_FW_UPDATE = 0x0b,
	UFSHCI_FLAG_F_WRITE_BOOSTER_EN = 0x0e,
	UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN = 0x0f,
	UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE = 0x10,
	UFSHCI_FLAG_F_UNPIN_EN = 0x13,
};

/* UFS Spec 4.1, section 14.3 "Attributes" */
enum ufshci_attributes {
	UFSHCI_ATTR_B_BOOT_LUN_EN = 0x00,
	UFSHCI_ATTR_B_CURRENT_POWER_MODE = 0x02,
	UFSHCI_ATTR_B_ACTIVE_ICC_LEVEL = 0x03,
	UFSHCI_ATTR_B_OUT_OF_ORDER_DATA_EN = 0x04,
	UFSHCI_ATTR_B_BACKGROUND_OP_STATUS = 0x05,
	UFSHCI_ATTR_B_PURGE_STATUS = 0x06,
	UFSHCI_ATTR_B_MAX_DATA_IN_SIZE = 0x07,
	UFSHCI_ATTR_B_MAX_DATA_OUT_SIZE = 0x08,
	UFSHCI_ATTR_D_DYN_CAP_NEEDED = 0x09,
	UFSHCI_ATTR_B_REF_CLK_FREQ = 0x0a,
	UFSHCI_ATTR_B_CONFIG_DESCR_LOCK = 0x0b,
	UFSHCI_ATTR_B_MAX_NUM_OF_RTT = 0x0c,
	UFSHCI_ATTR_W_EXCEPTION_EVENT_CONTROL = 0x0d,
	UFSHCI_ATTR_W_EXCEPTION_EVENT_STATUS = 0x0e,
	UFSHCI_ATTR_D_SECONDS_PASSED = 0x0f,
	UFSHCI_ATTR_W_CONTEXT_CONF = 0x10,
	UFSHCI_ATTR_B_DEVICE_FFU_STATUS = 0x14,
	UFSHCI_ATTR_B_PSA_STATE = 0x15,
	UFSHCI_ATTR_D_PSA_DATA_SIZE = 0x16,
	UFSHCI_ATTR_B_REF_CLK_GATING_WAIT_TIME = 0x17,
	UFSHCI_ATTR_B_DEVICE_CASE_ROUGH_TEMPERAURE = 0x18,
	UFSHCI_ATTR_B_DEVICE_TOO_HIGH_TEMP_BOUNDARY = 0x19,
	UFSHCI_ATTR_B_DEVICE_TOO_LOW_TEMP_BOUNDARY = 0x1a,
	UFSHCI_ATTR_B_THROTTLING_STATUS = 0x1b,
	UFSHCI_ATTR_B_WB_BUFFER_FLUSH_STATUS = 0x1c,
	UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE = 0x1d,
	UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST = 0x1e,
	UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE = 0x1f,
	UFSHCI_ATTR_B_REFRESH_STATUS = 0x2c,
	UFSHCI_ATTR_B_REFRESH_FREQ = 0x2d,
	UFSHCI_ATTR_B_REFRESH_UNIT = 0x2e,
	UFSHCI_ATTR_B_REFRESH_METHOD = 0x2f,
};

/*
 * bAvailableWriteBoosterBufferSize codes (UFS WriteBooster available buffer
 * left %)
 */
enum ufshci_wb_available_buffer_Size {
	UFSHCI_ATTR_WB_AVAILABLE_0 = 0x00,   /* 0% buffer remains */
	UFSHCI_ATTR_WB_AVAILABLE_10 = 0x01,  /* 10% buffer remains */
	UFSHCI_ATTR_WB_AVAILABLE_20 = 0x02,  /* 20% buffer remains */
	UFSHCI_ATTR_WB_AVAILABLE_30 = 0x03,  /* 30% buffer remains */
	UFSHCI_ATTR_WB_AVAILABLE_40 = 0x04,  /* 40% buffer remains */
	UFSHCI_ATTR_WB_AVAILABLE_50 = 0x05,  /* 50% buffer remains */
	UFSHCI_ATTR_WB_AVAILABLE_60 = 0x06,  /* 60% buffer remains */
	UFSHCI_ATTR_WB_AVAILABLE_70 = 0x07,  /* 70% buffer remains */
	UFSHCI_ATTR_WB_AVAILABLE_80 = 0x08,  /* 80% buffer remains */
	UFSHCI_ATTR_WB_AVAILABLE_90 = 0x09,  /* 90% buffer remains */
	UFSHCI_ATTR_WB_AVAILABLE_100 = 0x0A, /* 100% buffer remains */
};

/* bWriteBoosterBufferLifeTimeEst codes (UFS WriteBooster buffer life %) */
enum ufshci_wb_lifetime {
	UFSHCI_ATTR_WB_LIFE_DISABLED = 0x00, /* Info not available */
	UFSHCI_ATTR_WB_LIFE_0_10 = 0x01,     /* 0%–10% used */
	UFSHCI_ATTR_WB_LIFE_10_20 = 0x02,    /* 10%–20% used */
	UFSHCI_ATTR_WB_LIFE_20_30 = 0x03,    /* 20%–30% used */
	UFSHCI_ATTR_WB_LIFE_30_40 = 0x04,    /* 30%–40% used */
	UFSHCI_ATTR_WB_LIFE_40_50 = 0x05,    /* 40%–50% used */
	UFSHCI_ATTR_WB_LIFE_50_60 = 0x06,    /* 50%–60% used */
UFSHCI_ATTR_WB_LIFE_60_70 = 0x07, /* 60%–70% used */ UFSHCI_ATTR_WB_LIFE_70_80 = 0x08, /* 70%–80% used */ UFSHCI_ATTR_WB_LIFE_80_90 = 0x09, /* 80%–90% used */ UFSHCI_ATTR_WB_LIFE_90_100 = 0x0A, /* 90%–100% used */ UFSHCI_ATTR_WB_LIFE_EXCEEDED = 0x0B, /* Exceeded estimated life (treat as WB disabled) */ }; #endif /* __UFSHCI_H__ */ diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c index 992026fd4f4d..7f78e462db72 100644 --- a/sys/dev/ufshci/ufshci_pci.c +++ b/sys/dev/ufshci/ufshci_pci.c @@ -1,262 +1,263 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. * Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #include #include #include #include #include #include #include #include #include #include #include "ufshci_private.h" static int ufshci_pci_probe(device_t); static int ufshci_pci_attach(device_t); static int ufshci_pci_detach(device_t); static int ufshci_pci_setup_interrupts(struct ufshci_controller *ctrlr); static device_method_t ufshci_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ufshci_pci_probe), DEVMETHOD(device_attach, ufshci_pci_attach), DEVMETHOD(device_detach, ufshci_pci_detach), /* TODO: Implement Suspend, Resume */ { 0, 0 } }; static driver_t ufshci_pci_driver = { "ufshci", ufshci_pci_methods, sizeof(struct ufshci_controller), }; DRIVER_MODULE(ufshci, pci, ufshci_pci_driver, 0, 0); static struct _pcsid { uint32_t devid; const char *desc; uint32_t ref_clk; uint32_t quirks; } pci_ids[] = { { 0x131b36, "QEMU UFS Host Controller", UFSHCI_REF_CLK_19_2MHz, UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE | - UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK }, + UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK | + UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS }, { 0x98fa8086, "Intel Lakefield UFS Host Controller", UFSHCI_REF_CLK_19_2MHz, UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE | UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE | UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY }, { 0x54ff8086, "Intel UFS Host Controller", UFSHCI_REF_CLK_19_2MHz }, { 0x00000000, NULL } }; static int ufshci_pci_probe(device_t device) { struct ufshci_controller *ctrlr = device_get_softc(device); uint32_t devid = pci_get_devid(device); struct _pcsid *ep = pci_ids; while (ep->devid && ep->devid != devid) ++ep; if (ep->devid) { ctrlr->quirks = ep->quirks; ctrlr->ref_clk = ep->ref_clk; } if (ep->desc) { device_set_desc(device, ep->desc); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int ufshci_pci_allocate_bar(struct ufshci_controller *ctrlr) { ctrlr->resource_id = PCIR_BAR(0); ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY, &ctrlr->resource_id, RF_ACTIVE); if (ctrlr->resource == NULL) { ufshci_printf(ctrlr, "unable to allocate pci resource\n"); return (ENOMEM); } ctrlr->bus_tag = rman_get_bustag(ctrlr->resource); ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource); ctrlr->regs = (struct ufshci_registers *)ctrlr->bus_handle; return (0); } static int ufshci_pci_attach(device_t dev) { struct ufshci_controller *ctrlr = device_get_softc(dev); int status; ctrlr->dev = dev; status = ufshci_pci_allocate_bar(ctrlr); if (status != 0) goto bad; pci_enable_busmaster(dev); status = ufshci_pci_setup_interrupts(ctrlr); if (status != 0) goto bad; return (ufshci_attach(dev)); bad: if (ctrlr->resource != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id, ctrlr->resource); } if (ctrlr->tag) bus_teardown_intr(dev, ctrlr->res, ctrlr->tag); if (ctrlr->res) bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(ctrlr->res), ctrlr->res); if (ctrlr->msi_count > 0) 
pci_release_msi(dev); return (status); } static int ufshci_pci_detach(device_t dev) { struct ufshci_controller *ctrlr = device_get_softc(dev); int error; error = ufshci_detach(dev); if (ctrlr->msi_count > 0) pci_release_msi(dev); pci_disable_busmaster(dev); return (error); } static int ufshci_pci_setup_shared(struct ufshci_controller *ctrlr, int rid) { int error; ctrlr->num_io_queues = 1; ctrlr->rid = rid; ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ, &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE); if (ctrlr->res == NULL) { ufshci_printf(ctrlr, "unable to allocate shared interrupt\n"); return (ENOMEM); } error = bus_setup_intr(ctrlr->dev, ctrlr->res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, ufshci_ctrlr_shared_handler, ctrlr, &ctrlr->tag); if (error) { ufshci_printf(ctrlr, "unable to setup shared interrupt\n"); return (error); } return (0); } static int ufshci_pci_setup_interrupts(struct ufshci_controller *ctrlr) { device_t dev = ctrlr->dev; int force_intx = 0; int num_io_queues, per_cpu_io_queues, min_cpus_per_ioq; int num_vectors_requested; TUNABLE_INT_FETCH("hw.ufshci.force_intx", &force_intx); if (force_intx) goto intx; if (pci_msix_count(dev) == 0) goto msi; /* * Try to allocate one MSI-X per core for I/O queues, plus one * for admin queue, but accept single shared MSI-X if have to. * Fall back to MSI if can't get any MSI-X. */ /* * TODO: Need to implement MCQ(Multi Circular Queue) * Example: num_io_queues = mp_ncpus; */ num_io_queues = 1; TUNABLE_INT_FETCH("hw.ufshci.num_io_queues", &num_io_queues); if (num_io_queues < 1 || num_io_queues > mp_ncpus) num_io_queues = mp_ncpus; per_cpu_io_queues = 1; TUNABLE_INT_FETCH("hw.ufshci.per_cpu_io_queues", &per_cpu_io_queues); if (per_cpu_io_queues == 0) num_io_queues = 1; min_cpus_per_ioq = smp_threads_per_core; TUNABLE_INT_FETCH("hw.ufshci.min_cpus_per_ioq", &min_cpus_per_ioq); if (min_cpus_per_ioq > 1) { num_io_queues = min(num_io_queues, max(1, mp_ncpus / min_cpus_per_ioq)); } num_io_queues = min(num_io_queues, max(1, pci_msix_count(dev) - 1)); again: if (num_io_queues > vm_ndomains) num_io_queues -= num_io_queues % vm_ndomains; num_vectors_requested = min(num_io_queues + 1, pci_msix_count(dev)); ctrlr->msi_count = num_vectors_requested; if (pci_alloc_msix(dev, &ctrlr->msi_count) != 0) { ufshci_printf(ctrlr, "unable to allocate MSI-X\n"); ctrlr->msi_count = 0; goto msi; } if (ctrlr->msi_count == 1) return (ufshci_pci_setup_shared(ctrlr, 1)); if (ctrlr->msi_count != num_vectors_requested) { pci_release_msi(dev); num_io_queues = ctrlr->msi_count - 1; goto again; } ctrlr->num_io_queues = num_io_queues; return (0); msi: /* * Try to allocate 2 MSIs (admin and I/O queues), but accept single * shared if have to. Fall back to INTx if can't get any MSI. */ ctrlr->msi_count = min(pci_msi_count(dev), 2); if (ctrlr->msi_count > 0) { if (pci_alloc_msi(dev, &ctrlr->msi_count) != 0) { ufshci_printf(ctrlr, "unable to allocate MSI\n"); ctrlr->msi_count = 0; } else if (ctrlr->msi_count == 2) { ctrlr->num_io_queues = 1; return (0); } } intx: return (ufshci_pci_setup_shared(ctrlr, ctrlr->msi_count > 0 ? 1 : 0)); } diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h index ec388c06e248..3cee021880a8 100644 --- a/sys/dev/ufshci/ufshci_private.h +++ b/sys/dev/ufshci/ufshci_private.h @@ -1,570 +1,572 @@ /*- * Copyright (c) 2025, Samsung Electronics Co., Ltd. 
* Written by Jaeyoon Choi * * SPDX-License-Identifier: BSD-2-Clause */ #ifndef __UFSHCI_PRIVATE_H__ #define __UFSHCI_PRIVATE_H__ #ifdef _KERNEL #include #else /* !_KERNEL */ #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ufshci.h" MALLOC_DECLARE(M_UFSHCI); #define UFSHCI_DEVICE_INIT_TIMEOUT_MS (2000) /* in milliseconds */ #define UFSHCI_UIC_CMD_TIMEOUT_MS (500) /* in milliseconds */ #define UFSHCI_DEFAULT_TIMEOUT_PERIOD (10) /* in seconds */ #define UFSHCI_MIN_TIMEOUT_PERIOD (5) /* in seconds */ #define UFSHCI_MAX_TIMEOUT_PERIOD (120) /* in seconds */ #define UFSHCI_DEFAULT_RETRY_COUNT (4) #define UFSHCI_UTR_ENTRIES (32) #define UFSHCI_UTRM_ENTRIES (8) #define UFSHCI_SECTOR_SIZE (512) struct ufshci_controller; struct ufshci_completion_poll_status { struct ufshci_completion cpl; int done; bool error; }; struct ufshci_request { struct ufshci_upiu request_upiu; size_t request_size; size_t response_size; struct memdesc payload; enum ufshci_data_direction data_direction; ufshci_cb_fn_t cb_fn; void *cb_arg; bool is_admin; int32_t retries; bool payload_valid; bool spare[2]; /* Future use */ STAILQ_ENTRY(ufshci_request) stailq; }; enum ufshci_slot_state { UFSHCI_SLOT_STATE_FREE = 0x0, UFSHCI_SLOT_STATE_RESERVED = 0x1, UFSHCI_SLOT_STATE_SCHEDULED = 0x2, UFSHCI_SLOT_STATE_TIMEOUT = 0x3, UFSHCI_SLOT_STATE_NEED_ERROR_HANDLING = 0x4, }; struct ufshci_tracker { TAILQ_ENTRY(ufshci_tracker) tailq; struct ufshci_request *req; struct ufshci_req_queue *req_queue; struct ufshci_hw_queue *hwq; uint8_t slot_num; enum ufshci_slot_state slot_state; size_t response_size; sbintime_t deadline; bus_dmamap_t payload_dma_map; uint64_t payload_addr; struct ufshci_utp_cmd_desc *ucd; bus_addr_t ucd_bus_addr; uint16_t prdt_off; uint16_t prdt_entry_cnt; }; enum ufshci_queue_mode { UFSHCI_Q_MODE_SDB = 0x00, /* Single Doorbell Mode*/ UFSHCI_Q_MODE_MCQ = 0x01, /* Multi-Circular Queue Mode*/ }; /* * UFS uses slot-based Single Doorbell (SDB) mode for request submission by * default and additionally supports Multi-Circular Queue (MCQ) in UFS 4.0. To * minimize duplicated code between SDB and MCQ, mode dependent operations are * extracted into ufshci_qops. 
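 *
 * Editor's illustrative sketch (assumed call flow, inferred from the
 * operation names below; not code lifted from this driver): an SDB-mode
 * submission runs roughly
 *
 *	req_queue->qops.reserve_slot(req_queue, &tr);
 *	... fill in tr->ucd and the UTRD slot ...
 *	req_queue->qops.ring_doorbell(ctrlr, tr);
 *	... later, from the interrupt handler ...
 *	req_queue->qops.process_cpl(req_queue);
 */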
*/ struct ufshci_qops { int (*construct)(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt); void (*destroy)(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); struct ufshci_hw_queue *(*get_hw_queue)( struct ufshci_req_queue *req_queue); int (*enable)(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); void (*disable)(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); int (*reserve_slot)(struct ufshci_req_queue *req_queue, struct ufshci_tracker **tr); int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue, struct ufshci_tracker **tr); void (*ring_doorbell)(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); bool (*is_doorbell_cleared)(struct ufshci_controller *ctrlr, uint8_t slot); void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); bool (*process_cpl)(struct ufshci_req_queue *req_queue); int (*get_inflight_io)(struct ufshci_controller *ctrlr); }; #define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */ enum ufshci_recovery { RECOVERY_NONE = 0, /* Normal operations */ RECOVERY_WAITING, /* waiting for the reset to complete */ }; /* * Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ * (ring buffer) modes. Fields are shared; some such as sq_head, sq_tail and * cq_head are not used in SDB but used in MCQ. */ struct ufshci_hw_queue { struct ufshci_controller *ctrlr; struct ufshci_req_queue *req_queue; uint32_t id; int domain; int cpu; struct callout timer; /* recovery lock */ bool timer_armed; /* recovery lock */ enum ufshci_recovery recovery_state; /* recovery lock */ union { struct ufshci_utp_xfer_req_desc *utrd; struct ufshci_utp_task_mgmt_req_desc *utmrd; }; bus_dma_tag_t dma_tag_queue; bus_dmamap_t queuemem_map; bus_addr_t req_queue_addr; bus_addr_t *ucd_bus_addr; uint32_t num_entries; uint32_t num_trackers; TAILQ_HEAD(, ufshci_tracker) free_tr; TAILQ_HEAD(, ufshci_tracker) outstanding_tr; /* * A Request List using the single doorbell method uses a dedicated * ufshci_tracker, one per slot. */ struct ufshci_tracker **act_tr; uint32_t sq_head; /* MCQ mode */ uint32_t sq_tail; /* MCQ mode */ uint32_t cq_head; /* MCQ mode */ uint32_t phase; int64_t num_cmds; int64_t num_intr_handler_calls; int64_t num_retries; int64_t num_failures; /* * Each lock may be acquired independently. * When both are required, acquire them in this order to avoid * deadlocks. (recovery_lock -> qlock) */ struct mtx_padalign qlock; struct mtx_padalign recovery_lock; }; struct ufshci_req_queue { struct ufshci_controller *ctrlr; int domain; /* * queue_mode: active transfer scheme * UFSHCI_Q_MODE_SDB – legacy single‑doorbell list * UFSHCI_Q_MODE_MCQ – modern multi‑circular queue (UFSHCI 4.0+) */ enum ufshci_queue_mode queue_mode; uint8_t num_q; struct ufshci_hw_queue *hwq; struct ufshci_qops qops; bool is_task_mgmt; uint32_t num_entries; uint32_t num_trackers; /* Shared DMA resource */ struct ufshci_utp_cmd_desc *ucd; bus_dma_tag_t dma_tag_ucd; bus_dma_tag_t dma_tag_payload; bus_dmamap_t ucdmem_map; }; struct ufshci_device { uint32_t max_lun_count; struct ufshci_device_descriptor dev_desc; struct ufshci_geometry_descriptor geo_desc; uint32_t unipro_version; /* WriteBooster */ bool is_wb_enabled; bool is_wb_flush_enabled; uint32_t wb_buffer_type; uint32_t wb_buffer_size_mb; uint32_t wb_user_space_config_option; uint8_t wb_dedicated_lu; uint32_t write_booster_flush_threshold; }; /* * One of these per allocated device. 
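 *
 * Editor's note: the UFSHCI_QUIRK_* macros defined inside the struct
 * below are bit flags, ORed together in the PCI ID table, so a consumer
 * tests them as, e.g.,
 *
 *	if (ctrlr->quirks & UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS)
 *		...;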
*/ struct ufshci_controller { device_t dev; uint32_t quirks; #define UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE \ 1 /* QEMU does not support UIC POWER MODE */ #define UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE \ 2 /* Need an additional 200 ms of PA_TActivate */ #define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \ 4 /* Need to wait 1250us after power mode change */ #define UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY \ 8 /* Need to change the number of lanes before changing HS-GEAR. */ #define UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK \ 16 /* QEMU does not support Task Management Request */ +#define UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS \ + 32 /* QEMU does not support Well known logical units*/ uint32_t ref_clk; struct cam_sim *ufshci_sim; struct cam_path *ufshci_path; struct mtx sc_mtx; uint32_t sc_unit; uint8_t sc_name[16]; struct ufshci_device ufs_dev; bus_space_tag_t bus_tag; bus_space_handle_t bus_handle; int resource_id; struct resource *resource; /* Currently, there is no UFSHCI that supports MSI, MSI-X. */ int msi_count; /* Fields for tracking progress during controller initialization. */ struct intr_config_hook config_hook; struct task reset_task; struct taskqueue *taskqueue; /* For shared legacy interrupt. */ int rid; struct resource *res; void *tag; uint32_t major_version; uint32_t minor_version; uint32_t enable_aborts; uint32_t num_io_queues; uint32_t max_hw_pend_io; /* Maximum logical unit number */ uint32_t max_lun_count; /* Maximum i/o size in bytes */ uint32_t max_xfer_size; /* Controller capacity */ uint32_t cap; /* Page size and log2(page_size) - 12 that we're currently using */ uint32_t page_size; /* Timeout value on device initialization */ uint32_t device_init_timeout_in_ms; /* Timeout value on UIC command */ uint32_t uic_cmd_timeout_in_ms; /* UTMR/UTR queue timeout period in seconds */ uint32_t timeout_period; /* UTMR/UTR queue retry count */ uint32_t retry_count; /* UFS Host Controller Interface Registers */ struct ufshci_registers *regs; /* UFS Transport Protocol Layer (UTP) */ struct ufshci_req_queue task_mgmt_req_queue; struct ufshci_req_queue transfer_req_queue; bool is_single_db_supported; /* 0 = supported */ bool is_mcq_supported; /* 1 = supported */ /* UFS Interconnect Layer (UIC) */ struct mtx uic_cmd_lock; uint32_t unipro_version; uint8_t hs_gear; uint32_t tx_lanes; uint32_t rx_lanes; uint32_t max_rx_hs_gear; uint32_t max_tx_lanes; uint32_t max_rx_lanes; bool is_failed; }; #define ufshci_mmio_offsetof(reg) offsetof(struct ufshci_registers, reg) #define ufshci_mmio_read_4(sc, reg) \ bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \ ufshci_mmio_offsetof(reg)) #define ufshci_mmio_write_4(sc, reg, val) \ bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \ ufshci_mmio_offsetof(reg), val) #define ufshci_printf(ctrlr, fmt, args...) \ device_printf(ctrlr->dev, fmt, ##args) /* UFSHCI */ void ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl, bool error); /* SIM */ int ufshci_sim_attach(struct ufshci_controller *ctrlr); void ufshci_sim_detach(struct ufshci_controller *ctrlr); /* Controller */ int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev); void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev); void ufshci_ctrlr_reset(struct ufshci_controller *ctrlr); /* ctrlr defined as void * to allow use with config_intrhook. 
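 * (Editor's note: this is assumed to mean the hook is registered with
 * config_intrhook_establish() and the argument is cast back to a
 * struct ufshci_controller * inside the function.)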
*/ void ufshci_ctrlr_start_config_hook(void *arg); void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr); int ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr, struct ufshci_request *req); int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr, struct ufshci_request *req); int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr, struct ufshci_request *req); int ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr); void ufshci_reg_dump(struct ufshci_controller *ctrlr); /* Device */ int ufshci_dev_init(struct ufshci_controller *ctrlr); int ufshci_dev_reset(struct ufshci_controller *ctrlr); int ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr); int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr); int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr); int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr); int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr); int ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr); /* Controller Command */ void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun, uint8_t task_tag, uint8_t iid); void ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn, void *cb_arg); void ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn, void *cb_arg, struct ufshci_query_param param); void ufshci_ctrlr_cmd_send_scsi_command(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t *cmd_ptr, uint8_t cmd_len, uint32_t data_len, uint8_t lun, bool is_write); /* Request Queue */ bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue); int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr); int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr); void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr); void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr); void ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr); int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr); void ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr); int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr); void ufshci_req_queue_fail(struct ufshci_controller *ctrlr, struct ufshci_hw_queue *hwq); int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue, struct ufshci_request *req, bool is_admin); void ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr); /* Request Single Doorbell Queue */ int ufshci_req_sdb_construct(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt); void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue( struct ufshci_req_queue *req_queue); void ufshci_req_sdb_disable(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue); int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue, struct ufshci_tracker **tr); void ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); void ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr, struct ufshci_tracker *tr); bool ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr, uint8_t slot); 
bool ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
    uint8_t slot);
void ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr);
void ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr);
bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr);

/* UIC Command */
int ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr);
int ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr);
int ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr);
int ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr,
    uint16_t attribute, uint32_t *return_value);
int ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr,
    uint16_t attribute, uint32_t value);
int ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr,
    uint16_t attribute, uint32_t *return_value);
int ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr,
    uint16_t attribute, uint32_t value);
int ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr);

/* SYSCTL */
void ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr);

int ufshci_attach(device_t dev);
int ufshci_detach(device_t dev);

/*
 * Wait for a command to complete using the ufshci_completion_poll_cb. Used in
 * limited contexts where the caller knows it's OK to block briefly while the
 * command runs. The ISR will run the callback which will set status->done to
 * true, usually within microseconds. If not, then after one second the
 * timeout handler should reset the controller and abort all outstanding
 * requests, including this polled one. If it still has not completed after
 * ten seconds, something is wrong with the driver, and panic is the only way
 * to recover.
 *
 * Most commands using this interface aren't actual I/O to the drive's media,
 * so they complete within a few microseconds. Adaptively spin for one tick to
 * catch the vast majority of these without waiting for a tick plus scheduling
 * delays. Since these are on startup, this drastically reduces startup time.
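 *
 * Editor's illustrative sketch (an assumed caller; not code lifted from
 * this driver):
 *
 *	struct ufshci_completion_poll_status status;
 *
 *	status.done = 0;
 *	ufshci_ctrlr_cmd_send_nop(ctrlr, ufshci_completion_poll_cb,
 *	    &status);
 *	ufshci_completion_poll(&status);
 *	if (status.error)
 *		... handle failure ...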
 */
static __inline void
ufshci_completion_poll(struct ufshci_completion_poll_status *status)
{
	int timeout = ticks + 10 * hz;
	sbintime_t delta_t = SBT_1US;

	while (!atomic_load_acq_int(&status->done)) {
		if (timeout - ticks < 0)
			panic(
			    "UFSHCI polled command failed to complete within 10s.");
		pause_sbt("ufshci_cpl", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);
	}
}

static __inline void
ufshci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
	if (error != 0)
		printf("ufshci_single_map err %d\n", error);

	*bus_addr = seg[0].ds_addr;
}

static __inline struct ufshci_request *
_ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
{
	struct ufshci_request *req;

	KASSERT(how == M_WAITOK || how == M_NOWAIT,
	    ("ufshci_allocate_request: invalid how %d", how));

	req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
	}
	return (req);
}

static __inline struct ufshci_request *
ufshci_allocate_request_vaddr(void *payload, uint32_t payload_size,
    const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
{
	struct ufshci_request *req;

	req = _ufshci_allocate_request(how, cb_fn, cb_arg);
	if (req != NULL) {
		if (payload_size) {
			req->payload = memdesc_vaddr(payload, payload_size);
			req->payload_valid = true;
		}
	}
	return (req);
}

static __inline struct ufshci_request *
ufshci_allocate_request_bio(struct bio *bio, const int how,
    ufshci_cb_fn_t cb_fn, void *cb_arg)
{
	struct ufshci_request *req;

	req = _ufshci_allocate_request(how, cb_fn, cb_arg);
	if (req != NULL) {
		req->payload = memdesc_bio(bio);
		req->payload_valid = true;
	}
	return (req);
}

#define ufshci_free_request(req) free(req, M_UFSHCI)

void ufshci_ctrlr_shared_handler(void *arg);

#endif /* __UFSHCI_PRIVATE_H__ */
diff --git a/sys/dev/ufshci/ufshci_sim.c b/sys/dev/ufshci/ufshci_sim.c
index 1b80df089a46..db2e194b699b 100644
--- a/sys/dev/ufshci/ufshci_sim.c
+++ b/sys/dev/ufshci/ufshci_sim.c
@@ -1,394 +1,398 @@
/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include
#include
#include
#include
#include
#include
#include

#include "ufshci_private.h"

#define sim2ctrlr(sim) ((struct ufshci_controller *)cam_sim_softc(sim))

static void
ufshci_sim_scsiio_done(void *ccb_arg, const struct ufshci_completion *cpl,
    bool error)
{
	const uint8_t *sense_data;
	uint16_t sense_data_max_size;
	uint16_t sense_data_len;
	union ccb *ccb = (union ccb *)ccb_arg;

	/*
	 * Let the periph know the completion, and let it sort out what
	 * it means. Report an error or success based on OCS and UPIU
	 * response code. We also need to copy the sense data so that
	 * CAM can handle it.
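	 * The copy below clamps the device-reported sense length to the
	 * fixed 18-byte sense_data area of the response UPIU, so a bogus
	 * length from the device cannot overrun the source buffer.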
diff --git a/sys/dev/ufshci/ufshci_sim.c b/sys/dev/ufshci/ufshci_sim.c
index 1b80df089a46..db2e194b699b 100644
--- a/sys/dev/ufshci/ufshci_sim.c
+++ b/sys/dev/ufshci/ufshci_sim.c
@@ -1,394 +1,398 @@
/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include
#include
#include
#include
#include
#include
#include

#include "ufshci_private.h"

#define sim2ctrlr(sim) ((struct ufshci_controller *)cam_sim_softc(sim))

static void
ufshci_sim_scsiio_done(void *ccb_arg, const struct ufshci_completion *cpl,
    bool error)
{
	const uint8_t *sense_data;
	uint16_t sense_data_max_size;
	uint16_t sense_data_len;
	union ccb *ccb = (union ccb *)ccb_arg;

	/*
	 * Let the periph know about the completion and let it sort out what
	 * it means. Report error or success based on the OCS and the UPIU
	 * response code. We also need to copy the sense data so that CAM
	 * can handle it.
	 */
	sense_data = cpl->response_upiu.cmd_response_upiu.sense_data;
	sense_data_max_size = sizeof(
	    cpl->response_upiu.cmd_response_upiu.sense_data);
	sense_data_len = be16toh(
	    cpl->response_upiu.cmd_response_upiu.sense_data_len);
	memcpy(&ccb->csio.sense_data, sense_data,
	    min(sense_data_len, sense_data_max_size));

	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if (error) {
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
	} else {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done_direct(ccb);
	}
}

/*
 * Complete the command as an illegal request with an invalid field.
 */
static void
ufshci_sim_illegal_request(union ccb *ccb)
{
	scsi_set_sense_data(&ccb->csio.sense_data,
	    /*sense_format*/ SSD_TYPE_NONE,
	    /*current_error*/ 1,
	    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
	    /*asc*/ 0x24, /* 24h/00h INVALID FIELD IN CDB */
	    /*ascq*/ 0x00,
	    /*extra args*/ SSD_ELEM_NONE);
	ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
	ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID |
	    CAM_DEV_QFRZN;
	xpt_freeze_devq(ccb->ccb_h.path, 1);
	xpt_done(ccb);
}

/*
 * The SCSI LUN format and the UFS UPIU LUN format are different.
 * This function converts the SCSI LUN format to the UFS UPIU LUN format.
 */
static uint8_t
ufshci_sim_translate_scsi_to_ufs_lun(lun_id_t scsi_lun)
{
	const int address_format_offset = 8;
	uint8_t address_format = scsi_lun >> address_format_offset;

	/* Well-known logical unit */
	if (((address_format & RPL_LUNDATA_ATYP_MASK) ==
		RPL_LUNDATA_ATYP_EXTLUN) &&
	    ((address_format & RPL_LUNDATA_EXT_EAM_MASK) ==
		RPL_LUNDATA_EXT_EAM_WK))
		return ((scsi_lun & UFSHCI_UPIU_UNIT_NUMBER_ID_MASK) |
		    UFSHCI_UPIU_WLUN_ID_MASK);

	/* Logical unit */
	return (scsi_lun & UFSHCI_UPIU_UNIT_NUMBER_ID_MASK);
}

static void
ufshchi_sim_scsiio(struct cam_sim *sim, union ccb *ccb)
{
	struct ccb_scsiio *csio = &ccb->csio;
	struct ufshci_request *req;
	void *payload;
	struct ufshci_cmd_command_upiu *upiu;
	uint8_t *cdb;
	uint32_t payload_len;
	bool is_write;
	struct ufshci_controller *ctrlr;
	uint8_t data_direction;
	int error;

	/* The UFS device cannot process these commands. */
	if (csio->cdb_io.cdb_bytes[0] == MODE_SENSE_6 ||
	    csio->cdb_io.cdb_bytes[0] == MODE_SELECT_6 ||
	    csio->cdb_io.cdb_bytes[0] == READ_12 ||
	    csio->cdb_io.cdb_bytes[0] == WRITE_12) {
		ufshci_sim_illegal_request(ccb);
		return;
	}

	ctrlr = sim2ctrlr(sim);
	payload = csio->data_ptr;
	payload_len = csio->dxfer_len;
	is_write = csio->ccb_h.flags & CAM_DIR_OUT;

	/* TODO: Check other data types */
	if ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
		req = ufshci_allocate_request_bio((struct bio *)payload,
		    M_NOWAIT, ufshci_sim_scsiio_done, ccb);
	else
		req = ufshci_allocate_request_vaddr(payload, payload_len,
		    M_NOWAIT, ufshci_sim_scsiio_done, ccb);

	/* An M_NOWAIT allocation can fail; report it to CAM. */
	if (req == NULL) {
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	req->request_size = sizeof(struct ufshci_cmd_command_upiu);
	req->response_size = sizeof(struct ufshci_cmd_response_upiu);

	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		data_direction = UFSHCI_DATA_DIRECTION_FROM_TGT_TO_SYS;
		break;
	case CAM_DIR_OUT:
		data_direction = UFSHCI_DATA_DIRECTION_FROM_SYS_TO_TGT;
		break;
	default:
		data_direction = UFSHCI_DATA_DIRECTION_NO_DATA_TRANSFER;
	}
	req->data_direction = data_direction;

	upiu = (struct ufshci_cmd_command_upiu *)&req->request_upiu;
	memset(upiu, 0, req->request_size);
	upiu->header.trans_type = UFSHCI_UPIU_TRANSACTION_CODE_COMMAND;
	upiu->header.operational_flags = is_write ? UFSHCI_OPERATIONAL_FLAG_W :
	    UFSHCI_OPERATIONAL_FLAG_R;
	upiu->header.lun = ufshci_sim_translate_scsi_to_ufs_lun(
	    csio->ccb_h.target_lun);
	upiu->header.cmd_set_type = UFSHCI_COMMAND_SET_TYPE_SCSI;
	upiu->expected_data_transfer_length = htobe32(payload_len);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		cdb = csio->cdb_io.cdb_ptr;
	else
		cdb = csio->cdb_io.cdb_bytes;

	if (cdb == NULL || csio->cdb_len > sizeof(upiu->cdb)) {
		/* Free the never-submitted request to avoid leaking it. */
		ufshci_free_request(req);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}
	memcpy(upiu->cdb, cdb, csio->cdb_len);

	error = ufshci_ctrlr_submit_io_request(ctrlr, req);
	if (error == EBUSY) {
		ccb->ccb_h.status = CAM_SCSI_BUSY;
		xpt_done(ccb);
		return;
	} else if (error) {
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}
}
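/*
 * Illustrative sketch (not part of this patch): how the LUN translation
 * above behaves, assuming UFSHCI_UPIU_WLUN_ID_MASK is the UFS W-LUN flag
 * (bit 7) and UFSHCI_UPIU_UNIT_NUMBER_ID_MASK selects the unit number; the
 * actual mask values live in the driver headers.
 */
#if 0
	uint8_t lun;

	/* Peripheral addressing: an ordinary LU passes straight through. */
	lun = ufshci_sim_translate_scsi_to_ufs_lun(0x0002); /* -> 0x02 */

	/*
	 * Extended addressing with the well-known EAM (byte 0 = 0xC1):
	 * unit 0x01 maps to the UFS W-LUN encoding, e.g. 0x81 under the
	 * stated mask assumptions.
	 */
	lun = ufshci_sim_translate_scsi_to_ufs_lun(0xC101);
#endif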
static uint32_t
ufshci_link_kBps(struct ufshci_controller *ctrlr)
{
	uint32_t gear = ctrlr->hs_gear;
	uint32_t lanes = ctrlr->rx_lanes;

	/*
	 * Per-lane effective bandwidth (KB/s, SI: 1 KB = 1000 B).
	 * All HS-Gears use 8b/10b line coding, i.e. 80% efficiency.
	 * - KB/s per lane = raw rate (kbps) x 0.8 (8b/10b) / 8 (bits/byte)
	 */
	static const uint32_t kbps_per_lane[] = {
		0,	 /* unused */
		145920,	 /* HS-Gear1 : 1459.2 Mbps */
		291840,	 /* HS-Gear2 : 2918.4 Mbps */
		583680,	 /* HS-Gear3 : 5836.8 Mbps */
		1167360, /* HS-Gear4 : 11673.6 Mbps */
		2334720	 /* HS-Gear5 : 23347.2 Mbps */
	};

	/* Sanity checks */
	if (gear >= nitems(kbps_per_lane))
		gear = 0; /* out of range -> treat as invalid */
	if (lanes == 0 || lanes > 2)
		lanes = 1; /* UFS spec allows 1-2 data lanes */

	return (kbps_per_lane[gear] * lanes);
}

static void
ufshci_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct ufshci_controller *ctrlr = sim2ctrlr(sim);

	if (ctrlr == NULL) {
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/* Perform the requested action */
	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		ufshchi_sim_scsiio(sim, ccb);
		return;
	case XPT_PATH_INQ: {
		struct ccb_pathinq *cpi = &ccb->cpi;
+		uint32_t need_scan_wluns = 0;
+
+		if (!(ctrlr->quirks & UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS))
+			need_scan_wluns = PIM_WLUNS;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
		cpi->target_sprt = 0;
-		cpi->hba_misc = PIM_UNMAPPED | PIM_NO_6_BYTE;
+		cpi->hba_misc = need_scan_wluns | PIM_UNMAPPED | PIM_NO_6_BYTE;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = ctrlr->max_lun_count;
		cpi->async_flags = 0;
		cpi->maxio = ctrlr->max_xfer_size;
		cpi->initiator_id = 1;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "UFSHCI", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->base_transfer_speed = ufshci_link_kBps(ctrlr);
		cpi->transport = XPORT_UFSHCI;
		cpi->transport_version = 1;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC5;
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_RESET_BUS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		if (ufshci_dev_reset(ctrlr))
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		else
			ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_ABORT:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	case XPT_GET_TRAN_SETTINGS: {
		struct ccb_trans_settings *cts;
		struct ccb_trans_settings_ufshci *ufshcix;

		cts = &ccb->cts;
		ufshcix = &cts->xport_specific.ufshci;

		ufshcix->hs_gear = ctrlr->hs_gear;
		ufshcix->tx_lanes = ctrlr->tx_lanes;
		ufshcix->rx_lanes = ctrlr->rx_lanes;
		ufshcix->max_hs_gear = ctrlr->max_rx_hs_gear;
		ufshcix->max_tx_lanes = ctrlr->max_tx_lanes;
		ufshcix->max_rx_lanes = ctrlr->max_rx_lanes;
		ufshcix->valid = CTS_UFSHCI_VALID_LINK;

		cts->transport = XPORT_UFSHCI;
		cts->transport_version = 1;
		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_SPC5;

		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, 1);
		break;
	case XPT_NOOP:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	default:
		printf("invalid ccb=%p func=%#x\n", ccb, ccb->ccb_h.func_code);
		/* Unknown function codes must not be completed as success. */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
	return;
}
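/*
 * Illustrative check (not part of this patch) of the math in
 * ufshci_link_kBps() above, for HS-Gear3 on two lanes; the local variable
 * is hypothetical.
 */
#if 0
	uint32_t speed;

	/*
	 * 5836800 kbps raw * 0.8 (8b/10b) / 8 bits = 583680 KB/s per lane;
	 * two lanes give 1167360 KB/s (~1.17 GB/s).
	 */
	ctrlr->hs_gear = 3;
	ctrlr->rx_lanes = 2;
	speed = ufshci_link_kBps(ctrlr); /* 1167360 */
#endif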
static void
ufshci_cam_poll(struct cam_sim *sim)
{
	struct ufshci_controller *ctrlr = sim2ctrlr(sim);

	ufshci_ctrlr_poll(ctrlr);
}

int
ufshci_sim_attach(struct ufshci_controller *ctrlr)
{
	device_t dev;
	struct cam_devq *devq;
	int max_trans;

	dev = ctrlr->dev;
	max_trans = ctrlr->max_hw_pend_io;
	if ((devq = cam_simq_alloc(max_trans)) == NULL) {
		printf("Failed to allocate a simq\n");
		return (ENOMEM);
	}

	ctrlr->ufshci_sim = cam_sim_alloc(ufshci_cam_action, ufshci_cam_poll,
	    "ufshci", ctrlr, device_get_unit(dev), &ctrlr->sc_mtx, max_trans,
	    max_trans, devq);
	if (ctrlr->ufshci_sim == NULL) {
		printf("Failed to allocate a sim\n");
		cam_simq_free(devq);
		return (ENOMEM);
	}

	mtx_lock(&ctrlr->sc_mtx);
	if (xpt_bus_register(ctrlr->ufshci_sim, ctrlr->dev, 0) !=
	    CAM_SUCCESS) {
		/* cam_sim_free() with free_devq set also frees the devq. */
		cam_sim_free(ctrlr->ufshci_sim, /*free_devq*/ TRUE);
		mtx_unlock(&ctrlr->sc_mtx);
		printf("Failed to create a bus\n");
		return (ENOMEM);
	}

	if (xpt_create_path(&ctrlr->ufshci_path, /*periph*/ NULL,
	    cam_sim_path(ctrlr->ufshci_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(ctrlr->ufshci_sim));
		cam_sim_free(ctrlr->ufshci_sim, /*free_devq*/ TRUE);
		mtx_unlock(&ctrlr->sc_mtx);
		printf("Failed to create a path\n");
		return (ENOMEM);
	}
	mtx_unlock(&ctrlr->sc_mtx);

	return (0);
}

void
ufshci_sim_detach(struct ufshci_controller *ctrlr)
{
	int error;

	if (ctrlr->ufshci_path != NULL) {
		xpt_free_path(ctrlr->ufshci_path);
		ctrlr->ufshci_path = NULL;
	}

	if (ctrlr->ufshci_sim != NULL) {
		error = xpt_bus_deregister(cam_sim_path(ctrlr->ufshci_sim));
		if (error == 0) {
			/* Accessing the softc is not possible after this. */
			ctrlr->ufshci_sim->softc = NULL;
			ufshci_printf(ctrlr,
			    "%s: %s:%d:%d calling cam_sim_free sim %p "
			    "refc %u mtx %p\n",
			    __func__, ctrlr->sc_name,
			    cam_sim_path(ctrlr->ufshci_sim), ctrlr->sc_unit,
			    ctrlr->ufshci_sim, ctrlr->ufshci_sim->refcount,
			    ctrlr->ufshci_sim->mtx);
		} else {
			panic("%s: %s: CAM layer is busy: errno %d\n",
			    __func__, ctrlr->sc_name, error);
		}
		cam_sim_free(ctrlr->ufshci_sim, /*free_devq*/ TRUE);
		ctrlr->ufshci_sim = NULL;
	}
}
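/*
 * Illustrative sketch (not part of this patch): attaching the SIM and opting
 * a controller out of well-known LU scanning. Only the
 * UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS flag comes from this change; the device
 * ID check is hypothetical. When the quirk is not set, XPT_PATH_INQ reports
 * PIM_WLUNS and CAM probes the UFS W-LUNs during the bus scan.
 */
#if 0
	if (pci_get_device(dev) == EXAMPLE_BROKEN_DEVICE_ID) /* hypothetical */
		ctrlr->quirks |= UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS;
	if (ufshci_sim_attach(ctrlr) != 0)
		return (ENOMEM);
#endif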