D50370.id156571.diff

diff --git a/sbin/camcontrol/camcontrol.c b/sbin/camcontrol/camcontrol.c
--- a/sbin/camcontrol/camcontrol.c
+++ b/sbin/camcontrol/camcontrol.c
@@ -5400,6 +5400,19 @@
nvmf_transport_type(nvmf->trtype));
}
}
+ if (cts->transport == XPORT_UFSHCI) {
+ struct ccb_trans_settings_ufshci *ufshci =
+ &cts->xport_specific.ufshci;
+
+ if (ufshci->valid & CTS_UFSHCI_VALID_LINK) {
+ fprintf(stdout, "%sHigh Speed Gear: %d (%d max)\n",
+ pathstr, ufshci->hs_gear, ufshci->max_hs_gear);
+ fprintf(stdout, "%sUnipro TX lanes: %d (%d max)\n", pathstr,
+ ufshci->tx_lanes, ufshci->max_tx_lanes);
+ fprintf(stdout, "%sUnipro RX lanes: %d (%d max)\n", pathstr,
+ ufshci->rx_lanes, ufshci->max_rx_lanes);
+ }
+ }
if (cts->protocol == PROTO_ATA) {
struct ccb_trans_settings_ata *ata=
&cts->proto_specific.ata;
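
For context, this new branch is reached when a consumer issues an
XPT_GET_TRAN_SETTINGS CCB against a device attached to a ufshci SIM. A
minimal userland sketch of that path (illustrative only, built on the
existing libcam API; error handling trimmed):

	#include <fcntl.h>
	#include <err.h>
	#include <stdio.h>
	#include <camlib.h>

	int
	main(int argc, char **argv)
	{
		struct cam_device *dev;		/* argv[1]: e.g. /dev/pass0 */
		union ccb *ccb;

		if ((dev = cam_open_device(argv[1], O_RDWR)) == NULL)
			errx(1, "%s", cam_errbuf);
		ccb = cam_getccb(dev);
		ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
		ccb->cts.type = CTS_TYPE_CURRENT_SETTINGS;
		if (cam_send_ccb(dev, ccb) < 0)
			err(1, "cam_send_ccb");
		if (ccb->cts.transport == XPORT_UFSHCI &&
		    (ccb->cts.xport_specific.ufshci.valid &
		    CTS_UFSHCI_VALID_LINK))
			printf("HS gear %d (max %d), TX/RX lanes %d/%d\n",
			    ccb->cts.xport_specific.ufshci.hs_gear,
			    ccb->cts.xport_specific.ufshci.max_hs_gear,
			    ccb->cts.xport_specific.ufshci.tx_lanes,
			    ccb->cts.xport_specific.ufshci.rx_lanes);
		cam_freeccb(ccb);
		cam_close_device(dev);
		return (0);
	}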
diff --git a/sys/cam/cam_ccb.h b/sys/cam/cam_ccb.h
--- a/sys/cam/cam_ccb.h
+++ b/sys/cam/cam_ccb.h
@@ -298,6 +298,7 @@
XPORT_NVME, /* NVMe over PCIe */
XPORT_MMCSD, /* MMC, SD, SDIO card */
XPORT_NVMF, /* NVMe over Fabrics */
+ XPORT_UFSHCI, /* Universal Flash Storage Host Interface */
} cam_xport;
#define XPORT_IS_NVME(t) ((t) == XPORT_NVME || (t) == XPORT_NVMF)
@@ -1065,6 +1066,24 @@
uint8_t trtype;
};
+struct ccb_trans_settings_ufshci
+{
+ u_int valid; /* Which fields to honor */
+ /*
+ * CTS_UFSHCI_VALID_LINK marks the UniPro link information below
+ * (gear, speed, lanes) as valid.
+ */
+#define CTS_UFSHCI_VALID_LINK 0x01
+ uint32_t speed;
+ uint8_t hs_gear; /* High Speed Gear (G1, G2, G3...) */
+ uint8_t tx_lanes;
+ uint8_t rx_lanes;
+ uint8_t max_hs_gear; /* Maximum HS Gear */
+ uint8_t max_tx_lanes;
+ uint8_t max_rx_lanes;
+};
+
+
#include <cam/mmc/mmc_bus.h>
struct ccb_trans_settings_mmc {
struct mmc_ios ios;
@@ -1138,6 +1157,7 @@
struct ccb_trans_settings_sata sata;
struct ccb_trans_settings_nvme nvme;
struct ccb_trans_settings_nvmf nvmf;
+ struct ccb_trans_settings_ufshci ufshci;
} xport_specific;
};
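
A SIM advertising this transport would fill the new structure from its
XPT_GET_TRAN_SETTINGS handler roughly as follows (an illustrative sketch,
not part of this diff; the ctrlr fields mirror the ones the ufshci driver
keeps in its softc):

	case XPT_GET_TRAN_SETTINGS: {
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ccb_trans_settings_ufshci *ufshci =
		    &cts->xport_specific.ufshci;

		cts->transport = XPORT_UFSHCI;
		ufshci->hs_gear = ctrlr->hs_gear;
		ufshci->max_hs_gear = ctrlr->max_rx_hs_gear;
		ufshci->tx_lanes = ctrlr->tx_lanes;
		ufshci->max_tx_lanes = ctrlr->max_tx_lanes;
		ufshci->rx_lanes = ctrlr->rx_lanes;
		ufshci->max_rx_lanes = ctrlr->max_rx_lanes;
		ufshci->valid = CTS_UFSHCI_VALID_LINK;
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}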
diff --git a/sys/cam/scsi/scsi_xpt.c b/sys/cam/scsi/scsi_xpt.c
--- a/sys/cam/scsi/scsi_xpt.c
+++ b/sys/cam/scsi/scsi_xpt.c
@@ -618,6 +618,7 @@
SCSI_XPT_XPORT(iscsi, ISCSI);
SCSI_XPT_XPORT(srp, SRP);
SCSI_XPT_XPORT(ppb, PPB);
+SCSI_XPT_XPORT(ufshci, UFSHCI);
#undef SCSI_XPT_XPORT
diff --git a/sys/dev/ufshci/ufshci.h b/sys/dev/ufshci/ufshci.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/ufshci/ufshci.h
@@ -0,0 +1,934 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef __UFSHCI_H__
+#define __UFSHCI_H__
+
+#include <sys/param.h>
+#include <sys/endian.h>
+
+/* MIPI UniPro spec 2.0, section 5.8.1 "PHY Adapter Common Attributes" */
+#define PA_AvailTxDataLanes 0x1520
+#define PA_AvailRxDataLanes 0x1540
+
+/*
+ * MIPI UniPro spec 2.0, section 5.8.2 "PHY Adapter M-PHY-Specific
+ * Attributes"
+ */
+#define PA_ConnectedTxDataLanes 0x1561
+#define PA_ConnectedRxDataLanes 0x1581
+#define PA_MaxRxHSGear 0x1587
+#define PA_Granularity 0x15AA
+#define PA_TActivate 0x15A8
+
+#define PA_RemoteVerInfo 0x15A0
+#define PA_LocalVerInfo 0x15A9
+
+/* UFSHCI spec 4.1, section 7.4 "UIC Power Mode Change" */
+#define PA_ActiveTxDataLanes 0x1560
+#define PA_ActiveRxDataLanes 0x1580
+#define PA_TxGear 0x1568
+#define PA_RxGear 0x1583
+#define PA_TxTermination 0x1569
+#define PA_RxTermination 0x1584
+#define PA_HSSeries 0x156A
+#define PA_PWRModeUserData0 0x15B0
+#define PA_PWRModeUserData1 0x15B1
+#define PA_PWRModeUserData2 0x15B2
+#define PA_PWRModeUserData3 0x15B3
+#define PA_PWRModeUserData4 0x15B4
+#define PA_PWRModeUserData5 0x15B5
+
+#define PA_TxHsAdaptType 0x15D4
+#define PA_PWRMode 0x1571
+
+#define DME_LocalFC0ProtectionTimeOutVal 0xD041
+#define DME_LocalTC0ReplayTimeOutVal 0xD042
+#define DME_LocalAFC0ReqTimeOutVal 0xD043
+
+/* Currently, UFS uses TC0 only. */
+#define DL_FC0ProtectionTimeOutVal_Default 8191
+#define DL_TC0ReplayTimeOutVal_Default 65535
+#define DL_AFC0ReqTimeOutVal_Default 32767
+
+/* UFS Spec 4.1, section 6.4 "Reference Clock" */
+enum ufshci_attribute_reference_clock {
+ UFSHCI_REF_CLK_19_2MHz = 0x0,
+ UFSHCI_REF_CLK_26MHz = 0x1,
+ UFSHCI_REF_CLK_38_4MHz = 0x2,
+ UFSHCI_REF_CLK_OBSOLETE = 0x3,
+};
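+
+/*
+ * The selected value is programmed into the bRefClkFreq attribute during
+ * device initialization; see ufshci_dev_init_reference_clock() in
+ * ufshci_dev.c.
+ */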
+
+/* UFS spec 4.1, section 9 "UFS UIC Layer: MIPI Unipro" */
+enum ufshci_uic_cmd_opcode {
+ /* Configuration */
+ UFSHCI_DME_GET = 0x01,
+ UFSHCI_DME_SET = 0x02,
+ UFSHCI_DME_PEER_GET = 0x03,
+ UFSHCI_DME_PEER_SET = 0x04,
+ /* Control */
+ UFSHCI_DME_POWER_ON = 0x10,
+ UFSHCI_DME_POWER_OFF = 0x11,
+ UFSHCI_DME_ENABLE = 0x12,
+ UFSHCI_DME_RESET = 0x14,
+ UFSHCI_DME_ENDPOINT_RESET = 0x15,
+ UFSHCI_DME_LINK_STARTUP = 0x16,
+ UFSHCI_DME_HIBERNATE_ENTER = 0x17,
+ UFSHCI_DME_HIBERNATE_EXIT = 0x18,
+ UFSHCI_DME_TEST_MODE = 0x1a,
+};
+
+/*
+ * UFSHCI spec 4.1, section 5.6.3 "Offset 98h: UICCMDARG2 – UIC Command
+ * Argument"
+ */
+enum ufshci_uic_cmd_attr_set_type {
+ UFSHCI_ATTR_SET_TYPE_NORMAL = 0, /* volatile value */
+ UFSHCI_ATTR_SET_TYPE_STATIC = 1, /* non-volatile reset value */
+};
+
+struct ufshci_uic_cmd {
+ uint8_t opcode;
+ uint32_t argument1;
+ uint32_t argument2;
+ uint32_t argument3;
+};
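+
+/*
+ * For DME_GET/DME_SET, argument1 carries the MIB attribute ID in bits
+ * [31:16] and the GenSelectorIndex in bits [15:0]; for example, reading
+ * PA_TxGear on selector 0 uses argument1 = (PA_TxGear << 16).
+ */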
+
+/* UFS spec 4.1, section 10.5 "UPIU Transactions" */
+enum transaction_code {
+ UFSHCI_UPIU_TRANSACTION_CODE_NOP_OUT = 0x00,
+ UFSHCI_UPIU_TRANSACTION_CODE_COMMAND = 0x01,
+ UFSHCI_UPIU_TRANSACTION_CODE_DATA_OUT = 0x02,
+ UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_REQUEST = 0x04,
+ UFSHCI_UPIU_TRANSACTION_CODE_QUERY_REQUEST = 0x16,
+ UFSHCI_UPIU_TRANSACTION_CODE_NOP_IN = 0x20,
+ UFSHCI_UPIU_TRANSACTION_CODE_RESPONSE = 0x21,
+ UFSHCI_UPIU_TRANSACTION_CODE_DATA_IN = 0x22,
+ UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_RESPONSE = 0x24,
+ UFSHCI_UPIU_TRANSACTION_CODE_READY_TO_TRANSFER = 0x31,
+ UFSHCI_UPIU_TRANSACTION_CODE_QUERY_RESPONSE = 0x36,
+ UFSHCI_UPIU_TRANSACTION_CODE_REJECT_UPIU = 0x3f,
+};
+
+enum overall_command_status {
+ UFSHCI_DESC_SUCCESS = 0x0,
+ UFSHCI_DESC_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
+ UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES = 0x02,
+ UFSHCI_DESC_MISMATCH_DATA_BUFFER_SIZE = 0x03,
+ UFSHCI_DESC_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
+ UFSHCI_DESC_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
+ UFSHCI_DESC_ABORTED = 0x06,
+ UFSHCI_DESC_HOST_CONTROLLER_FATAL_ERROR = 0x07,
+ UFSHCI_DESC_DEVICE_FATAL_ERROR = 0x08,
+ UFSHCI_DESC_INVALID_CRYPTO_CONFIGURATION = 0x09,
+ UFSHCI_DESC_GENERAL_CRYPTO_ERROR = 0x0A,
+ UFSHCI_DESC_INVALID = 0x0F,
+};
+
+enum response_code {
+ UFSHCI_RESPONSE_CODE_TARGET_SUCCESS = 0x00,
+ UFSHCI_RESPONSE_CODE_TARGET_FAILURE = 0x01,
+ UFSHCI_RESPONSE_CODE_PARAMETER_NOTREADABLE = 0xF6,
+ UFSHCI_RESPONSE_CODE_PARAMETER_NOTWRITEABLE = 0xF7,
+ UFSHCI_RESPONSE_CODE_PARAMETER_ALREADYWRITTEN = 0xF8,
+ UFSHCI_RESPONSE_CODE_INVALID_LENGTH = 0xF9,
+ UFSHCI_RESPONSE_CODE_INVALID_VALUE = 0xFA,
+ UFSHCI_RESPONSE_CODE_INVALID_SELECTOR = 0xFB,
+ UFSHCI_RESPONSE_CODE_INVALID_INDEX = 0xFC,
+ UFSHCI_RESPONSE_CODE_INVALID_IDN = 0xFD,
+ UFSHCI_RESPONSE_CODE_INVALID_OPCODE = 0xFE,
+ UFSHCI_RESPONSE_CODE_GENERAL_FAILURE = 0xFF,
+};
+
+/* UFSHCI spec 4.1, section 6.1.1 "UTP Transfer Request Descriptor" */
+enum ufshci_command_type {
+ UFSHCI_COMMAND_TYPE_UFS_STORAGE = 0x01,
+ UFSHCI_COMMAND_TYPE_NULLIFIED_UTRD = 0x0F,
+};
+
+enum ufshci_data_direction {
+ UFSHCI_DATA_DIRECTION_NO_DATA_TRANSFER = 0x00,
+ UFSHCI_DATA_DIRECTION_FROM_SYS_TO_TGT = 0x01,
+ UFSHCI_DATA_DIRECTION_FROM_TGT_TO_SYS = 0x02,
+ UFSHCI_DATA_DIRECTION_RESERVED = 0x03,
+};
+
+enum ufshci_overall_command_status {
+ UFSHCI_OCS_SUCCESS = 0x0,
+ UFSHCI_OCS_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
+ UFSHCI_OCS_INVALID_PRDT_ATTRIBUTES = 0x02,
+ UFSHCI_OCS_MISMATCH_DATA_BUFFER_SIZE = 0x03,
+ UFSHCI_OCS_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
+ UFSHCI_OCS_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
+ UFSHCI_OCS_ABORTED = 0x06,
+ UFSHCI_OCS_HOST_CONTROLLER_FATAL_ERROR = 0x07,
+ UFSHCI_OCS_DEVICE_FATAL_ERROR = 0x08,
+ UFSHCI_OCS_INVALID_CRYPTO_CONFIGURATION = 0x09,
+ UFSHCI_OCS_GENERAL_CRYPTO_ERROR = 0x0A,
+ UFSHCI_OCS_INVALID = 0xF,
+};
+
+struct ufshci_utp_xfer_req_desc {
+ /* dword 0 */
+ uint32_t cci : 8; /* [7:0] */
+ uint32_t total_ehs_length : 8; /* [15:8] */
+ uint32_t reserved0 : 7; /* [22:16] */
+ uint32_t ce : 1; /* [23] */
+ uint32_t interrupt : 1; /* [24] */
+ uint32_t data_direction : 2; /* [26:25] */
+ uint32_t reserved1 : 1; /* [27] */
+ uint32_t command_type : 4; /* [31:28] */
+
+ /* dword 1 */
+ uint32_t data_unit_number_lower; /* [31:0] */
+
+ /* dword 2 */
+ uint8_t overall_command_status; /* [7:0] */
+ uint8_t common_data_size; /* [15:8] */
+ uint16_t last_data_byte_count; /* [31:16] */
+
+ /* dword 3 */
+ uint32_t data_unit_number_upper; /* [31:0] */
+
+ /* dword 4 */
+ uint32_t utp_command_descriptor_base_address; /* [31:0] */
+
+ /* dword 5 */
+ uint32_t utp_command_descriptor_base_address_upper; /* [31:0] */
+
+ /* dword 6 */
+ uint16_t response_upiu_length; /* [15:0] */
+ uint16_t response_upiu_offset; /* [31:16] */
+
+ /* dword 7 */
+ uint16_t prdt_length; /* [15:0] */
+ uint16_t prdt_offset; /* [31:16] */
+} __packed __aligned(8);
+
+_Static_assert(sizeof(struct ufshci_utp_xfer_req_desc) == 32,
+ "ufshci_utp_xfer_req_desc must be 32 bytes");
+
+/*
+ * According to the UFSHCI specification, the UTP command descriptor is
+ * sized as follows: the transfer request size is not limited, a transfer
+ * response can be up to 65535 dwords, and a PRDT can hold up to 65535
+ * entries of the PRDT entry size (16 bytes). However, for ease of use,
+ * this driver imposes tighter limits: the transfer request and the
+ * transfer response are 512 bytes each, and the PRDT region holds at
+ * most 256 + 1 scatter/gather entries, using a total of 4096 + 16 bytes.
+ * Therefore, only 8 KiB is allocated for the UTP command descriptor.
+ */
+#define UFSHCI_UTP_COMMAND_DESCRIPTOR_SIZE 8192
+#define UFSHCI_UTP_XFER_REQ_SIZE 512
+#define UFSHCI_UTP_XFER_RESP_SIZE 512
+
+/*
+ * To keep the UTP command descriptor at 8 KiB, we must use only
+ * 256 + 1 PRDT entries. The extra entry is needed because an unaligned
+ * data buffer spans one additional page and thus one more PRDT entry.
+ */
+#define UFSHCI_MAX_PRDT_ENTRY_COUNT (256 + 1)
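+
+/*
+ * Worked example with 4 KiB pages: 256 entries map 1 MiB of page-aligned
+ * data, and the extra entry covers the additional page touched when the
+ * buffer does not start on a page boundary.
+ */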
+
+/* UFSHCI spec 4.1, section 6.1.2 "UTP Command Descriptor" */
+struct ufshci_prdt_entry {
+ /* dword 0 */
+ uint32_t data_base_address; /* [31:0] */
+
+ /* dword 1 */
+ uint32_t data_base_address_upper; /* [31:0] */
+
+ /* dword 2 */
+ uint32_t reserved; /* [31:0] */
+
+ /* dword 3 */
+ uint32_t data_byte_count; /* [17:0] Maximum byte
+ * count is 256KB */
+} __packed __aligned(8);
+
+_Static_assert(sizeof(struct ufshci_prdt_entry) == 16,
+ "ufshci_prdt_entry must be 16 bytes");
+
+struct ufshci_utp_cmd_desc {
+ uint8_t command_upiu[UFSHCI_UTP_XFER_REQ_SIZE];
+ uint8_t response_upiu[UFSHCI_UTP_XFER_RESP_SIZE];
+ uint8_t prd_table[sizeof(struct ufshci_prdt_entry) *
+ UFSHCI_MAX_PRDT_ENTRY_COUNT];
+ uint8_t padding[3072 - sizeof(struct ufshci_prdt_entry)];
+} __packed __aligned(128);
+
+_Static_assert(sizeof(struct ufshci_utp_cmd_desc) ==
+ UFSHCI_UTP_COMMAND_DESCRIPTOR_SIZE,
+ "ufshci_utp_cmd_desc must be 8192 bytes");
+
+#define UFSHCI_UTP_TASK_MGMT_REQ_SIZE 32
+#define UFSHCI_UTP_TASK_MGMT_RESP_SIZE 32
+
+/* UFSHCI spec 4.1, section 6.3.1 "UTP Task Management Request Descriptor" */
+struct ufshci_utp_task_mgmt_req_desc {
+ /* dword 0 */
+ uint32_t reserved0 : 24; /* [23:0] */
+ uint32_t interrupt : 1; /* [24] */
+ uint32_t reserved1 : 7; /* [31:25] */
+
+ /* dword 1 */
+ uint32_t reserved2; /* [31:0] */
+
+ /* dword 2 */
+ uint8_t overall_command_status; /* [7:0] */
+ uint8_t reserved3; /* [15:8] */
+ uint16_t reserved4; /* [31:16] */
+
+ /* dword 3 */
+ uint32_t reserved5; /* [31:0] */
+
+ /* dword 4-11 */
+ uint8_t request_upiu[UFSHCI_UTP_TASK_MGMT_REQ_SIZE];
+
+ /* dword 12-19 */
+ uint8_t response_upiu[UFSHCI_UTP_TASK_MGMT_RESP_SIZE];
+
+} __packed __aligned(8);
+
+_Static_assert(sizeof(struct ufshci_utp_task_mgmt_req_desc) == 80,
+ "ufshci_utp_task_mgmt_req_desc must be 80 bytes");
+
+/* UFS spec 4.1, section 10.6.2 "Basic Header Format" */
+struct ufshci_upiu_header {
+ /* dword 0 */
+ union {
+ struct {
+ uint8_t trans_code : 6; /* [5:0] */
+ uint8_t dd : 1; /* [6] */
+ uint8_t hd : 1; /* [7] */
+ };
+ uint8_t trans_type;
+ };
+ union {
+ struct {
+ uint8_t task_attribute : 2; /* [1:0] */
+ uint8_t cp : 1; /* [2] */
+ uint8_t retransmit_indicator : 1; /* [3] */
+#define UFSHCI_OPERATIONAL_FLAG_W 0x2
+#define UFSHCI_OPERATIONAL_FLAG_R 0x4
+ uint8_t operational_flags : 4; /* [7:4] */
+ };
+ uint8_t flags;
+ };
+ uint8_t lun;
+ uint8_t task_tag;
+
+ /* dword 1 */
+#define UFSHCI_COMMAND_SET_TYPE_SCSI 0
+ uint8_t cmd_set_type : 4; /* [3:0] */
+ uint8_t iid : 4; /* [7:4] */
+ uint8_t ext_iid_or_function;
+ uint8_t response;
+ uint8_t ext_iid_or_status;
+
+ /* dword 2 */
+ uint8_t ehs_length;
+ uint8_t device_information;
+ uint16_t data_segment_length; /* (Big-endian) */
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_upiu_header) == 12,
+ "ufshci_upiu_header must be 12 bytes");
+
+#define UFSHCI_MAX_UPIU_SIZE 512
+#define UFSHCI_UPIU_ALIGNMENT 8 /* UPIU requires 64-bit alignment. */
+
+struct ufshci_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3-127 */
+ uint8_t
+ reserved[UFSHCI_MAX_UPIU_SIZE - sizeof(struct ufshci_upiu_header)];
+} __packed __aligned(8);
+
+_Static_assert(sizeof(struct ufshci_upiu) == 512,
+ "ufshci_upiu must be 512 bytes");
+
+struct ufshci_cmd_command_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t expected_data_transfer_length; /* (Big-endian) */
+
+ /* dword 4-7 */
+ uint8_t cdb[16];
+
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_cmd_command_upiu) == 32,
+ "bad size for ufshci_cmd_command_upiu");
+_Static_assert(sizeof(struct ufshci_cmd_command_upiu) <=
+ UFSHCI_UTP_XFER_REQ_SIZE,
+ "bad size for ufshci_cmd_command_upiu");
+_Static_assert(sizeof(struct ufshci_cmd_command_upiu) % UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+struct ufshci_cmd_response_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t residual_transfer_count; /* (Big-endian) */
+
+ /* dword 4-7 */
+ uint8_t reserved[16];
+
+ /* Sense Data */
+ uint16_t sense_data_len; /* (Big-endian) */
+ uint8_t sense_data[18];
+
+ /* Pad to UFSHCI_UPIU_ALIGNMENT (64-bit alignment). */
+ uint8_t padding[4];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_cmd_response_upiu) == 56,
+ "bad size for ufshci_cmd_response_upiu");
+_Static_assert(sizeof(struct ufshci_cmd_response_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_cmd_response_upiu");
+_Static_assert(sizeof(struct ufshci_cmd_response_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+/* UFS Spec 4.1, section 10.7.8 "QUERY REQUEST UPIU" */
+enum ufshci_query_function {
+ UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
+ UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
+};
+
+enum ufshci_query_opcode {
+ UFSHCI_QUERY_OPCODE_NOP = 0,
+ UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR,
+ UFSHCI_QUERY_OPCODE_WRITE_DESCRIPTOR,
+ UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE,
+ UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE,
+ UFSHCI_QUERY_OPCODE_READ_FLAG,
+ UFSHCI_QUERY_OPCODE_SET_FLAG,
+ UFSHCI_QUERY_OPCODE_CLEAR_FLAG,
+ UFSHCI_QUERY_OPCODE_TOGGLE_FLAG,
+};
+
+struct ufshci_query_param {
+ enum ufshci_query_function function;
+ enum ufshci_query_opcode opcode;
+ uint8_t type;
+ uint8_t index;
+ uint8_t selector;
+ uint64_t value;
+ size_t desc_size;
+};
+
+struct ufshci_query_request_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint8_t opcode;
+ uint8_t idn;
+ uint8_t index;
+ uint8_t selector;
+
+ /* dword 4-5 */
+ union {
+ /* The Write Attribute opcode uses a 64-bit value. */
+ uint64_t value_64; /* (Big-endian) */
+ struct {
+ uint8_t reserved1[2];
+ uint16_t length; /* (Big-endian) */
+ uint32_t value_32; /* (Big-endian) */
+ };
+ } __packed __aligned(4);
+
+ /* dword 6 */
+ uint32_t reserved2;
+
+ /* dword 7 */
+ uint32_t reserved3;
+
+ uint8_t command_data[256];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_query_request_upiu) == 288,
+ "bad size for ufshci_query_request_upiu");
+_Static_assert(sizeof(struct ufshci_query_request_upiu) <=
+ UFSHCI_UTP_XFER_REQ_SIZE,
+ "bad size for ufshci_query_request_upiu");
+_Static_assert(sizeof(struct ufshci_query_request_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+/* UFS Spec 4.1, section 10.7.9 "QUERY RESPONSE UPIU" */
+enum ufshci_query_response_code {
+ UFSHCI_QUERY_RESP_CODE_SUCCESS = 0x00,
+ UFSHCI_QUERY_RESP_CODE_PARAMETER_NOT_READABLE = 0xf6,
+ UFSHCI_QUERY_RESP_CODE_PARAMETER_NOT_WRITEABLE = 0xf7,
+ UFSHCI_QUERY_RESP_CODE_PARAMETER_ALREADY_WRITTEN = 0xf8,
+ UFSHCI_QUERY_RESP_CODE_INVALID_LENGTH = 0xf9,
+ UFSHCI_QUERY_RESP_CODE_INVALID_VALUE = 0xfa,
+ UFSHCI_QUERY_RESP_CODE_INVALID_SELECTOR = 0xfb,
+ UFSHCI_QUERY_RESP_CODE_INVALID_INDEX = 0xfc,
+ UFSHCI_QUERY_RESP_CODE_INVALID_IDN = 0xfd,
+ UFSHCI_QUERY_RESP_CODE_INVALID_OPCODE = 0xfe,
+ UFSHCI_QUERY_RESP_CODE_GENERAL_FAILURE = 0xff,
+};
+
+struct ufshci_query_response_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint8_t opcode;
+ uint8_t idn;
+ uint8_t index;
+ uint8_t selector;
+
+ /* dword 4-5 */
+ union {
+ /* The Read/Write Attribute opcodes use a 64-bit value. */
+ uint64_t value_64; /* (Big-endian) */
+ struct {
+ uint8_t reserved1[2];
+ uint16_t length; /* (Big-endian) */
+ union {
+ uint32_t value_32; /* (Big-endian) */
+ struct {
+ uint8_t reserved2[3];
+ uint8_t flag_value;
+ };
+ };
+ };
+ } __packed __aligned(4);
+
+ /* dword 6 */
+ uint8_t reserved3[4];
+
+ /* dword 7 */
+ uint8_t reserved4[4];
+
+ uint8_t command_data[256];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_query_response_upiu) == 288,
+ "bad size for ufshci_query_response_upiu");
+_Static_assert(sizeof(struct ufshci_query_response_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_query_response_upiu");
+_Static_assert(sizeof(struct ufshci_query_response_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+/* UFS 4.1, section 10.7.11 "NOP OUT UPIU" */
+struct ufshci_nop_out_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3-7 */
+ uint8_t reserved[20];
+} __packed __aligned(8);
+_Static_assert(sizeof(struct ufshci_nop_out_upiu) == 32,
+ "ufshci_upiu_nop_out must be 32 bytes");
+
+/* UFS 4.1, section 10.7.12 "NOP IN UPIU" */
+struct ufshci_nop_in_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3-7 */
+ uint8_t reserved[20];
+} __packed __aligned(8);
+_Static_assert(sizeof(struct ufshci_nop_in_upiu) == 32,
+ "ufshci_upiu_nop_in must be 32 bytes");
+
+union ufshci_response_upiu {
+ struct ufshci_upiu_header header;
+ struct ufshci_cmd_response_upiu cmd_response_upiu;
+ struct ufshci_query_response_upiu query_response_upiu;
+ struct ufshci_nop_in_upiu nop_in_upiu;
+};
+
+struct ufshci_completion {
+ union ufshci_response_upiu response_upiu;
+ size_t size;
+};
+
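+/* Completion callback: invoked with (cb_arg, response, error). */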
+typedef void (*ufshci_cb_fn_t)(void *, const struct ufshci_completion *, bool);
+
+/*
+ * UFS Spec 4.1, section 14.1 "UFS Descriptors"
+ * All descriptors use big-endian byte ordering.
+ */
+enum ufshci_descriptor_type {
+ UFSHCI_DESC_TYPE_DEVICE = 0x00,
+ UFSHCI_DESC_TYPE_CONFIGURATION = 0x01,
+ UFSHCI_DESC_TYPE_UNIT = 0x02,
+ UFSHCI_DESC_TYPE_INTERCONNECT = 0x04,
+ UFSHCI_DESC_TYPE_STRING = 0x05,
+ UFSHCI_DESC_TYPE_GEOMETRY = 0x07,
+ UFSHCI_DESC_TYPE_POWER = 0x08,
+ UFSHCI_DESC_TYPE_DEVICE_HEALTH = 0x09,
+ UFSHCI_DESC_TYPE_FBO_EXTENSION_SPECIFICATION = 0x0a,
+};
+
+/*
+ * UFS Spec 4.1, section 14.1.5.2 "Device Descriptor"
+ * DeviceDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_device_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t bDevice;
+ uint8_t bDeviceClass;
+ uint8_t bDeviceSubClass;
+ uint8_t bProtocol;
+ uint8_t bNumberLU;
+ uint8_t bNumberWLU;
+ uint8_t bBootEnable;
+ uint8_t bDescrAccessEn;
+ uint8_t bInitPowerMode;
+ uint8_t bHighPriorityLUN;
+ uint8_t bSecureRemovalType;
+ uint8_t bSecurityLU;
+ uint8_t bBackgroundOpsTermLat;
+ uint8_t bInitActiveICCLevel;
+ /* 0x10 */
+ uint16_t wSpecVersion;
+ uint16_t wManufactureDate;
+ uint8_t iManufacturerName;
+ uint8_t iProductName;
+ uint8_t iSerialNumber;
+ uint8_t iOemID;
+ uint16_t wManufacturerID;
+ uint8_t bUD0BaseOffset;
+ uint8_t bUDConfigPLength;
+ uint8_t bDeviceRTTCap;
+ uint16_t wPeriodicRTCUpdate;
+ uint8_t bUfsFeaturesSupport;
+ /* 0x20 */
+ uint8_t bFFUTimeout;
+ uint8_t bQueueDepth;
+ uint16_t wDeviceVersion;
+ uint8_t bNumSecureWPArea;
+ uint32_t dPSAMaxDataSize;
+ uint8_t bPSAStateTimeout;
+ uint8_t iProductRevisionLevel;
+ uint8_t Reserved[5];
+ /* 0x2a */
+ /* 0x30 */
+ uint8_t ReservedUME[16];
+ /* 0x40 */
+ uint8_t ReservedHpb[3];
+ uint8_t Reserved2[12];
+ uint32_t dExtendedUfsFeaturesSupport;
+ uint8_t bWriteBoosterBufferPreserveUserSpaceEn;
+ uint8_t bWriteBoosterBufferType;
+ uint32_t dNumSharedWriteBoosterBufferAllocUnits;
+} __packed;
+
+_Static_assert(sizeof(struct ufshci_device_descriptor) == 89,
+ "bad size for ufshci_device_descriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.3 "Configuration Descriptor"
+ * ConfigurationDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_unit_descriptor_configurable_parameters {
+ uint8_t bLUEnable;
+ uint8_t bBootLunID;
+ uint8_t bLUWriteProtect;
+ uint8_t bMemoryType;
+ uint32_t dNumAllocUnits;
+ uint8_t bDataReliability;
+ uint8_t bLogicalBlockSize;
+ uint8_t bProvisioningType;
+ uint16_t wContextCapabilities;
+ union {
+ struct {
+ uint8_t Reserved[3];
+ uint8_t ReservedHpb[6];
+ } __packed;
+ uint16_t wZoneBufferAllocUnits;
+ };
+ uint32_t dLUNumWriteBoosterBufferAllocUnits;
+} __packed;
+
+_Static_assert(sizeof(struct ufshci_unit_descriptor_configurable_parameters) ==
+ 27,
+ "bad size for ufshci_unit_descriptor_configurable_parameters");
+
+#define UFSHCI_CONFIGURATION_DESCRIPTOR_LU_NUM 8
+
+struct ufshci_configuration_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t bConfDescContinue;
+ uint8_t bBootEnable;
+ uint8_t bDescrAccessEn;
+ uint8_t bInitPowerMode;
+ uint8_t bHighPriorityLUN;
+ uint8_t bSecureRemovalType;
+ uint8_t bInitActiveICCLevel;
+ uint16_t wPeriodicRTCUpdate;
+ uint8_t Reserved;
+ uint8_t bRPMBRegionEnable;
+ uint8_t bRPMBRegion1Size;
+ uint8_t bRPMBRegion2Size;
+ uint8_t bRPMBRegion3Size;
+ uint8_t bWriteBoosterBufferPreserveUserSpaceEn;
+ uint8_t bWriteBoosterBufferType;
+ uint32_t dNumSharedWriteBoosterBufferAllocUnits;
+ /* 0x16 */
+ struct ufshci_unit_descriptor_configurable_parameters
+ unit_config_params[UFSHCI_CONFIGURATION_DESCRIPTOR_LU_NUM];
+} __packed;
+
+_Static_assert(sizeof(struct ufshci_configuration_descriptor) == (22 + 27 * 8),
+ "bad size for ufshci_configuration_descriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.4 "Geometry Descriptor"
+ * GeometryDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_geometry_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t bMediaTechnology;
+ uint8_t Reserved;
+ uint64_t qTotalRawDeviceCapacity;
+ uint8_t bMaxNumberLU;
+ uint32_t dSegmentSize;
+ /* 0x11 */
+ uint8_t bAllocationUnitSize;
+ uint8_t bMinAddrBlockSize;
+ uint8_t bOptimalReadBlockSize;
+ uint8_t bOptimalWriteBlockSize;
+ uint8_t bMaxInBufferSize;
+ uint8_t bMaxOutBufferSize;
+ uint8_t bRPMB_ReadWriteSize;
+ uint8_t bDynamicCapacityResourcePolicy;
+ uint8_t bDataOrdering;
+ uint8_t bMaxContexIDNumber;
+ uint8_t bSysDataTagUnitSize;
+ uint8_t bSysDataTagResSize;
+ uint8_t bSupportedSecRTypes;
+ uint16_t wSupportedMemoryTypes;
+ /* 0x20 */
+ uint32_t dSystemCodeMaxNAllocU;
+ uint16_t wSystemCodeCapAdjFac;
+ uint32_t dNonPersistMaxNAllocU;
+ uint16_t wNonPersistCapAdjFac;
+ uint32_t dEnhanced1MaxNAllocU;
+ /* 0x30 */
+ uint16_t wEnhanced1CapAdjFac;
+ uint32_t dEnhanced2MaxNAllocU;
+ uint16_t wEnhanced2CapAdjFac;
+ uint32_t dEnhanced3MaxNAllocU;
+ uint16_t wEnhanced3CapAdjFac;
+ uint32_t dEnhanced4MaxNAllocU;
+ /* 0x42 */
+ uint16_t wEnhanced4CapAdjFac;
+ uint32_t dOptimalLogicalBlockSize;
+ uint8_t ReservedHpb[5];
+ uint8_t Reserved2[2];
+ uint32_t dWriteBoosterBufferMaxNAllocUnits;
+ uint8_t bDeviceMaxWriteBoosterLUs;
+ uint8_t bWriteBoosterBufferCapAdjFac;
+ uint8_t bSupportedWriteBoosterBufferUserSpaceReductionTypes;
+ uint8_t bSupportedWriteBoosterBufferTypes;
+} __packed;
+
+_Static_assert(sizeof(struct ufshci_geometry_descriptor) == 87,
+ "bad size for ufshci_geometry_descriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.5 "Unit Descriptor"
+ * UnitDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_unit_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t bUnitIndex;
+ uint8_t bLUEnable;
+ uint8_t bBootLunID;
+ uint8_t bLUWriteProtect;
+ uint8_t bLUQueueDepth;
+ uint8_t bPSASensitive;
+ uint8_t bMemoryType;
+ uint8_t bDataReliability;
+ uint8_t bLogicalBlockSize;
+ uint64_t qLogicalBlockCount;
+ /* 0x13 */
+ uint32_t dEraseBlockSize;
+ uint8_t bProvisioningType;
+ uint64_t qPhyMemResourceCount;
+ /* 0x20 */
+ uint16_t wContextCapabilities;
+ uint8_t bLargeUnitGranularity_M1;
+ uint8_t ReservedHpb[6];
+ uint32_t dLUNumWriteBoosterBufferAllocUnits;
+} __packed;
+_Static_assert(sizeof(struct ufshci_unit_descriptor) == 45,
+ "bad size for ufshci_unit_descriptor");
+
+enum LUWriteProtect {
+ kNoWriteProtect = 0x00,
+ kPowerOnWriteProtect = 0x01,
+ kPermanentWriteProtect = 0x02,
+};
+
+/*
+ * UFS Spec 4.1, section 14.1.5.6 "RPMB Unit Descriptor"
+ * RpmbUnitDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_rpmb_unit_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t bUnitIndex;
+ uint8_t bLUEnable;
+ uint8_t bBootLunID;
+ uint8_t bLUWriteProtect;
+ uint8_t bLUQueueDepth;
+ uint8_t bPSASensitive;
+ uint8_t bMemoryType;
+ uint8_t Reserved;
+ uint8_t bLogicalBlockSize;
+ uint64_t qLogicalBlockCount;
+ /* 0x13 */
+ uint32_t dEraseBlockSize;
+ uint8_t bProvisioningType;
+ uint64_t qPhyMemResourceCount;
+ /* 0x20 */
+ uint8_t Reserved1[3];
+} __packed;
+_Static_assert(sizeof(struct ufshci_rpmb_unit_descriptor) == 35,
+ "bad size for RpmbUnitDescriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.7 "Power Parameters Descriptor"
+ * PowerParametersDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_power_parameters_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint16_t wActiveICCLevelsVCC[16];
+ uint16_t wActiveICCLevelsVCCQ[16];
+ uint16_t wActiveICCLevelsVCCQ2[16];
+} __packed;
+_Static_assert(sizeof(struct ufshci_power_parameters_descriptor) == 98,
+ "bad size for PowerParametersDescriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.8 "Interconnect Descriptor"
+ * InterconnectDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_interconnect_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint16_t bcdUniproVersion;
+ uint16_t bcdMphyVersion;
+} __packed;
+_Static_assert(sizeof(struct ufshci_interconnect_descriptor) == 6,
+ "bad size for InterconnectDescriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.9-13 "String Descriptor"
+ * StringDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_string_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint16_t UC[126];
+} __packed;
+_Static_assert(sizeof(struct ufshci_string_descriptor) == 254,
+ "bad size for StringDescriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.14 "Device Health Descriptor"
+ * DeviceHealthDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_device_health_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t bPreEOLInfo;
+ uint8_t bDeviceLifeTimeEstA;
+ uint8_t bDeviceLifeTimeEstB;
+ uint8_t VendorPropInfo[32];
+ uint32_t dRefreshTotalCount;
+ uint32_t dRefreshProgress;
+} __packed;
+_Static_assert(sizeof(struct ufshci_device_health_descriptor) == 45,
+ "bad size for DeviceHealthDescriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.15 "Vendor Specific Descriptor"
+ * VendorSpecificDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_vendor_specific_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t DATA[254];
+} __packed;
+_Static_assert(sizeof(struct ufshci_vendor_specific_descriptor) == 256,
+ "bad size for VendorSpecificDescriptor");
+
+/* UFS Spec 4.1, section 14.2 "Flags" */
+enum ufshci_flags {
+ UFSHCI_FLAG_F_RESERVED = 0x00,
+ UFSHCI_FLAG_F_DEVICE_INIT = 0x01,
+ UFSHCI_FLAG_F_PERMANENT_WP_EN = 0x02,
+ UFSHCI_FLAG_F_POWER_ON_WP_EN = 0x03,
+ UFSHCI_FLAG_F_BACKGROUND_OPS_EN = 0x04,
+ UFSHCI_FLAG_F_DEVICE_LIFE_SPAN_MODE_EN = 0x05,
+ UFSHCI_FLAG_F_PURGE_ENABLE = 0x06,
+ UFSHCI_FLAG_F_REFRESH_ENABLE = 0x07,
+ UFSHCI_FLAG_F_PHY_RESOURCE_REMOVAL = 0x08,
+ UFSHCI_FLAG_F_BUSY_RTC = 0x09,
+ UFSHCI_FLAG_F_PERMANENTLY_DISABLE_FW_UPDATE = 0x0b,
+ UFSHCI_FLAG_F_WRITE_BOOSTER_EN = 0x0e,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN = 0x0f,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE = 0x10,
+ UFSHCI_FLAG_F_UNPIN_EN = 0x13,
+};
+
+/* UFS Spec 4.1, section 14.3 "Attributes" */
+enum ufshci_attributes {
+ UFSHCI_ATTR_B_BOOT_LUN_EN = 0x00,
+ UFSHCI_ATTR_B_CURRENT_POWER_MODE = 0x02,
+ UFSHCI_ATTR_B_ACTIVE_ICC_LEVEL = 0x03,
+ UFSHCI_ATTR_B_OUT_OF_ORDER_DATA_EN = 0x04,
+ UFSHCI_ATTR_B_BACKGROUND_OP_STATUS = 0x05,
+ UFSHCI_ATTR_B_PURGE_STATUS = 0x06,
+ UFSHCI_ATTR_B_MAX_DATA_IN_SIZE = 0x07,
+ UFSHCI_ATTR_B_MAX_DATA_OUT_SIZE = 0x08,
+ UFSHCI_ATTR_D_DYN_CAP_NEEDED = 0x09,
+ UFSHCI_ATTR_B_REF_CLK_FREQ = 0x0a,
+ UFSHCI_ATTR_B_CONFIG_DESCR_LOCK = 0x0b,
+ UFSHCI_ATTR_B_MAX_NUM_OF_RTT = 0x0c,
+ UFSHCI_ATTR_W_EXCEPTION_EVENT_CONTROL = 0x0d,
+ UFSHCI_ATTR_W_EXCEPTION_EVENT_STATUS = 0x0e,
+ UFSHCI_ATTR_D_SECONDS_PASSED = 0x0f,
+ UFSHCI_ATTR_W_CONTEXT_CONF = 0x10,
+ UFSHCI_ATTR_B_DEVICE_FFU_STATUS = 0x14,
+ UFSHCI_ATTR_B_PSA_STATE = 0x15,
+ UFSHCI_ATTR_D_PSA_DATA_SIZE = 0x16,
+ UFSHCI_ATTR_B_REF_CLK_GATING_WAIT_TIME = 0x17,
+ UFSHCI_ATTR_B_DEVICE_CASE_ROUGH_TEMPERATURE = 0x18,
+ UFSHCI_ATTR_B_DEVICE_TOO_HIGH_TEMP_BOUNDARY = 0x19,
+ UFSHCI_ATTR_B_DEVICE_TOO_LOW_TEMP_BOUNDARY = 0x1a,
+ UFSHCI_ATTR_B_THROTTLING_STATUS = 0x1b,
+ UFSHCI_ATTR_B_WB_BUFFER_FLUSH_STATUS = 0x1c,
+ UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE = 0x1d,
+ UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST = 0x1e,
+ UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE = 0x1f,
+ UFSHCI_ATTR_B_REFRESH_STATUS = 0x2c,
+ UFSHCI_ATTR_B_REFRESH_FREQ = 0x2d,
+ UFSHCI_ATTR_B_REFRESH_UNIT = 0x2e,
+ UFSHCI_ATTR_B_REFRESH_METHOD = 0x2f,
+};
+
+#endif /* __UFSHCI_H__ */
diff --git a/sys/dev/ufshci/ufshci.c b/sys/dev/ufshci/ufshci.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/ufshci/ufshci.c
@@ -0,0 +1,76 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/module.h>
+
+#include "ufshci_private.h"
+
+MALLOC_DEFINE(M_UFSHCI, "ufshci", "ufshci(4) memory allocations");
+
+int
+ufshci_attach(device_t dev)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(dev);
+ int status;
+
+ status = ufshci_ctrlr_construct(ctrlr, dev);
+ if (status != 0) {
+ ufshci_ctrlr_destruct(ctrlr, dev);
+ return (status);
+ }
+
+ ctrlr->config_hook.ich_func = ufshci_ctrlr_start_config_hook;
+ ctrlr->config_hook.ich_arg = ctrlr;
+
+ if (config_intrhook_establish(&ctrlr->config_hook) != 0)
+ return (ENOMEM);
+
+ return (0);
+}
+
+int
+ufshci_detach(device_t dev)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(dev);
+
+ config_intrhook_drain(&ctrlr->config_hook);
+
+ ufshci_ctrlr_destruct(ctrlr, dev);
+
+ return (0);
+}
+
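+/*
+ * Completion callback used with struct ufshci_completion_poll_status:
+ * the submitter zeroes status->done, issues a request with this
+ * callback, then spins in ufshci_completion_poll() until the callback
+ * sets done.
+ */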
+void
+ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl,
+ bool error)
+{
+ struct ufshci_completion_poll_status *status = arg;
+
+ /*
+ * Copy status into the argument passed by the caller, so that the
+ * caller can check the status to determine if the request passed
+ * or failed.
+ */
+ memcpy(&status->cpl.response_upiu, &cpl->response_upiu, cpl->size);
+ status->error = error;
+ atomic_store_rel_int(&status->done, 1);
+}
+
+static int
+ufshci_modevent(module_t mod __unused, int type __unused, void *argp __unused)
+{
+ return (0);
+}
+
+static moduledata_t ufshci_mod = { "ufshci", ufshci_modevent, 0 };
+
+DECLARE_MODULE(ufshci, ufshci_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
+MODULE_VERSION(ufshci, 1);
+MODULE_DEPEND(ufshci, cam, 1, 1, 1);
diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_ctrlr.c
@@ -0,0 +1,503 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+
+#include "ufshci_private.h"
+#include "ufshci_reg.h"
+
+static int
+ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
+{
+ int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
+ sbintime_t delta_t = SBT_1US;
+ uint32_t hce;
+
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+
+ /* If UFS host controller is already enabled, disable it. */
+ if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) {
+ hce &= ~UFSHCIM(UFSHCI_HCE_REG_HCE);
+ ufshci_mmio_write_4(ctrlr, hce, hce);
+ }
+
+ /* Enable UFS host controller */
+ hce |= UFSHCIM(UFSHCI_HCE_REG_HCE);
+ ufshci_mmio_write_4(ctrlr, hce, hce);
+
+ /*
+ * During controller initialization the value of the HCE bit is
+ * unstable, so give the controller some time to settle before
+ * reading HCE back.
+ */
+ pause_sbt("ufshci_hce", ustosbt(100), 0, C_PREL(1));
+
+ /* Wait for the HCE flag to change */
+ while (1) {
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+ if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
+ break;
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "host controller failed to enable "
+ "within %d ms\n",
+ ctrlr->device_init_timeout_in_ms);
+ return (ENXIO);
+ }
+
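+ /* Back off exponentially; cap the poll interval at 1 ms. */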
+ pause_sbt("ufshci_hce", delta_t, 0, C_PREL(1));
+ delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ }
+
+ return (0);
+}
+
+int
+ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
+{
+ uint32_t ver, cap, hcs, ie;
+ uint32_t timeout_period, retry_count;
+ int error;
+
+ ctrlr->device_init_timeout_in_ms = UFSHCI_DEVICE_INIT_TIMEOUT_MS;
+ ctrlr->uic_cmd_timeout_in_ms = UFSHCI_UIC_CMD_TIMEOUT_MS;
+ ctrlr->dev = dev;
+ ctrlr->sc_unit = device_get_unit(dev);
+
+ snprintf(ctrlr->sc_name, sizeof(ctrlr->sc_name), "%s",
+ device_get_nameunit(dev));
+
+ mtx_init(&ctrlr->sc_mtx, device_get_nameunit(dev), NULL,
+ MTX_DEF | MTX_RECURSE);
+
+ mtx_init(&ctrlr->uic_cmd_lock, "ufshci ctrlr uic cmd lock", NULL,
+ MTX_DEF);
+
+ ver = ufshci_mmio_read_4(ctrlr, ver);
+ ctrlr->major_version = UFSHCIV(UFSHCI_VER_REG_MJR, ver);
+ ctrlr->minor_version = UFSHCIV(UFSHCI_VER_REG_MNR, ver);
+ ufshci_printf(ctrlr, "UFSHCI Version: %d.%d\n", ctrlr->major_version,
+ ctrlr->minor_version);
+
+ /* Read Device Capabilities */
+ ctrlr->cap = cap = ufshci_mmio_read_4(ctrlr, cap);
+ ctrlr->is_single_db_supported = UFSHCIV(UFSHCI_CAP_REG_LSDBS, cap);
+ /*
+ * TODO: This driver does not yet support multi-queue.
+ * Check the UFSHCI_CAP_REG_MCQS bit in the future to determine if
+ * multi-queue support is available.
+ */
+ ctrlr->is_mcq_supported = false;
+ if (!(ctrlr->is_single_db_supported == 0 || ctrlr->is_mcq_supported))
+ return (ENXIO);
+ /*
+ * The maximum transfer size supported by the UFSHCI spec is
+ * 65535 * 256 KiB. However, we limit the maximum transfer size to
+ * 1 MiB (256 * 4 KiB) for performance reasons.
+ */
+ ctrlr->page_size = PAGE_SIZE;
+ ctrlr->max_xfer_size = ctrlr->page_size * UFSHCI_MAX_PRDT_ENTRY_COUNT;
+
+ timeout_period = UFSHCI_DEFAULT_TIMEOUT_PERIOD;
+ TUNABLE_INT_FETCH("hw.ufshci.timeout_period", &timeout_period);
+ timeout_period = min(timeout_period, UFSHCI_MAX_TIMEOUT_PERIOD);
+ timeout_period = max(timeout_period, UFSHCI_MIN_TIMEOUT_PERIOD);
+ ctrlr->timeout_period = timeout_period;
+
+ retry_count = UFSHCI_DEFAULT_RETRY_COUNT;
+ TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count);
+ ctrlr->retry_count = retry_count;
+
+ /* Disable all interrupts */
+ ufshci_mmio_write_4(ctrlr, ie, 0);
+
+ /* Enable Host Controller */
+ error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
+ if (error)
+ return (error);
+
+ /* Send DME_LINKSTARTUP command to start the link startup procedure */
+ error = ufshci_uic_send_dme_link_startup(ctrlr);
+ if (error)
+ return (error);
+
+ /*
+ * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
+ * controller has successfully received a Link Startup UIC command
+ * response and the UFS device has found a physical link to the
+ * controller.
+ */
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
+ ufshci_printf(ctrlr, "UFS device not found\n");
+ return (ENXIO);
+ }
+
+ /* Enable additional interrupts by programming the IE register. */
+ ie = ufshci_mmio_read_4(ctrlr, ie);
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */
+ ufshci_mmio_write_4(ctrlr, ie, ie);
+
+ /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
+
+ /* Allocate and initialize UTP Task Management Request List. */
+ error = ufshci_utm_req_queue_construct(ctrlr);
+ if (error)
+ return (error);
+
+ /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
+ error = ufshci_ut_req_queue_construct(ctrlr);
+ if (error)
+ return (error);
+
+ /* TODO: Separate IO and Admin slot */
+ /* max_hw_pend_io is the number of slots in the transfer_req_queue */
+ ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries;
+
+ return (0);
+}
+
+void
+ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
+{
+ if (ctrlr->resource == NULL)
+ goto nores;
+
+ /* TODO: Flush In-flight IOs */
+
+ /* Release resources */
+ // ufshci_utm_req_queue_destroy(ctrlr);
+ ufshci_ut_req_queue_destroy(ctrlr);
+
+ if (ctrlr->tag)
+ bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
+
+ if (ctrlr->res)
+ bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
+ rman_get_rid(ctrlr->res), ctrlr->res);
+
+ mtx_lock(&ctrlr->sc_mtx);
+
+ ufshci_sim_detach(ctrlr);
+
+ mtx_unlock(&ctrlr->sc_mtx);
+
+ bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
+ ctrlr->resource);
+nores:
+ mtx_destroy(&ctrlr->uic_cmd_lock);
+ mtx_destroy(&ctrlr->sc_mtx);
+
+ return;
+}
+
+int
+ufshci_ctrl_reset(struct ufshci_controller *ctrlr)
+{
+ uint32_t ie;
+ int error;
+
+ /* Backup and disable all interrupts */
+ ie = ufshci_mmio_read_4(ctrlr, ie);
+ ufshci_mmio_write_4(ctrlr, ie, 0);
+
+ /* Release resources */
+ ufshci_utm_req_queue_destroy(ctrlr);
+ ufshci_ut_req_queue_destroy(ctrlr);
+
+ /* Reset Host Controller */
+ error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
+ if (error)
+ return (error);
+
+ /* Send DME_LINKSTARTUP command to start the link startup procedure */
+ error = ufshci_uic_send_dme_link_startup(ctrlr);
+ if (error)
+ return (error);
+
+ /* Enable interrupts */
+ ufshci_mmio_write_4(ctrlr, ie, ie);
+
+ /* Allocate and initialize UTP Task Management Request List. */
+ error = ufshci_utm_req_queue_construct(ctrlr);
+ if (error)
+ return (error);
+
+ /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
+ error = ufshci_ut_req_queue_construct(ctrlr);
+ if (error)
+ return (error);
+
+ return (0);
+}
+
+int
+ufshci_ctrl_submit_admin_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req)
+{
+ return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req,
+ /*is_admin*/ true));
+}
+
+int
+ufshci_ctrl_submit_io_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req)
+{
+ return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req,
+ /*is_admin*/ false));
+}
+
+int
+ufshci_ctrl_send_nop(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_completion_poll_status status;
+
+ status.done = 0;
+ ufshci_ctrl_cmd_send_nop(ctrlr, ufshci_completion_poll_cb, &status);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_ctrl_send_nop failed!\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static void
+ufshci_ctrlr_fail(struct ufshci_controller *ctrlr, bool admin_also)
+{
+ printf("ufshci(4): ufshci_ctrlr_fail\n");
+
+ ctrlr->is_failed = true;
+
+ /* TODO: task_mgmt_req_queue should be handled as fail */
+
+ ufshci_req_queue_fail(ctrlr,
+ &ctrlr->transfer_req_queue.hwq[UFSHCI_SDB_Q]);
+}
+
+static void
+ufshci_ctrlr_start(struct ufshci_controller *ctrlr)
+{
+ TSENTER();
+
+ if (ufshci_ctrl_send_nop(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr, false);
+ return;
+ }
+
+ /* Initialize the UFS target device */
+ if (ufshci_dev_init(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr, false);
+ return;
+ }
+
+ /* Initialize Reference Clock */
+ if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr, false);
+ return;
+ }
+
+ /* Initialize UniPro */
+ if (ufshci_dev_init_unipro(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr, false);
+ return;
+ }
+
+ /*
+ * Initialize UIC Power Mode
+ * QEMU UFS devices do not support UniPro or power mode changes.
+ */
+ if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
+ ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr, false);
+ return;
+ }
+
+ /* Initialize UFS Power Mode */
+ if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr, false);
+ return;
+ }
+
+ /* Read the UFS device and geometry descriptors */
+ if (ufshci_dev_get_descriptor(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr, false);
+ return;
+ }
+
+ /* TODO: Configure Write Protect */
+
+ /* TODO: Configure Background Operations */
+
+ /* TODO: Configure Write Booster */
+
+ if (ufshci_sim_attach(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr, false);
+ return;
+ }
+
+ TSEXIT();
+}
+
+void
+ufshci_ctrlr_start_config_hook(void *arg)
+{
+ struct ufshci_controller *ctrlr = arg;
+
+ TSENTER();
+
+ if (ufshci_utm_req_queue_enable(ctrlr) == 0 &&
+ ufshci_ut_req_queue_enable(ctrlr) == 0)
+ ufshci_ctrlr_start(ctrlr);
+ else
+ ufshci_ctrlr_fail(ctrlr, false);
+
+ ufshci_sysctl_initialize_ctrlr(ctrlr);
+ config_intrhook_disestablish(&ctrlr->config_hook);
+
+ TSEXIT();
+}
+
+/*
+ * Poll all the queues enabled on the device for completion.
+ */
+void
+ufshci_ctrlr_poll(struct ufshci_controller *ctrlr)
+{
+ uint32_t is;
+
+ is = ufshci_mmio_read_4(ctrlr, is);
+
+ /* UIC error */
+ if (is & UFSHCIM(UFSHCI_IS_REG_UE)) {
+ uint32_t uecpa, uecdl, uecn, uect, uecdme;
+
+ /* UECPA for Host UIC Error Code within PHY Adapter Layer */
+ uecpa = ufshci_mmio_read_4(ctrlr, uecpa);
+ if (uecpa & UFSHCIM(UFSHCI_UECPA_REG_ERR)) {
+ ufshci_printf(ctrlr, "UECPA error code: 0x%x\n",
+ UFSHCIV(UFSHCI_UECPA_REG_EC, uecpa));
+ }
+ /* UECDL for Host UIC Error Code within Data Link Layer */
+ uecdl = ufshci_mmio_read_4(ctrlr, uecdl);
+ if (uecdl & UFSHCIM(UFSHCI_UECDL_REG_ERR)) {
+ ufshci_printf(ctrlr, "UECDL error code: 0x%x\n",
+ UFSHCIV(UFSHCI_UECDL_REG_EC, uecdl));
+ }
+ /* UECN for Host UIC Error Code within Network Layer */
+ uecn = ufshci_mmio_read_4(ctrlr, uecn);
+ if (uecn & UFSHCIM(UFSHCI_UECN_REG_ERR)) {
+ ufshci_printf(ctrlr, "UECN error code: 0x%x\n",
+ UFSHCIV(UFSHCI_UECN_REG_EC, uecn));
+ }
+ /* UECT for Host UIC Error Code within Transport Layer */
+ uect = ufshci_mmio_read_4(ctrlr, uect);
+ if (uect & UFSHCIM(UFSHCI_UECT_REG_ERR)) {
+ ufshci_printf(ctrlr, "UECT error code: 0x%x\n",
+ UFSHCIV(UFSHCI_UECT_REG_EC, uect));
+ }
+ /* UECDME for Host UIC Error Code within DME subcomponent */
+ uecdme = ufshci_mmio_read_4(ctrlr, uecdme);
+ if (uecdme & UFSHCIM(UFSHCI_UECDME_REG_ERR)) {
+ ufshci_printf(ctrlr, "UECDME error code: 0x%x\n",
+ UFSHCIV(UFSHCI_UECDME_REG_EC, uecdme));
+ }
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UE));
+ }
+ /* Device Fatal Error Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_DFES)) {
+ ufshci_printf(ctrlr, "Device fatal error on ISR\n");
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_DFES));
+ }
+ /* UTP Error Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_UTPES)) {
+ ufshci_printf(ctrlr, "UTP error on ISR\n");
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTPES));
+ }
+ /* Host Controller Fatal Error Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_HCFES)) {
+ ufshci_printf(ctrlr, "Host controller fatal error on ISR\n");
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_HCFES));
+ }
+ /* System Bus Fatal Error Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_SBFES)) {
+ ufshci_printf(ctrlr, "System bus fatal error on ISR\n");
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_SBFES));
+ }
+ /* Crypto Engine Fatal Error Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_CEFES)) {
+ ufshci_printf(ctrlr, "Crypto engine fatal error on ISR\n");
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CEFES));
+ }
+ /* UTP Task Management Request Completion Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) {
+ ufshci_printf(ctrlr, "TODO: Implement UTMR completion\n");
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS));
+ /* TODO: Implement UTMR completion */
+ }
+ /* UTP Transfer Request Completion Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) {
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTRCS));
+ ufshci_req_queue_process_completions(
+ &ctrlr->transfer_req_queue);
+ }
+ /* MCQ CQ Event Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_CQES)) {
+ /* TODO: We need to process completion Queue Pairs */
+ ufshci_printf(ctrlr, "MCQ completion not yet implemented\n");
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CQES));
+ }
+}
+
+/*
+ * Shared handler for the single-vector interrupt case: poll all the
+ * queues enabled on the controller for completions.
+ */
+void
+ufshci_ctrlr_shared_handler(void *arg)
+{
+ struct ufshci_controller *ctrlr = arg;
+
+ ufshci_ctrlr_poll(ctrlr);
+}
+
+void
+ufshci_reg_dump(struct ufshci_controller *ctrlr)
+{
+ ufshci_printf(ctrlr, "========= UFSHCI Register Dump =========\n");
+
+ UFSHCI_DUMP_REG(ctrlr, cap);
+ UFSHCI_DUMP_REG(ctrlr, mcqcap);
+ UFSHCI_DUMP_REG(ctrlr, ver);
+ UFSHCI_DUMP_REG(ctrlr, ext_cap);
+ UFSHCI_DUMP_REG(ctrlr, hcpid);
+ UFSHCI_DUMP_REG(ctrlr, hcmid);
+ UFSHCI_DUMP_REG(ctrlr, ahit);
+ UFSHCI_DUMP_REG(ctrlr, is);
+ UFSHCI_DUMP_REG(ctrlr, ie);
+ UFSHCI_DUMP_REG(ctrlr, hcsext);
+ UFSHCI_DUMP_REG(ctrlr, hcs);
+ UFSHCI_DUMP_REG(ctrlr, hce);
+ UFSHCI_DUMP_REG(ctrlr, uecpa);
+ UFSHCI_DUMP_REG(ctrlr, uecdl);
+ UFSHCI_DUMP_REG(ctrlr, uecn);
+ UFSHCI_DUMP_REG(ctrlr, uect);
+ UFSHCI_DUMP_REG(ctrlr, uecdme);
+
+ ufshci_printf(ctrlr, "========================================\n");
+}
\ No newline at end of file
diff --git a/sys/dev/ufshci/ufshci_ctrlr_cmd.c b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include "ufshci_private.h"
+
+void
+ufshci_ctrl_cmd_send_nop(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn,
+ void *cb_arg)
+{
+ struct ufshci_request *req;
+ struct ufshci_nop_out_upiu *upiu;
+
+ req = ufshci_allocate_request_vaddr(NULL, 0, M_WAITOK, cb_fn, cb_arg);
+
+ req->request_size = sizeof(struct ufshci_nop_out_upiu);
+ req->response_size = sizeof(struct ufshci_nop_in_upiu);
+
+ upiu = (struct ufshci_nop_out_upiu *)&req->request_upiu;
+ memset(upiu, 0, req->request_size);
+ upiu->header.trans_type = UFSHCI_UPIU_TRANSACTION_CODE_NOP_OUT;
+
+ ufshci_ctrl_submit_admin_request(ctrlr, req);
+}
+
+void
+ufshci_ctrl_cmd_send_query_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, struct ufshci_query_param param)
+{
+ struct ufshci_request *req;
+ struct ufshci_query_request_upiu *upiu;
+
+ req = ufshci_allocate_request_vaddr(NULL, 0, M_WAITOK, cb_fn, cb_arg);
+
+ req->request_size = sizeof(struct ufshci_query_request_upiu);
+ req->response_size = sizeof(struct ufshci_query_response_upiu);
+
+ upiu = (struct ufshci_query_request_upiu *)&req->request_upiu;
+ memset(upiu, 0, req->request_size);
+ upiu->header.trans_type = UFSHCI_UPIU_TRANSACTION_CODE_QUERY_REQUEST;
+ upiu->header.ext_iid_or_function = param.function;
+ upiu->opcode = param.opcode;
+ upiu->idn = param.type;
+ upiu->index = param.index;
+ upiu->selector = param.selector;
+ upiu->value_64 = param.value;
+ upiu->length = param.desc_size;
+
+ ufshci_ctrl_submit_admin_request(ctrlr, req);
+}
diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_dev.c
@@ -0,0 +1,465 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+
+#include "ufshci_private.h"
+#include "ufshci_reg.h"
+
+static int
+ufshci_dev_read_descriptor(struct ufshci_controller *ctrlr,
+ enum ufshci_descriptor_type desc_type, uint8_t index, uint8_t selector,
+ void *desc, size_t desc_size)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR;
+ param.type = desc_type;
+ param.index = index;
+ param.selector = selector;
+ param.value = 0;
+ param.desc_size = desc_size;
+
+ status.done = 0;
+ ufshci_ctrl_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_read_descriptor failed!\n");
+ return (ENXIO);
+ }
+
+ memcpy(desc, status.cpl.response_upiu.query_response_upiu.command_data,
+ desc_size);
+
+ return (0);
+}
+
+static int
+ufshci_dev_read_device_descriptor(struct ufshci_controller *ctrlr,
+ struct ufshci_device_descriptor *desc)
+{
+ return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_DEVICE, 0, 0,
+ desc, sizeof(struct ufshci_device_descriptor)));
+}
+
+static int
+ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr,
+ struct ufshci_geometry_descriptor *desc)
+{
+ return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_GEOMETRY, 0,
+ 0, desc, sizeof(struct ufshci_geometry_descriptor)));
+}
+
+static int
+ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr,
+ struct ufshci_unit_descriptor *desc, uint8_t index)
+{
+ return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, index,
+ 0, desc, sizeof(struct ufshci_unit_descriptor)));
+}
+
+static int
+ufshci_dev_read_flag(struct ufshci_controller *ctrlr,
+ enum ufshci_flags flag_type, uint8_t *flag)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_READ_FLAG;
+ param.type = flag_type;
+ param.index = 0;
+ param.selector = 0;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrl_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_read_flag failed!\n");
+ return (ENXIO);
+ }
+
+ *flag = status.cpl.response_upiu.query_response_upiu.flag_value;
+
+ return (0);
+}
+
+static int
+ufshci_dev_set_flag(struct ufshci_controller *ctrlr,
+ enum ufshci_flags flag_type)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_SET_FLAG;
+ param.type = flag_type;
+ param.index = 0;
+ param.selector = 0;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrl_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_set_flag failed!\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_dev_read_attribute(struct ufshci_controller *ctrlr,
+ enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
+ uint64_t *attribute)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE;
+ param.type = attr_type;
+ param.index = index;
+ param.selector = selector;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrl_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n");
+ return (ENXIO);
+ }
+
+ *attribute = status.cpl.response_upiu.query_response_upiu.value_64;
+
+ return (0);
+}
+
+static int
+ufshci_dev_write_attribute(struct ufshci_controller *ctrlr,
+ enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
+ uint64_t value)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE;
+ param.type = attr_type;
+ param.index = index;
+ param.selector = selector;
+ param.value = value;
+
+ status.done = 0;
+ ufshci_ctrl_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_write_attribute failed!\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+int
+ufshci_dev_init(struct ufshci_controller *ctrlr)
+{
+ int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
+ sbintime_t delta_t = SBT_1US;
+ uint8_t flag;
+ int error;
+ const uint8_t device_init_completed = 0;
+
+ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT);
+ if (error)
+ return (error);
+
+ /* Wait for the UFSHCI_FLAG_F_DEVICE_INIT flag to change */
+ while (1) {
+ error = ufshci_dev_read_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT,
+ &flag);
+ if (error)
+ return (error);
+ if (flag == device_init_completed)
+ break;
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "device init did not become %d "
+ "within %d ms\n",
+ device_init_completed,
+ ctrlr->device_init_timeout_in_ms);
+ return (ENXIO);
+ }
+
+ pause_sbt("ufshciinit", delta_t, 0, C_PREL(1));
+ delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ }
+
+ return (0);
+}
+
+int
+ufshci_dev_reset(struct ufshci_controller *ctrlr)
+{
+ if (ufshci_uic_send_dme_endpoint_reset(ctrlr))
+ return (ENXIO);
+
+ return (ufshci_dev_init(ctrlr));
+}
+
+int
+ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr)
+{
+ int error;
+ uint8_t index, selector;
+
+ index = 0; /* bRefClkFreq is device type attribute */
+ selector = 0; /* bRefClkFreq is device type attribute */
+
+ error = ufshci_dev_write_attribute(ctrlr, UFSHCI_ATTR_B_REF_CLK_FREQ,
+ index, selector, ctrlr->ref_clk);
+ if (error)
+ return (error);
+
+ return (0);
+}
+
+int
+ufshci_dev_init_unipro(struct ufshci_controller *ctrlr)
+{
+ uint32_t pa_granularity, peer_pa_granularity;
+ uint32_t t_activate, peer_t_activate;
+
+ /*
+ * Unipro Version:
+ * - 7~15 = Above 2.0, 6 = 2.0, 5 = 1.8, 4 = 1.61, 3 = 1.6, 2 = 1.41,
+ * 1 = 1.40, 0 = Reserved
+ */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_LocalVerInfo,
+ &ctrlr->unipro_version))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_get(ctrlr, PA_RemoteVerInfo,
+ &ctrlr->ufs_dev.unipro_version))
+ return (ENXIO);
+
+ /*
+ * PA_Granularity: Granularity for PA_TActivate and PA_Hibern8Time
+ * - 1=1us, 2=4us, 3=8us, 4=16us, 5=32us, 6=100us
+ */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_Granularity, &pa_granularity))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
+ &peer_pa_granularity))
+ return (ENXIO);
+
+ /*
+ * PA_TActivate: Time to wait before activating a burst in order to
+ * wake up the peer M-RX.
+ * UniPro automatically sets timing information such as PA_TActivate
+ * through the PACP_CAP_EXT1_ind command during the Link Startup operation.
+ */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_TActivate, &t_activate))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_peer_get(ctrlr, PA_TActivate, &peer_t_activate))
+ return (ENXIO);
+
+ if (ctrlr->quirks & UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE) {
+ /*
+ * Intel Lakefield UFSHCI has a quirk: we need to add 200us to
+ * the peer's PA_TActivate.
+ */
+ if (pa_granularity == peer_pa_granularity) {
+ peer_t_activate = t_activate + 2;
+ if (ufshci_uic_send_dme_peer_set(ctrlr, PA_TActivate,
+ peer_t_activate))
+ return (ENXIO);
+ }
+ }
+
+ return (0);
+}
+
+int
+ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
+{
+	/* HSSeries: A = 1, B = 2 */
+ const uint32_t hs_series = 2;
+ /*
+ * TX/RX PWRMode:
+ * - TX[3:0], RX[7:4]
+ * - Fast Mode = 1, Slow Mode = 2, FastAuto Mode = 4, SlowAuto Mode = 5
+ */
+ const uint32_t fast_mode = 1;
+ const uint32_t rx_bit_shift = 4;
+ const uint32_t power_mode = (fast_mode << rx_bit_shift) | fast_mode;
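+	/* e.g. Fast TX and Fast RX: (1 << 4) | 1 == 0x11 */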
+
+ /* Update lanes with available TX/RX lanes */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes,
+ &ctrlr->max_tx_lanes))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_get(ctrlr, PA_AvailRxDataLanes,
+ &ctrlr->max_rx_lanes))
+ return (ENXIO);
+
+ /* Get max HS-GEAR value */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_MaxRxHSGear,
+ &ctrlr->max_rx_hs_gear))
+ return (ENXIO);
+
+ /* Set the data lane to max */
+ ctrlr->tx_lanes = ctrlr->max_tx_lanes;
+ ctrlr->rx_lanes = ctrlr->max_rx_lanes;
+ if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveTxDataLanes,
+ ctrlr->tx_lanes))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveRxDataLanes,
+ ctrlr->rx_lanes))
+ return (ENXIO);
+
+ /* Set HS-GEAR to max gear */
+ ctrlr->hs_gear = ctrlr->max_rx_hs_gear;
+ if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_RxGear, ctrlr->hs_gear))
+ return (ENXIO);
+
+ /*
+ * Set termination
+ * - HS-MODE = ON / LS-MODE = OFF
+ */
+ if (ufshci_uic_send_dme_set(ctrlr, PA_TxTermination, true))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_RxTermination, true))
+ return (ENXIO);
+
+	/* Set HSSeries (A = 1, B = 2) */
+ if (ufshci_uic_send_dme_set(ctrlr, PA_HSSeries, hs_series))
+ return (ENXIO);
+
+ /* Set Timeout values */
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData0,
+ DL_FC0ProtectionTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData1,
+ DL_TC0ReplayTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData2,
+ DL_AFC0ReqTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData3,
+ DL_FC0ProtectionTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData4,
+ DL_TC0ReplayTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData5,
+ DL_AFC0ReqTimeOutVal_Default))
+ return (ENXIO);
+
+ if (ufshci_uic_send_dme_set(ctrlr, DME_LocalFC0ProtectionTimeOutVal,
+ DL_FC0ProtectionTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, DME_LocalTC0ReplayTimeOutVal,
+ DL_TC0ReplayTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, DME_LocalAFC0ReqTimeOutVal,
+ DL_AFC0ReqTimeOutVal_Default))
+ return (ENXIO);
+
+ /* Set TX/RX PWRMode */
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
+ return (ENXIO);
+
+	/* Wait for the power mode change to complete. */
+ if (ufshci_uic_power_mode_ready(ctrlr)) {
+ ufshci_reg_dump(ctrlr);
+ return (ENXIO);
+ }
+
+ /* Clear 'Power Mode completion status' */
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UPMS));
+
+ if (ctrlr->quirks & UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE) {
+ /*
+		 * Intel Lakefield UFSHCI has a quirk. We need to wait
+		 * 1250us and then clear the DME error.
+ */
+ pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1));
+
+ /* Test with dme_peer_get to make sure there are no errors. */
+ if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, NULL))
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+int
+ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr)
+{
+ /* TODO: Need to implement */
+
+ return (0);
+}
+
+int
+ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *device = &ctrlr->ufs_dev;
+ /*
+	 * The device density unit is defined by the spec as 512 bytes.
+	 * qTotalRawDeviceCapacity uses big-endian byte ordering.
+ */
+ const uint32_t device_density_unit = 512;
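+	/* e.g. qTotalRawDeviceCapacity == 0x4000000 units => 32 GiB raw */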
+ uint32_t ver;
+ int error;
+
+ error = ufshci_dev_read_device_descriptor(ctrlr, &device->dev_desc);
+ if (error)
+ return (error);
+
+ ver = be16toh(device->dev_desc.wSpecVersion);
+ ufshci_printf(ctrlr, "UFS device spec version %u.%u%u\n",
+ UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver),
+ UFSHCIV(UFSHCI_VER_REG_VS, ver));
+ ufshci_printf(ctrlr, "%u enabled LUNs found\n",
+ device->dev_desc.bNumberLU);
+
+ error = ufshci_dev_read_geometry_descriptor(ctrlr, &device->geo_desc);
+ if (error)
+ return (error);
+
+ if (device->geo_desc.bMaxNumberLU == 0) {
+ device->max_lun_count = 8;
+ } else if (device->geo_desc.bMaxNumberLU == 1) {
+ device->max_lun_count = 32;
+ } else {
+ ufshci_printf(ctrlr,
+ "Invalid Geometry Descriptor bMaxNumberLU value=%d\n",
+ device->geo_desc.bMaxNumberLU);
+ return (ENXIO);
+ }
+ ctrlr->max_lun_count = device->max_lun_count;
+
+ ufshci_printf(ctrlr, "UFS device total size is %lu bytes\n",
+ be64toh(device->geo_desc.qTotalRawDeviceCapacity) *
+ device_density_unit);
+
+ return (0);
+}
diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_pci.c
@@ -0,0 +1,260 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/proc.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "ufshci_private.h"
+
+static int ufshci_pci_probe(device_t);
+static int ufshci_pci_attach(device_t);
+static int ufshci_pci_detach(device_t);
+
+static int ufshci_ctrlr_setup_interrupts(struct ufshci_controller *ctrlr);
+
+static device_method_t ufshci_pci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ufshci_pci_probe),
+ DEVMETHOD(device_attach, ufshci_pci_attach),
+ DEVMETHOD(device_detach, ufshci_pci_detach),
+ /* TODO: Implement Suspend, Resume */
+ { 0, 0 }
+};
+
+static driver_t ufshci_pci_driver = {
+ "ufshci",
+ ufshci_pci_methods,
+ sizeof(struct ufshci_controller),
+};
+
+DRIVER_MODULE(ufshci, pci, ufshci_pci_driver, 0, 0);
+
+static struct _pcsid {
+ uint32_t devid;
+ const char *desc;
+ uint32_t ref_clk;
+ uint32_t quirks;
+} pci_ids[] = { { 0x131b36, "QEMU UFS Host Controller", UFSHCI_REF_CLK_19_2MHz,
+ UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE },
+ { 0x98fa8086, "Intel Lakefield UFS Host Controller",
+ UFSHCI_REF_CLK_19_2MHz,
+ UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE |
+ UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE },
+ { 0x54ff8086, "Intel UFS Host Controller", UFSHCI_REF_CLK_19_2MHz },
+ { 0x00000000, NULL } };
+
+static int
+ufshci_pci_probe(device_t device)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(device);
+ uint32_t devid = pci_get_devid(device);
+ struct _pcsid *ep = pci_ids;
+
+ while (ep->devid && ep->devid != devid)
+ ++ep;
+
+ if (ep->devid) {
+ ctrlr->quirks = ep->quirks;
+ ctrlr->ref_clk = ep->ref_clk;
+ }
+
+ if (ep->desc) {
+ device_set_desc(device, ep->desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+
+ return (ENXIO);
+}
+
+static int
+ufshci_ctrlr_allocate_bar(struct ufshci_controller *ctrlr)
+{
+ ctrlr->resource_id = PCIR_BAR(0);
+
+ ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
+ &ctrlr->resource_id, RF_ACTIVE);
+
+ if (ctrlr->resource == NULL) {
+ ufshci_printf(ctrlr, "unable to allocate pci resource\n");
+ return (ENOMEM);
+ }
+
+ ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
+ ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
+ ctrlr->regs = (struct ufshci_registers *)ctrlr->bus_handle;
+
+ return (0);
+}
+
+static int
+ufshci_pci_attach(device_t dev)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(dev);
+ int status;
+
+ ctrlr->dev = dev;
+ status = ufshci_ctrlr_allocate_bar(ctrlr);
+ if (status != 0)
+ goto bad;
+ pci_enable_busmaster(dev);
+ status = ufshci_ctrlr_setup_interrupts(ctrlr);
+ if (status != 0)
+ goto bad;
+
+ return (ufshci_attach(dev));
+bad:
+ if (ctrlr->resource != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
+ ctrlr->resource);
+ }
+
+ if (ctrlr->tag)
+ bus_teardown_intr(dev, ctrlr->res, ctrlr->tag);
+
+ if (ctrlr->res)
+ bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(ctrlr->res),
+ ctrlr->res);
+
+ if (ctrlr->msi_count > 0)
+ pci_release_msi(dev);
+
+ return (status);
+}
+
+static int
+ufshci_pci_detach(device_t dev)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(dev);
+ int error;
+
+ error = ufshci_detach(dev);
+ if (ctrlr->msi_count > 0)
+ pci_release_msi(dev);
+ pci_disable_busmaster(dev);
+ return (error);
+}
+
+static int
+ufshci_ctrlr_setup_shared(struct ufshci_controller *ctrlr, int rid)
+{
+ int error;
+
+ ctrlr->num_io_queues = 1;
+ ctrlr->rid = rid;
+ ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
+ &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
+ if (ctrlr->res == NULL) {
+ ufshci_printf(ctrlr, "unable to allocate shared interrupt\n");
+ return (ENOMEM);
+ }
+
+ error = bus_setup_intr(ctrlr->dev, ctrlr->res,
+ INTR_TYPE_MISC | INTR_MPSAFE, NULL, ufshci_ctrlr_shared_handler,
+ ctrlr, &ctrlr->tag);
+ if (error) {
+ ufshci_printf(ctrlr, "unable to setup shared interrupt\n");
+ return (error);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_ctrlr_setup_interrupts(struct ufshci_controller *ctrlr)
+{
+ device_t dev = ctrlr->dev;
+ int force_intx = 0;
+ int num_io_queues, per_cpu_io_queues, min_cpus_per_ioq;
+ int num_vectors_requested;
+
+ TUNABLE_INT_FETCH("hw.ufshci.force_intx", &force_intx);
+ if (force_intx)
+ goto intx;
+
+ if (pci_msix_count(dev) == 0)
+ goto msi;
+
+ /*
+	 * Try to allocate one MSI-X per core for I/O queues, plus one
+	 * for the admin queue, but accept a single shared MSI-X if we
+	 * have to. Fall back to MSI if we can't get any MSI-X.
+ */
+
+ /*
+	 * TODO: Need to implement MCQ (Multi-Circular Queue).
+ * Example: num_io_queues = mp_ncpus;
+ */
+ num_io_queues = 1;
+
+ TUNABLE_INT_FETCH("hw.ufshci.num_io_queues", &num_io_queues);
+ if (num_io_queues < 1 || num_io_queues > mp_ncpus)
+ num_io_queues = mp_ncpus;
+
+ per_cpu_io_queues = 1;
+ TUNABLE_INT_FETCH("hw.ufshci.per_cpu_io_queues", &per_cpu_io_queues);
+ if (per_cpu_io_queues == 0)
+ num_io_queues = 1;
+
+ min_cpus_per_ioq = smp_threads_per_core;
+ TUNABLE_INT_FETCH("hw.ufshci.min_cpus_per_ioq", &min_cpus_per_ioq);
+ if (min_cpus_per_ioq > 1) {
+ num_io_queues = min(num_io_queues,
+ max(1, mp_ncpus / min_cpus_per_ioq));
+ }
+
+ num_io_queues = min(num_io_queues, max(1, pci_msix_count(dev) - 1));
+
+again:
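+	/*
+	 * Round the queue count down to a multiple of the NUMA domain
+	 * count so that queues can be spread evenly across domains.
+	 */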
+ if (num_io_queues > vm_ndomains)
+ num_io_queues -= num_io_queues % vm_ndomains;
+ num_vectors_requested = min(num_io_queues + 1, pci_msix_count(dev));
+ ctrlr->msi_count = num_vectors_requested;
+ if (pci_alloc_msix(dev, &ctrlr->msi_count) != 0) {
+ ufshci_printf(ctrlr, "unable to allocate MSI-X\n");
+ ctrlr->msi_count = 0;
+ goto msi;
+ }
+ if (ctrlr->msi_count == 1)
+ return (ufshci_ctrlr_setup_shared(ctrlr, 1));
+ if (ctrlr->msi_count != num_vectors_requested) {
+ pci_release_msi(dev);
+ num_io_queues = ctrlr->msi_count - 1;
+ goto again;
+ }
+
+ ctrlr->num_io_queues = num_io_queues;
+ return (0);
+
+msi:
+ /*
+	 * Try to allocate 2 MSIs (admin and I/O queues), but accept a
+	 * single shared MSI if we have to. Fall back to INTx if we can't
+	 * get any MSI.
+ */
+ ctrlr->msi_count = min(pci_msi_count(dev), 2);
+ if (ctrlr->msi_count > 0) {
+ if (pci_alloc_msi(dev, &ctrlr->msi_count) != 0) {
+ ufshci_printf(ctrlr, "unable to allocate MSI\n");
+ ctrlr->msi_count = 0;
+ } else if (ctrlr->msi_count == 2) {
+ ctrlr->num_io_queues = 1;
+ return (0);
+ }
+ }
+
+intx:
+ return (ufshci_ctrlr_setup_shared(ctrlr, ctrlr->msi_count > 0 ? 1 : 0));
+}
diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_private.h
@@ -0,0 +1,508 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef __UFSHCI_PRIVATE_H__
+#define __UFSHCI_PRIVATE_H__
+
+#ifdef _KERNEL
+#include <sys/types.h>
+#else /* !_KERNEL */
+#include <stdbool.h>
+#include <stdint.h>
+#endif /* _KERNEL */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bio.h>
+#include <sys/bus.h>
+#include <sys/counter.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/memdesc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/taskqueue.h>
+
+#include <machine/bus.h>
+
+#include "ufshci.h"
+
+MALLOC_DECLARE(M_UFSHCI);
+
+#define UFSHCI_DEVICE_INIT_TIMEOUT_MS (2000) /* in milliseconds */
+#define UFSHCI_UIC_CMD_TIMEOUT_MS (200) /* in milliseconds */
+#define UFSHCI_DEFAULT_TIMEOUT_PERIOD (10) /* in seconds */
+#define UFSHCI_MIN_TIMEOUT_PERIOD (5) /* in seconds */
+#define UFSHCI_MAX_TIMEOUT_PERIOD (120) /* in seconds */
+
+#define UFSHCI_DEFAULT_RETRY_COUNT (4)
+
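+/*
+ * Slot limits in single doorbell mode: the 32-bit UTRLDBR doorbell register
+ * allows at most 32 transfer request slots, and the task management list
+ * supports at most 8 slots.
+ */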
+#define UFSHCI_UTR_ENTRIES (32)
+#define UFSHCI_UTRM_ENTRIES (8)
+
+struct ufshci_controller;
+
+struct ufshci_completion_poll_status {
+ struct ufshci_completion cpl;
+ int done;
+ bool error;
+};
+
+struct ufshci_request {
+ struct ufshci_upiu request_upiu;
+ size_t request_size;
+ size_t response_size;
+
+ struct memdesc payload;
+ enum ufshci_data_direction data_direction;
+ ufshci_cb_fn_t cb_fn;
+ void *cb_arg;
+ bool is_admin;
+ int32_t retries;
+ bool payload_valid;
+ bool timeout;
+ bool spare[2]; /* Future use */
+ STAILQ_ENTRY(ufshci_request) stailq;
+};
+
+enum ufshci_slot_state {
+ UFSHCI_SLOT_STATE_FREE = 0x0,
+ UFSHCI_SLOT_STATE_RESERVED = 0x1,
+ UFSHCI_SLOT_STATE_SCHEDULED = 0x2,
+ UFSHCI_SLOT_STATE_TIMEOUT = 0x3,
+ UFSHCI_SLOT_STATE_NEED_ERROR_HANDLING = 0x4,
+};
+
+struct ufshci_tracker {
+ struct ufshci_request *req;
+ struct ufshci_req_queue *req_queue;
+ struct ufshci_hw_queue *hwq;
+ uint8_t slot_num;
+ enum ufshci_slot_state slot_state;
+ size_t response_size;
+ sbintime_t deadline;
+
+ bus_dmamap_t payload_dma_map;
+ uint64_t payload_addr;
+
+ struct ufshci_utp_cmd_desc *ucd;
+ bus_addr_t ucd_bus_addr;
+
+ uint16_t prdt_off;
+ uint16_t prdt_entry_cnt;
+};
+
+enum ufshci_queue_mode {
+	UFSHCI_Q_MODE_SDB = 0x00, /* Single Doorbell Mode */
+	UFSHCI_Q_MODE_MCQ = 0x01, /* Multi-Circular Queue Mode */
+};
+
+/*
+ * UFS uses slot-based Single Doorbell (SDB) mode for request submission by
+ * default and additionally supports Multi-Circular Queue (MCQ) in UFS 4.0. To
+ * minimize duplicated code between SDB and MCQ, mode dependent operations are
+ * extracted into ufshci_qops.
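+ * Callers always dispatch through this table (for example,
+ * req_queue->qops.ring_doorbell(ctrlr, tr)), so the submission path is
+ * shared between the two modes.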
+ */
+struct ufshci_qops {
+ int (*construct)(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue, uint32_t num_entries,
+ bool is_task_mgmt);
+ void (*destroy)(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
+ struct ufshci_hw_queue *(*get_hw_queue)(
+ struct ufshci_req_queue *req_queue);
+ int (*enable)(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
+ int (*reserve_slot)(struct ufshci_req_queue *req_queue,
+ struct ufshci_tracker **tr);
+ int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue,
+ struct ufshci_tracker **tr);
+ void (*ring_doorbell)(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+ void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+ bool (*process_cpl)(struct ufshci_req_queue *req_queue);
+ int (*get_inflight_io)(struct ufshci_controller *ctrlr);
+};
+
+#define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */
+
+/*
+ * Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ
+ * (ring buffer) modes. Fields are shared; some such as sq_head, sq_tail and
+ * cq_head are not used in SDB but used in MCQ.
+ */
+struct ufshci_hw_queue {
+ uint32_t id;
+ int domain;
+ int cpu;
+
+ struct ufshci_utp_xfer_req_desc *utrd;
+
+ bus_dma_tag_t dma_tag_queue;
+ bus_dmamap_t queuemem_map;
+ bus_addr_t req_queue_addr;
+
+ uint32_t num_entries;
+ uint32_t num_trackers;
+
+ /*
+ * A Request List using the single doorbell method uses a dedicated
+ * ufshci_tracker, one per slot.
+ */
+ struct ufshci_tracker **act_tr;
+
+ uint32_t sq_head; /* MCQ mode */
+ uint32_t sq_tail; /* MCQ mode */
+ uint32_t cq_head; /* MCQ mode */
+
+ uint32_t phase;
+ int64_t num_cmds;
+ int64_t num_intr_handler_calls;
+ int64_t num_retries;
+ int64_t num_failures;
+
+ struct mtx_padalign qlock;
+};
+
+struct ufshci_req_queue {
+ struct ufshci_controller *ctrlr;
+ int domain;
+
+ /*
+ * queue_mode: active transfer scheme
+	 *   UFSHCI_Q_MODE_SDB - legacy single-doorbell list
+	 *   UFSHCI_Q_MODE_MCQ - modern multi-circular queue (UFSHCI 4.0+)
+ */
+ enum ufshci_queue_mode queue_mode;
+
+ uint8_t num_q;
+ struct ufshci_hw_queue *hwq;
+
+ struct ufshci_qops qops;
+
+ bool is_task_mgmt;
+ uint32_t num_entries;
+ uint32_t num_trackers;
+
+ /* Shared DMA resource */
+ struct ufshci_utp_cmd_desc *ucd;
+
+ bus_dma_tag_t dma_tag_ucd;
+ bus_dma_tag_t dma_tag_payload;
+
+ bus_dmamap_t ucdmem_map;
+
+ bus_addr_t ucd_addr;
+};
+
+struct ufshci_device {
+ uint32_t max_lun_count;
+
+ struct ufshci_device_descriptor dev_desc;
+ struct ufshci_geometry_descriptor geo_desc;
+
+ uint32_t unipro_version;
+};
+
+/*
+ * One of these per allocated device.
+ */
+struct ufshci_controller {
+ device_t dev;
+
+ uint32_t quirks;
+#define UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE \
+ 1 /* QEMU does not support UIC POWER MODE */
+#define UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE \
+	2 /* Need an additional 200us of PA_TActivate */
+#define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \
+ 4 /* Need to wait 1250us after power mode change */
+
+ uint32_t ref_clk;
+
+ struct cam_sim *ufshci_sim;
+ struct cam_path *ufshci_path;
+
+ struct mtx sc_mtx;
+ uint32_t sc_unit;
+ uint8_t sc_name[16];
+
+ struct ufshci_device ufs_dev;
+
+ bus_space_tag_t bus_tag;
+ bus_space_handle_t bus_handle;
+ int resource_id;
+ struct resource *resource;
+
+	/* Currently no UFSHCI controller supports MSI or MSI-X. */
+ int msi_count;
+
+ /* Fields for tracking progress during controller initialization. */
+ struct intr_config_hook config_hook;
+
+ /* For shared legacy interrupt. */
+ int rid;
+ struct resource *res;
+ void *tag;
+
+ uint32_t major_version;
+ uint32_t minor_version;
+
+ uint32_t num_io_queues;
+ uint32_t max_hw_pend_io;
+
+ /* Maximum logical unit number */
+ uint32_t max_lun_count;
+
+ /* Maximum i/o size in bytes */
+ uint32_t max_xfer_size;
+
+	/* Controller capabilities */
+ uint32_t cap;
+
+	/* Page size that we're currently using */
+ uint32_t page_size;
+
+ /* Timeout value on device initialization */
+ uint32_t device_init_timeout_in_ms;
+
+ /* Timeout value on UIC command */
+ uint32_t uic_cmd_timeout_in_ms;
+
+ /* UTMR/UTR queue timeout period in seconds */
+ uint32_t timeout_period;
+
+ /* UTMR/UTR queue retry count */
+ uint32_t retry_count;
+
+ /* UFS Host Controller Interface Registers */
+ struct ufshci_registers *regs;
+
+ /* UFS Transport Protocol Layer (UTP) */
+ struct ufshci_req_queue task_mgmt_req_queue;
+ struct ufshci_req_queue transfer_req_queue;
+	bool is_single_db_supported; /* LSDBS cap bit: 0 = supported */
+	bool is_mcq_supported;	/* MCQS cap bit: 1 = supported */
+
+ /* UFS Interconnect Layer (UIC) */
+ struct mtx uic_cmd_lock;
+ uint32_t unipro_version;
+ uint8_t hs_gear;
+ uint32_t tx_lanes;
+ uint32_t rx_lanes;
+ uint32_t max_rx_hs_gear;
+ uint32_t max_tx_lanes;
+ uint32_t max_rx_lanes;
+
+ bool is_failed;
+};
+
+#define ufshci_mmio_offsetof(reg) offsetof(struct ufshci_registers, reg)
+
+#define ufshci_mmio_read_4(sc, reg) \
+ bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \
+ ufshci_mmio_offsetof(reg))
+
+#define ufshci_mmio_write_4(sc, reg, val) \
+ bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
+ ufshci_mmio_offsetof(reg), val)
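+
+/* e.g. ufshci_mmio_read_4(ctrlr, hcs) reads the Host Controller Status. */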
+
+#define ufshci_printf(ctrlr, fmt, args...) \
+ device_printf(ctrlr->dev, fmt, ##args)
+
+/* UFSHCI */
+void ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl,
+ bool error);
+
+/* SIM */
+int ufshci_sim_attach(struct ufshci_controller *ctrlr);
+void ufshci_sim_detach(struct ufshci_controller *ctrlr);
+
+/* Controller */
+int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev);
+void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev);
+int ufshci_ctrl_reset(struct ufshci_controller *ctrlr);
+/* ctrlr defined as void * to allow use with config_intrhook. */
+void ufshci_ctrlr_start_config_hook(void *arg);
+void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);
+
+int ufshci_ctrl_submit_admin_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req);
+int ufshci_ctrl_submit_io_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req);
+int ufshci_ctrl_send_nop(struct ufshci_controller *ctrlr);
+
+void ufshci_reg_dump(struct ufshci_controller *ctrlr);
+
+/* Device */
+int ufshci_dev_init(struct ufshci_controller *ctrlr);
+int ufshci_dev_reset(struct ufshci_controller *ctrlr);
+int ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr);
+int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr);
+int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr);
+int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr);
+int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr);
+
+/* Controller Command */
+void ufshci_ctrl_cmd_send_nop(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg);
+void ufshci_ctrl_cmd_send_query_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, struct ufshci_query_param param);
+void ufshci_ctrl_cmd_send_scsi_command(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t *cmd_ptr, uint8_t cmd_len,
+ uint32_t data_len, uint8_t lun, bool is_write);
+
+/* Request Queue */
+bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue);
+int ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr);
+int ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr);
+void ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr);
+void ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr);
+int ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr);
+int ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr);
+void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
+ struct ufshci_hw_queue *hwq);
+int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
+ struct ufshci_request *req, bool is_admin);
+void ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr);
+
+/* Request Single Doorbell Queue */
+int ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue, uint32_t num_entries,
+ bool is_task_mgmt);
+void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
+struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue(
+ struct ufshci_req_queue *req_queue);
+int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
+int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
+ struct ufshci_tracker **tr);
+void ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+void ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue);
+int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr);
+
+/* UIC Command */
+int ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr);
+int ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr);
+int ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr);
+int ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute,
+ uint32_t *return_value);
+int ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute,
+ uint32_t value);
+int ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr,
+ uint16_t attribute, uint32_t *return_value);
+int ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr,
+ uint16_t attribute, uint32_t value);
+int ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr);
+
+/* SYSCTL */
+void ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr);
+
+int ufshci_attach(device_t dev);
+int ufshci_detach(device_t dev);
+
+/*
+ * Wait for a command to complete using the ufshci_completion_poll_cb. Used in
+ * limited contexts where the caller knows it's OK to block briefly while the
+ * command runs. The ISR will run the callback which will set status->done to
+ * true, usually within microseconds. If not, then after one second the
+ * timeout handler should reset the controller and abort all outstanding
+ * requests, including this polled one. If it still hasn't completed after
+ * ten seconds, then something is wrong with the driver and panicking is the
+ * only way to recover.
+ *
+ * Most commands using this interface aren't actual I/O to the drive's media,
+ * so they complete within a few microseconds. Adaptively spin for one tick
+ * to catch the
+ * vast majority of these without waiting for a tick plus scheduling delays.
+ * Since these are on startup, this drastically reduces startup time.
+ */
+static __inline void
+ufshci_completion_poll(struct ufshci_completion_poll_status *status)
+{
+ int timeout = ticks + 10 * hz;
+ sbintime_t delta_t = SBT_1US;
+
+ while (!atomic_load_acq_int(&status->done)) {
+ if (timeout - ticks < 0)
+ panic(
+ "UFSHCI polled command failed to complete within 10s.");
+ pause_sbt("ufshci_cpl", delta_t, 0, C_PREL(1));
+ delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ }
+}
+
+static __inline void
+ufshci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+ uint64_t *bus_addr = (uint64_t *)arg;
+
+ KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
+ if (error != 0)
+ printf("ufshci_single_map err %d\n", error);
+ *bus_addr = seg[0].ds_addr;
+}
+
+static __inline struct ufshci_request *
+_ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct ufshci_request *req;
+
+ KASSERT(how == M_WAITOK || how == M_NOWAIT,
+	    ("ufshci_allocate_request: invalid how %d", how));
+
+ req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO);
+ if (req != NULL) {
+ req->cb_fn = cb_fn;
+ req->cb_arg = cb_arg;
+ req->timeout = true;
+ }
+ return (req);
+}
+
+static __inline struct ufshci_request *
+ufshci_allocate_request_vaddr(void *payload, uint32_t payload_size,
+ const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct ufshci_request *req;
+
+ req = _ufshci_allocate_request(how, cb_fn, cb_arg);
+ if (req != NULL) {
+ if (payload_size) {
+ req->payload = memdesc_vaddr(payload, payload_size);
+ req->payload_valid = true;
+ }
+ }
+ return (req);
+}
+
+static __inline struct ufshci_request *
+ufshci_allocate_request_bio(struct bio *bio, const int how,
+ ufshci_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct ufshci_request *req;
+
+ req = _ufshci_allocate_request(how, cb_fn, cb_arg);
+ if (req != NULL) {
+ req->payload = memdesc_bio(bio);
+ req->payload_valid = true;
+ }
+ return (req);
+}
+
+#define ufshci_free_request(req) free(req, M_UFSHCI)
+
+void ufshci_ctrlr_shared_handler(void *arg);
+
+static devclass_t ufshci_devclass;
+
+#endif /* __UFSHCI_PRIVATE_H__ */
diff --git a/sys/dev/ufshci/ufshci_reg.h b/sys/dev/ufshci/ufshci_reg.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_reg.h
@@ -0,0 +1,469 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+#ifndef __UFSHCI_REG_H__
+#define __UFSHCI_REG_H__
+
+#include <sys/param.h>
+#include <sys/endian.h>
+
+/* UFSHCI 4.1, section 5.1 Register Map */
+struct ufshci_registers {
+ /* Host Capabilities (00h) */
+	uint32_t cap; /* Host Controller Capabilities */
+ uint32_t mcqcap; /* Multi-Circular Queue Capability Register */
+ uint32_t ver; /* UFS Version */
+ uint32_t ext_cap; /* Extended Controller Capabilities */
+ uint32_t hcpid; /* Product ID */
+ uint32_t hcmid; /* Manufacturer ID */
+ uint32_t ahit; /* Auto-Hibernate Idle Timer */
+ uint32_t reserved1;
+ /* Operation and Runtime (20h) */
+ uint32_t is; /* Interrupt Status */
+ uint32_t ie; /* Interrupt Enable */
+ uint32_t reserved2;
+ uint32_t hcsext; /* Host Controller Status Extended */
+ uint32_t hcs; /* Host Controller Status */
+ uint32_t hce; /* Host Controller Enable */
+ uint32_t uecpa; /* Host UIC Error Code PHY Adapter Layer */
+ uint32_t uecdl; /* Host UIC Error Code Data Link Layer */
+ uint32_t uecn; /* Host UIC Error Code Network Layer */
+ uint32_t uect; /* Host UIC Error Code Transport Layer */
+ uint32_t uecdme; /* Host UIC Error Code DME */
+ uint32_t utriacr; /* Interrupt Aggregation Control */
+ /* UTP Transfer (50h) */
+ uint32_t utrlba; /* UTRL Base Address */
+ uint32_t utrlbau; /* UTRL Base Address Upper 32-Bits */
+ uint32_t utrldbr; /* UTRL DoorBell Register */
+ uint32_t utrlclr; /* UTRL CLear Register */
+	uint32_t utrlrsr; /* UTRL Run-Stop Register */
+ uint32_t utrlcnr; /* UTRL Completion Notification */
+ uint64_t reserved3;
+	/* UTP Task Management (70h) */
+	uint32_t utmrlba; /* UTMRL Base Address */
+ uint32_t utmrlbau; /* UTMRL Base Address Upper 32-Bits */
+ uint32_t utmrldbr; /* UTMRL DoorBell Register */
+ uint32_t utmrlclr; /* UTMRL CLear Register */
+	uint32_t utmrlrsr; /* UTMRL Run-Stop Register */
+ uint8_t reserved4[12];
+ /* UIC Command (90h) */
+ uint32_t uiccmd; /* UIC Command Register */
+ uint32_t ucmdarg1; /* UIC Command Argument 1 */
+ uint32_t ucmdarg2; /* UIC Command Argument 2 */
+ uint32_t ucmdarg3; /* UIC Command Argument 3 */
+ uint8_t reserved5[16];
+ /* UMA (B0h) */
+ uint8_t reserved6[16]; /* Reserved for Unified Memory Extension */
+ /* Vendor Specific (C0h) */
+ uint8_t vendor[64]; /* Vendor Specific Registers */
+ /* Crypto (100h) */
+ uint32_t ccap; /* Crypto Capability */
+	uint32_t reserved7[127];
+ /* Config (300h) */
+ uint32_t config; /* Global Configuration */
+ uint8_t reserved9[124];
+ /* MCQ Configuration (380h) */
+ uint32_t mcqconfig; /* MCQ Config Register */
+ /* Event Specific Interrupt Lower Base Address */
+ uint32_t esilba;
+ /* Event Specific Interrupt Upper Base Address */
+ uint32_t esiuba;
+ /* TODO: Need to define SQ/CQ registers */
+};
+
+/* Register field definitions */
+#define UFSHCI__REG__SHIFT (0)
+#define UFSHCI__REG__MASK (0)
+
+/*
+ * UFSHCI 4.1, section 5.2.1, Offset 00h: CAP
+ * Controller Capabilities
+ */
+#define UFSHCI_CAP_REG_NUTRS_SHIFT (0)
+#define UFSHCI_CAP_REG_NUTRS_MASK (0xFF)
+#define UFSHCI_CAP_REG_NORTT_SHIFT (8)
+#define UFSHCI_CAP_REG_NORTT_MASK (0xFF)
+#define UFSHCI_CAP_REG_NUTMRS_SHIFT (16)
+#define UFSHCI_CAP_REG_NUTMRS_MASK (0x7)
+#define UFSHCI_CAP_REG_EHSLUTRDS_SHIFT (22)
+#define UFSHCI_CAP_REG_EHSLUTRDS_MASK (0x1)
+#define UFSHCI_CAP_REG_AUTOH8_SHIFT (23)
+#define UFSHCI_CAP_REG_AUTOH8_MASK (0x1)
+#define UFSHCI_CAP_REG_64AS_SHIFT (24)
+#define UFSHCI_CAP_REG_64AS_MASK (0x1)
+#define UFSHCI_CAP_REG_OODDS_SHIFT (25)
+#define UFSHCI_CAP_REG_OODDS_MASK (0x1)
+#define UFSHCI_CAP_REG_UICDMETMS_SHIFT (26)
+#define UFSHCI_CAP_REG_UICDMETMS_MASK (0x1)
+#define UFSHCI_CAP_REG_CS_SHIFT (28)
+#define UFSHCI_CAP_REG_CS_MASK (0x1)
+#define UFSHCI_CAP_REG_LSDBS_SHIFT (29)
+#define UFSHCI_CAP_REG_LSDBS_MASK (0x1)
+#define UFSHCI_CAP_REG_MCQS_SHIFT (30)
+#define UFSHCI_CAP_REG_MCQS_MASK (0x1)
+#define UFSHCI_CAP_REG_EIS_SHIFT (31)
+#define UFSHCI_CAP_REG_EIS_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.2.2, Offset 04h: MCQCAP
+ * Multi-Circular Queue Capability Register
+ */
+#define UFSHCI_MCQCAP_REG_MAXQ_SHIFT (0)
+#define UFSHCI_MCQCAP_REG_MAXQ_MASK (0xFF)
+#define UFSHCI_MCQCAP_REG_SP_SHIFT (8)
+#define UFSHCI_MCQCAP_REG_SP_MASK (0x1)
+#define UFSHCI_MCQCAP_REG_RRP_SHIFT (9)
+#define UFSHCI_MCQCAP_REG_RRP_MASK (0x1)
+#define UFSHCI_MCQCAP_REG_EIS_SHIFT (10)
+#define UFSHCI_MCQCAP_REG_EIS_MASK (0x1)
+#define UFSHCI_MCQCAP_REG_QCFGPTR_SHIFT (16)
+#define UFSHCI_MCQCAP_REG_QCFGPTR_MASK (0xFF)
+#define UFSHCI_MCQCAP_REG_MIAG_SHIFT (24)
+#define UFSHCI_MCQCAP_REG_MIAG_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.2.3, Offset 08h: VER
+ * UFS Version
+ */
+#define UFSHCI_VER_REG_VS_SHIFT (0)
+#define UFSHCI_VER_REG_VS_MASK (0xF)
+#define UFSHCI_VER_REG_MNR_SHIFT (4)
+#define UFSHCI_VER_REG_MNR_MASK (0xF)
+#define UFSHCI_VER_REG_MJR_SHIFT (8)
+#define UFSHCI_VER_REG_MJR_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.2.4, Offset 0Ch: EXT_CAP
+ * Extended Controller Capabilities
+ */
+#define UFSHCI_EXTCAP_REG_HOST_HINT_CACHE_SIZE_SHIFT (0)
+#define UFSHCI_EXTCAP_REG_HOST_HINT_CACHE_SIZE_MASK (0xFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.2.5, Offset 10h: HCPID
+ * Host Controller Identification Descriptor – Product ID
+ */
+#define UFSHCI_HCPID_REG_PID_SHIFT (0)
+#define UFSHCI_HCPID_REG_PID_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.2.6, Offset 14h: HCMID
+ * Host Controller Identification Descriptor – Manufacturer ID
+ */
+#define UFSHCI_HCMID_REG_MIC_SHIFT (0)
+#define UFSHCI_HCMID_REG_MIC_MASK (0xFFFF)
+#define UFSHCI_HCMID_REG_BI_SHIFT (8)
+#define UFSHCI_HCMID_REG_BI_MASK (0xFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.2.7, Offset 18h: AHIT
+ * Auto-Hibernate Idle Timer
+ */
+#define UFSHCI_AHIT_REG_AH8ITV_SHIFT (0)
+#define UFSHCI_AHIT_REG_AH8ITV_MASK (0x3FF)
+#define UFSHCI_AHIT_REG_TS_SHIFT (10)
+#define UFSHCI_AHIT_REG_TS_MASK (0x7)
+
+/*
+ * UFSHCI 4.1, section 5.3.1, Offset 20h: IS
+ * Interrupt Status
+ */
+#define UFSHCI_IS_REG_UTRCS_SHIFT (0)
+#define UFSHCI_IS_REG_UTRCS_MASK (0x1)
+#define UFSHCI_IS_REG_UDEPRI_SHIFT (1)
+#define UFSHCI_IS_REG_UDEPRI_MASK (0x1)
+#define UFSHCI_IS_REG_UE_SHIFT (2)
+#define UFSHCI_IS_REG_UE_MASK (0x1)
+#define UFSHCI_IS_REG_UTMS_SHIFT (3)
+#define UFSHCI_IS_REG_UTMS_MASK (0x1)
+#define UFSHCI_IS_REG_UPMS_SHIFT (4)
+#define UFSHCI_IS_REG_UPMS_MASK (0x1)
+#define UFSHCI_IS_REG_UHXS_SHIFT (5)
+#define UFSHCI_IS_REG_UHXS_MASK (0x1)
+#define UFSHCI_IS_REG_UHES_SHIFT (6)
+#define UFSHCI_IS_REG_UHES_MASK (0x1)
+#define UFSHCI_IS_REG_ULLS_SHIFT (7)
+#define UFSHCI_IS_REG_ULLS_MASK (0x1)
+#define UFSHCI_IS_REG_ULSS_SHIFT (8)
+#define UFSHCI_IS_REG_ULSS_MASK (0x1)
+#define UFSHCI_IS_REG_UTMRCS_SHIFT (9)
+#define UFSHCI_IS_REG_UTMRCS_MASK (0x1)
+#define UFSHCI_IS_REG_UCCS_SHIFT (10)
+#define UFSHCI_IS_REG_UCCS_MASK (0x1)
+#define UFSHCI_IS_REG_DFES_SHIFT (11)
+#define UFSHCI_IS_REG_DFES_MASK (0x1)
+#define UFSHCI_IS_REG_UTPES_SHIFT (12)
+#define UFSHCI_IS_REG_UTPES_MASK (0x1)
+#define UFSHCI_IS_REG_HCFES_SHIFT (16)
+#define UFSHCI_IS_REG_HCFES_MASK (0x1)
+#define UFSHCI_IS_REG_SBFES_SHIFT (17)
+#define UFSHCI_IS_REG_SBFES_MASK (0x1)
+#define UFSHCI_IS_REG_CEFES_SHIFT (18)
+#define UFSHCI_IS_REG_CEFES_MASK (0x1)
+#define UFSHCI_IS_REG_SQES_SHIFT (19)
+#define UFSHCI_IS_REG_SQES_MASK (0x1)
+#define UFSHCI_IS_REG_CQES_SHIFT (20)
+#define UFSHCI_IS_REG_CQES_MASK (0x1)
+#define UFSHCI_IS_REG_IAGES_SHIFT (21)
+#define UFSHCI_IS_REG_IAGES_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.2, Offset 24h: IE
+ * Interrupt Enable
+ */
+#define UFSHCI_IE_REG_UTRCE_SHIFT (0)
+#define UFSHCI_IE_REG_UTRCE_MASK (0x1)
+#define UFSHCI_IE_REG_UDEPRIE_SHIFT (1)
+#define UFSHCI_IE_REG_UDEPRIE_MASK (0x1)
+#define UFSHCI_IE_REG_UEE_SHIFT (2)
+#define UFSHCI_IE_REG_UEE_MASK (0x1)
+#define UFSHCI_IE_REG_UTMSE_SHIFT (3)
+#define UFSHCI_IE_REG_UTMSE_MASK (0x1)
+#define UFSHCI_IE_REG_UPMSE_SHIFT (4)
+#define UFSHCI_IE_REG_UPMSE_MASK (0x1)
+#define UFSHCI_IE_REG_UHXSE_SHIFT (5)
+#define UFSHCI_IE_REG_UHXSE_MASK (0x1)
+#define UFSHCI_IE_REG_UHESE_SHIFT (6)
+#define UFSHCI_IE_REG_UHESE_MASK (0x1)
+#define UFSHCI_IE_REG_ULLSE_SHIFT (7)
+#define UFSHCI_IE_REG_ULLSE_MASK (0x1)
+#define UFSHCI_IE_REG_ULSSE_SHIFT (8)
+#define UFSHCI_IE_REG_ULSSE_MASK (0x1)
+#define UFSHCI_IE_REG_UTMRCE_SHIFT (9)
+#define UFSHCI_IE_REG_UTMRCE_MASK (0x1)
+#define UFSHCI_IE_REG_UCCE_SHIFT (10)
+#define UFSHCI_IE_REG_UCCE_MASK (0x1)
+#define UFSHCI_IE_REG_DFEE_SHIFT (11)
+#define UFSHCI_IE_REG_DFEE_MASK (0x1)
+#define UFSHCI_IE_REG_UTPEE_SHIFT (12)
+#define UFSHCI_IE_REG_UTPEE_MASK (0x1)
+#define UFSHCI_IE_REG_HCFEE_SHIFT (16)
+#define UFSHCI_IE_REG_HCFEE_MASK (0x1)
+#define UFSHCI_IE_REG_SBFEE_SHIFT (17)
+#define UFSHCI_IE_REG_SBFEE_MASK (0x1)
+#define UFSHCI_IE_REG_CEFEE_SHIFT (18)
+#define UFSHCI_IE_REG_CEFEE_MASK (0x1)
+#define UFSHCI_IE_REG_SQEE_SHIFT (19)
+#define UFSHCI_IE_REG_SQEE_MASK (0x1)
+#define UFSHCI_IE_REG_CQEE_SHIFT (20)
+#define UFSHCI_IE_REG_CQEE_MASK (0x1)
+#define UFSHCI_IE_REG_IAGEE_SHIFT (21)
+#define UFSHCI_IE_REG_IAGEE_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.3, Offset 2Ch: HCSEXT
+ * Host Controller Status Extended
+ */
+#define UFSHCI_HCSEXT_IIDUTPE_SHIFT (0)
+#define UFSHCI_HCSEXT_IIDUTPE_MASK (0xF)
+#define UFSHCI_HCSEXT_EXT_IIDUTPE_SHIFT (4)
+#define UFSHCI_HCSEXT_EXT_IIDUTPE_MASK (0xF)
+
+/*
+ * UFSHCI 4.1, section 5.3.4, Offset 30h: HCS
+ * Host Controller Status
+ */
+#define UFSHCI_HCS_REG_DP_SHIFT (0)
+#define UFSHCI_HCS_REG_DP_MASK (0x1)
+#define UFSHCI_HCS_REG_UTRLRDY_SHIFT (1)
+#define UFSHCI_HCS_REG_UTRLRDY_MASK (0x1)
+#define UFSHCI_HCS_REG_UTMRLRDY_SHIFT (2)
+#define UFSHCI_HCS_REG_UTMRLRDY_MASK (0x1)
+#define UFSHCI_HCS_REG_UCRDY_SHIFT (3)
+#define UFSHCI_HCS_REG_UCRDY_MASK (0x1)
+#define UFSHCI_HCS_REG_UPMCRS_SHIFT (7)
+#define UFSHCI_HCS_REG_UPMCRS_MASK (0x7)
+#define UFSHCI_HCS_REG_UTPEC_SHIFT (12)
+#define UFSHCI_HCS_REG_UTPEC_MASK (0xF)
+#define UFSHCI_HCS_REG_TTAGUTPE_SHIFT (16)
+#define UFSHCI_HCS_REG_TTAGUTPE_MASK (0xFF)
+#define UFSHCI_HCS_REG_TLUNUTPE_SHIFT (24)
+#define UFSHCI_HCS_REG_TLUNUTPE_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.3.5, Offset 34h: HCE
+ * Host Controller Enable
+ */
+#define UFSHCI_HCE_REG_HCE_SHIFT (0)
+#define UFSHCI_HCE_REG_HCE_MASK (0x1)
+#define UFSHCI_HCE_REG_CGE_SHIFT (1)
+#define UFSHCI_HCE_REG_CGE_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.6, Offset 38h: UECPA
+ * Host UIC Error Code PHY Adapter Layer
+ */
+#define UFSHCI_UECPA_REG_EC_SHIFT (0)
+#define UFSHCI_UECPA_REG_EC_MASK (0xF)
+#define UFSHCI_UECPA_REG_ERR_SHIFT (31)
+#define UFSHCI_UECPA_REG_ERR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.7, Offset 3Ch: UECDL
+ * Host UIC Error Code Data Link Layer
+ */
+#define UFSHCI_UECDL_REG_EC_SHIFT (0)
+#define UFSHCI_UECDL_REG_EC_MASK (0xFFFF)
+#define UFSHCI_UECDL_REG_ERR_SHIFT (31)
+#define UFSHCI_UECDL_REG_ERR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.8, Offset 40h: UECN
+ * Host UIC Error Code Network Layer
+ */
+#define UFSHCI_UECN_REG_EC_SHIFT (0)
+#define UFSHCI_UECN_REG_EC_MASK (0x7)
+#define UFSHCI_UECN_REG_ERR_SHIFT (31)
+#define UFSHCI_UECN_REG_ERR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.9, Offset 44h: UECT
+ * Host UIC Error Code Transport Layer
+ */
+#define UFSHCI_UECT_REG_EC_SHIFT (0)
+#define UFSHCI_UECT_REG_EC_MASK (0x7F)
+#define UFSHCI_UECT_REG_ERR_SHIFT (31)
+#define UFSHCI_UECT_REG_ERR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.10, Offset 48h: UECDME
+ * Host UIC Error Code
+ */
+#define UFSHCI_UECDME_REG_EC_SHIFT (0)
+#define UFSHCI_UECDME_REG_EC_MASK (0xF)
+#define UFSHCI_UECDME_REG_ERR_SHIFT (31)
+#define UFSHCI_UECDME_REG_ERR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.4.1, Offset 50h: UTRLBA
+ * UTP Transfer Request List Base Address
+ */
+#define UFSHCI_UTRLBA_REG_UTRLBA_SHIFT (0)
+#define UFSHCI_UTRLBA_REG_UTRLBA_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.4.2, Offset 54h: UTRLBAU
+ * UTP Transfer Request List Base Address Upper 32-bits
+ */
+#define UFSHCI_UTRLBAU_REG_UTRLBAU_SHIFT (0)
+#define UFSHCI_UTRLBAU_REG_UTRLBAU_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.4.3, Offset 58h: UTRLDBR
+ * UTP Transfer Request List Door Bell Register
+ */
+#define UFSHCI_UTRLDBR_REG_UTRLDBR_SHIFT (0)
+#define UFSHCI_UTRLDBR_REG_UTRLDBR_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.4.4, Offset 5Ch: UTRLCLR
+ * UTP Transfer Request List Clear Register
+ */
+#define UFSHCI_UTRLCLR_REG_UTRLCLR_SHIFT (0)
+#define UFSHCI_UTRLCLR_REG_UTRLCLR_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.4.5, Offset 60h: UTRLRSR
+ * UTP Transfer Request List Run Stop Register
+ */
+#define UFSHCI_UTRLRSR_REG_UTRLRSR_SHIFT (0)
+#define UFSHCI_UTRLRSR_REG_UTRLRSR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.4.6, Offset 64h: UTRLCNR
+ * UTP Transfer Request List Completion Notification Register
+ */
+#define UFSHCI_UTRLCNR_REG_UTRLCNR_SHIFT (0)
+#define UFSHCI_UTRLCNR_REG_UTRLCNR_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.5.1, Offset 70h: UTMRLBA
+ * UTP Task Management Request List Base Address
+ */
+#define UFSHCI_UTMRLBA_REG_UTMRLBA_SHIFT (0)
+#define UFSHCI_UTMRLBA_REG_UTMRLBA_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.5.2, Offset 74h: UTMRLBAU
+ * UTP Task Management Request List Base Address Upper 32-bits
+ */
+#define UFSHCI_UTMRLBAU_REG_UTMRLBAU_SHIFT (0)
+#define UFSHCI_UTMRLBAU_REG_UTMRLBAU_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.5.3, Offset 78h: UTMRLDBR
+ * UTP Task Management Request List Door Bell Register
+ */
+#define UFSHCI_UTMRLDBR_REG_UTMRLDBR_SHIFT (0)
+#define UFSHCI_UTMRLDBR_REG_UTMRLDBR_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.5.4, Offset 7Ch: UTMRLCLR
+ * UTP Task Management Request List CLear Register
+ */
+#define UFSHCI_UTMRLCLR_REG_UTMRLCLR_SHIFT (0)
+#define UFSHCI_UTMRLCLR_REG_UTMRLCLR_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.5.5, Offset 80h: UTMRLRSR
+ * UTP Task Management Request List Run Stop Register
+ */
+#define UFSHCI_UTMRLRSR_REG_UTMRLRSR_SHIFT (0)
+#define UFSHCI_UTMRLRSR_REG_UTMRLRSR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.6.1
+ * Offset 90h: UICCMD – UIC Command
+ */
+#define UFSHCI_UICCMD_REG_CMDOP_SHIFT (0)
+#define UFSHCI_UICCMD_REG_CMDOP_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.6.2
+ * Offset 94h: UICCMDARG1 – UIC Command Argument 1
+ */
+#define UFSHCI_UICCMDARG1_REG_ARG1_SHIFT (0)
+#define UFSHCI_UICCMDARG1_REG_ARG1_MASK (0xFFFFFFFF)
+#define UFSHCI_UICCMDARG1_REG_GEN_SELECTOR_INDEX_SHIFT (0)
+#define UFSHCI_UICCMDARG1_REG_GEN_SELECTOR_INDEX_MASK (0xFFFF)
+#define UFSHCI_UICCMDARG1_REG_MIB_ATTR_SHIFT (16)
+#define UFSHCI_UICCMDARG1_REG_MIB_ATTR_MASK (0xFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.6.3
+ * Offset 98h: UICCMDARG2 – UIC Command Argument 2
+ */
+#define UFSHCI_UICCMDARG2_REG_ARG2_SHIFT (0)
+#define UFSHCI_UICCMDARG2_REG_ARG2_MASK (0xFFFFFFFF)
+#define UFSHCI_UICCMDARG2_REG_ERROR_CODE_SHIFT (0)
+#define UFSHCI_UICCMDARG2_REG_ERROR_CODE_MASK (0xFF)
+#define UFSHCI_UICCMDARG2_REG_ATTR_SET_TYPE_SHIFT (16)
+#define UFSHCI_UICCMDARG2_REG_ATTR_SET_TYPE_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.6.4
+ * Offset 9Ch: UICCMDARG3 – UIC Command Argument 3
+ */
+#define UFSHCI_UICCMDARG3_REG_ARG3_SHIFT (0)
+#define UFSHCI_UICCMDARG3_REG_ARG3_MASK (0xFFFFFFFF)
+
+/* Helper macro to combine *_MASK and *_SHIFT defines */
+#define UFSHCIM(name) (name##_MASK << name##_SHIFT)
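+/* e.g. UFSHCIM(UFSHCI_IS_REG_UPMS) == (0x1 << 4) == 0x10 */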
+
+/* Helper macro to extract value from x */
+#define UFSHCIV(name, x) (((x) >> name##_SHIFT) & name##_MASK)
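+/* e.g. UFSHCIV(UFSHCI_VER_REG_MJR, 0x0410) == 4 */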
+
+/* Helper macro to construct a field value */
+#define UFSHCIF(name, x) (((x) & name##_MASK) << name##_SHIFT)
+
+#define UFSHCI_DUMP_REG(ctrlr, member) \
+ do { \
+ uint32_t _val = ufshci_mmio_read_4(ctrlr, member); \
+ ufshci_printf(ctrlr, " %-15s (0x%03lx) : 0x%08x\n", #member, \
+ ufshci_mmio_offsetof(member), _val); \
+ } while (0)
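+
+/* e.g. UFSHCI_DUMP_REG(ctrlr, hcs) prints "hcs (0x030) : 0x<value>". */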
+
+#endif /* __UFSHCI_REG_H__ */
diff --git a/sys/dev/ufshci/ufshci_req_queue.c b/sys/dev/ufshci/ufshci_req_queue.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_req_queue.c
@@ -0,0 +1,491 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/domainset.h>
+#include <sys/module.h>
+
+#include <cam/scsi/scsi_all.h>
+
+#include "sys/kassert.h"
+#include "ufshci_private.h"
+
+static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
+ struct ufshci_tracker *tr, enum ufshci_data_direction data_direction);
+
+static const struct ufshci_qops sdb_qops = {
+ .construct = ufshci_req_sdb_construct,
+ .destroy = ufshci_req_sdb_destroy,
+ .get_hw_queue = ufshci_req_sdb_get_hw_queue,
+ .enable = ufshci_req_sdb_enable,
+ .reserve_slot = ufshci_req_sdb_reserve_slot,
+ .reserve_admin_slot = ufshci_req_sdb_reserve_slot,
+ .ring_doorbell = ufshci_req_sdb_ring_doorbell,
+ .clear_cpl_ntf = ufshci_req_sdb_clear_cpl_ntf,
+ .process_cpl = ufshci_req_sdb_process_cpl,
+ .get_inflight_io = ufshci_req_sdb_get_inflight_io,
+};
+
+int
+ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_req_queue *req_queue;
+ int error;
+
+ /*
+ * UTP Task Management Request only supports Legacy Single Doorbell
+ * Queue.
+ */
+ req_queue = &ctrlr->task_mgmt_req_queue;
+ req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
+ req_queue->qops = sdb_qops;
+
+ error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTRM_ENTRIES,
+ /*is_task_mgmt*/ true);
+
+ return (error);
+}
+
+void
+ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr)
+{
+ ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr,
+ &ctrlr->task_mgmt_req_queue);
+}
+
+int
+ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr)
+{
+ return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr,
+ &ctrlr->task_mgmt_req_queue));
+}
+
+int
+ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_req_queue *req_queue;
+ int error;
+
+ /*
+ * Currently, it does not support MCQ mode, so it should be set to SDB
+ * mode by default.
+ * TODO: Determine queue mode by checking Capability Registers
+ */
+ req_queue = &ctrlr->transfer_req_queue;
+ req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
+ req_queue->qops = sdb_qops;
+
+ error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTR_ENTRIES,
+ /*is_task_mgmt*/ false);
+
+ return (error);
+}
+
+void
+ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr)
+{
+ ctrlr->transfer_req_queue.qops.destroy(ctrlr,
+ &ctrlr->transfer_req_queue);
+}
+
+int
+ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr)
+{
+ return (ctrlr->transfer_req_queue.qops.enable(ctrlr,
+ &ctrlr->transfer_req_queue));
+}
+
+static bool
+ufshci_req_queue_response_is_error(struct ufshci_req_queue *req_queue,
+ uint8_t ocs, union ufshci_reponse_upiu *response)
+{
+ bool is_error = false;
+
+ /* Check request descriptor */
+ if (ocs != UFSHCI_DESC_SUCCESS) {
+ ufshci_printf(req_queue->ctrlr, "Invalid OCS = 0x%x\n", ocs);
+ is_error = true;
+ }
+
+ /* Check response UPIU header */
+ if (response->header.response != UFSHCI_RESPONSE_CODE_TARGET_SUCCESS) {
+ ufshci_printf(req_queue->ctrlr,
+ "Invalid response code = 0x%x\n",
+ response->header.response);
+ is_error = true;
+ }
+
+ return (is_error);
+}
+
+static void
+ufshci_req_queue_manual_complete_tracker(struct ufshci_tracker *tr, uint8_t ocs,
+ uint8_t rc)
+{
+ struct ufshci_utp_xfer_req_desc *desc;
+ struct ufshci_upiu_header *resp_header;
+
+ mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);
+
+ resp_header = (struct ufshci_upiu_header *)tr->ucd->response_upiu;
+ resp_header->response = rc;
+
+ desc = &tr->hwq->utrd[tr->slot_num];
+ desc->overall_command_status = ocs;
+
+ ufshci_req_queue_complete_tracker(tr);
+}
+
+static void
+ufshci_req_queue_manual_complete_request(struct ufshci_req_queue *req_queue,
+ struct ufshci_request *req, uint8_t ocs, uint8_t rc)
+{
+ struct ufshci_completion cpl;
+ bool error;
+
+ memset(&cpl, 0, sizeof(cpl));
+ cpl.response_upiu.header.response = rc;
+ error = ufshci_req_queue_response_is_error(req_queue, ocs,
+ &cpl.response_upiu);
+
+ if (error) {
+ ufshci_printf(req_queue->ctrlr,
+		    "Manual complete request error: 0x%x\n", error);
+ }
+
+ if (req->cb_fn)
+ req->cb_fn(req->cb_arg, &cpl, error);
+
+ ufshci_free_request(req);
+}
+
+void
+ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
+ struct ufshci_hw_queue *hwq)
+{
+ struct ufshci_req_queue *req_queue;
+ struct ufshci_tracker *tr;
+ struct ufshci_request *req;
+ int i;
+
+ if (!mtx_initialized(&hwq->qlock))
+ return;
+
+ mtx_lock(&hwq->qlock);
+
+ req_queue = &ctrlr->transfer_req_queue;
+
+ for (i = 0; i < req_queue->num_entries; i++) {
+ tr = hwq->act_tr[i];
+ req = tr->req;
+
+ if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED) {
+ mtx_unlock(&hwq->qlock);
+ ufshci_req_queue_manual_complete_request(req_queue, req,
+ UFSHCI_DESC_ABORTED,
+ UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
+ mtx_lock(&hwq->qlock);
+ } else if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED) {
+ /*
+ * Do not remove the tracker. The abort_tracker path
+ * will do that for us.
+ */
+ mtx_unlock(&hwq->qlock);
+ ufshci_req_queue_manual_complete_tracker(tr,
+ UFSHCI_DESC_ABORTED,
+ UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
+ mtx_lock(&hwq->qlock);
+ }
+ }
+
+ mtx_unlock(&hwq->qlock);
+}
+
+void
+ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
+{
+ struct ufshci_req_queue *req_queue = tr->req_queue;
+ struct ufshci_request *req = tr->req;
+ struct ufshci_completion cpl;
+ struct ufshci_utp_xfer_req_desc *desc;
+ uint8_t ocs;
+ bool retry, error, retriable;
+
+ mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);
+
+ bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ cpl.size = tr->response_size;
+ memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu, cpl.size);
+
+ desc = &tr->hwq->utrd[tr->slot_num];
+ ocs = desc->overall_command_status;
+
+ error = ufshci_req_queue_response_is_error(req_queue, ocs,
+ &cpl.response_upiu);
+
+ /* TODO: Implement retry */
+	/* retriable = ufshci_completion_is_retry(cpl); */
+ retriable = false;
+ retry = error && retriable &&
+ req->retries < req_queue->ctrlr->retry_count;
+ if (retry)
+ tr->hwq->num_retries++;
+ if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
+ tr->hwq->num_failures++;
+
+ KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
+ KASSERT(cpl.response_upiu.header.task_tag ==
+ req->request_upiu.header.task_tag,
+ ("response task_tag does not match request task_tag\n"));
+
+ if (!retry) {
+ if (req->payload_valid) {
+ bus_dmamap_sync(req_queue->dma_tag_payload,
+ tr->payload_dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ }
+ /* Copy response from the command descriptor */
+ if (req->cb_fn)
+ req->cb_fn(req->cb_arg, &cpl, error);
+ }
+
+ mtx_lock(&tr->hwq->qlock);
+
+ /* Clear the UTRL Completion Notification register */
+ req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);
+
+ if (retry) {
+ req->retries++;
+ ufshci_req_queue_submit_tracker(req_queue, tr,
+ req->data_direction);
+ } else {
+ if (req->payload_valid) {
+ bus_dmamap_unload(req_queue->dma_tag_payload,
+ tr->payload_dma_map);
+ }
+
+ /* Clear tracker */
+ ufshci_free_request(req);
+ tr->req = NULL;
+ tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+ }
+
+ mtx_unlock(&tr->hwq->qlock);
+}
+
+bool
+ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
+{
+ return (req_queue->qops.process_cpl(req_queue));
+}
+
+static void
+ufshci_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+ struct ufshci_tracker *tr = arg;
+ struct ufshci_prdt_entry *prdt_entry;
+ int i;
+
+ /*
+ * If the mapping operation failed, return immediately. The caller
+ * is responsible for detecting the error status and failing the
+ * tracker manually.
+ */
+ if (error != 0) {
+ ufshci_printf(tr->req_queue->ctrlr,
+ "Failed to map payload %d\n", error);
+ return;
+ }
+
+ prdt_entry = (struct ufshci_prdt_entry *)tr->ucd->prd_table;
+
+ tr->prdt_entry_cnt = nseg;
+
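+	/*
+	 * Fill one PRDT entry per DMA segment. data_byte_count is
+	 * zero-based, so store the segment length minus one.
+	 */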
+ for (i = 0; i < nseg; i++) {
+ prdt_entry->data_base_address = htole64(seg[i].ds_addr) &
+ 0xffffffff;
+ prdt_entry->data_base_address_upper = htole64(seg[i].ds_addr) >>
+ 32;
+ prdt_entry->data_byte_count = htole32(seg[i].ds_len - 1);
+
+ ++prdt_entry;
+ }
+
+ bus_dmamap_sync(tr->req_queue->dma_tag_payload, tr->payload_dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+}
+
+static void
+ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
+{
+ struct ufshci_request *req = tr->req;
+ struct ufshci_utp_cmd_desc *cmd_desc = tr->ucd;
+ int error;
+
+ tr->prdt_off = UFSHCI_UTP_XFER_REQ_SIZE + UFSHCI_UTP_XFER_RESP_SIZE;
+
+ memset(cmd_desc->prd_table, 0, sizeof(cmd_desc->prd_table));
+
+	/* Fill PRDT entries with the payload */
+ error = bus_dmamap_load_mem(tr->req_queue->dma_tag_payload,
+ tr->payload_dma_map, &req->payload, ufshci_payload_map, tr,
+ BUS_DMA_NOWAIT);
+ if (error != 0) {
+ /*
+ * The dmamap operation failed, so we manually fail the
+ * tracker here with UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES.
+ *
+ * ufshci_req_queue_manual_complete_tracker must not be called
+ * with the req_queue lock held.
+ */
+ ufshci_printf(tr->req_queue->ctrlr,
+ "bus_dmamap_load_mem returned with error:0x%x!\n", error);
+
+ mtx_unlock(&tr->hwq->qlock);
+ ufshci_req_queue_manual_complete_tracker(tr,
+ UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES,
+ UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
+ mtx_lock(&tr->hwq->qlock);
+ }
+}
+
+static void
+ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc *desc,
+ uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
+ const uint16_t response_len, const uint16_t prdt_off,
+ const uint16_t prdt_entry_cnt)
+{
+ uint8_t command_type;
+ /* Value to convert bytes to dwords */
+ const uint16_t dword_size = 4;
+
+ /*
+ * Set command type to UFS storage.
+ * The UFS 4.1 spec only defines 'UFS Storage' as a command type.
+ */
+ command_type = UFSHCI_COMMAND_TYPE_UFS_STORAGE;
+
+ memset(desc, 0, sizeof(struct ufshci_utp_xfer_req_desc));
+ desc->command_type = command_type;
+ desc->data_direction = data_direction;
+ desc->interrupt = true;
+ /* Set the initial value to Invalid. */
+ desc->overall_command_status = UFSHCI_OCS_INVALID;
+ desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
+ 0xffffffff);
+ desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
+ 32);
+
+ desc->response_upiu_offset = response_off / dword_size;
+ desc->response_upiu_length = response_len / dword_size;
+ desc->prdt_offset = prdt_off / dword_size;
+ desc->prdt_length = prdt_entry_cnt;
+}
+
+/*
+ * Submit the tracker to the hardware.
+ */
+static void
+ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
+ struct ufshci_tracker *tr, enum ufshci_data_direction data_direction)
+{
+ struct ufshci_controller *ctrlr = req_queue->ctrlr;
+ struct ufshci_request *req = tr->req;
+ uint64_t ucd_paddr;
+ uint16_t request_len, response_off, response_len;
+ uint8_t slot_num = tr->slot_num;
+
+ mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);
+
+ /* TODO: Check timeout */
+
+ request_len = req->request_size;
+ response_off = UFSHCI_UTP_XFER_REQ_SIZE;
+ response_len = req->response_size;
+
+ /* Prepare UTP Command Descriptor */
+ memcpy(tr->ucd, &req->request_upiu, request_len);
+ memset((uint8_t *)tr->ucd + response_off, 0, response_len);
+
+ /* Prepare PRDT */
+ if (req->payload_valid)
+ ufshci_req_queue_prepare_prdt(tr);
+
+ bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /* Prepare UTP Transfer Request Descriptor. */
+ ucd_paddr = tr->ucd_bus_addr;
+ ufshci_req_queue_fill_descriptor(&tr->hwq->utrd[slot_num],
+ data_direction, ucd_paddr, response_off, response_len, tr->prdt_off,
+ tr->prdt_entry_cnt);
+
+ bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ tr->slot_state = UFSHCI_SLOT_STATE_SCHEDULED;
+
+ /* Ring the doorbell */
+ req_queue->qops.ring_doorbell(ctrlr, tr);
+}
+
+static int
+_ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
+ struct ufshci_request *req)
+{
+ struct ufshci_tracker *tr = NULL;
+ int error;
+
+ mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);
+
+ error = req_queue->qops.reserve_slot(req_queue, &tr);
+ if (error != 0) {
+		ufshci_printf(req_queue->ctrlr, "Failed to get tracker\n");
+ return (error);
+ }
+ KASSERT(tr, ("There is no tracker allocated."));
+
+ if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED ||
+ tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED)
+ return (EBUSY);
+
+ /* Set the task_tag value to slot_num for traceability. */
+ req->request_upiu.header.task_tag = tr->slot_num;
+
+ tr->slot_state = UFSHCI_SLOT_STATE_RESERVED;
+ tr->response_size = req->response_size;
+ tr->deadline = SBT_MAX;
+ tr->req = req;
+
+ ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);
+
+ return (0);
+}
+
+int
+ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
+ struct ufshci_request *req, bool is_admin)
+{
+ struct ufshci_hw_queue *hwq;
+	int error;
+
+ /* TODO: MCQs should use a separate Admin queue. */
+
+ hwq = req_queue->qops.get_hw_queue(req_queue);
+ KASSERT(hwq, ("There is no HW queue allocated."));
+
+ mtx_lock(&hwq->qlock);
+ error = _ufshci_req_queue_submit_request(req_queue, req);
+ mtx_unlock(&hwq->qlock);
+
+ return (error);
+}
diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_req_sdb.c
@@ -0,0 +1,433 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/domainset.h>
+#include <sys/kassert.h>
+#include <sys/module.h>
+
+#include "ufshci_private.h"
+#include "ufshci_reg.h"
+
+static void
+ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr;
+ int i;
+
+ /* act_tr may be NULL if construction failed early. */
+ if (hwq->act_tr != NULL) {
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ tr = hwq->act_tr[i];
+ bus_dmamap_destroy(req_queue->dma_tag_payload,
+ tr->payload_dma_map);
+ free(tr, M_UFSHCI);
+ }
+ free(hwq->act_tr, M_UFSHCI);
+ hwq->act_tr = NULL;
+ }
+
+ if (req_queue->ucd) {
+ bus_dmamap_unload(req_queue->dma_tag_ucd,
+ req_queue->ucdmem_map);
+ bus_dmamem_free(req_queue->dma_tag_ucd, req_queue->ucd,
+ req_queue->ucdmem_map);
+ req_queue->ucd = NULL;
+ }
+
+ if (req_queue->dma_tag_ucd) {
+ bus_dma_tag_destroy(req_queue->dma_tag_ucd);
+ req_queue->dma_tag_ucd = NULL;
+ }
+}
+
+static int
+ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
+ uint32_t num_entries, struct ufshci_controller *ctrlr)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr;
+ size_t ucd_allocsz, payload_allocsz;
+ uint64_t ucdmem_phys;
+ uint8_t *ucdmem;
+ int i, error;
+
+ /*
+ * Each component must be page aligned, and individual PRP lists
+ * cannot cross a page boundary.
+ */
+ ucd_allocsz = num_entries * sizeof(struct ufshci_utp_cmd_desc);
+ ucd_allocsz = roundup2(ucd_allocsz, ctrlr->page_size);
+ payload_allocsz = num_entries * ctrlr->max_xfer_size;
+
+ /*
+ * Allocate physical memory for UTP Command Descriptor (UCD)
+ * Note: UFSHCI UCD format is restricted to 128-byte alignment.
+ */
+ error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 128,
+ ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+ ucd_allocsz, howmany(ucd_allocsz, ctrlr->page_size),
+ ctrlr->page_size, 0, NULL, NULL, &req_queue->dma_tag_ucd);
+ if (error != 0) {
+ ufshci_printf(ctrlr, "request cmd desc tag create failed %d\n",
+ error);
+ goto out;
+ }
+
+ if (bus_dmamem_alloc(req_queue->dma_tag_ucd, (void **)&ucdmem,
+ BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &req_queue->ucdmem_map)) {
+ ufshci_printf(ctrlr, "failed to allocate cmd desc memory\n");
+ goto out;
+ }
+
+ if (bus_dmamap_load(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ ucdmem, ucd_allocsz, ufshci_single_map, &ucdmem_phys, 0) != 0) {
+ ufshci_printf(ctrlr, "failed to load cmd desc memory\n");
+ /* req_queue->ucd is not set yet; free the raw allocation. */
+ bus_dmamem_free(req_queue->dma_tag_ucd, ucdmem,
+ req_queue->ucdmem_map);
+ goto out;
+ }
+
+ req_queue->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
+ req_queue->ucd_addr = ucdmem_phys;
+
+ /*
+ * Allocate physical memory for PRDT
+ * Note: UFSHCI PRDT format is restricted to 8-byte alignment.
+ */
+ error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 8,
+ ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+ payload_allocsz, howmany(payload_allocsz, ctrlr->page_size) + 1,
+ ctrlr->page_size, 0, NULL, NULL, &req_queue->dma_tag_payload);
+ if (error != 0) {
+ ufshci_printf(ctrlr, "request prdt tag create failed %d\n",
+ error);
+ goto out;
+ }
+
+ hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
+ req_queue->num_entries,
+ M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
+ DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+
+ bus_dmamap_create(req_queue->dma_tag_payload, 0,
+ &tr->payload_dma_map);
+
+ tr->req_queue = req_queue;
+ tr->slot_num = i;
+ tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+
+ tr->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
+ tr->ucd_bus_addr = ucdmem_phys;
+
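+ /*
+ * Each tracker owns one fixed-size slice of the UCD memory; this
+ * assumes sizeof(struct ufshci_utp_cmd_desc) is a multiple of the
+ * 128-byte alignment established by the DMA tag above.
+ */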
+ ucdmem += sizeof(struct ufshci_utp_cmd_desc);
+ ucdmem_phys += sizeof(struct ufshci_utp_cmd_desc);
+
+ hwq->act_tr[i] = tr;
+ }
+
+ return (0);
+out:
+ ufshci_req_sdb_cmd_desc_destroy(req_queue);
+ return (ENOMEM);
+}
+
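+/*
+ * Completion detection in Single Doorbell mode uses two registers: the
+ * controller clears a slot's UTRLDBR bit once its descriptor is
+ * finished, and raises the matching UTRLCNR bit, which the driver later
+ * acknowledges with a write-1-to-clear.
+ */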
+static bool
+ufshci_req_sdb_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot)
+{
+ uint32_t utrldbr;
+
+ utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
+ return (!(utrldbr & (1 << slot)));
+}
+
+static bool
+ufshci_req_sdb_is_completed(struct ufshci_controller *ctrlr, uint8_t slot)
+{
+ uint32_t utrlcnr;
+
+ utrlcnr = ufshci_mmio_read_4(ctrlr, utrlcnr);
+ return (utrlcnr & (1 << slot));
+}
+
+int
+ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt)
+{
+ struct ufshci_hw_queue *hwq;
+ size_t allocsz;
+ uint64_t queuemem_phys;
+ uint8_t *queuemem;
+ int error;
+
+ req_queue->ctrlr = ctrlr;
+ req_queue->is_task_mgmt = is_task_mgmt;
+ req_queue->num_entries = num_entries;
+ /*
+ * In Single Doorbell mode, the number of queue entries and the number
+ * of trackers are the same.
+ */
+ req_queue->num_trackers = num_entries;
+
+ /* Single Doorbell mode uses only one queue. (UFSHCI_SDB_Q = 0) */
+ req_queue->hwq = malloc(sizeof(struct ufshci_hw_queue), M_UFSHCI,
+ M_ZERO | M_NOWAIT);
+ if (req_queue->hwq == NULL)
+ return (ENOMEM);
+ hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+
+ mtx_init(&hwq->qlock, "ufshci req_queue lock", NULL, MTX_DEF);
+
+ /*
+ * Allocate physical memory for request queue (UTP Transfer Request
+ * Descriptor (UTRD) or UTP Task Management Request Descriptor (UTMRD))
+ * Note: UTRD/UTMRD format is restricted to 1024-byte alignment.
+ */
+ allocsz = num_entries * sizeof(struct ufshci_utp_xfer_req_desc);
+ error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 1024,
+ ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+ allocsz, 1, allocsz, 0, NULL, NULL, &hwq->dma_tag_queue);
+ if (error != 0) {
+ ufshci_printf(ctrlr, "request queue tag create failed %d\n",
+ error);
+ goto out;
+ }
+
+ if (bus_dmamem_alloc(hwq->dma_tag_queue, (void **)&queuemem,
+ BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &hwq->queuemem_map)) {
+ ufshci_printf(ctrlr,
+ "failed to allocate request queue memory\n");
+ goto out;
+ }
+
+ if (bus_dmamap_load(hwq->dma_tag_queue, hwq->queuemem_map, queuemem,
+ allocsz, ufshci_single_map, &queuemem_phys, 0) != 0) {
+ ufshci_printf(ctrlr, "failed to load request queue memory\n");
+ /* hwq->utrd is not set yet; free the raw allocation. */
+ bus_dmamem_free(hwq->dma_tag_queue, queuemem,
+ hwq->queuemem_map);
+ goto out;
+ }
+
+ hwq->num_cmds = 0;
+ hwq->num_intr_handler_calls = 0;
+ hwq->num_retries = 0;
+ hwq->num_failures = 0;
+ hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;
+ hwq->req_queue_addr = queuemem_phys;
+
+ if (is_task_mgmt) {
+ /* UTP Task Management Request (UTMR) */
+ uint32_t utmrlba, utmrlbau;
+
+ utmrlba = hwq->req_queue_addr & 0xffffffff;
+ utmrlbau = hwq->req_queue_addr >> 32;
+ ufshci_mmio_write_4(ctrlr, utmrlba, utmrlba);
+ ufshci_mmio_write_4(ctrlr, utmrlbau, utmrlbau);
+ } else {
+ /* UTP Transfer Request (UTR) */
+ uint32_t utrlba, utrlbau;
+
+ /*
+ * Allocate physical memory for the command descriptor.
+ * UTP Transfer Request (UTR) requires memory for a separate
+ * command in addition to the queue.
+ */
+ if (ufshci_req_sdb_cmd_desc_construct(req_queue, num_entries,
+ ctrlr) != 0) {
+ ufshci_printf(ctrlr,
+ "failed to construct cmd descriptor memory\n");
+ bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
+ hwq->queuemem_map);
+ goto out;
+ }
+
+ utrlba = hwq->req_queue_addr & 0xffffffff;
+ utrlbau = hwq->req_queue_addr >> 32;
+ ufshci_mmio_write_4(ctrlr, utrlba, utrlba);
+ ufshci_mmio_write_4(ctrlr, utrlbau, utrlbau);
+ }
+
+ return (0);
+out:
+ ufshci_req_sdb_destroy(ctrlr, req_queue);
+ return (ENOMEM);
+}
+
+void
+ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+
+ if (!req_queue->is_task_mgmt)
+ ufshci_req_sdb_cmd_desc_destroy(req_queue);
+
+ if (hwq->utrd != NULL) {
+ bus_dmamap_unload(hwq->dma_tag_queue, hwq->queuemem_map);
+ bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
+ hwq->queuemem_map);
+ hwq->utrd = NULL;
+ }
+
+ if (hwq->dma_tag_queue) {
+ bus_dma_tag_destroy(hwq->dma_tag_queue);
+ hwq->dma_tag_queue = NULL;
+ }
+
+ if (mtx_initialized(&hwq->qlock))
+ mtx_destroy(&hwq->qlock);
+
+ free(req_queue->hwq, M_UFSHCI);
+}
+
+struct ufshci_hw_queue *
+ufshci_req_sdb_get_hw_queue(struct ufshci_req_queue *req_queue)
+{
+ return (&req_queue->hwq[UFSHCI_SDB_Q]);
+}
+
+int
+ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue)
+{
+ if (req_queue->is_task_mgmt) {
+ uint32_t hcs, utmrldbr, utmrlrsr;
+
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTMRLRDY))) {
+ ufshci_printf(ctrlr,
+ "UTP task management request list is not ready\n");
+ return (ENXIO);
+ }
+
+ utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
+ if (utmrldbr != 0) {
+ ufshci_printf(ctrlr,
+ "UTP task management request list door bell is not ready\n");
+ return (ENXIO);
+ }
+
+ utmrlrsr = UFSHCIM(UFSHCI_UTMRLRSR_REG_UTMRLRSR);
+ ufshci_mmio_write_4(ctrlr, utmrlrsr, utmrlrsr);
+ } else {
+ uint32_t hcs, utrldbr, utrlcnr, utrlrsr;
+
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTRLRDY))) {
+ ufshci_printf(ctrlr,
+ "UTP transfer request list is not ready\n");
+ return (ENXIO);
+ }
+
+ utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
+ if (utrldbr != 0) {
+ ufshci_printf(ctrlr,
+ "UTP transfer request list door bell is not ready\n");
+ ufshci_printf(ctrlr,
+ "Clear the UTP transfer request list door bell\n");
+ ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);
+ }
+
+ utrlcnr = ufshci_mmio_read_4(ctrlr, utrlcnr);
+ if (utrlcnr != 0) {
+ ufshci_printf(ctrlr,
+ "UTP transfer request list notification is not ready\n");
+ ufshci_printf(ctrlr,
+ "Clear the UTP transfer request list notification\n");
+ ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr);
+ }
+
+ utrlrsr = UFSHCIM(UFSHCI_UTRLRSR_REG_UTRLRSR);
+ ufshci_mmio_write_4(ctrlr, utrlrsr, utrlrsr);
+ }
+
+ return (0);
+}
+
+int
+ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
+ struct ufshci_tracker **tr)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ uint8_t i;
+
+ for (i = 0; i < req_queue->num_entries; i++) {
+ if (hwq->act_tr[i]->slot_state == UFSHCI_SLOT_STATE_FREE) {
+ *tr = hwq->act_tr[i];
+ (*tr)->hwq = hwq;
+ return (0);
+ }
+ }
+ return (EBUSY);
+}
+
+void
+ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ uint32_t utrlcnr;
+
+ utrlcnr = 1 << tr->slot_num;
+ ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr);
+}
+
+void
+ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ uint32_t utrldbr = 0;
+
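+ /*
+ * UTRLDBR is write-1-to-set: writing the slot's bit hands the
+ * descriptor to the controller, while bits written as 0 are left
+ * untouched.
+ */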
+ utrldbr |= 1 << tr->slot_num;
+ ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);
+
+ tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
+}
+
+bool
+ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr;
+ uint8_t slot;
+ bool done = false;
+
+ hwq->num_intr_handler_calls++;
+
+ for (slot = 0; slot < req_queue->num_entries; slot++) {
+ bus_dmamap_sync(hwq->dma_tag_queue, hwq->queuemem_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ tr = hwq->act_tr[slot];
+
+ KASSERT(tr, ("there is no tracker assigned to the slot"));
+ /*
+ * When the response is delivered from the device, the doorbell
+ * is cleared.
+ */
+ if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED &&
+ ufshci_req_sdb_is_doorbell_cleared(req_queue->ctrlr,
+ slot)) {
+ ufshci_req_queue_complete_tracker(tr);
+ done = true;
+ }
+ }
+
+ return (done);
+}
+
+int
+ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr)
+{
+ /* TODO: Implement inflight io */
+
+ return (0);
+}
diff --git a/sys/dev/ufshci/ufshci_sim.c b/sys/dev/ufshci/ufshci_sim.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_sim.c
@@ -0,0 +1,372 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_debug.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/scsi/scsi_all.h>
+
+#include "ufshci_private.h"
+
+#define sim2ctrlr(sim) ((struct ufshci_controller *)cam_sim_softc(sim))
+
+static void
+ufshci_sim_scsiio_done(void *ccb_arg, const struct ufshci_completion *cpl,
+ bool error)
+{
+ const uint8_t *sense_data;
+ uint16_t sense_data_max_size;
+ uint16_t sense_data_len;
+
+ union ccb *ccb = (union ccb *)ccb_arg;
+
+ /*
+ * Let the periph know about the completion and let it sort out
+ * what it means. Report an error or success based on the OCS and
+ * the UPIU response code, and copy the sense data so that CAM
+ * can handle it.
+ */
+ sense_data = cpl->response_upiu.cmd_response_upiu.sense_data;
+ sense_data_max_size = sizeof(
+ cpl->response_upiu.cmd_response_upiu.sense_data);
+ sense_data_len = be16toh(
+ cpl->response_upiu.cmd_response_upiu.sense_data_len);
+ memcpy(&ccb->csio.sense_data, sense_data,
+ min(sense_data_len, sense_data_max_size));
+
+ ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+ if (error) {
+ ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+ xpt_done(ccb);
+ } else {
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done_direct(ccb);
+ }
+}
+
+/*
+ * Complete the command as an illegal command with invalid field
+ */
+static void
+ufshci_sim_illegal_request(union ccb *ccb)
+{
+ scsi_set_sense_data(&ccb->csio.sense_data,
+ /*sense_format*/ SSD_TYPE_NONE,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x24, /* 24h/00h INVALID FIELD IN CDB */
+ /*ascq*/ 0x00,
+ /*extra args*/ SSD_ELEM_NONE);
+ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID |
+ CAM_DEV_QFRZN;
+ xpt_freeze_devq(ccb->ccb_h.path, 1);
+ xpt_done(ccb);
+}
+
+static void
+ufshci_sim_scsiio(struct cam_sim *sim, union ccb *ccb)
+{
+ struct ccb_scsiio *csio = &ccb->csio;
+ struct ufshci_request *req;
+ void *payload;
+ struct ufshci_cmd_command_upiu *upiu;
+ uint8_t *cdb;
+ uint32_t payload_len;
+ bool is_write;
+ struct ufshci_controller *ctrlr;
+ uint8_t data_direction;
+ int error;
+
+ /* UFS device cannot process these commands */
+ if (csio->cdb_io.cdb_bytes[0] == MODE_SENSE_6 ||
+ csio->cdb_io.cdb_bytes[0] == MODE_SELECT_6 ||
+ csio->cdb_io.cdb_bytes[0] == READ_12 ||
+ csio->cdb_io.cdb_bytes[0] == WRITE_12) {
+ ufshci_sim_illegal_request(ccb);
+ return;
+ }
+
+ ctrlr = sim2ctrlr(sim);
+ payload = csio->data_ptr;
+
+ payload_len = csio->dxfer_len;
+ is_write = csio->ccb_h.flags & CAM_DIR_OUT;
+
+ /* TODO: Check other data type */
+ if ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
+ req = ufshci_allocate_request_bio((struct bio *)payload,
+ M_NOWAIT, ufshci_sim_scsiio_done, ccb);
+ else
+ req = ufshci_allocate_request_vaddr(payload, payload_len,
+ M_NOWAIT, ufshci_sim_scsiio_done, ccb);
+
+ /* M_NOWAIT allocation may fail; fail the CCB rather than panic. */
+ if (req == NULL) {
+ ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ xpt_done(ccb);
+ return;
+ }
+
+ req->request_size = sizeof(struct ufshci_cmd_command_upiu);
+ req->response_size = sizeof(struct ufshci_cmd_response_upiu);
+
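+ /*
+ * Map CAM's transfer direction onto the UPIU data direction; any
+ * direction other than an explicit read or write is treated as
+ * no data transfer.
+ */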
+ switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
+ case CAM_DIR_IN:
+ data_direction = UFSHCI_DATA_DIRECTION_FROM_TGT_TO_SYS;
+ break;
+ case CAM_DIR_OUT:
+ data_direction = UFSHCI_DATA_DIRECTION_FROM_SYS_TO_TGT;
+ break;
+ default:
+ data_direction = UFSHCI_DATA_DIRECTION_NO_DATA_TRANSFER;
+ }
+ req->data_direction = data_direction;
+
+ upiu = (struct ufshci_cmd_command_upiu *)&req->request_upiu;
+ memset(upiu, 0, req->request_size);
+ upiu->header.trans_type = UFSHCI_UPIU_TRANSACTION_CODE_COMMAND;
+ upiu->header.operational_flags = is_write ? UFSHCI_OPERATIONAL_FLAG_W :
+ UFSHCI_OPERATIONAL_FLAG_R;
+ upiu->header.lun = csio->ccb_h.target_lun;
+ upiu->header.cmd_set_type = UFSHCI_COMMAND_SET_TYPE_SCSI;
+
+ upiu->expected_data_transfer_length = htobe32(payload_len);
+
+ ccb->ccb_h.status |= CAM_SIM_QUEUED;
+
+ if (csio->ccb_h.flags & CAM_CDB_POINTER)
+ cdb = csio->cdb_io.cdb_ptr;
+ else
+ cdb = csio->cdb_io.cdb_bytes;
+
+ if (cdb == NULL || csio->cdb_len > sizeof(upiu->cdb)) {
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ return;
+ }
+ memcpy(upiu->cdb, cdb, csio->cdb_len);
+
+ error = ufshci_ctrl_submit_io_request(ctrlr, req);
+ if (error == EBUSY) {
+ ccb->ccb_h.status = CAM_SCSI_BUSY;
+ xpt_done(ccb);
+ return;
+ } else if (error) {
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ return;
+ }
+}
+
+static uint32_t
+ufshci_link_kBps(struct ufshci_controller *ctrlr)
+{
+ uint32_t gear = ctrlr->hs_gear;
+ uint32_t lanes = ctrlr->rx_lanes;
+
+ /*
+ * Per-lane effective bandwidth (KB/s, SI: 1 KB = 1000 B).
+ * All HS gears use 8b/10b line coding, i.e. 80% efficiency.
+ * KB/s per lane = raw rate (Mbps) * 0.8 (8b/10b) / 8 (bits
+ * per byte) * 1000 (KB per MB)
+ */
+ static const uint32_t kbps_per_lane[] = {
+ 0, /* unused */
+ 145920, /* HS-Gear1 : 1459.2 Mbps */
+ 291840, /* HS-Gear2 : 2918.4 Mbps */
+ 583680, /* HS-Gear3 : 5836.8 Mbps */
+ 1167360, /* HS-Gear4 : 11673.6 Mbps */
+ 2334720 /* HS-Gear5 : 23347.2 Mbps */
+ };
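+
+ /*
+ * Worked example: HS-G3 x2 lanes -> 583680 KB/s * 2
+ * = 1167360 KB/s (~1.17 GB/s effective).
+ */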
+
+ /* Sanity checks */
+ if (gear >= nitems(kbps_per_lane))
+ gear = 0; /* out-of-range -> treat as invalid */
+
+ if (lanes == 0 || lanes > 2)
+ lanes = 1; /* UFS spec allows 1–2 data lanes */
+
+ return (kbps_per_lane[gear] * lanes);
+}
+
+static void
+ufshci_cam_action(struct cam_sim *sim, union ccb *ccb)
+{
+ struct ufshci_controller *ctrlr = sim2ctrlr(sim);
+
+ if (ctrlr == NULL) {
+ ccb->ccb_h.status = CAM_SEL_TIMEOUT;
+ xpt_done(ccb);
+ return;
+ }
+
+ /* Perform the requested action */
+ switch (ccb->ccb_h.func_code) {
+ case XPT_SCSI_IO:
+ ufshci_sim_scsiio(sim, ccb);
+ return;
+ case XPT_PATH_INQ: {
+ struct ccb_pathinq *cpi = &ccb->cpi;
+
+ cpi->version_num = 1;
+ cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
+ cpi->target_sprt = 0;
+ cpi->hba_misc = PIM_UNMAPPED | PIM_NO_6_BYTE;
+ cpi->hba_eng_cnt = 0;
+ cpi->max_target = 0;
+ cpi->max_lun = ctrlr->max_lun_count;
+ cpi->async_flags = 0;
+ cpi->maxio = ctrlr->max_xfer_size;
+ cpi->initiator_id = 1;
+ strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+ strlcpy(cpi->hba_vid, "UFSHCI", HBA_IDLEN);
+ strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+ cpi->unit_number = cam_sim_unit(sim);
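+ /* CAM expects base_transfer_speed in KB/s. */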
+ cpi->base_transfer_speed = ufshci_link_kBps(ctrlr);
+ cpi->transport = XPORT_UFSHCI;
+ cpi->transport_version = 1;
+ cpi->protocol = PROTO_SCSI;
+ cpi->protocol_version = SCSI_REV_SPC5;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ }
+ case XPT_RESET_BUS:
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case XPT_RESET_DEV:
+ if (ufshci_dev_reset(ctrlr))
+ ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+ else
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case XPT_ABORT:
+ /* TODO: Implement Task Management CMD */
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ break;
+ case XPT_SET_TRAN_SETTINGS:
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ break;
+ case XPT_GET_TRAN_SETTINGS: {
+ struct ccb_trans_settings *cts;
+ struct ccb_trans_settings_ufshci *ufshcix;
+
+ cts = &ccb->cts;
+ ufshcix = &cts->xport_specific.ufshci;
+
+ ufshcix->hs_gear = ctrlr->hs_gear;
+ ufshcix->tx_lanes = ctrlr->tx_lanes;
+ ufshcix->rx_lanes = ctrlr->rx_lanes;
+ ufshcix->max_hs_gear = ctrlr->max_rx_hs_gear;
+ ufshcix->max_tx_lanes = ctrlr->max_tx_lanes;
+ ufshcix->max_rx_lanes = ctrlr->max_rx_lanes;
+ ufshcix->valid = CTS_UFSHCI_VALID_LINK;
+
+ cts->transport = XPORT_UFSHCI;
+ cts->transport_version = 1;
+ cts->protocol = PROTO_SCSI;
+ cts->protocol_version = SCSI_REV_SPC5;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ }
+ case XPT_CALC_GEOMETRY:
+ cam_calc_geometry(&ccb->ccg, 1);
+ break;
+ case XPT_NOOP:
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ default:
+ printf("invalid ccb=%p func=%#x\n", ccb, ccb->ccb_h.func_code);
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ break;
+ }
+ xpt_done(ccb);
+
+ return;
+}
+
+static void
+ufshci_cam_poll(struct cam_sim *sim)
+{
+ struct ufshci_controller *ctrlr = sim2ctrlr(sim);
+
+ ufshci_ctrlr_poll(ctrlr);
+}
+
+int
+ufshci_sim_attach(struct ufshci_controller *ctrlr)
+{
+ device_t dev;
+ struct cam_devq *devq;
+ int max_trans;
+
+ dev = ctrlr->dev;
+ max_trans = ctrlr->max_hw_pend_io;
+ if ((devq = cam_simq_alloc(max_trans)) == NULL) {
+ printf("Failed to allocate a simq\n");
+ return (ENOMEM);
+ }
+
+ ctrlr->ufshci_sim = cam_sim_alloc(ufshci_cam_action, ufshci_cam_poll,
+ "ufshci", ctrlr, device_get_unit(dev), &ctrlr->sc_mtx, max_trans,
+ max_trans, devq);
+ if (ctrlr->ufshci_sim == NULL) {
+ printf("Failed to allocate a sim\n");
+ cam_simq_free(devq);
+ return (ENOMEM);
+ }
+
+ mtx_lock(&ctrlr->sc_mtx);
+ if (xpt_bus_register(ctrlr->ufshci_sim, ctrlr->dev, 0) != CAM_SUCCESS) {
+ cam_sim_free(ctrlr->ufshci_sim, /*free_devq*/ TRUE);
+ cam_simq_free(devq);
+ mtx_unlock(&ctrlr->sc_mtx);
+ printf("Failed to create a bus\n");
+ return (ENOMEM);
+ }
+
+ if (xpt_create_path(&ctrlr->ufshci_path, /*periph*/ NULL,
+ cam_sim_path(ctrlr->ufshci_sim), CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_bus_deregister(cam_sim_path(ctrlr->ufshci_sim));
+ cam_sim_free(ctrlr->ufshci_sim, /*free_devq*/ TRUE);
+ cam_simq_free(devq);
+ mtx_unlock(&ctrlr->sc_mtx);
+ printf("Failed to create a path\n");
+ return (ENOMEM);
+ }
+ mtx_unlock(&ctrlr->sc_mtx);
+
+ return (0);
+}
+
+void
+ufshci_sim_detach(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ if (ctrlr->ufshci_path != NULL) {
+ xpt_free_path(ctrlr->ufshci_path);
+ ctrlr->ufshci_path = NULL;
+ }
+
+ if (ctrlr->ufshci_sim != NULL) {
+ error = xpt_bus_deregister(cam_sim_path(ctrlr->ufshci_sim));
+ if (error == 0) {
+ /* accessing the softc is not possible after this */
+ ctrlr->ufshci_sim->softc = NULL;
+ ufshci_printf(ctrlr,
+ "%s: %s:%d:%d caling "
+ "cam_sim_free sim %p refc %u mtx %p\n",
+ __func__, ctrlr->sc_name,
+ cam_sim_path(ctrlr->ufshci_sim), ctrlr->sc_unit,
+ ctrlr->ufshci_sim, ctrlr->ufshci_sim->refcount,
+ ctrlr->ufshci_sim->mtx);
+ } else {
+ panic("%s: %s: CAM layer is busy: errno %d\n", __func__,
+ ctrlr->sc_name, error);
+ }
+
+ cam_sim_free(ctrlr->ufshci_sim, /* free_devq */ TRUE);
+ ctrlr->ufshci_sim = NULL;
+ }
+}
diff --git a/sys/dev/ufshci/ufshci_sysctl.c b/sys/dev/ufshci/ufshci_sysctl.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_sysctl.c
@@ -0,0 +1,233 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/sysctl.h>
+
+#include "ufshci_private.h"
+
+static int
+ufshci_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
+{
+ uint32_t *ptr = arg1;
+ uint32_t newval = *ptr;
+ int error = sysctl_handle_int(oidp, &newval, 0, req);
+
+ if (error || (req->newptr == NULL))
+ return (error);
+
+ if (newval > UFSHCI_MAX_TIMEOUT_PERIOD ||
+ newval < UFSHCI_MIN_TIMEOUT_PERIOD) {
+ return (EINVAL);
+ } else {
+ *ptr = newval;
+ }
+
+ return (0);
+}
+
+static int
+ufshci_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
+{
+ struct ufshci_controller *ctrlr = arg1;
+ int64_t num_cmds = 0;
+ int i;
+
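+ /* Sum over the task-management queue and every I/O queue. */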
+ num_cmds = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_cmds;
+
+ if (ctrlr->transfer_req_queue.hwq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_cmds += ctrlr->transfer_req_queue.hwq[i].num_cmds;
+ }
+
+ return (sysctl_handle_64(oidp, &num_cmds, 0, req));
+}
+
+static int
+ufshci_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
+{
+ struct ufshci_controller *ctrlr = arg1;
+ int64_t num_intr_handler_calls = 0;
+ int i;
+
+ num_intr_handler_calls =
+ ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_intr_handler_calls;
+
+ if (ctrlr->transfer_req_queue.hwq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_intr_handler_calls += ctrlr->transfer_req_queue
+ .hwq[i]
+ .num_intr_handler_calls;
+ }
+
+ return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
+}
+
+static int
+ufshci_sysctl_num_retries(SYSCTL_HANDLER_ARGS)
+{
+ struct ufshci_controller *ctrlr = arg1;
+ int64_t num_retries = 0;
+ int i;
+
+ num_retries = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_retries;
+
+ if (ctrlr->transfer_req_queue.hwq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_retries +=
+ ctrlr->transfer_req_queue.hwq[i].num_retries;
+ }
+
+ return (sysctl_handle_64(oidp, &num_retries, 0, req));
+}
+
+static int
+ufshci_sysctl_num_failures(SYSCTL_HANDLER_ARGS)
+{
+ struct ufshci_controller *ctrlr = arg1;
+ int64_t num_failures = 0;
+ int i;
+
+ num_failures =
+ ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_failures;
+
+ if (ctrlr->transfer_req_queue.hwq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_failures +=
+ ctrlr->transfer_req_queue.hwq[i].num_failures;
+ }
+
+ return (sysctl_handle_64(oidp, &num_failures, 0, req));
+}
+
+static void
+ufshci_sysctl_initialize_queue(struct ufshci_hw_queue *hwq,
+ struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree)
+{
+ struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree);
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries",
+ CTLFLAG_RD, &hwq->num_entries, 0,
+ "Number of entries in hardware queue");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers",
+ CTLFLAG_RD, &hwq->num_trackers, 0,
+ "Number of trackers pre-allocated for this queue pair");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head", CTLFLAG_RD,
+ &hwq->sq_head, 0,
+ "Current head of submission queue (as observed by driver)");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail", CTLFLAG_RD,
+ &hwq->sq_tail, 0,
+ "Current tail of submission queue (as observed by driver)");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head", CTLFLAG_RD,
+ &hwq->cq_head, 0,
+ "Current head of completion queue (as observed by driver)");
+
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds", CTLFLAG_RD,
+ &hwq->num_cmds, "Number of commands submitted");
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls",
+ CTLFLAG_RD, &hwq->num_intr_handler_calls,
+ "Number of times interrupt handler was invoked (will typically be "
+ "less than number of actual interrupts generated due to "
+ "interrupt aggregation)");
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_retries",
+ CTLFLAG_RD, &hwq->num_retries, "Number of commands retried");
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_failures",
+ CTLFLAG_RD, &hwq->num_failures,
+ "Number of commands ending in failure after all retries");
+
+ /* TODO: Implement num_ignored */
+ /* TODO: Implement recovery state */
+ /* TODO: Implement dump debug */
+}
+
+void
+ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr)
+{
+ struct sysctl_ctx_list *ctrlr_ctx;
+ struct sysctl_oid *ctrlr_tree, *que_tree, *ioq_tree;
+ struct sysctl_oid_list *ctrlr_list, *ioq_list;
+#define QUEUE_NAME_LENGTH 16
+ char queue_name[QUEUE_NAME_LENGTH];
+ int i;
+
+ ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev);
+ ctrlr_tree = device_get_sysctl_tree(ctrlr->dev);
+ ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree);
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "major_version",
+ CTLFLAG_RD, &ctrlr->major_version, 0, "UFS spec major version");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "minor_version",
+ CTLFLAG_RD, &ctrlr->minor_version, 0, "UFS spec minor version");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "io_queue_mode",
+ CTLFLAG_RD, &ctrlr->transfer_req_queue.queue_mode, 0,
+ "Active host-side queuing scheme "
+ "(Single-Doorbell or Multi-Circular-Queue)");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_io_queues",
+ CTLFLAG_RD, &ctrlr->num_io_queues, 0, "Number of I/O queue pairs");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap", CTLFLAG_RD,
+ &ctrlr->cap, 0, "Number of I/O queue pairs");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "timeout_period",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ctrlr->timeout_period,
+ 0, ufshci_sysctl_timeout_period, "IU",
+ "Timeout period for I/O queues (in seconds)");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_cmds",
+ CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0,
+ ufshci_sysctl_num_cmds, "IU", "Number of commands submitted");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "num_intr_handler_calls", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ ctrlr, 0, ufshci_sysctl_num_intr_handler_calls, "IU",
+ "Number of times interrupt handler was invoked (will "
+ "typically be less than number of actual interrupts "
+ "generated due to coalescing)");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_retries",
+ CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0,
+ ufshci_sysctl_num_retries, "IU", "Number of commands retried");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_failures",
+ CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0,
+ ufshci_sysctl_num_failures, "IU",
+ "Number of commands ending in failure after all retries");
+
+ que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "utmrq",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
+ "UTP Task Management Request Queue");
+
+ ufshci_sysctl_initialize_queue(
+ &ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q], ctrlr_ctx, que_tree);
+
+ /*
+ * Make sure that we've constructed the I/O queues before setting up the
+ * sysctls. Failed controllers won't allocate it, but we want the rest
+ * of the sysctls to diagnose things.
+ */
+ if (ctrlr->transfer_req_queue.hwq != NULL) {
+ ioq_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "ioq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
+ "UTP Transfer Request Queue (I/O Queue)");
+ ioq_list = SYSCTL_CHILDREN(ioq_tree);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++) {
+ snprintf(queue_name, QUEUE_NAME_LENGTH, "%d", i);
+ que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ioq_list,
+ OID_AUTO, queue_name, CTLFLAG_RD | CTLFLAG_MPSAFE,
+ NULL, "IO Queue");
+ ufshci_sysctl_initialize_queue(
+ &ctrlr->transfer_req_queue.hwq[i], ctrlr_ctx,
+ que_tree);
+ }
+ }
+}
diff --git a/sys/dev/ufshci/ufshci_uic_cmd.c b/sys/dev/ufshci/ufshci_uic_cmd.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_uic_cmd.c
@@ -0,0 +1,218 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+
+#include "ufshci_private.h"
+#include "ufshci_reg.h"
+
+int
+ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr)
+{
+ uint32_t is;
+ int timeout;
+ int delta = 10;
+
+ /* Wait for the IS flag to change */
+ timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
+
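+ /* Poll with exponential backoff, from 10 us up to a 1 ms cap. */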
+ while (1) {
+ is = ufshci_mmio_read_4(ctrlr, is);
+ if (UFSHCIV(UFSHCI_IS_REG_UPMS, is))
+ break;
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "Power mode is not changed "
+ "within %d ms\n",
+ ctrlr->device_init_timeout_in_ms);
+ return (ENXIO);
+ }
+
+ DELAY(delta);
+ delta = min(1000, delta * 2);
+ }
+
+ return (0);
+}
+
+int
+ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr)
+{
+ uint32_t hcs;
+ int timeout;
+ int delta = 10;
+
+ /* Wait for the HCS flag to change */
+ timeout = ticks + MSEC_2_TICKS(ctrlr->uic_cmd_timeout_in_ms);
+
+ while (1) {
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (UFSHCIV(UFSHCI_HCS_REG_UCRDY, hcs))
+ break;
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "UIC command is not ready "
+ "within %d ms\n",
+ ctrlr->uic_cmd_timeout_in_ms);
+ return (ENXIO);
+ }
+
+ DELAY(delta);
+ delta = min(1000, delta * 2);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_uic_wait_cmd(struct ufshci_controller *ctrlr,
+ struct ufshci_uic_cmd *uic_cmd)
+{
+ uint32_t is;
+ int timeout;
+ int delta = 10;
+
+ mtx_assert(&ctrlr->uic_cmd_lock, MA_OWNED);
+
+ /* Wait for the IS flag to change */
+ timeout = ticks + MSEC_2_TICKS(ctrlr->uic_cmd_timeout_in_ms);
+
+ while (1) {
+ is = ufshci_mmio_read_4(ctrlr, is);
+ if (UFSHCIV(UFSHCI_IS_REG_UCCS, is))
+ break;
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "UIC command is not completed "
+ "within %d ms\n",
+ ctrlr->uic_cmd_timeout_in_ms);
+ return (ENXIO);
+ }
+
+ DELAY(delta);
+ delta = min(1000, delta * 2);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_uic_send_cmd(struct ufshci_controller *ctrlr,
+ struct ufshci_uic_cmd *uic_cmd, uint32_t *return_value)
+{
+ int error;
+
+ mtx_lock(&ctrlr->uic_cmd_lock);
+
+ error = ufshci_uic_cmd_ready(ctrlr);
+ if (error) {
+ mtx_unlock(&ctrlr->uic_cmd_lock);
+ return (ENXIO);
+ }
+
+ ufshci_mmio_write_4(ctrlr, ucmdarg1, uic_cmd->argument1);
+ ufshci_mmio_write_4(ctrlr, ucmdarg2, uic_cmd->argument2);
+ ufshci_mmio_write_4(ctrlr, ucmdarg3, uic_cmd->argument3);
+
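+ /*
+ * The argument registers must be programmed before the opcode;
+ * the write to UICCMD is what actually issues the command.
+ */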
+ ufshci_mmio_write_4(ctrlr, uiccmd, uic_cmd->opcode);
+
+ error = ufshci_uic_wait_cmd(ctrlr, uic_cmd);
+
+ mtx_unlock(&ctrlr->uic_cmd_lock);
+
+ if (error)
+ return (ENXIO);
+
+ if (return_value != NULL)
+ *return_value = ufshci_mmio_read_4(ctrlr, ucmdarg3);
+
+ return (0);
+}
+
+int
+ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_uic_cmd uic_cmd;
+
+ uic_cmd.opcode = UFSHCI_DME_LINK_STARTUP;
+ uic_cmd.argument1 = 0;
+ uic_cmd.argument2 = 0;
+ uic_cmd.argument3 = 0;
+
+ return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL));
+}
+
+int
+ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute,
+ uint32_t *return_value)
+{
+ struct ufshci_uic_cmd uic_cmd;
+
+ uic_cmd.opcode = UFSHCI_DME_GET;
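+ /* The MIB attribute ID goes in ARG1[31:16]; GenSelectorIndex is 0. */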
+ uic_cmd.argument1 = attribute << 16;
+ uic_cmd.argument2 = 0;
+ uic_cmd.argument3 = 0;
+
+ return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, return_value));
+}
+
+int
+ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute,
+ uint32_t value)
+{
+ struct ufshci_uic_cmd uic_cmd;
+
+ uic_cmd.opcode = UFSHCI_DME_SET;
+ uic_cmd.argument1 = attribute << 16;
+ /* This driver always sets only volatile values. */
+ uic_cmd.argument2 = UFSHCI_ATTR_SET_TYPE_NORMAL << 16;
+ uic_cmd.argument3 = value;
+
+ return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL));
+}
+
+int
+ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr,
+ uint16_t attribute, uint32_t *return_value)
+{
+ struct ufshci_uic_cmd uic_cmd;
+
+ uic_cmd.opcode = UFSHCI_DME_PEER_GET;
+ uic_cmd.argument1 = attribute << 16;
+ uic_cmd.argument2 = 0;
+ uic_cmd.argument3 = 0;
+
+ return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, return_value));
+}
+
+int
+ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr,
+ uint16_t attribute, uint32_t value)
+{
+ struct ufshci_uic_cmd uic_cmd;
+
+ uic_cmd.opcode = UFSHCI_DME_PEER_SET;
+ uic_cmd.argument1 = attribute << 16;
+ /* This driver always sets only volatile values. */
+ uic_cmd.argument2 = UFSHCI_ATTR_SET_TYPE_NORMAL << 16;
+ uic_cmd.argument3 = value;
+
+ return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL));
+}
+
+int
+ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_uic_cmd uic_cmd;
+
+ uic_cmd.opcode = UFSHCI_DME_ENDPOINT_RESET;
+ uic_cmd.argument1 = 0;
+ uic_cmd.argument2 = 0;
+ uic_cmd.argument3 = 0;
+
+ return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL));
+}
diff --git a/sys/modules/ufshci/Makefile b/sys/modules/ufshci/Makefile
new file mode 100644
--- /dev/null
+++ b/sys/modules/ufshci/Makefile
@@ -0,0 +1,22 @@
+.PATH: ${SRCTOP}/sys/dev/ufshci
+
+KMOD = ufshci
+
+SRCS = ufshci.c \
+ ufshci_pci.c \
+ ufshci_ctrlr.c \
+ ufshci_dev.c \
+ ufshci_ctrlr_cmd.c \
+ ufshci_uic_cmd.c \
+ ufshci_req_queue.c \
+ ufshci_req_sdb.c \
+ ufshci_sim.c \
+ ufshci_sysctl.c \
+ bus_if.h \
+ device_if.h \
+ opt_cam.h \
+ pci_if.h
+
+EXPORT_SYMS= YES
+
+.include <bsd.kmod.mk>
